repo_name
string
path
string
copies
string
size
string
content
string
license
string
NathanAtSamraksh/dart-linux
drivers/hwmon/w83l786ng.c
1966
21361
/*
 * w83l786ng.c - Linux kernel driver for hardware monitoring
 * Copyright (c) 2007 Kevin Lo <kevlo@kevlo.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation - version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA.
 */

/*
 * Supports following chips:
 *
 * Chip		#vin	#fanin	#pwm	#temp	wchipid	vendid	i2c	ISA
 * w83l786ng	3	2	2	2	0x7b	0x5ca3	yes	no
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-vid.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END };

/* Insmod parameters */
static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");

/* Voltage input registers: one measured value plus min/max limit per channel */
#define W83L786NG_REG_IN_MIN(nr)	(0x2C + (nr) * 2)
#define W83L786NG_REG_IN_MAX(nr)	(0x2B + (nr) * 2)
#define W83L786NG_REG_IN(nr)		((nr) + 0x20)

/* Fan tachometer count and low-limit registers */
#define W83L786NG_REG_FAN(nr)		((nr) + 0x28)
#define W83L786NG_REG_FAN_MIN(nr)	((nr) + 0x3B)

#define W83L786NG_REG_CONFIG		0x40
#define W83L786NG_REG_ALARM1		0x41
#define W83L786NG_REG_ALARM2		0x42
#define W83L786NG_REG_GPIO_EN		0x47
#define W83L786NG_REG_MAN_ID2		0x4C
#define W83L786NG_REG_MAN_ID1		0x4D
#define W83L786NG_REG_CHIP_ID		0x4E
#define W83L786NG_REG_DIODE		0x53
#define W83L786NG_REG_FAN_DIV		0x54
#define W83L786NG_REG_FAN_CFG		0x80
#define W83L786NG_REG_TOLERANCE		0x8D

static const u8 W83L786NG_REG_TEMP[2][3] = {
	{ 0x25,		/* TEMP 0 in DataSheet */
	  0x35,		/* TEMP 0 Over in DataSheet */
	  0x36 },	/* TEMP 0 Hyst in DataSheet */
	{ 0x26,		/* TEMP 1 in DataSheet */
	  0x37,		/* TEMP 1 Over in DataSheet */
	  0x38 }	/* TEMP 1 Hyst in DataSheet */
};

/* Bit positions of the per-channel mode/enable fields in REG_FAN_CFG */
static const u8 W83L786NG_PWM_MODE_SHIFT[] = {6, 7};
static const u8 W83L786NG_PWM_ENABLE_SHIFT[] = {2, 4};

/* FAN Duty Cycle, be used to control */
static const u8 W83L786NG_REG_PWM[] = {0x81, 0x87};

/*
 * Convert a fan speed in RPM to the 8-bit tachometer count register value
 * for the given clock divisor.  0 RPM maps to 255 ("fan stopped").
 */
static inline u8 FAN_TO_REG(long rpm, int div)
{
	if (rpm == 0)
		return 255;
	rpm = clamp_val(rpm, 1, 1000000);
	return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}

/* Register value back to RPM; 0 means "count overflow", 255 means stopped */
#define FAN_FROM_REG(val, div)	((val) == 0 ? -1 : \
				((val) == 255 ? 0 : \
				1350000 / ((val) * (div))))

/* for temp */
#define TEMP_TO_REG(val)	(clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
					: (val)) / 1000, 0, 0xff))
#define TEMP_FROM_REG(val)	(((val) & 0x80 ? \
				(val) - 0x100 : (val)) * 1000)

/*
 * The analog voltage inputs have 8mV LSB. Since the sysfs output is
 * in mV as would be measured on the chip input pin, need to just
 * multiply/divide by 8 to translate from/to register values.
 */
#define IN_TO_REG(val)		(clamp_val((((val) + 4) / 8), 0, 255))
#define IN_FROM_REG(val)	((val) * 8)

#define DIV_FROM_REG(val)	(1 << (val))

/* Map a fan clock divisor (1..128) to its 3-bit register encoding (log2) */
static inline u8 DIV_TO_REG(long val)
{
	int i;
	val = clamp_val(val, 1, 128) >> 1;
	for (i = 0; i < 7; i++) {
		if (val == 0)
			break;
		val >>= 1;
	}
	return (u8)i;
}

/* Per-device state: cached register values, refreshed lazily under lock */
struct w83l786ng_data {
	struct i2c_client *client;
	struct mutex update_lock;
	char valid;			/* !=0 if following fields are valid */
	unsigned long last_updated;	/* In jiffies */
	unsigned long last_nonvolatile;	/* In jiffies, last time we update the
					 * nonvolatile registers */

	u8 in[3];
	u8 in_max[3];
	u8 in_min[3];
	u8 fan[2];
	u8 fan_div[2];
	u8 fan_min[2];
	u8 temp_type[2];
	u8 temp[2][3];
	u8 pwm[2];
	u8 pwm_mode[2];		/* 0->DC variable voltage
				 * 1->PWM variable duty cycle */
	u8 pwm_enable[2];	/* 1->manual
				 * 2->thermal cruise (also called SmartFan I) */
	u8 tolerance[2];
};

/* Read one chip register over SMBus. */
static u8
w83l786ng_read_value(struct i2c_client *client, u8 reg)
{
	return i2c_smbus_read_byte_data(client, reg);
}

/* Write one chip register over SMBus. */
static int
w83l786ng_write_value(struct i2c_client *client, u8 reg, u8 value)
{
	return i2c_smbus_write_byte_data(client, reg, value);
}

/*
 * Refresh the cached register values if they are stale (older than 1.5 s)
 * or not yet valid.  Always returns the per-device data; serialized by
 * update_lock.
 */
static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
{
	struct w83l786ng_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	int i, j;
	u8 reg_tmp, pwmcfg;

	mutex_lock(&data->update_lock);
	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
	    || !data->valid) {
		dev_dbg(&client->dev, "Updating w83l786ng data.\n");

		/* Update the voltages measured value and limits */
		for (i = 0; i < 3; i++) {
			data->in[i] = w83l786ng_read_value(client,
			    W83L786NG_REG_IN(i));
			data->in_min[i] = w83l786ng_read_value(client,
			    W83L786NG_REG_IN_MIN(i));
			data->in_max[i] = w83l786ng_read_value(client,
			    W83L786NG_REG_IN_MAX(i));
		}

		/* Update the fan counts and limits */
		for (i = 0; i < 2; i++) {
			data->fan[i] = w83l786ng_read_value(client,
			    W83L786NG_REG_FAN(i));
			data->fan_min[i] = w83l786ng_read_value(client,
			    W83L786NG_REG_FAN_MIN(i));
		}

		/* Update the fan divisor */
		reg_tmp = w83l786ng_read_value(client, W83L786NG_REG_FAN_DIV);
		data->fan_div[0] = reg_tmp & 0x07;
		data->fan_div[1] = (reg_tmp >> 4) & 0x07;

		pwmcfg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
		for (i = 0; i < 2; i++) {
			/* mode bit set in hardware means DC (sysfs value 0) */
			data->pwm_mode[i] =
			    ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1)
			    ? 0 : 1;
			data->pwm_enable[i] =
			    ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1;
			/* chip stores 4-bit duty; scale to 0..255 via *0x11 */
			data->pwm[i] =
			    (w83l786ng_read_value(client, W83L786NG_REG_PWM[i])
			     & 0x0f) * 0x11;
		}

		/* Update the temperature sensors */
		for (i = 0; i < 2; i++) {
			for (j = 0; j < 3; j++) {
				data->temp[i][j] = w83l786ng_read_value(client,
				    W83L786NG_REG_TEMP[i][j]);
			}
		}

		/* Update Smart Fan I/II tolerance */
		reg_tmp = w83l786ng_read_value(client, W83L786NG_REG_TOLERANCE);
		data->tolerance[0] = reg_tmp & 0x0f;
		data->tolerance[1] = (reg_tmp >> 4) & 0x0f;

		data->last_updated = jiffies;
		data->valid = 1;

	}

	mutex_unlock(&data->update_lock);

	return data;
}

/* following are the sysfs callback functions */
#define show_in_reg(reg) \
static ssize_t \
show_##reg(struct device *dev, struct device_attribute *attr, \
	   char *buf) \
{ \
	int nr = to_sensor_dev_attr(attr)->index; \
	struct w83l786ng_data *data = w83l786ng_update_device(dev); \
	return sprintf(buf, "%d\n", IN_FROM_REG(data->reg[nr])); \
}

show_in_reg(in)
show_in_reg(in_min)
show_in_reg(in_max)

#define store_in_reg(REG, reg) \
static ssize_t \
store_in_##reg(struct device *dev, struct device_attribute *attr, \
	       const char *buf, size_t count) \
{ \
	int nr = to_sensor_dev_attr(attr)->index; \
	struct w83l786ng_data *data = dev_get_drvdata(dev); \
	struct i2c_client *client = data->client; \
	unsigned long val; \
	int err = kstrtoul(buf, 10, &val); \
	if (err) \
		return err; \
	mutex_lock(&data->update_lock); \
	data->in_##reg[nr] = IN_TO_REG(val); \
	w83l786ng_write_value(client, W83L786NG_REG_IN_##REG(nr), \
			      data->in_##reg[nr]); \
	mutex_unlock(&data->update_lock); \
	return count; \
}

store_in_reg(MIN, min)
store_in_reg(MAX, max)

static struct sensor_device_attribute sda_in_input[] = {
	SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
	SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
	SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
};

static struct sensor_device_attribute sda_in_min[] = {
	SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
	SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
	SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
};

static struct sensor_device_attribute sda_in_max[] = {
	SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
	SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
	SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
};

#define show_fan_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
			  char *buf) \
{ \
	int nr = to_sensor_dev_attr(attr)->index; \
	struct w83l786ng_data *data = w83l786ng_update_device(dev); \
	return sprintf(buf, "%d\n", \
		FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
}

show_fan_reg(fan);
show_fan_reg(fan_min);

/* Write a new fan low limit (RPM), encoded with the current divisor. */
static ssize_t
store_fan_min(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct w83l786ng_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
	w83l786ng_write_value(client, W83L786NG_REG_FAN_MIN(nr),
			      data->fan_min[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

static ssize_t
show_fan_div(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct w83l786ng_data *data = w83l786ng_update_device(dev);
	return sprintf(buf, "%u\n", DIV_FROM_REG(data->fan_div[nr]));
}

/*
 * Note: we save and restore the fan minimum here, because its value is
 * determined in part by the fan divisor. This follows the principle of
 * least surprise; the user doesn't expect the fan minimum to change just
 * because the divisor changed.
 */
static ssize_t
store_fan_div(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct w83l786ng_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;

	unsigned long min;
	u8 tmp_fan_div;
	u8 fan_div_reg;
	u8 keep_mask = 0;	/* bits of REG_FAN_DIV to preserve */
	u8 new_shift = 0;	/* position of this channel's 3-bit field */
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	/* Save fan_min */
	mutex_lock(&data->update_lock);
	min = FAN_FROM_REG(data->fan_min[nr],
			   DIV_FROM_REG(data->fan_div[nr]));

	data->fan_div[nr] = DIV_TO_REG(val);

	switch (nr) {
	case 0:
		keep_mask = 0xf8;
		new_shift = 0;
		break;
	case 1:
		keep_mask = 0x8f;
		new_shift = 4;
		break;
	}

	fan_div_reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_DIV)
	    & keep_mask;
	tmp_fan_div = (data->fan_div[nr] << new_shift) & ~keep_mask;

	w83l786ng_write_value(client, W83L786NG_REG_FAN_DIV,
			      fan_div_reg | tmp_fan_div);

	/* Restore fan_min */
	data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
	w83l786ng_write_value(client, W83L786NG_REG_FAN_MIN(nr),
			      data->fan_min[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

static struct sensor_device_attribute sda_fan_input[] = {
	SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
	SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
};

static struct sensor_device_attribute sda_fan_min[] = {
	SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
		    store_fan_min, 0),
	SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
		    store_fan_min, 1),
};

static struct sensor_device_attribute sda_fan_div[] = {
	SENSOR_ATTR(fan1_div, S_IWUSR | S_IRUGO, show_fan_div,
		    store_fan_div, 0),
	SENSOR_ATTR(fan2_div, S_IWUSR | S_IRUGO, show_fan_div,
		    store_fan_div, 1),
};

/* read/write the temperature, includes measured value and limits */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct sensor_device_attribute_2 *sensor_attr =
	    to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr;
	int index = sensor_attr->index;
	struct w83l786ng_data *data = w83l786ng_update_device(dev);
	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr][index]));
}

static ssize_t store_temp(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sensor_attr =
	    to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr;
	int index = sensor_attr->index;
	struct w83l786ng_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp[nr][index] = TEMP_TO_REG(val);
	w83l786ng_write_value(client, W83L786NG_REG_TEMP[nr][index],
			      data->temp[nr][index]);
	mutex_unlock(&data->update_lock);

	return count;
}

static struct sensor_device_attribute_2 sda_temp_input[] = {
	SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
	SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 1, 0),
};

static struct sensor_device_attribute_2 sda_temp_max[] = {
	SENSOR_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp,
		      store_temp, 0, 1),
	SENSOR_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp,
		      store_temp, 1, 1),
};

static struct sensor_device_attribute_2 sda_temp_max_hyst[] = {
	SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO | S_IWUSR,
		      show_temp, store_temp, 0, 2),
	SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR,
		      show_temp, store_temp, 1, 2),
};

#define show_pwm_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
			  char *buf) \
{ \
	struct w83l786ng_data *data = w83l786ng_update_device(dev); \
	int nr = to_sensor_dev_attr(attr)->index; \
	return sprintf(buf, "%d\n", data->reg[nr]); \
}

show_pwm_reg(pwm_mode)
show_pwm_reg(pwm_enable)
show_pwm_reg(pwm)

/* Select DC (0) or PWM (1) output; hardware bit set means DC. */
static ssize_t
store_pwm_mode(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct w83l786ng_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	u8 reg;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	if (val > 1)
		return -EINVAL;

	mutex_lock(&data->update_lock);
	data->pwm_mode[nr] = val;
	reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
	reg &= ~(1 << W83L786NG_PWM_MODE_SHIFT[nr]);
	if (!val)
		reg |= 1 << W83L786NG_PWM_MODE_SHIFT[nr];
	w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Set the duty cycle; quantized to the chip's 4-bit resolution. */
static ssize_t
store_pwm(struct device *dev, struct device_attribute *attr,
	  const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct w83l786ng_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;
	val = clamp_val(val, 0, 255);
	val = DIV_ROUND_CLOSEST(val, 0x11);

	mutex_lock(&data->update_lock);
	data->pwm[nr] = val * 0x11;
	val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0;
	w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Select fan control mode: 1 = manual, 2 = thermal cruise. */
static ssize_t
store_pwm_enable(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct w83l786ng_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	u8 reg;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	if (!val || val > 2)	/* only modes 1 and 2 are supported */
		return -EINVAL;

	mutex_lock(&data->update_lock);
	reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
	data->pwm_enable[nr] = val;
	reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
	reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr];
	w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
	mutex_unlock(&data->update_lock);
	return count;
}

static struct sensor_device_attribute sda_pwm[] = {
	SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0),
	SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1),
};

static struct sensor_device_attribute sda_pwm_mode[] = {
	SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
		    store_pwm_mode, 0),
	SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
		    store_pwm_mode, 1),
};

static struct sensor_device_attribute sda_pwm_enable[] = {
	SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
		    store_pwm_enable, 0),
	SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
		    store_pwm_enable, 1),
};

/* For Smart Fan I/Thermal Cruise and Smart Fan II */
static ssize_t
show_tolerance(struct device *dev, struct device_attribute *attr,
	       char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct w83l786ng_data *data = w83l786ng_update_device(dev);
	return sprintf(buf, "%ld\n", (long)data->tolerance[nr]);
}

/* Write the 4-bit Smart Fan tolerance; channel 1 lives in the high nibble. */
static ssize_t
store_tolerance(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct w83l786ng_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	u8 tol_tmp, tol_mask;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	tol_mask = w83l786ng_read_value(client,
	    W83L786NG_REG_TOLERANCE) & ((nr == 1) ? 0x0f : 0xf0);
	tol_tmp = clamp_val(val, 0, 15);
	tol_tmp &= 0x0f;

	data->tolerance[nr] = tol_tmp;
	if (nr == 1)
		tol_tmp <<= 4;

	w83l786ng_write_value(client, W83L786NG_REG_TOLERANCE,
			      tol_mask | tol_tmp);
	mutex_unlock(&data->update_lock);
	return count;
}

static struct sensor_device_attribute sda_tolerance[] = {
	SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO,
		    show_tolerance, store_tolerance, 0),
	SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO,
		    show_tolerance, store_tolerance, 1),
};

#define IN_UNIT_ATTRS(X)	\
	&sda_in_input[X].dev_attr.attr,		\
	&sda_in_min[X].dev_attr.attr,		\
	&sda_in_max[X].dev_attr.attr

#define FAN_UNIT_ATTRS(X)	\
	&sda_fan_input[X].dev_attr.attr,	\
	&sda_fan_min[X].dev_attr.attr,		\
	&sda_fan_div[X].dev_attr.attr

#define TEMP_UNIT_ATTRS(X)	\
	&sda_temp_input[X].dev_attr.attr,	\
	&sda_temp_max[X].dev_attr.attr,		\
	&sda_temp_max_hyst[X].dev_attr.attr

#define PWM_UNIT_ATTRS(X)	\
	&sda_pwm[X].dev_attr.attr,		\
	&sda_pwm_mode[X].dev_attr.attr,		\
	&sda_pwm_enable[X].dev_attr.attr

#define TOLERANCE_UNIT_ATTRS(X)	\
	&sda_tolerance[X].dev_attr.attr

static struct attribute *w83l786ng_attrs[] = {
	IN_UNIT_ATTRS(0),
	IN_UNIT_ATTRS(1),
	IN_UNIT_ATTRS(2),
	FAN_UNIT_ATTRS(0),
	FAN_UNIT_ATTRS(1),
	TEMP_UNIT_ATTRS(0),
	TEMP_UNIT_ATTRS(1),
	PWM_UNIT_ATTRS(0),
	PWM_UNIT_ATTRS(1),
	TOLERANCE_UNIT_ATTRS(0),
	TOLERANCE_UNIT_ATTRS(1),
	NULL
};

ATTRIBUTE_GROUPS(w83l786ng);

/*
 * i2c-core detect callback: confirm the chip at this address really is a
 * W83L786NG by checking the config register, manufacturer ID and chip ID.
 */
static int
w83l786ng_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	u16 man_id;
	u8 chip_id;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/* Detection */
	if ((w83l786ng_read_value(client, W83L786NG_REG_CONFIG) & 0x80)) {
		dev_dbg(&adapter->dev, "W83L786NG detection failed at 0x%02x\n",
			client->addr);
		return -ENODEV;
	}

	/* Identification */
	man_id = (w83l786ng_read_value(client, W83L786NG_REG_MAN_ID1) << 8) +
		 w83l786ng_read_value(client, W83L786NG_REG_MAN_ID2);
	chip_id = w83l786ng_read_value(client, W83L786NG_REG_CHIP_ID);

	if (man_id != 0x5CA3 ||		/* Winbond */
	    chip_id != 0x80) {		/* W83L786NG */
		dev_dbg(&adapter->dev,
			"Unsupported chip (man_id=0x%04X, chip_id=0x%02X)\n",
			man_id, chip_id);
		return -ENODEV;
	}

	strlcpy(info->type, "w83l786ng", I2C_NAME_SIZE);

	return 0;
}

/* Optionally reset the chip, then make sure monitoring is started. */
static void w83l786ng_init_client(struct i2c_client *client)
{
	u8 tmp;

	if (reset)
		w83l786ng_write_value(client, W83L786NG_REG_CONFIG, 0x80);

	/* Start monitoring */
	tmp = w83l786ng_read_value(client, W83L786NG_REG_CONFIG);
	if (!(tmp & 0x01))
		w83l786ng_write_value(client, W83L786NG_REG_CONFIG, tmp | 0x01);
}

/*
 * Bind to a detected chip: allocate state, initialize the hardware, seed
 * the cached fan limits/divisors, and register the hwmon device.
 */
static int
w83l786ng_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct w83l786ng_data *data;
	struct device *hwmon_dev;
	int i;
	u8 reg_tmp;

	data = devm_kzalloc(dev, sizeof(struct w83l786ng_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->client = client;
	mutex_init(&data->update_lock);

	/* Initialize the chip */
	w83l786ng_init_client(client);

	/* A few vars need to be filled upon startup */
	for (i = 0; i < 2; i++) {
		data->fan_min[i] = w83l786ng_read_value(client,
		    W83L786NG_REG_FAN_MIN(i));
	}

	/* Update the fan divisor */
	reg_tmp = w83l786ng_read_value(client, W83L786NG_REG_FAN_DIV);
	data->fan_div[0] = reg_tmp & 0x07;
	data->fan_div[1] = (reg_tmp >> 4) & 0x07;

	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
							   data,
							   w83l786ng_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}

static const struct i2c_device_id w83l786ng_id[] = {
	{ "w83l786ng", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, w83l786ng_id);

static struct i2c_driver w83l786ng_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		   .name = "w83l786ng",
	},
	.probe		= w83l786ng_probe,
	.id_table	= w83l786ng_id,
	.detect		= w83l786ng_detect,
	.address_list	= normal_i2c,
};

module_i2c_driver(w83l786ng_driver);

MODULE_AUTHOR("Kevin Lo");
MODULE_DESCRIPTION("w83l786ng driver");
MODULE_LICENSE("GPL");
gpl-2.0
hastalafiesta/Samsung_STE_Kernel
arch/sh/boards/mach-cayman/irq.c
2734
3581
/*
 * arch/sh/mach-cayman/irq.c - SH-5 Cayman Interrupt Support
 *
 * This file handles the board specific parts of the Cayman interrupt system
 *
 * Copyright (C) 2002 Stuart Menefy
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <cpu/irq.h>
#include <asm/page.h>

/* Setup for the SMSC FDC37C935 / LAN91C100FD */
#define SMSC_IRQ	IRQ_IRL1

/* Setup for PCI Bus 2, which transmits interrupts via the EPLD */
#define PCI2_IRQ	IRQ_IRL3

/* Virtual address of the ioremapped EPLD register window */
unsigned long epld_virt;

#define EPLD_BASE	0x04002000
#define EPLD_STATUS_BASE	(epld_virt + 0x10)
#define EPLD_MASK_BASE		(epld_virt + 0x20)

/*
 * Note the SMSC SuperIO chip and SMSC LAN chip interrupts are all muxed onto
 * the same SH-5 interrupt
 */

/*
 * Handler for the muxed SMSC line itself; real work happens in the demuxed
 * per-device handlers, so reaching this means no device claimed the IRQ.
 */
static irqreturn_t cayman_interrupt_smsc(int irq, void *dev_id)
{
	printk(KERN_INFO "CAYMAN: spurious SMSC interrupt\n");
	return IRQ_NONE;
}

/* Same as above, but for the muxed PCI bus 2 line. */
static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id)
{
	printk(KERN_INFO "CAYMAN: spurious PCI interrupt, IRQ %d\n", irq);
	return IRQ_NONE;
}

static struct irqaction cayman_action_smsc = {
	.name		= "Cayman SMSC Mux",
	.handler	= cayman_interrupt_smsc,
	.flags		= IRQF_DISABLED,
};

static struct irqaction cayman_action_pci2 = {
	.name		= "Cayman PCI2 Mux",
	.handler	= cayman_interrupt_pci2,
	.flags		= IRQF_DISABLED,
};

/* Unmask an external IRQ by setting its bit in the EPLD mask register. */
static void enable_cayman_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned long flags;
	unsigned long mask;
	unsigned int reg;
	unsigned char bit;

	irq -= START_EXT_IRQS;
	/* 8 IRQs per 32-bit mask register */
	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
	bit = 1<<(irq % 8);
	local_irq_save(flags);
	mask = __raw_readl(reg);
	mask |= bit;
	__raw_writel(mask, reg);
	local_irq_restore(flags);
}

/* Mask an external IRQ by clearing its bit in the EPLD mask register. */
static void disable_cayman_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned long flags;
	unsigned long mask;
	unsigned int reg;
	unsigned char bit;

	irq -= START_EXT_IRQS;
	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
	bit = 1<<(irq % 8);
	local_irq_save(flags);
	mask = __raw_readl(reg);
	mask &= ~bit;
	__raw_writel(mask, reg);
	local_irq_restore(flags);
}

struct irq_chip cayman_irq_type = {
	.name		= "Cayman-IRQ",
	.irq_unmask	= enable_cayman_irq,
	.irq_mask	= disable_cayman_irq,
};

/*
 * Translate an interrupt event code into a Linux IRQ number.  For the two
 * muxed lines (SMSC and PCI2), read the EPLD status/mask registers to find
 * which of the 8 sources actually fired; -1 means no pending source.
 */
int cayman_irq_demux(int evt)
{
	int irq = intc_evt_to_irq[evt];

	if (irq == SMSC_IRQ) {
		unsigned long status;
		int i;

		status = __raw_readl(EPLD_STATUS_BASE) &
			 __raw_readl(EPLD_MASK_BASE) & 0xff;
		if (status == 0) {
			irq = -1;
		} else {
			for (i=0; i<8; i++) {
				if (status & (1<<i))
					break;
			}
			irq = START_EXT_IRQS + i;
		}
	}

	if (irq == PCI2_IRQ) {
		unsigned long status;
		int i;

		/* PCI2 sources live in the fourth status/mask register pair */
		status = __raw_readl(EPLD_STATUS_BASE + 3 * sizeof(u32)) &
			 __raw_readl(EPLD_MASK_BASE + 3 * sizeof(u32)) & 0xff;
		if (status == 0) {
			irq = -1;
		} else {
			for (i=0; i<8; i++) {
				if (status & (1<<i))
					break;
			}
			irq = START_EXT_IRQS + (3 * 8) + i;
		}
	}

	return irq;
}

/*
 * Map the EPLD registers, register the irq_chip for all external IRQs, and
 * install the mux handlers for the SMSC and PCI2 lines.
 */
void init_cayman_irq(void)
{
	int i;

	epld_virt = (unsigned long)ioremap_nocache(EPLD_BASE, 1024);
	if (!epld_virt) {
		printk(KERN_ERR "Cayman IRQ: Unable to remap EPLD\n");
		return;
	}

	for (i = 0; i < NR_EXT_IRQS; i++) {
		irq_set_chip_and_handler(START_EXT_IRQS + i,
					 &cayman_irq_type, handle_level_irq);
	}

	/* Setup the SMSC interrupt */
	setup_irq(SMSC_IRQ, &cayman_action_smsc);
	setup_irq(PCI2_IRQ, &cayman_action_pci2);
}
gpl-2.0
jose51197/Infernal
arch/cris/arch-v32/drivers/mach-fs/nandflash.c
2734
4256
/*
 * arch/cris/arch-v32/drivers/nandflash.c
 *
 * Copyright (c) 2004
 *
 * Derived from drivers/mtd/nand/spia.c
 *	Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <arch/memmap.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/gio_defs.h>
#include <hwregs/bif_core_defs.h>
#include <asm/io.h>

/* GPIO port A bit numbers used to drive the NAND control lines */
#define CE_BIT 4
#define CLE_BIT 5
#define ALE_BIT 6
#define BY_BIT 7	/* ready/busy input from the chip */

/* Single allocation holding both the MTD info and the NAND chip state */
struct mtd_info_wrapper {
	struct mtd_info info;
	struct nand_chip chip;
};

/* Bitmask for control pins */
#define PIN_BITMASK ((1 << CE_BIT) | (1 << CLE_BIT) | (1 << ALE_BIT))

/* Bitmask for mtd nand control bits */
#define CTRL_BITMASK (NAND_NCE | NAND_CLE | NAND_ALE)


static struct mtd_info *crisv32_mtd;

/*
 * hardware specific access to control-lines
 */
static void crisv32_hwcontrol(struct mtd_info *mtd, int cmd,
			      unsigned int ctrl)
{
	unsigned long flags;
	reg_gio_rw_pa_dout dout;
	struct nand_chip *this = mtd->priv;

	local_irq_save(flags);

	/* control bits change */
	if (ctrl & NAND_CTRL_CHANGE) {
		dout = REG_RD(gio, regi_gio, rw_pa_dout);
		dout.data &= ~PIN_BITMASK;

#if (CE_BIT == 4 && NAND_NCE == 1 &&  \
     CLE_BIT == 5 && NAND_CLE == 2 &&  \
     ALE_BIT == 6 && NAND_ALE == 4)
		/* Pins in same order as control bits, but shifted.
		 * Optimize for this case; works for 2.6.18 */
		dout.data |= ((ctrl & CTRL_BITMASK) ^ NAND_NCE) << CE_BIT;
#else
		/* the slow way */
		if (!(ctrl & NAND_NCE))
			dout.data |= (1 << CE_BIT);
		if (ctrl & NAND_CLE)
			dout.data |= (1 << CLE_BIT);
		if (ctrl & NAND_ALE)
			dout.data |= (1 << ALE_BIT);
#endif
		REG_WR(gio, regi_gio, rw_pa_dout, dout);
	}

	/* command to chip */
	if (cmd != NAND_CMD_NONE)
		writeb(cmd, this->IO_ADDR_W);

	local_irq_restore(flags);
}

/*
 * read device ready pin
 */
static int crisv32_device_ready(struct mtd_info *mtd)
{
	reg_gio_r_pa_din din = REG_RD(gio, regi_gio, r_pa_din);
	return ((din.data & (1 << BY_BIT)) >> BY_BIT);
}

/*
 * Main initialization routine
 */
struct mtd_info *__init crisv32_nand_flash_probe(void)
{
	void __iomem *read_cs;
	void __iomem *write_cs;

	reg_bif_core_rw_grp3_cfg bif_cfg = REG_RD(bif_core, regi_bif_core,
		rw_grp3_cfg);
	reg_gio_rw_pa_oe pa_oe = REG_RD(gio, regi_gio, rw_pa_oe);
	struct mtd_info_wrapper *wrapper;
	struct nand_chip *this;
	int err = 0;

	/* Allocate memory for MTD device structure and private data */
	wrapper = kzalloc(sizeof(struct mtd_info_wrapper), GFP_KERNEL);
	if (!wrapper) {
		printk(KERN_ERR "Unable to allocate CRISv32 NAND MTD "
			"device structure.\n");
		err = -ENOMEM;
		return NULL;
	}

	/* Separate chip selects for the read and write paths */
	read_cs = ioremap(MEM_CSP0_START | MEM_NON_CACHEABLE, 8192);
	write_cs = ioremap(MEM_CSP1_START | MEM_NON_CACHEABLE, 8192);

	if (!read_cs || !write_cs) {
		printk(KERN_ERR "CRISv32 NAND ioremap failed\n");
		err = -EIO;
		goto out_mtd;
	}

	/* Get pointer to private data */
	this = &wrapper->chip;
	crisv32_mtd = &wrapper->info;

	/* Control lines as outputs, busy line as input */
	pa_oe.oe |= 1 << CE_BIT;
	pa_oe.oe |= 1 << ALE_BIT;
	pa_oe.oe |= 1 << CLE_BIT;
	pa_oe.oe &= ~(1 << BY_BIT);
	REG_WR(gio, regi_gio, rw_pa_oe, pa_oe);

	/* Gate CSP0 for reads and CSP1 for writes */
	bif_cfg.gated_csp0 = regk_bif_core_rd;
	bif_cfg.gated_csp1 = regk_bif_core_wr;
	REG_WR(bif_core, regi_bif_core, rw_grp3_cfg, bif_cfg);

	/* Link the private data with the MTD structure */
	crisv32_mtd->priv = this;

	/* Set address of NAND IO lines */
	this->IO_ADDR_R = read_cs;
	this->IO_ADDR_W = write_cs;
	this->cmd_ctrl = crisv32_hwcontrol;
	this->dev_ready = crisv32_device_ready;
	/* 20 us command delay time */
	this->chip_delay = 20;
	this->ecc.mode = NAND_ECC_SOFT;

	/* Enable the following for a flash based bad block table */
	/* this->options = NAND_USE_FLASH_BBT; */

	/* Scan to find existence of the device */
	if (nand_scan(crisv32_mtd, 1)) {
		err = -ENXIO;
		goto out_ior;
	}

	return crisv32_mtd;

out_ior:
	iounmap((void *)read_cs);
	iounmap((void *)write_cs);
out_mtd:
	kfree(wrapper);
	return NULL;
}
gpl-2.0
BOOTMGR/GT-I9070_kernel
drivers/staging/rtl8712/rtl871x_sta_mgt.c
3246
9573
/****************************************************************************** * rtl871x_sta_mgt.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _RTL871X_STA_MGT_C_ #include "osdep_service.h" #include "drv_types.h" #include "recv_osdep.h" #include "xmit_osdep.h" #include "sta_info.h" static void _init_stainfo(struct sta_info *psta) { memset((u8 *)psta, 0, sizeof(struct sta_info)); spin_lock_init(&psta->lock); _init_listhead(&psta->list); _init_listhead(&psta->hash_list); _r8712_init_sta_xmit_priv(&psta->sta_xmitpriv); _r8712_init_sta_recv_priv(&psta->sta_recvpriv); #ifdef CONFIG_R8712_AP _init_listhead(&psta->auth_list); #endif } u32 _r8712_init_sta_priv(struct sta_priv *pstapriv) { struct sta_info *psta; s32 i; pstapriv->pallocated_stainfo_buf = _malloc(sizeof(struct sta_info) * NUM_STA + 4); if (pstapriv->pallocated_stainfo_buf == NULL) return _FAIL; pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 - ((addr_t)(pstapriv->pallocated_stainfo_buf) & 3); 
_init_queue(&pstapriv->free_sta_queue); spin_lock_init(&pstapriv->sta_hash_lock); pstapriv->asoc_sta_count = 0; _init_queue(&pstapriv->sleep_q); _init_queue(&pstapriv->wakeup_q); psta = (struct sta_info *)(pstapriv->pstainfo_buf); for (i = 0; i < NUM_STA; i++) { _init_stainfo(psta); _init_listhead(&(pstapriv->sta_hash[i])); list_insert_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue)); psta++; } #ifdef CONFIG_R8712_AP _init_listhead(&pstapriv->asoc_list); _init_listhead(&pstapriv->auth_list); #endif return _SUCCESS; } /* this function is used to free the memory of lock || sema for all stainfos */ static void mfree_all_stainfo(struct sta_priv *pstapriv) { unsigned long irqL; struct list_head *plist, *phead; struct sta_info *psta = NULL; spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL); phead = get_list_head(&pstapriv->free_sta_queue); plist = get_next(phead); while ((end_of_queue_search(phead, plist)) == false) { psta = LIST_CONTAINOR(plist, struct sta_info, list); plist = get_next(plist); } spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL); } static void mfree_sta_priv_lock(struct sta_priv *pstapriv) { mfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */ } u32 _r8712_free_sta_priv(struct sta_priv *pstapriv) { if (pstapriv) { mfree_sta_priv_lock(pstapriv); kfree(pstapriv->pallocated_stainfo_buf); } return _SUCCESS; } struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) { uint tmp_aid; s32 index; struct list_head *phash_list; struct sta_info *psta; struct __queue *pfree_sta_queue; struct recv_reorder_ctrl *preorder_ctrl; int i = 0; u16 wRxSeqInitialValue = 0xffff; unsigned long flags; pfree_sta_queue = &pstapriv->free_sta_queue; spin_lock_irqsave(&(pfree_sta_queue->lock), flags); if (_queue_empty(pfree_sta_queue) == true) psta = NULL; else { psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list); list_delete(&(psta->list)); tmp_aid = psta->aid; _init_stainfo(psta); memcpy(psta->hwaddr, 
hwaddr, ETH_ALEN); index = wifi_mac_hash(hwaddr); if (index >= NUM_STA) { psta = NULL; goto exit; } phash_list = &(pstapriv->sta_hash[index]); list_insert_tail(&psta->hash_list, phash_list); pstapriv->asoc_sta_count++ ; /* For the SMC router, the sequence number of first packet of WPS handshake * will be 0. In this case, this packet will be dropped by recv_decache function * if we use the 0x00 as the default value for tid_rxseq variable. So, we * initialize the tid_rxseq variable as the 0xffff. */ for (i = 0; i < 16; i++) memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2); /* for A-MPDU Rx reordering buffer control */ for (i = 0; i < 16 ; i++) { preorder_ctrl = &psta->recvreorder_ctrl[i]; preorder_ctrl->padapter = pstapriv->padapter; preorder_ctrl->indicate_seq = 0xffff; preorder_ctrl->wend_b = 0xffff; preorder_ctrl->wsize_b = 64; _init_queue(&preorder_ctrl->pending_recvframe_queue); r8712_init_recv_timer(preorder_ctrl); } } exit: spin_unlock_irqrestore(&(pfree_sta_queue->lock), flags); return psta; } /* using pstapriv->sta_hash_lock to protect */ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta) { int i; unsigned long irqL0; struct __queue *pfree_sta_queue; struct recv_reorder_ctrl *preorder_ctrl; struct sta_xmit_priv *pstaxmitpriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct sta_priv *pstapriv = &padapter->stapriv; if (psta == NULL) return; pfree_sta_queue = &pstapriv->free_sta_queue; pstaxmitpriv = &psta->sta_xmitpriv; spin_lock_irqsave(&(pxmitpriv->vo_pending.lock), irqL0); r8712_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending); list_delete(&(pstaxmitpriv->vo_q.tx_pending)); spin_unlock_irqrestore(&(pxmitpriv->vo_pending.lock), irqL0); spin_lock_irqsave(&(pxmitpriv->vi_pending.lock), irqL0); r8712_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending); list_delete(&(pstaxmitpriv->vi_q.tx_pending)); spin_unlock_irqrestore(&(pxmitpriv->vi_pending.lock), irqL0); 
spin_lock_irqsave(&(pxmitpriv->bk_pending.lock), irqL0); r8712_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending); list_delete(&(pstaxmitpriv->bk_q.tx_pending)); spin_unlock_irqrestore(&(pxmitpriv->bk_pending.lock), irqL0); spin_lock_irqsave(&(pxmitpriv->be_pending.lock), irqL0); r8712_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending); list_delete(&(pstaxmitpriv->be_q.tx_pending)); spin_unlock_irqrestore(&(pxmitpriv->be_pending.lock), irqL0); list_delete(&psta->hash_list); pstapriv->asoc_sta_count--; /* re-init sta_info; 20061114 */ _r8712_init_sta_xmit_priv(&psta->sta_xmitpriv); _r8712_init_sta_recv_priv(&psta->sta_recvpriv); /* for A-MPDU Rx reordering buffer control, * cancel reordering_ctrl_timer */ for (i = 0; i < 16; i++) { preorder_ctrl = &psta->recvreorder_ctrl[i]; _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer); } spin_lock(&(pfree_sta_queue->lock)); /* insert into free_sta_queue; 20061114 */ list_insert_tail(&psta->list, get_list_head(pfree_sta_queue)); spin_unlock(&(pfree_sta_queue->lock)); } /* free all stainfo which in sta_hash[all] */ void r8712_free_all_stainfo(struct _adapter *padapter) { unsigned long irqL; struct list_head *plist, *phead; s32 index; struct sta_info *psta = NULL; struct sta_priv *pstapriv = &padapter->stapriv; struct sta_info *pbcmc_stainfo = r8712_get_bcmc_stainfo(padapter); if (pstapriv->asoc_sta_count == 1) return; spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL); for (index = 0; index < NUM_STA; index++) { phead = &(pstapriv->sta_hash[index]); plist = get_next(phead); while ((end_of_queue_search(phead, plist)) == false) { psta = LIST_CONTAINOR(plist, struct sta_info, hash_list); plist = get_next(plist); if (pbcmc_stainfo != psta) r8712_free_stainfo(padapter , psta); } } spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL); } /* any station allocated can be searched by hash list */ struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) { unsigned long irqL; struct 
list_head *plist, *phead; struct sta_info *psta = NULL; u32 index; if (hwaddr == NULL) return NULL; index = wifi_mac_hash(hwaddr); spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL); phead = &(pstapriv->sta_hash[index]); plist = get_next(phead); while ((end_of_queue_search(phead, plist)) == false) { psta = LIST_CONTAINOR(plist, struct sta_info, hash_list); if ((!memcmp(psta->hwaddr, hwaddr, ETH_ALEN))) { /* if found the matched address */ break; } psta = NULL; plist = get_next(plist); } spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL); return psta; } void r8712_init_bcmc_stainfo(struct _adapter *padapter) { struct sta_info *psta; struct tx_servq *ptxservq; unsigned char bcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct sta_priv *pstapriv = &padapter->stapriv; psta = r8712_alloc_stainfo(pstapriv, bcast_addr); if (psta == NULL) return; ptxservq = &(psta->sta_xmitpriv.be_q); } struct sta_info *r8712_get_bcmc_stainfo(struct _adapter *padapter) { struct sta_info *psta; struct sta_priv *pstapriv = &padapter->stapriv; u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; psta = r8712_get_stainfo(pstapriv, bc_addr); return psta; } u8 r8712_access_ctrl(struct wlan_acl_pool *pacl_list, u8 *mac_addr) { return true; }
gpl-2.0
sub-b/android_kernel_samsung_s3ve3g-old
arch/arm/mach-msm/qdsp6v2/board-msm8x60-audio.c
3246
74760
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/debugfs.h> #include <linux/mfd/pmic8058.h> #include <linux/mfd/pmic8901.h> #include <linux/mfd/msm-adie-codec.h> #include <linux/regulator/consumer.h> #include <linux/regulator/machine.h> #include <mach/qdsp6v2/audio_dev_ctl.h> #include <sound/apr_audio.h> #include <asm/mach-types.h> #include <asm/uaccess.h> #include <mach/board-msm8660.h> #include "snddev_icodec.h" #include "snddev_ecodec.h" #include "timpani_profile_8x60.h" #include "snddev_hdmi.h" #include "snddev_mi2s.h" #include "snddev_virtual.h" #ifdef CONFIG_DEBUG_FS static struct dentry *debugfs_hsed_config; static void snddev_hsed_config_modify_setting(int type); static void snddev_hsed_config_restore_setting(void); #endif /* GPIO_CLASS_D0_EN */ #define SNDDEV_GPIO_CLASS_D0_EN 227 /* GPIO_CLASS_D1_EN */ #define SNDDEV_GPIO_CLASS_D1_EN 229 #define SNDDEV_GPIO_MIC2_ANCR_SEL 294 #define SNDDEV_GPIO_MIC1_ANCL_SEL 295 #define SNDDEV_GPIO_HS_MIC4_SEL 296 #define DSP_RAM_BASE_8x60 0x46700000 #define DSP_RAM_SIZE_8x60 0x2000000 static int dspcrashd_pdata_8x60 = 0xDEADDEAD; static struct resource resources_dspcrashd_8x60[] = { { .name = "msm_dspcrashd", .start = DSP_RAM_BASE_8x60, .end = DSP_RAM_BASE_8x60 + DSP_RAM_SIZE_8x60, .flags = IORESOURCE_DMA, }, }; struct platform_device msm_device_dspcrashd_8x60 = { .name = "msm_dspcrashd", .num_resources = 
ARRAY_SIZE(resources_dspcrashd_8x60), .resource = resources_dspcrashd_8x60, .dev = { .platform_data = &dspcrashd_pdata_8x60 }, }; static struct resource msm_cdcclk_ctl_resources[] = { { .name = "msm_snddev_tx_mclk", .start = 108, .end = 108, .flags = IORESOURCE_IO, }, { .name = "msm_snddev_rx_mclk", .start = 109, .end = 109, .flags = IORESOURCE_IO, }, }; static struct platform_device msm_cdcclk_ctl_device = { .name = "msm_cdcclk_ctl", .num_resources = ARRAY_SIZE(msm_cdcclk_ctl_resources), .resource = msm_cdcclk_ctl_resources, }; static struct resource msm_aux_pcm_resources[] = { { .name = "aux_pcm_dout", .start = 111, .end = 111, .flags = IORESOURCE_IO, }, { .name = "aux_pcm_din", .start = 112, .end = 112, .flags = IORESOURCE_IO, }, { .name = "aux_pcm_syncout", .start = 113, .end = 113, .flags = IORESOURCE_IO, }, { .name = "aux_pcm_clkin_a", .start = 114, .end = 114, .flags = IORESOURCE_IO, }, }; static struct platform_device msm_aux_pcm_device = { .name = "msm_aux_pcm", .num_resources = ARRAY_SIZE(msm_aux_pcm_resources), .resource = msm_aux_pcm_resources, }; static struct resource msm_mi2s_gpio_resources[] = { { .name = "mi2s_ws", .start = 101, .end = 101, .flags = IORESOURCE_IO, }, { .name = "mi2s_sclk", .start = 102, .end = 102, .flags = IORESOURCE_IO, }, { .name = "mi2s_mclk", .start = 103, .end = 103, .flags = IORESOURCE_IO, }, { .name = "fm_mi2s_sd", .start = 107, .end = 107, .flags = IORESOURCE_IO, }, }; static struct platform_device msm_mi2s_device = { .name = "msm_mi2s", .num_resources = ARRAY_SIZE(msm_mi2s_gpio_resources), .resource = msm_mi2s_gpio_resources, }; /* Must be same size as msm_icodec_gpio_resources */ static int msm_icodec_gpio_defaults[] = { 0, 0, }; static struct resource msm_icodec_gpio_resources[] = { { .name = "msm_icodec_speaker_left", .start = SNDDEV_GPIO_CLASS_D0_EN, .end = SNDDEV_GPIO_CLASS_D0_EN, .flags = IORESOURCE_IO, }, { .name = "msm_icodec_speaker_right", .start = SNDDEV_GPIO_CLASS_D1_EN, .end = SNDDEV_GPIO_CLASS_D1_EN, .flags 
= IORESOURCE_IO, }, }; static struct platform_device msm_icodec_gpio_device = { .name = "msm_icodec_gpio", .num_resources = ARRAY_SIZE(msm_icodec_gpio_resources), .resource = msm_icodec_gpio_resources, .dev = { .platform_data = &msm_icodec_gpio_defaults }, }; static struct regulator *s3; static struct regulator *mvs; static int msm_snddev_enable_dmic_power(void) { int ret; s3 = regulator_get(NULL, "8058_s3"); if (IS_ERR(s3)) { ret = -EBUSY; goto fail_get_s3; } ret = regulator_set_voltage(s3, 1800000, 1800000); if (ret) { pr_err("%s: error setting voltage\n", __func__); goto fail_s3; } ret = regulator_enable(s3); if (ret) { pr_err("%s: error enabling regulator\n", __func__); goto fail_s3; } mvs = regulator_get(NULL, "8901_mvs0"); if (IS_ERR(mvs)) goto fail_mvs0_get; ret = regulator_enable(mvs); if (ret) { pr_err("%s: error setting regulator\n", __func__); goto fail_mvs0_enable; } return ret; fail_mvs0_enable: regulator_put(mvs); mvs = NULL; fail_mvs0_get: regulator_disable(s3); fail_s3: regulator_put(s3); s3 = NULL; fail_get_s3: return ret; } static void msm_snddev_disable_dmic_power(void) { int ret; if (mvs) { ret = regulator_disable(mvs); if (ret < 0) pr_err("%s: error disabling vreg mvs\n", __func__); regulator_put(mvs); mvs = NULL; } if (s3) { ret = regulator_disable(s3); if (ret < 0) pr_err("%s: error disabling regulator s3\n", __func__); regulator_put(s3); s3 = NULL; } } #define PM8901_MPP_3 (2) /* PM8901 MPP starts from 0 */ static int config_class_d0_gpio(int enable) { int rc; struct pm8xxx_mpp_config_data class_d0_mpp = { .type = PM8XXX_MPP_TYPE_D_OUTPUT, .level = PM8901_MPP_DIG_LEVEL_MSMIO, }; if (enable) { class_d0_mpp.control = PM8XXX_MPP_DOUT_CTRL_HIGH; rc = pm8xxx_mpp_config(PM8901_MPP_PM_TO_SYS(PM8901_MPP_3), &class_d0_mpp); if (rc) { pr_err("%s: CLASS_D0_EN failed\n", __func__); return rc; } rc = gpio_request(SNDDEV_GPIO_CLASS_D0_EN, "CLASSD0_EN"); if (rc) { pr_err("%s: spkr pamp gpio pm8901 mpp3 request" "failed\n", __func__); class_d0_mpp.control = 
PM8XXX_MPP_DOUT_CTRL_LOW; pm8xxx_mpp_config(PM8901_MPP_PM_TO_SYS(PM8901_MPP_3), &class_d0_mpp); return rc; } gpio_direction_output(SNDDEV_GPIO_CLASS_D0_EN, 1); gpio_set_value_cansleep(SNDDEV_GPIO_CLASS_D0_EN, 1); } else { class_d0_mpp.control = PM8XXX_MPP_DOUT_CTRL_LOW; pm8xxx_mpp_config(PM8901_MPP_PM_TO_SYS(PM8901_MPP_3), &class_d0_mpp); gpio_set_value_cansleep(SNDDEV_GPIO_CLASS_D0_EN, 0); gpio_free(SNDDEV_GPIO_CLASS_D0_EN); } return 0; } static int config_class_d1_gpio(int enable) { int rc; if (enable) { rc = gpio_request(SNDDEV_GPIO_CLASS_D1_EN, "CLASSD1_EN"); if (rc) { pr_err("%s: Right Channel spkr gpio request" " failed\n", __func__); return rc; } gpio_direction_output(SNDDEV_GPIO_CLASS_D1_EN, 1); gpio_set_value_cansleep(SNDDEV_GPIO_CLASS_D1_EN, 1); } else { gpio_set_value_cansleep(SNDDEV_GPIO_CLASS_D1_EN, 0); gpio_free(SNDDEV_GPIO_CLASS_D1_EN); } return 0; } static atomic_t pamp_ref_cnt; static int msm_snddev_poweramp_on(void) { int rc; if (atomic_inc_return(&pamp_ref_cnt) > 1) return 0; pr_debug("%s: enable stereo spkr amp\n", __func__); rc = config_class_d0_gpio(1); if (rc) { pr_err("%s: d0 gpio configuration failed\n", __func__); goto config_gpio_fail; } rc = config_class_d1_gpio(1); if (rc) { pr_err("%s: d1 gpio configuration failed\n", __func__); goto config_gpio_fail; } config_gpio_fail: return rc; } static void msm_snddev_poweramp_off(void) { if (atomic_dec_return(&pamp_ref_cnt) == 0) { pr_debug("%s: disable stereo spkr amp\n", __func__); config_class_d0_gpio(0); config_class_d1_gpio(0); msleep(30); } } /* Regulator 8058_l10 supplies regulator 8058_ncp. 
*/ static struct regulator *snddev_reg_ncp; static struct regulator *snddev_reg_l10; static atomic_t preg_ref_cnt; static int msm_snddev_voltage_on(void) { int rc; pr_debug("%s\n", __func__); if (atomic_inc_return(&preg_ref_cnt) > 1) return 0; snddev_reg_l10 = regulator_get(NULL, "8058_l10"); if (IS_ERR(snddev_reg_l10)) { pr_err("%s: regulator_get(%s) failed (%ld)\n", __func__, "l10", PTR_ERR(snddev_reg_l10)); return -EBUSY; } rc = regulator_set_voltage(snddev_reg_l10, 2600000, 2600000); if (rc < 0) pr_err("%s: regulator_set_voltage(l10) failed (%d)\n", __func__, rc); rc = regulator_enable(snddev_reg_l10); if (rc < 0) pr_err("%s: regulator_enable(l10) failed (%d)\n", __func__, rc); snddev_reg_ncp = regulator_get(NULL, "8058_ncp"); if (IS_ERR(snddev_reg_ncp)) { pr_err("%s: regulator_get(%s) failed (%ld)\n", __func__, "ncp", PTR_ERR(snddev_reg_ncp)); return -EBUSY; } rc = regulator_set_voltage(snddev_reg_ncp, 1800000, 1800000); if (rc < 0) { pr_err("%s: regulator_set_voltage(ncp) failed (%d)\n", __func__, rc); goto regulator_fail; } rc = regulator_enable(snddev_reg_ncp); if (rc < 0) { pr_err("%s: regulator_enable(ncp) failed (%d)\n", __func__, rc); goto regulator_fail; } return rc; regulator_fail: regulator_put(snddev_reg_ncp); snddev_reg_ncp = NULL; return rc; } static void msm_snddev_voltage_off(void) { int rc; pr_debug("%s\n", __func__); if (!snddev_reg_ncp) goto done; if (atomic_dec_return(&preg_ref_cnt) == 0) { rc = regulator_disable(snddev_reg_ncp); if (rc < 0) pr_err("%s: regulator_disable(ncp) failed (%d)\n", __func__, rc); regulator_put(snddev_reg_ncp); snddev_reg_ncp = NULL; } done: if (!snddev_reg_l10) return; rc = regulator_disable(snddev_reg_l10); if (rc < 0) pr_err("%s: regulator_disable(l10) failed (%d)\n", __func__, rc); regulator_put(snddev_reg_l10); snddev_reg_l10 = NULL; } static int msm_snddev_enable_amic_power(void) { int ret = 0; #ifdef CONFIG_PMIC8058_OTHC if (machine_is_msm8x60_fluid()) { ret = pm8058_micbias_enable(OTHC_MICBIAS_0, 
OTHC_SIGNAL_ALWAYS_ON); if (ret) pr_err("%s: Enabling amic power failed\n", __func__); ret = gpio_request(SNDDEV_GPIO_MIC2_ANCR_SEL, "MIC2_ANCR_SEL"); if (ret) { pr_err("%s: spkr pamp gpio %d request failed\n", __func__, SNDDEV_GPIO_MIC2_ANCR_SEL); return ret; } gpio_direction_output(SNDDEV_GPIO_MIC2_ANCR_SEL, 0); ret = gpio_request(SNDDEV_GPIO_MIC1_ANCL_SEL, "MIC1_ANCL_SEL"); if (ret) { pr_err("%s: mic1 ancl gpio %d request failed\n", __func__, SNDDEV_GPIO_MIC1_ANCL_SEL); gpio_free(SNDDEV_GPIO_MIC2_ANCR_SEL); return ret; } gpio_direction_output(SNDDEV_GPIO_MIC1_ANCL_SEL, 0); } else { ret = pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_ALWAYS_ON); if (ret) pr_err("%s: Enabling amic power failed\n", __func__); } #endif return ret; } static void msm_snddev_disable_amic_power(void) { #ifdef CONFIG_PMIC8058_OTHC int ret; if (machine_is_msm8x60_fluid()) { ret = pm8058_micbias_enable(OTHC_MICBIAS_0, OTHC_SIGNAL_OFF); gpio_free(SNDDEV_GPIO_MIC1_ANCL_SEL); gpio_free(SNDDEV_GPIO_MIC2_ANCR_SEL); } else ret = pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_OFF); if (ret) pr_err("%s: Disabling amic power failed\n", __func__); #endif } static int msm_snddev_enable_anc_power(void) { int ret = 0; #ifdef CONFIG_PMIC8058_OTHC ret = pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_ALWAYS_ON); if (ret) pr_err("%s: Enabling anc micbias 2 failed\n", __func__); if (machine_is_msm8x60_fluid()) { ret = pm8058_micbias_enable(OTHC_MICBIAS_0, OTHC_SIGNAL_ALWAYS_ON); if (ret) pr_err("%s: Enabling anc micbias 0 failed\n", __func__); ret = gpio_request(SNDDEV_GPIO_MIC2_ANCR_SEL, "MIC2_ANCR_SEL"); if (ret) { pr_err("%s: mic2 ancr gpio %d request failed\n", __func__, SNDDEV_GPIO_MIC2_ANCR_SEL); return ret; } gpio_direction_output(SNDDEV_GPIO_MIC2_ANCR_SEL, 1); ret = gpio_request(SNDDEV_GPIO_MIC1_ANCL_SEL, "MIC1_ANCL_SEL"); if (ret) { pr_err("%s: mic1 ancl gpio %d request failed\n", __func__, SNDDEV_GPIO_MIC1_ANCL_SEL); gpio_free(SNDDEV_GPIO_MIC2_ANCR_SEL); return ret; } 
gpio_direction_output(SNDDEV_GPIO_MIC1_ANCL_SEL, 1); } #endif return ret; } static void msm_snddev_disable_anc_power(void) { #ifdef CONFIG_PMIC8058_OTHC int ret; ret = pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_OFF); if (machine_is_msm8x60_fluid()) { ret |= pm8058_micbias_enable(OTHC_MICBIAS_0, OTHC_SIGNAL_OFF); gpio_free(SNDDEV_GPIO_MIC2_ANCR_SEL); gpio_free(SNDDEV_GPIO_MIC1_ANCL_SEL); } if (ret) pr_err("%s: Disabling anc power failed\n", __func__); #endif } static int msm_snddev_enable_amic_sec_power(void) { #ifdef CONFIG_PMIC8058_OTHC int ret; if (machine_is_msm8x60_fluid()) { ret = pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_ALWAYS_ON); if (ret) pr_err("%s: Enabling amic2 power failed\n", __func__); ret = gpio_request(SNDDEV_GPIO_HS_MIC4_SEL, "HS_MIC4_SEL"); if (ret) { pr_err("%s: spkr pamp gpio %d request failed\n", __func__, SNDDEV_GPIO_HS_MIC4_SEL); return ret; } gpio_direction_output(SNDDEV_GPIO_HS_MIC4_SEL, 1); } #endif msm_snddev_enable_amic_power(); return 0; } static void msm_snddev_disable_amic_sec_power(void) { #ifdef CONFIG_PMIC8058_OTHC int ret; if (machine_is_msm8x60_fluid()) { ret = pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_OFF); if (ret) pr_err("%s: Disabling amic2 power failed\n", __func__); gpio_free(SNDDEV_GPIO_HS_MIC4_SEL); } #endif msm_snddev_disable_amic_power(); } static int msm_snddev_enable_dmic_sec_power(void) { int ret; ret = msm_snddev_enable_dmic_power(); if (ret) { pr_err("%s: Error: Enabling dmic power failed\n", __func__); return ret; } #ifdef CONFIG_PMIC8058_OTHC ret = pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_ALWAYS_ON); if (ret) { pr_err("%s: Error: Enabling micbias failed\n", __func__); msm_snddev_disable_dmic_power(); return ret; } #endif return 0; } static void msm_snddev_disable_dmic_sec_power(void) { msm_snddev_disable_dmic_power(); #ifdef CONFIG_PMIC8058_OTHC pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_OFF); #endif } static struct adie_codec_action_unit iearpiece_48KHz_osr256_actions[] = 
EAR_PRI_MONO_8000_OSR_256; static struct adie_codec_hwsetting_entry iearpiece_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = iearpiece_48KHz_osr256_actions, .action_sz = ARRAY_SIZE(iearpiece_48KHz_osr256_actions), } }; static struct adie_codec_dev_profile iearpiece_profile = { .path_type = ADIE_CODEC_RX, .settings = iearpiece_settings, .setting_sz = ARRAY_SIZE(iearpiece_settings), }; static struct snddev_icodec_data snddev_iearpiece_data = { .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), .name = "handset_rx", .copp_id = 0, .profile = &iearpiece_profile, .channel_mode = 1, .default_sample_rate = 48000, }; static struct platform_device msm_iearpiece_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_iearpiece_data }, }; static struct adie_codec_action_unit imic_48KHz_osr256_actions[] = AMIC_PRI_MONO_OSR_256; static struct adie_codec_hwsetting_entry imic_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = imic_48KHz_osr256_actions, .action_sz = ARRAY_SIZE(imic_48KHz_osr256_actions), } }; static struct adie_codec_dev_profile imic_profile = { .path_type = ADIE_CODEC_TX, .settings = imic_settings, .setting_sz = ARRAY_SIZE(imic_settings), }; static struct snddev_icodec_data snddev_imic_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "handset_tx", .copp_id = 1, .profile = &imic_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_amic_power, .pamp_off = msm_snddev_disable_amic_power, }; static struct platform_device msm_imic_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_imic_data }, }; static struct snddev_icodec_data snddev_fluid_ispkr_mic_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "speaker_mono_tx", .copp_id = PRIMARY_I2S_TX, .profile = &imic_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_amic_power, .pamp_off = msm_snddev_disable_amic_power, }; static struct platform_device 
msm_fluid_ispkr_mic_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_fluid_ispkr_mic_data }, }; static struct adie_codec_action_unit headset_ab_cpls_48KHz_osr256_actions[] = HEADSET_AB_CPLS_48000_OSR_256; static struct adie_codec_hwsetting_entry headset_ab_cpls_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = headset_ab_cpls_48KHz_osr256_actions, .action_sz = ARRAY_SIZE(headset_ab_cpls_48KHz_osr256_actions), } }; static struct adie_codec_dev_profile headset_ab_cpls_profile = { .path_type = ADIE_CODEC_RX, .settings = headset_ab_cpls_settings, .setting_sz = ARRAY_SIZE(headset_ab_cpls_settings), }; static struct snddev_icodec_data snddev_ihs_stereo_rx_data = { .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), .name = "headset_stereo_rx", .copp_id = 0, .profile = &headset_ab_cpls_profile, .channel_mode = 2, .default_sample_rate = 48000, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, }; static struct platform_device msm_headset_stereo_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_ihs_stereo_rx_data }, }; static struct adie_codec_action_unit headset_anc_48KHz_osr256_actions[] = ANC_HEADSET_CPLS_AMIC1_AUXL_RX1_48000_OSR_256; static struct adie_codec_hwsetting_entry headset_anc_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = headset_anc_48KHz_osr256_actions, .action_sz = ARRAY_SIZE(headset_anc_48KHz_osr256_actions), } }; static struct adie_codec_dev_profile headset_anc_profile = { .path_type = ADIE_CODEC_RX, .settings = headset_anc_settings, .setting_sz = ARRAY_SIZE(headset_anc_settings), }; static struct snddev_icodec_data snddev_anc_headset_data = { .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE | SNDDEV_CAP_ANC), .name = "anc_headset_stereo_rx", .copp_id = PRIMARY_I2S_RX, .profile = &headset_anc_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_anc_power, .pamp_off = msm_snddev_disable_anc_power, .voltage_on = msm_snddev_voltage_on, 
.voltage_off = msm_snddev_voltage_off, }; static struct platform_device msm_anc_headset_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_anc_headset_data }, }; static struct adie_codec_action_unit ispkr_stereo_48KHz_osr256_actions[] = SPEAKER_PRI_STEREO_48000_OSR_256; static struct adie_codec_hwsetting_entry ispkr_stereo_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ispkr_stereo_48KHz_osr256_actions, .action_sz = ARRAY_SIZE(ispkr_stereo_48KHz_osr256_actions), } }; static struct adie_codec_dev_profile ispkr_stereo_profile = { .path_type = ADIE_CODEC_RX, .settings = ispkr_stereo_settings, .setting_sz = ARRAY_SIZE(ispkr_stereo_settings), }; static struct snddev_icodec_data snddev_ispkr_stereo_data = { .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), .name = "speaker_stereo_rx", .copp_id = 0, .profile = &ispkr_stereo_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_poweramp_on, .pamp_off = msm_snddev_poweramp_off, }; static struct platform_device msm_ispkr_stereo_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_ispkr_stereo_data }, }; static struct adie_codec_action_unit idmic_mono_48KHz_osr256_actions[] = DMIC1_PRI_MONO_OSR_256; static struct adie_codec_hwsetting_entry idmic_mono_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = idmic_mono_48KHz_osr256_actions, .action_sz = ARRAY_SIZE(idmic_mono_48KHz_osr256_actions), } }; static struct adie_codec_dev_profile idmic_mono_profile = { .path_type = ADIE_CODEC_TX, .settings = idmic_mono_settings, .setting_sz = ARRAY_SIZE(idmic_mono_settings), }; static struct snddev_icodec_data snddev_ispkr_mic_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "speaker_mono_tx", .copp_id = PRIMARY_I2S_TX, .profile = &idmic_mono_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_power, .pamp_off = msm_snddev_disable_dmic_power, }; static struct platform_device msm_ispkr_mic_device = { 
.name = "snddev_icodec", .dev = { .platform_data = &snddev_ispkr_mic_data }, }; static struct adie_codec_action_unit iearpiece_ffa_48KHz_osr256_actions[] = EAR_PRI_MONO_8000_OSR_256; static struct adie_codec_hwsetting_entry iearpiece_ffa_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = iearpiece_ffa_48KHz_osr256_actions, .action_sz = ARRAY_SIZE(iearpiece_ffa_48KHz_osr256_actions), } }; static struct adie_codec_dev_profile iearpiece_ffa_profile = { .path_type = ADIE_CODEC_RX, .settings = iearpiece_ffa_settings, .setting_sz = ARRAY_SIZE(iearpiece_ffa_settings), }; static struct snddev_icodec_data snddev_iearpiece_ffa_data = { .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), .name = "handset_rx", .copp_id = 0, .profile = &iearpiece_ffa_profile, .channel_mode = 1, .default_sample_rate = 48000, }; static struct platform_device msm_iearpiece_ffa_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_iearpiece_ffa_data }, }; static struct snddev_icodec_data snddev_imic_ffa_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "handset_tx", .copp_id = PRIMARY_I2S_TX, .profile = &idmic_mono_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_power, .pamp_off = msm_snddev_disable_dmic_power, }; static struct platform_device msm_imic_ffa_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_imic_ffa_data }, }; static struct adie_codec_action_unit dual_mic_endfire_8KHz_osr256_actions[] = DMIC1_PRI_STEREO_OSR_256; static struct adie_codec_hwsetting_entry dual_mic_endfire_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = dual_mic_endfire_8KHz_osr256_actions, .action_sz = ARRAY_SIZE(dual_mic_endfire_8KHz_osr256_actions), } }; static struct adie_codec_dev_profile dual_mic_endfire_profile = { .path_type = ADIE_CODEC_TX, .settings = dual_mic_endfire_settings, .setting_sz = ARRAY_SIZE(dual_mic_endfire_settings), }; static struct snddev_icodec_data snddev_dual_mic_endfire_data = { 
.capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "handset_dual_mic_endfire_tx", .copp_id = PRIMARY_I2S_TX, .profile = &dual_mic_endfire_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_power, .pamp_off = msm_snddev_disable_dmic_power, }; static struct platform_device msm_hs_dual_mic_endfire_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_dual_mic_endfire_data }, }; static struct snddev_icodec_data snddev_dual_mic_spkr_endfire_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "speaker_dual_mic_endfire_tx", .copp_id = PRIMARY_I2S_TX, .profile = &dual_mic_endfire_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_power, .pamp_off = msm_snddev_disable_dmic_power, }; static struct platform_device msm_spkr_dual_mic_endfire_device = { .name = "snddev_icodec", .id = 15, .dev = { .platform_data = &snddev_dual_mic_spkr_endfire_data }, }; static struct adie_codec_action_unit dual_mic_broadside_8osr256_actions[] = HS_DMIC2_STEREO_OSR_256; static struct adie_codec_hwsetting_entry dual_mic_broadside_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = dual_mic_broadside_8osr256_actions, .action_sz = ARRAY_SIZE(dual_mic_broadside_8osr256_actions), } }; static struct adie_codec_dev_profile dual_mic_broadside_profile = { .path_type = ADIE_CODEC_TX, .settings = dual_mic_broadside_settings, .setting_sz = ARRAY_SIZE(dual_mic_broadside_settings), }; static struct snddev_icodec_data snddev_hs_dual_mic_broadside_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "handset_dual_mic_broadside_tx", .copp_id = PRIMARY_I2S_TX, .profile = &dual_mic_broadside_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_sec_power, .pamp_off = msm_snddev_disable_dmic_sec_power, }; static struct platform_device msm_hs_dual_mic_broadside_device = { .name = "snddev_icodec", .id = 21, .dev = { .platform_data = 
&snddev_hs_dual_mic_broadside_data }, }; static struct snddev_icodec_data snddev_spkr_dual_mic_broadside_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "speaker_dual_mic_broadside_tx", .copp_id = PRIMARY_I2S_TX, .profile = &dual_mic_broadside_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_sec_power, .pamp_off = msm_snddev_disable_dmic_sec_power, }; static struct platform_device msm_spkr_dual_mic_broadside_device = { .name = "snddev_icodec", .id = 18, .dev = { .platform_data = &snddev_spkr_dual_mic_broadside_data }, }; static struct adie_codec_action_unit fluid_dual_mic_endfire_8KHz_osr256_actions[] = FLUID_AMIC_DUAL_8000_OSR_256; static struct adie_codec_hwsetting_entry fluid_dual_mic_endfire_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = fluid_dual_mic_endfire_8KHz_osr256_actions, .action_sz = ARRAY_SIZE(fluid_dual_mic_endfire_8KHz_osr256_actions), } }; static struct adie_codec_dev_profile fluid_dual_mic_endfire_profile = { .path_type = ADIE_CODEC_TX, .settings = fluid_dual_mic_endfire_settings, .setting_sz = ARRAY_SIZE(fluid_dual_mic_endfire_settings), }; static struct snddev_icodec_data snddev_fluid_dual_mic_endfire_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "handset_dual_mic_endfire_tx", .copp_id = PRIMARY_I2S_TX, .profile = &fluid_dual_mic_endfire_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_amic_sec_power, .pamp_off = msm_snddev_disable_amic_sec_power, }; static struct platform_device msm_fluid_hs_dual_mic_endfire_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_fluid_dual_mic_endfire_data }, }; static struct snddev_icodec_data snddev_fluid_dual_mic_spkr_endfire_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "speaker_dual_mic_endfire_tx", .copp_id = PRIMARY_I2S_TX, .profile = &fluid_dual_mic_endfire_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = 
/*
 * NOTE(review): fragment — tail of snddev_fluid_dual_mic_spkr_endfire_data,
 * whose definition begins before this chunk; these are the analog-mic
 * power on/off hooks for the secondary (sec) mic pair.
 */
msm_snddev_enable_amic_sec_power, .pamp_off = msm_snddev_disable_amic_sec_power, };
/* Platform device wrapping the fluid speaker dual-mic endfire TX data above. */
static struct platform_device msm_fluid_spkr_dual_mic_endfire_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_fluid_dual_mic_spkr_endfire_data }, };
/*
 * Fluid dual-mic broadside capture: adie codec action list -> hwsetting
 * (48 kHz plan, OSR 256) -> TX profile, shared by the two devices below.
 */
static struct adie_codec_action_unit fluid_dual_mic_broadside_8KHz_osr256_actions[] = FLUID_AMIC_DUAL_BROADSIDE_8000_OSR_256;
static struct adie_codec_hwsetting_entry fluid_dual_mic_broadside_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = fluid_dual_mic_broadside_8KHz_osr256_actions, .action_sz = ARRAY_SIZE(fluid_dual_mic_broadside_8KHz_osr256_actions), } };
static struct adie_codec_dev_profile fluid_dual_mic_broadside_profile = { .path_type = ADIE_CODEC_TX, .settings = fluid_dual_mic_broadside_settings, .setting_sz = ARRAY_SIZE(fluid_dual_mic_broadside_settings), };
/* Handset dual-mic broadside TX device (voice capable, 2-channel mics). */
static struct snddev_icodec_data snddev_fluid_dual_mic_broadside_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "handset_dual_mic_broadside_tx", .copp_id = PRIMARY_I2S_TX, .profile = &fluid_dual_mic_broadside_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_amic_power, .pamp_off = msm_snddev_disable_amic_power, };
static struct platform_device msm_fluid_hs_dual_mic_broadside_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_fluid_dual_mic_broadside_data }, };
/* Speaker dual-mic broadside TX: reuses the same broadside profile. */
static struct snddev_icodec_data snddev_fluid_dual_mic_spkr_broadside_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "speaker_dual_mic_broadside_tx", .copp_id = PRIMARY_I2S_TX, .profile = &fluid_dual_mic_broadside_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_amic_power, .pamp_off = msm_snddev_disable_amic_power, };
static struct platform_device msm_fluid_spkr_dual_mic_broadside_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_fluid_dual_mic_spkr_broadside_data }, };
/* HDMI stereo playback (channel_mode 0 here — interpreted by snddev_hdmi). */
static struct snddev_hdmi_data snddev_hdmi_stereo_rx_data = { .capability = SNDDEV_CAP_RX , .name = "hdmi_stereo_rx", .copp_id = HDMI_RX, .channel_mode = 0, .default_sample_rate = 48000, };
static struct platform_device msm_snddev_hdmi_stereo_rx_device = { .name = "snddev_hdmi", .dev = { .platform_data = &snddev_hdmi_stereo_rx_data }, };
/* FM radio routed over MI2S SD3, capture (TX) and playback (RX) directions. */
static struct snddev_mi2s_data snddev_mi2s_fm_tx_data = { .capability = SNDDEV_CAP_TX , .name = "fmradio_stereo_tx", .copp_id = MI2S_TX, .channel_mode = 2, /* stereo */ .sd_lines = MI2S_SD3, /* sd3 */ .sample_rate = 48000, };
static struct platform_device msm_mi2s_fm_tx_device = { .name = "snddev_mi2s", .dev = { .platform_data = &snddev_mi2s_fm_tx_data }, };
static struct snddev_mi2s_data snddev_mi2s_fm_rx_data = { .capability = SNDDEV_CAP_RX , .name = "fmradio_stereo_rx", .copp_id = MI2S_RX, .channel_mode = 2, /* stereo */ .sd_lines = MI2S_SD3, /* sd3 */ .sample_rate = 48000, };
static struct platform_device msm_mi2s_fm_rx_device = { .name = "snddev_mi2s", .id = 1, .dev = { .platform_data = &snddev_mi2s_fm_rx_data }, };
/* Headset mono mic (AMIC2) capture path. */
static struct adie_codec_action_unit iheadset_mic_tx_osr256_actions[] = HEADSET_AMIC2_TX_MONO_PRI_OSR_256;
static struct adie_codec_hwsetting_entry iheadset_mic_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = iheadset_mic_tx_osr256_actions, .action_sz = ARRAY_SIZE(iheadset_mic_tx_osr256_actions), } };
static struct adie_codec_dev_profile iheadset_mic_profile = { .path_type = ADIE_CODEC_TX, .settings = iheadset_mic_tx_settings, .setting_sz = ARRAY_SIZE(iheadset_mic_tx_settings), };
static struct snddev_icodec_data snddev_headset_mic_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "headset_mono_tx", .copp_id = PRIMARY_I2S_TX, .profile = &iheadset_mic_profile, .channel_mode = 1, .default_sample_rate = 48000, };
static struct platform_device msm_headset_mic_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_headset_mic_data }, };
/* Concurrent headset + speaker stereo playback device (platform id 22). */
static struct adie_codec_action_unit ihs_stereo_speaker_stereo_rx_48KHz_osr256_actions[] = SPEAKER_HPH_AB_CPL_PRI_STEREO_48000_OSR_256;
static struct adie_codec_hwsetting_entry ihs_stereo_speaker_stereo_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ihs_stereo_speaker_stereo_rx_48KHz_osr256_actions, .action_sz = ARRAY_SIZE(ihs_stereo_speaker_stereo_rx_48KHz_osr256_actions), } };
static struct adie_codec_dev_profile ihs_stereo_speaker_stereo_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ihs_stereo_speaker_stereo_rx_settings, .setting_sz = ARRAY_SIZE(ihs_stereo_speaker_stereo_rx_settings), };
static struct snddev_icodec_data snddev_ihs_stereo_speaker_stereo_rx_data = { .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), .name = "headset_stereo_speaker_stereo_rx", .copp_id = 0, .profile = &ihs_stereo_speaker_stereo_rx_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_poweramp_on, .pamp_off = msm_snddev_poweramp_off, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, };
static struct platform_device msm_ihs_stereo_speaker_stereo_rx_device = { .name = "snddev_icodec", .id = 22, .dev = { .platform_data = &snddev_ihs_stereo_speaker_stereo_rx_data }, };
/* define the value for BT_SCO */
/*
 * Bluetooth SCO RX/TX over the external-codec PCM interface.  The two
 * platform_devices are deliberately non-static: referenced by board code.
 */
static struct snddev_ecodec_data snddev_bt_sco_earpiece_data = { .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), .name = "bt_sco_rx", .copp_id = PCM_RX, .channel_mode = 1, };
static struct snddev_ecodec_data snddev_bt_sco_mic_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "bt_sco_tx", .copp_id = PCM_TX, .channel_mode = 1, };
struct platform_device msm_bt_sco_earpiece_device = { .name = "msm_snddev_ecodec", .dev = { .platform_data = &snddev_bt_sco_earpiece_data }, };
struct platform_device msm_bt_sco_mic_device = { .name = "msm_snddev_ecodec", .dev = { .platform_data = &snddev_bt_sco_mic_data }, };
/* TTY headset mono TX/RX — voice devices additionally flagged SNDDEV_CAP_TTY. */
static struct adie_codec_action_unit itty_mono_tx_actions[] = TTY_HEADSET_MONO_TX_OSR_256;
static struct adie_codec_hwsetting_entry itty_mono_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = itty_mono_tx_actions, .action_sz = ARRAY_SIZE(itty_mono_tx_actions), }, };
static struct adie_codec_dev_profile itty_mono_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = itty_mono_tx_settings, .setting_sz = ARRAY_SIZE(itty_mono_tx_settings), };
static struct snddev_icodec_data snddev_itty_mono_tx_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE | SNDDEV_CAP_TTY), .name = "tty_headset_mono_tx", .copp_id = PRIMARY_I2S_TX, .profile = &itty_mono_tx_profile, .channel_mode = 1, .default_sample_rate = 48000, };
static struct platform_device msm_itty_mono_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_itty_mono_tx_data }, };
static struct adie_codec_action_unit itty_mono_rx_actions[] = TTY_HEADSET_MONO_RX_8000_OSR_256;
static struct adie_codec_hwsetting_entry itty_mono_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = itty_mono_rx_actions, .action_sz = ARRAY_SIZE(itty_mono_rx_actions), }, };
static struct adie_codec_dev_profile itty_mono_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = itty_mono_rx_settings, .setting_sz = ARRAY_SIZE(itty_mono_rx_settings), };
static struct snddev_icodec_data snddev_itty_mono_rx_data = { .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE | SNDDEV_CAP_TTY), .name = "tty_headset_mono_rx", .copp_id = PRIMARY_I2S_RX, .profile = &itty_mono_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, };
static struct platform_device msm_itty_mono_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_itty_mono_rx_data }, };
/* Line-in primary stereo capture. */
static struct adie_codec_action_unit linein_pri_actions[] = LINEIN_PRI_STEREO_OSR_256;
static struct adie_codec_hwsetting_entry linein_pri_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = linein_pri_actions, .action_sz = ARRAY_SIZE(linein_pri_actions), }, };
static struct adie_codec_dev_profile linein_pri_profile = { .path_type = ADIE_CODEC_TX, .settings = linein_pri_settings, .setting_sz = ARRAY_SIZE(linein_pri_settings), };
static struct snddev_icodec_data snddev_linein_pri_data = { .capability = SNDDEV_CAP_TX, .name = "linein_pri_tx", .copp_id = PRIMARY_I2S_TX, .profile = &linein_pri_profile, .channel_mode = 2, .default_sample_rate = 48000, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, };
static struct platform_device msm_linein_pri_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_linein_pri_data }, };
/*
 * AUXPGA analog loopback paths (path_type ADIE_CODEC_LB) to line-out
 * speaker and to headset; volume control is analog (SNDDEV_DEV_VOL_ANALOG).
 */
static struct adie_codec_action_unit auxpga_lp_lo_actions[] = LB_AUXPGA_LO_STEREO;
static struct adie_codec_hwsetting_entry auxpga_lp_lo_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = auxpga_lp_lo_actions, .action_sz = ARRAY_SIZE(auxpga_lp_lo_actions), }, };
static struct adie_codec_dev_profile auxpga_lp_lo_profile = { .path_type = ADIE_CODEC_LB, .settings = auxpga_lp_lo_settings, .setting_sz = ARRAY_SIZE(auxpga_lp_lo_settings), };
static struct snddev_icodec_data snddev_auxpga_lp_lo_data = { .capability = SNDDEV_CAP_LB, .name = "speaker_stereo_lb", .copp_id = PRIMARY_I2S_RX, .profile = &auxpga_lp_lo_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_poweramp_on, .pamp_off = msm_snddev_poweramp_off, .dev_vol_type = SNDDEV_DEV_VOL_ANALOG, };
static struct platform_device msm_auxpga_lp_lo_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_auxpga_lp_lo_data }, };
static struct adie_codec_action_unit auxpga_lp_hs_actions[] = LB_AUXPGA_HPH_AB_CPLS_STEREO;
static struct adie_codec_hwsetting_entry auxpga_lp_hs_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = auxpga_lp_hs_actions, .action_sz = ARRAY_SIZE(auxpga_lp_hs_actions), }, };
static struct adie_codec_dev_profile auxpga_lp_hs_profile = { .path_type = ADIE_CODEC_LB, .settings = auxpga_lp_hs_settings, .setting_sz = ARRAY_SIZE(auxpga_lp_hs_settings), };
static struct snddev_icodec_data snddev_auxpga_lp_hs_data = { .capability = SNDDEV_CAP_LB, .name = "hs_stereo_lb", .copp_id = PRIMARY_I2S_RX, .profile = &auxpga_lp_hs_profile, .channel_mode = 2, .default_sample_rate = 48000, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, .dev_vol_type = SNDDEV_DEV_VOL_ANALOG, };
static struct platform_device msm_auxpga_lp_hs_device = { .name = "snddev_icodec", .dev = { .platform_data = &snddev_auxpga_lp_hs_data }, };
/*
 * Factory-test-mode (FTM) audio devices, compiled in only when
 * CONFIG_MSM8X60_FTM_AUDIO_DEVICES is set.  All use digital volume.
 */
#ifdef CONFIG_MSM8X60_FTM_AUDIO_DEVICES
/* FTM headset mono playback (single-ended). */
static struct adie_codec_action_unit ftm_headset_mono_rx_actions[] = HPH_PRI_AB_CPLS_MONO;
static struct adie_codec_hwsetting_entry ftm_headset_mono_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_headset_mono_rx_actions, .action_sz = ARRAY_SIZE(ftm_headset_mono_rx_actions), }, };
static struct adie_codec_dev_profile ftm_headset_mono_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_headset_mono_rx_settings, .setting_sz = ARRAY_SIZE(ftm_headset_mono_rx_settings), };
static struct snddev_icodec_data ftm_headset_mono_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_headset_mono_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_headset_mono_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_headset_mono_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_headset_mono_rx_data}, };
/* FTM headset mono differential playback. */
static struct adie_codec_action_unit ftm_headset_mono_diff_rx_actions[] = HEADSET_MONO_DIFF_RX;
static struct adie_codec_hwsetting_entry ftm_headset_mono_diff_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_headset_mono_diff_rx_actions, .action_sz = ARRAY_SIZE(ftm_headset_mono_diff_rx_actions), }, };
static struct adie_codec_dev_profile ftm_headset_mono_diff_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_headset_mono_diff_rx_settings, .setting_sz = ARRAY_SIZE(ftm_headset_mono_diff_rx_settings), };
static struct snddev_icodec_data ftm_headset_mono_diff_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_headset_mono_diff_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_headset_mono_diff_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_headset_mono_diff_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_headset_mono_diff_rx_data}, };
/* FTM mono speaker playback (uses the primary stereo speaker action table). */
static struct adie_codec_action_unit ftm_spkr_mono_rx_actions[] = SPEAKER_PRI_STEREO_48000_OSR_256;
static struct adie_codec_hwsetting_entry ftm_spkr_mono_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_spkr_mono_rx_actions, .action_sz = ARRAY_SIZE(ftm_spkr_mono_rx_actions), }, };
static struct adie_codec_dev_profile ftm_spkr_mono_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_spkr_mono_rx_settings, .setting_sz = ARRAY_SIZE(ftm_spkr_mono_rx_settings), };
static struct snddev_icodec_data ftm_spkr_mono_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_spkr_mono_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_spkr_mono_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_poweramp_on, .pamp_off = msm_snddev_poweramp_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_spkr_mono_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_spkr_mono_rx_data}, };
/* FTM left / right speaker playback. */
static struct adie_codec_action_unit ftm_spkr_l_rx_actions[] = FTM_SPKR_L_RX;
static struct adie_codec_hwsetting_entry ftm_spkr_l_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_spkr_l_rx_actions, .action_sz = ARRAY_SIZE(ftm_spkr_l_rx_actions), }, };
static struct adie_codec_dev_profile ftm_spkr_l_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_spkr_l_rx_settings, .setting_sz = ARRAY_SIZE(ftm_spkr_l_rx_settings), };
static struct snddev_icodec_data ftm_spkr_l_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_spkr_l_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_spkr_l_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_poweramp_on, .pamp_off = msm_snddev_poweramp_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_spkr_l_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_spkr_l_rx_data}, };
static struct adie_codec_action_unit ftm_spkr_r_rx_actions[] = SPKR_R_RX;
static struct adie_codec_hwsetting_entry ftm_spkr_r_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_spkr_r_rx_actions, .action_sz = ARRAY_SIZE(ftm_spkr_r_rx_actions), }, };
static struct adie_codec_dev_profile ftm_spkr_r_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_spkr_r_rx_settings, .setting_sz = ARRAY_SIZE(ftm_spkr_r_rx_settings), };
static struct snddev_icodec_data ftm_spkr_r_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_spkr_r_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_spkr_r_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_poweramp_on, .pamp_off = msm_snddev_poweramp_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_spkr_r_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_spkr_r_rx_data}, };
/* FTM mono differential speaker playback. */
static struct adie_codec_action_unit ftm_spkr_mono_diff_rx_actions[] = SPKR_MONO_DIFF_RX;
static struct adie_codec_hwsetting_entry ftm_spkr_mono_diff_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_spkr_mono_diff_rx_actions, .action_sz = ARRAY_SIZE(ftm_spkr_mono_diff_rx_actions), }, };
static struct adie_codec_dev_profile ftm_spkr_mono_diff_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_spkr_mono_diff_rx_settings, .setting_sz = ARRAY_SIZE(ftm_spkr_mono_diff_rx_settings), };
static struct snddev_icodec_data ftm_spkr_mono_diff_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_spkr_mono_diff_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_spkr_mono_diff_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_poweramp_on, .pamp_off = msm_snddev_poweramp_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_spkr_mono_diff_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_spkr_mono_diff_rx_data}, };
/* FTM headset left-only / right-only playback. */
static struct adie_codec_action_unit ftm_headset_mono_l_rx_actions[] = HPH_PRI_AB_CPLS_MONO_LEFT;
static struct adie_codec_hwsetting_entry ftm_headset_mono_l_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_headset_mono_l_rx_actions, .action_sz = ARRAY_SIZE(ftm_headset_mono_l_rx_actions), }, };
static struct adie_codec_dev_profile ftm_headset_mono_l_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_headset_mono_l_rx_settings, .setting_sz = ARRAY_SIZE(ftm_headset_mono_l_rx_settings), };
static struct snddev_icodec_data ftm_headset_mono_l_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_headset_mono_l_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_headset_mono_l_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_headset_mono_l_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_headset_mono_l_rx_data}, };
static struct adie_codec_action_unit ftm_headset_mono_r_rx_actions[] = HPH_PRI_AB_CPLS_MONO_RIGHT;
static struct adie_codec_hwsetting_entry ftm_headset_mono_r_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_headset_mono_r_rx_actions, .action_sz = ARRAY_SIZE(ftm_headset_mono_r_rx_actions), }, };
static struct adie_codec_dev_profile ftm_headset_mono_r_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_headset_mono_r_rx_settings, .setting_sz = ARRAY_SIZE(ftm_headset_mono_r_rx_settings), };
static struct snddev_icodec_data ftm_headset_mono_r_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_headset_mono_r_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_headset_mono_r_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_headset_mono_r_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_headset_mono_r_rx_data}, };
/* FTM line-in left / right mono capture. */
static struct adie_codec_action_unit ftm_linein_l_tx_actions[] = LINEIN_MONO_L_TX;
static struct adie_codec_hwsetting_entry ftm_linein_l_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_linein_l_tx_actions, .action_sz = ARRAY_SIZE(ftm_linein_l_tx_actions), }, };
static struct adie_codec_dev_profile ftm_linein_l_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_linein_l_tx_settings, .setting_sz = ARRAY_SIZE(ftm_linein_l_tx_settings), };
static struct snddev_icodec_data ftm_linein_l_tx_data = { .capability = SNDDEV_CAP_TX, .name = "ftm_linein_l_tx", .copp_id = PRIMARY_I2S_TX, .profile = &ftm_linein_l_tx_profile, .channel_mode = 1, .default_sample_rate = 48000, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_linein_l_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_linein_l_tx_data }, };
static struct adie_codec_action_unit ftm_linein_r_tx_actions[] = LINEIN_MONO_R_TX;
static struct adie_codec_hwsetting_entry ftm_linein_r_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_linein_r_tx_actions, .action_sz = ARRAY_SIZE(ftm_linein_r_tx_actions), }, };
static struct adie_codec_dev_profile ftm_linein_r_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_linein_r_tx_settings, .setting_sz = ARRAY_SIZE(ftm_linein_r_tx_settings), };
static struct snddev_icodec_data ftm_linein_r_tx_data = { .capability = SNDDEV_CAP_TX, .name = "ftm_linein_r_tx", .copp_id = PRIMARY_I2S_TX, .profile = &ftm_linein_r_tx_profile, .channel_mode = 1, .default_sample_rate = 48000, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_linein_r_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_linein_r_tx_data }, };
/* FTM auxiliary stereo output. */
static struct adie_codec_action_unit ftm_aux_out_rx_actions[] = AUX_OUT_RX;
static struct adie_codec_hwsetting_entry ftm_aux_out_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_aux_out_rx_actions, .action_sz = ARRAY_SIZE(ftm_aux_out_rx_actions), }, };
static struct adie_codec_dev_profile ftm_aux_out_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_aux_out_rx_settings, .setting_sz = ARRAY_SIZE(ftm_aux_out_rx_settings), };
static struct snddev_icodec_data ftm_aux_out_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_aux_out_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_aux_out_rx_profile, .channel_mode = 2, .default_sample_rate = 48000, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_aux_out_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_aux_out_rx_data}, };
/* FTM digital mic 1, left channel capture (dmic supply toggled on/off). */
static struct adie_codec_action_unit ftm_dmic1_left_tx_actions[] = DMIC1_LEFT_TX;
static struct adie_codec_hwsetting_entry ftm_dmic1_left_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_dmic1_left_tx_actions, .action_sz = ARRAY_SIZE(ftm_dmic1_left_tx_actions), }, };
static struct adie_codec_dev_profile ftm_dmic1_left_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_dmic1_left_tx_settings, .setting_sz = ARRAY_SIZE(ftm_dmic1_left_tx_settings), };
static struct snddev_icodec_data ftm_dmic1_left_tx_data = { .capability = SNDDEV_CAP_TX, .name = "ftm_dmic1_left_tx", .copp_id = PRIMARY_I2S_TX, .profile = &ftm_dmic1_left_tx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_power, .pamp_off = msm_snddev_disable_dmic_power, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_dmic1_left_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_dmic1_left_tx_data}, };
/* FTM digital mic 1, right channel capture. */
static struct adie_codec_action_unit ftm_dmic1_right_tx_actions[] = DMIC1_RIGHT_TX;
static struct adie_codec_hwsetting_entry ftm_dmic1_right_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_dmic1_right_tx_actions, .action_sz = ARRAY_SIZE(ftm_dmic1_right_tx_actions), }, };
static struct adie_codec_dev_profile ftm_dmic1_right_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_dmic1_right_tx_settings, .setting_sz = ARRAY_SIZE(ftm_dmic1_right_tx_settings), };
static struct snddev_icodec_data ftm_dmic1_right_tx_data = { .capability = SNDDEV_CAP_TX, .name = "ftm_dmic1_right_tx", .copp_id = PRIMARY_I2S_TX, .profile = &ftm_dmic1_right_tx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_power, .pamp_off = msm_snddev_disable_dmic_power, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_dmic1_right_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_dmic1_right_tx_data}, };
/* FTM digital mic 1, both channels (stereo capture). */
static struct adie_codec_action_unit ftm_dmic1_l_and_r_tx_actions[] = DMIC1_LEFT_AND_RIGHT_TX;
static struct adie_codec_hwsetting_entry ftm_dmic1_l_and_r_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_dmic1_l_and_r_tx_actions, .action_sz = ARRAY_SIZE(ftm_dmic1_l_and_r_tx_actions), }, };
static struct adie_codec_dev_profile ftm_dmic1_l_and_r_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_dmic1_l_and_r_tx_settings, .setting_sz = ARRAY_SIZE(ftm_dmic1_l_and_r_tx_settings), };
static struct snddev_icodec_data ftm_dmic1_l_and_r_tx_data = { .capability = SNDDEV_CAP_TX, .name = "ftm_dmic1_l_and_r_tx", .copp_id = PRIMARY_I2S_TX, .profile = &ftm_dmic1_l_and_r_tx_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_power, .pamp_off = msm_snddev_disable_dmic_power, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_dmic1_l_and_r_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_dmic1_l_and_r_tx_data}, };
/* FTM digital mic 2: left, right, and both-channel capture devices. */
static struct adie_codec_action_unit ftm_dmic2_left_tx_actions[] = DMIC2_LEFT_TX;
static struct adie_codec_hwsetting_entry ftm_dmic2_left_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_dmic2_left_tx_actions, .action_sz = ARRAY_SIZE(ftm_dmic2_left_tx_actions), }, };
static struct adie_codec_dev_profile ftm_dmic2_left_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_dmic2_left_tx_settings, .setting_sz = ARRAY_SIZE(ftm_dmic2_left_tx_settings), };
static struct snddev_icodec_data ftm_dmic2_left_tx_data = { .capability = SNDDEV_CAP_TX, .name = "ftm_dmic2_left_tx", .copp_id = PRIMARY_I2S_TX, .profile = &ftm_dmic2_left_tx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_power, .pamp_off = msm_snddev_disable_dmic_power, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_dmic2_left_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_dmic2_left_tx_data }, };
static struct adie_codec_action_unit ftm_dmic2_right_tx_actions[] = DMIC2_RIGHT_TX;
static struct adie_codec_hwsetting_entry ftm_dmic2_right_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_dmic2_right_tx_actions, .action_sz = ARRAY_SIZE(ftm_dmic2_right_tx_actions), }, };
static struct adie_codec_dev_profile ftm_dmic2_right_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_dmic2_right_tx_settings, .setting_sz = ARRAY_SIZE(ftm_dmic2_right_tx_settings), };
static struct snddev_icodec_data ftm_dmic2_right_tx_data = { .capability = SNDDEV_CAP_TX, .name = "ftm_dmic2_right_tx", .copp_id = PRIMARY_I2S_TX, .profile = &ftm_dmic2_right_tx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_power, .pamp_off = msm_snddev_disable_dmic_power, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_dmic2_right_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_dmic2_right_tx_data }, };
static struct adie_codec_action_unit ftm_dmic2_l_and_r_tx_actions[] = DMIC2_LEFT_AND_RIGHT_TX;
static struct adie_codec_hwsetting_entry ftm_dmic2_l_and_r_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_dmic2_l_and_r_tx_actions, .action_sz = ARRAY_SIZE(ftm_dmic2_l_and_r_tx_actions), }, };
static struct adie_codec_dev_profile ftm_dmic2_l_and_r_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_dmic2_l_and_r_tx_settings, .setting_sz = ARRAY_SIZE(ftm_dmic2_l_and_r_tx_settings), };
static struct snddev_icodec_data ftm_dmic2_l_and_r_tx_data = { .capability = SNDDEV_CAP_TX, .name = "ftm_dmic2_l_and_r_tx", .copp_id = PRIMARY_I2S_TX, .profile = &ftm_dmic2_l_and_r_tx_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_dmic_power, .pamp_off = msm_snddev_disable_dmic_power, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_dmic2_l_and_r_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_dmic2_l_and_r_tx_data}, };
/* FTM handset mic 1 + aux-in combined capture (no pamp hooks set). */
static struct adie_codec_action_unit ftm_handset_mic1_aux_in_actions[] = HANDSET_MIC1_AUX_IN;
static struct adie_codec_hwsetting_entry ftm_handset_mic1_aux_in_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_handset_mic1_aux_in_actions, .action_sz = ARRAY_SIZE(ftm_handset_mic1_aux_in_actions), }, };
static struct adie_codec_dev_profile ftm_handset_mic1_aux_in_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_handset_mic1_aux_in_settings, .setting_sz = ARRAY_SIZE(ftm_handset_mic1_aux_in_settings), };
static struct snddev_icodec_data ftm_handset_mic1_aux_in_data = { .capability = SNDDEV_CAP_TX, .name = "ftm_handset_mic1_aux_in", .copp_id = PRIMARY_I2S_TX, .profile = &ftm_handset_mic1_aux_in_profile, .channel_mode = 2, .default_sample_rate = 48000, /* Assumption is that inputs are not tied to analog mic, so * no need to enable mic bias. */ .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_handset_mic1_aux_in_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_handset_mic1_aux_in_data}, };
/*
 * MI2S playback on individual serial-data lines SD0..SD2.
 * NOTE(review): these three platform_devices share .name "snddev_mi2s" with
 * no explicit .id — verify they are never registered together with the FM
 * MI2S devices above, or ids would need to be made distinct.
 */
static struct snddev_mi2s_data snddev_mi2s_sd0_rx_data = { .capability = SNDDEV_CAP_RX , .name = "mi2s_sd0_rx", .copp_id = MI2S_RX, .channel_mode = 2, /* stereo */ .sd_lines = MI2S_SD0, /* sd0 */ .sample_rate = 48000, };
static struct platform_device ftm_mi2s_sd0_rx_device = { .name = "snddev_mi2s", .dev = { .platform_data = &snddev_mi2s_sd0_rx_data }, };
static struct snddev_mi2s_data snddev_mi2s_sd1_rx_data = { .capability = SNDDEV_CAP_RX , .name = "mi2s_sd1_rx", .copp_id = MI2S_RX, .channel_mode = 2, /* stereo */ .sd_lines = MI2S_SD1, /* sd1 */ .sample_rate = 48000, };
static struct platform_device ftm_mi2s_sd1_rx_device = { .name = "snddev_mi2s", .dev = { .platform_data = &snddev_mi2s_sd1_rx_data }, };
static struct snddev_mi2s_data snddev_mi2s_sd2_rx_data = { .capability = SNDDEV_CAP_RX , .name = "mi2s_sd2_rx", .copp_id = MI2S_RX, .channel_mode = 2, /* stereo */ .sd_lines = MI2S_SD2, /* sd2 */ .sample_rate = 48000, };
static struct platform_device ftm_mi2s_sd2_rx_device = { .name = "snddev_mi2s", .dev = { .platform_data = &snddev_mi2s_sd2_rx_data }, };
/* earpiece */
/* FTM earpiece loopback playback path. */
static struct adie_codec_action_unit ftm_handset_adie_lp_rx_actions[] = EAR_PRI_MONO_LB;
static struct adie_codec_hwsetting_entry ftm_handset_adie_lp_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_handset_adie_lp_rx_actions, .action_sz = ARRAY_SIZE(ftm_handset_adie_lp_rx_actions), } };
static struct adie_codec_dev_profile ftm_handset_adie_lp_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_handset_adie_lp_rx_settings, .setting_sz = ARRAY_SIZE(ftm_handset_adie_lp_rx_settings), };
static struct snddev_icodec_data ftm_handset_adie_lp_rx_data = { .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), .name = "ftm_handset_adie_lp_rx", .copp_id = 0, .profile = &ftm_handset_adie_lp_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_handset_adie_lp_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_handset_adie_lp_rx_data }, };
/* FTM headset left / right adie-loopback playback. */
static struct adie_codec_action_unit ftm_headset_l_adie_lp_rx_actions[] = FTM_HPH_PRI_AB_CPLS_MONO_LB_LEFT;
static struct adie_codec_hwsetting_entry ftm_headset_l_adie_lp_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_headset_l_adie_lp_rx_actions, .action_sz = ARRAY_SIZE(ftm_headset_l_adie_lp_rx_actions), }, };
static struct adie_codec_dev_profile ftm_headset_l_adie_lp_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_headset_l_adie_lp_rx_settings, .setting_sz = ARRAY_SIZE(ftm_headset_l_adie_lp_rx_settings), };
static struct snddev_icodec_data ftm_headset_l_adie_lp_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_headset_l_adie_lp_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_headset_l_adie_lp_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_headset_l_adie_lp_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_headset_l_adie_lp_rx_data }, };
static struct adie_codec_action_unit ftm_headset_r_adie_lp_rx_actions[] = FTM_HPH_PRI_AB_CPLS_MONO_LB_RIGHT;
static struct adie_codec_hwsetting_entry ftm_headset_r_adie_lp_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_headset_r_adie_lp_rx_actions, .action_sz = ARRAY_SIZE(ftm_headset_r_adie_lp_rx_actions), }, };
static struct adie_codec_dev_profile ftm_headset_r_adie_lp_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_headset_r_adie_lp_rx_settings, .setting_sz = ARRAY_SIZE(ftm_headset_r_adie_lp_rx_settings), };
static struct snddev_icodec_data ftm_headset_r_adie_lp_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_headset_r_adie_lp_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_headset_r_adie_lp_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .voltage_on = msm_snddev_voltage_on, .voltage_off = msm_snddev_voltage_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_headset_r_adie_lp_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_headset_r_adie_lp_rx_data }, };
/* FTM speaker left / right / combined adie-loopback playback. */
static struct adie_codec_action_unit ftm_spkr_l_rx_lp_actions[] = FTM_SPKR_L_RX;
static struct adie_codec_hwsetting_entry ftm_spkr_l_rx_lp_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_spkr_l_rx_lp_actions, .action_sz = ARRAY_SIZE(ftm_spkr_l_rx_lp_actions), }, };
static struct adie_codec_dev_profile ftm_spkr_l_rx_lp_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_spkr_l_rx_lp_settings, .setting_sz = ARRAY_SIZE(ftm_spkr_l_rx_lp_settings), };
static struct snddev_icodec_data ftm_spkr_l_rx_lp_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_spk_l_adie_lp_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_spkr_l_rx_lp_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_poweramp_on, .pamp_off = msm_snddev_poweramp_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_spk_l_adie_lp_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_spkr_l_rx_lp_data}, };
static struct adie_codec_action_unit ftm_spkr_r_adie_lp_rx_actions[] = SPKR_R_RX;
static struct adie_codec_hwsetting_entry ftm_spkr_r_adie_lp_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_spkr_r_adie_lp_rx_actions, .action_sz = ARRAY_SIZE(ftm_spkr_r_adie_lp_rx_actions), }, };
static struct adie_codec_dev_profile ftm_spkr_r_adie_lp_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_spkr_r_adie_lp_rx_settings, .setting_sz = ARRAY_SIZE(ftm_spkr_r_adie_lp_rx_settings), };
static struct snddev_icodec_data ftm_spkr_r_adie_lp_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_spk_r_adie_lp_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_spkr_r_adie_lp_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_poweramp_on, .pamp_off = msm_snddev_poweramp_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_spk_r_adie_lp_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_spkr_r_adie_lp_rx_data}, };
static struct adie_codec_action_unit ftm_spkr_adie_lp_rx_actions[] = FTM_SPKR_RX_LB;
static struct adie_codec_hwsetting_entry ftm_spkr_adie_lp_rx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_spkr_adie_lp_rx_actions, .action_sz = ARRAY_SIZE(ftm_spkr_adie_lp_rx_actions), }, };
static struct adie_codec_dev_profile ftm_spkr_adie_lp_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = ftm_spkr_adie_lp_rx_settings, .setting_sz = ARRAY_SIZE(ftm_spkr_adie_lp_rx_settings), };
static struct snddev_icodec_data ftm_spkr_adie_lp_rx_data = { .capability = SNDDEV_CAP_RX, .name = "ftm_spk_adie_lp_rx", .copp_id = PRIMARY_I2S_RX, .profile = &ftm_spkr_adie_lp_rx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_poweramp_on, .pamp_off = msm_snddev_poweramp_off, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_spk_adie_lp_rx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_spkr_adie_lp_rx_data}, };
/* FTM dual handset mic loopback capture (copp_id 1). */
static struct adie_codec_action_unit ftm_handset_dual_tx_lp_actions[] = FTM_AMIC_DUAL_HANDSET_TX_LB;
static struct adie_codec_hwsetting_entry ftm_handset_dual_tx_lp_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_handset_dual_tx_lp_actions, .action_sz = ARRAY_SIZE(ftm_handset_dual_tx_lp_actions), } };
static struct adie_codec_dev_profile ftm_handset_dual_tx_lp_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_handset_dual_tx_lp_settings, .setting_sz = ARRAY_SIZE(ftm_handset_dual_tx_lp_settings), };
static struct snddev_icodec_data ftm_handset_dual_tx_lp_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "handset_mic1_handset_mic2", .copp_id = 1, .profile = &ftm_handset_dual_tx_lp_profile, .channel_mode = 2, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_amic_power, .pamp_off = msm_snddev_disable_amic_power, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_handset_dual_tx_lp_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_handset_dual_tx_lp_data }, };
/* FTM handset mic loopback capture (copp_id 1, analog mic power hooks). */
static struct adie_codec_action_unit ftm_handset_mic_adie_lp_tx_actions[] = FTM_HANDSET_LB_TX;
static struct adie_codec_hwsetting_entry ftm_handset_mic_adie_lp_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_handset_mic_adie_lp_tx_actions, .action_sz = ARRAY_SIZE(ftm_handset_mic_adie_lp_tx_actions), } };
static struct adie_codec_dev_profile ftm_handset_mic_adie_lp_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_handset_mic_adie_lp_tx_settings, .setting_sz = ARRAY_SIZE(ftm_handset_mic_adie_lp_tx_settings), };
static struct snddev_icodec_data ftm_handset_mic_adie_lp_tx_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "ftm_handset_mic_adie_lp_tx", .copp_id = 1, .profile = &ftm_handset_mic_adie_lp_tx_profile, .channel_mode = 1, .default_sample_rate = 48000, .pamp_on = msm_snddev_enable_amic_power, .pamp_off = msm_snddev_disable_amic_power, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_handset_mic_adie_lp_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_handset_mic_adie_lp_tx_data }, };
/* FTM headset mic loopback capture (no mic power hooks). */
static struct adie_codec_action_unit ftm_headset_mic_adie_lp_tx_actions[] = FTM_HEADSET_LB_TX;
static struct adie_codec_hwsetting_entry ftm_headset_mic_adie_lp_tx_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ftm_headset_mic_adie_lp_tx_actions, .action_sz = ARRAY_SIZE(ftm_headset_mic_adie_lp_tx_actions), } };
static struct adie_codec_dev_profile ftm_headset_mic_adie_lp_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = ftm_headset_mic_adie_lp_tx_settings, .setting_sz = ARRAY_SIZE(ftm_headset_mic_adie_lp_tx_settings), };
static struct snddev_icodec_data ftm_headset_mic_adie_lp_tx_data = { .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), .name = "ftm_headset_mic_adie_lp_tx", .copp_id = PRIMARY_I2S_TX, .profile = &ftm_headset_mic_adie_lp_tx_profile, .channel_mode = 1, .default_sample_rate = 48000, .dev_vol_type = SNDDEV_DEV_VOL_DIGITAL, };
static struct platform_device ftm_headset_mic_adie_lp_tx_device = { .name = "snddev_icodec", .dev = { .platform_data = &ftm_headset_mic_adie_lp_tx_data }, };
#endif /* CONFIG_MSM8X60_FTM_AUDIO_DEVICES */
/* Virtual device feeding voice uplink audio back as an RX stream. */
static struct snddev_virtual_data snddev_uplink_rx_data = { .capability = SNDDEV_CAP_RX, .name = "uplink_rx", .copp_id = VOICE_PLAYBACK_TX, };
static struct platform_device msm_uplink_rx_device = { .name = "snddev_virtual", .dev = { .platform_data = &snddev_uplink_rx_data }, };
/* HDMI compressed (non-linear PCM) pass-through, handled on apps processor. */
static struct snddev_hdmi_data snddev_hdmi_non_linear_pcm_rx_data = { .capability = SNDDEV_CAP_RX , .name = "hdmi_pass_through", .default_sample_rate = 48000, .on_apps = 1, };
static struct platform_device msm_snddev_hdmi_non_linear_pcm_rx_device = { .name = "snddev_hdmi", .dev = { .platform_data = &snddev_hdmi_non_linear_pcm_rx_data }, };
/* Debug-only legacy headset settings (class D and class AB amp variants). */
#ifdef CONFIG_DEBUG_FS
static struct adie_codec_action_unit ihs_stereo_rx_class_d_legacy_48KHz_osr256_actions[] = HPH_PRI_D_LEG_STEREO;
static struct adie_codec_hwsetting_entry ihs_stereo_rx_class_d_legacy_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ihs_stereo_rx_class_d_legacy_48KHz_osr256_actions, .action_sz = ARRAY_SIZE (ihs_stereo_rx_class_d_legacy_48KHz_osr256_actions), } };
static struct adie_codec_action_unit ihs_stereo_rx_class_ab_legacy_48KHz_osr256_actions[] = HPH_PRI_AB_LEG_STEREO;
/*
 * NOTE(review): fragment — ihs_stereo_rx_class_ab_legacy_settings is
 * truncated here; its initializer continues past the end of this chunk.
 */
static struct adie_codec_hwsetting_entry ihs_stereo_rx_class_ab_legacy_settings[] = { { .freq_plan = 48000, .osr = 256, .actions = ihs_stereo_rx_class_ab_legacy_48KHz_osr256_actions, .action_sz = ARRAY_SIZE
(ihs_stereo_rx_class_ab_legacy_48KHz_osr256_actions), } }; static void snddev_hsed_config_modify_setting(int type) { struct platform_device *device; struct snddev_icodec_data *icodec_data; device = &msm_headset_stereo_device; icodec_data = (struct snddev_icodec_data *)device->dev.platform_data; if (icodec_data) { if (type == 1) { icodec_data->voltage_on = NULL; icodec_data->voltage_off = NULL; icodec_data->profile->settings = ihs_stereo_rx_class_d_legacy_settings; icodec_data->profile->setting_sz = ARRAY_SIZE(ihs_stereo_rx_class_d_legacy_settings); } else if (type == 2) { icodec_data->voltage_on = NULL; icodec_data->voltage_off = NULL; icodec_data->profile->settings = ihs_stereo_rx_class_ab_legacy_settings; icodec_data->profile->setting_sz = ARRAY_SIZE(ihs_stereo_rx_class_ab_legacy_settings); } } } static void snddev_hsed_config_restore_setting(void) { struct platform_device *device; struct snddev_icodec_data *icodec_data; device = &msm_headset_stereo_device; icodec_data = (struct snddev_icodec_data *)device->dev.platform_data; if (icodec_data) { icodec_data->voltage_on = msm_snddev_voltage_on; icodec_data->voltage_off = msm_snddev_voltage_off; icodec_data->profile->settings = headset_ab_cpls_settings; icodec_data->profile->setting_sz = ARRAY_SIZE(headset_ab_cpls_settings); } } static ssize_t snddev_hsed_config_debug_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char *lb_str = filp->private_data; char cmd; if (get_user(cmd, ubuf)) return -EFAULT; if (!strcmp(lb_str, "msm_hsed_config")) { switch (cmd) { case '0': snddev_hsed_config_restore_setting(); break; case '1': snddev_hsed_config_modify_setting(1); break; case '2': snddev_hsed_config_modify_setting(2); break; default: break; } } return cnt; } static int snddev_hsed_config_debug_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static const struct file_operations snddev_hsed_config_debug_fops = { .open = 
snddev_hsed_config_debug_open, .write = snddev_hsed_config_debug_write }; #endif static struct platform_device *snd_devices_ffa[] __initdata = { &msm_iearpiece_ffa_device, &msm_imic_ffa_device, &msm_ispkr_stereo_device, &msm_snddev_hdmi_stereo_rx_device, &msm_headset_mic_device, &msm_ispkr_mic_device, &msm_bt_sco_earpiece_device, &msm_bt_sco_mic_device, &msm_headset_stereo_device, &msm_itty_mono_tx_device, &msm_itty_mono_rx_device, &msm_mi2s_fm_tx_device, &msm_mi2s_fm_rx_device, &msm_hs_dual_mic_endfire_device, &msm_spkr_dual_mic_endfire_device, &msm_hs_dual_mic_broadside_device, &msm_spkr_dual_mic_broadside_device, &msm_ihs_stereo_speaker_stereo_rx_device, &msm_anc_headset_device, &msm_auxpga_lp_hs_device, &msm_auxpga_lp_lo_device, &msm_linein_pri_device, &msm_icodec_gpio_device, &msm_snddev_hdmi_non_linear_pcm_rx_device, }; static struct platform_device *snd_devices_surf[] __initdata = { &msm_iearpiece_device, &msm_imic_device, &msm_ispkr_stereo_device, &msm_snddev_hdmi_stereo_rx_device, &msm_headset_mic_device, &msm_ispkr_mic_device, &msm_bt_sco_earpiece_device, &msm_bt_sco_mic_device, &msm_headset_stereo_device, &msm_itty_mono_tx_device, &msm_itty_mono_rx_device, &msm_mi2s_fm_tx_device, &msm_mi2s_fm_rx_device, &msm_ihs_stereo_speaker_stereo_rx_device, &msm_auxpga_lp_hs_device, &msm_auxpga_lp_lo_device, &msm_linein_pri_device, &msm_icodec_gpio_device, &msm_snddev_hdmi_non_linear_pcm_rx_device, }; static struct platform_device *snd_devices_fluid[] __initdata = { &msm_iearpiece_device, &msm_imic_device, &msm_ispkr_stereo_device, &msm_snddev_hdmi_stereo_rx_device, &msm_headset_stereo_device, &msm_headset_mic_device, &msm_fluid_ispkr_mic_device, &msm_bt_sco_earpiece_device, &msm_bt_sco_mic_device, &msm_mi2s_fm_tx_device, &msm_mi2s_fm_rx_device, &msm_fluid_hs_dual_mic_endfire_device, &msm_fluid_spkr_dual_mic_endfire_device, &msm_fluid_hs_dual_mic_broadside_device, &msm_fluid_spkr_dual_mic_broadside_device, &msm_anc_headset_device, &msm_auxpga_lp_hs_device, 
&msm_auxpga_lp_lo_device, &msm_icodec_gpio_device, &msm_snddev_hdmi_non_linear_pcm_rx_device, }; static struct platform_device *snd_devices_common[] __initdata = { &msm_aux_pcm_device, &msm_cdcclk_ctl_device, &msm_mi2s_device, &msm_uplink_rx_device, &msm_device_dspcrashd_8x60, }; #ifdef CONFIG_MSM8X60_FTM_AUDIO_DEVICES static struct platform_device *snd_devices_ftm[] __initdata = { &ftm_headset_mono_rx_device, &ftm_headset_mono_l_rx_device, &ftm_headset_mono_r_rx_device, &ftm_headset_mono_diff_rx_device, &ftm_spkr_mono_rx_device, &ftm_spkr_l_rx_device, &ftm_spkr_r_rx_device, &ftm_spkr_mono_diff_rx_device, &ftm_linein_l_tx_device, &ftm_linein_r_tx_device, &ftm_aux_out_rx_device, &ftm_dmic1_left_tx_device, &ftm_dmic1_right_tx_device, &ftm_dmic1_l_and_r_tx_device, &ftm_dmic2_left_tx_device, &ftm_dmic2_right_tx_device, &ftm_dmic2_l_and_r_tx_device, &ftm_handset_mic1_aux_in_device, &ftm_mi2s_sd0_rx_device, &ftm_mi2s_sd1_rx_device, &ftm_mi2s_sd2_rx_device, &ftm_handset_mic_adie_lp_tx_device, &ftm_headset_mic_adie_lp_tx_device, &ftm_handset_adie_lp_rx_device, &ftm_headset_l_adie_lp_rx_device, &ftm_headset_r_adie_lp_rx_device, &ftm_spk_l_adie_lp_rx_device, &ftm_spk_r_adie_lp_rx_device, &ftm_spk_adie_lp_rx_device, &ftm_handset_dual_tx_lp_device, }; #else static struct platform_device *snd_devices_ftm[] __initdata = {}; #endif void __init msm_snddev_init(void) { int i; int dev_id; atomic_set(&pamp_ref_cnt, 0); atomic_set(&preg_ref_cnt, 0); for (i = 0, dev_id = 0; i < ARRAY_SIZE(snd_devices_common); i++) snd_devices_common[i]->id = dev_id++; platform_add_devices(snd_devices_common, ARRAY_SIZE(snd_devices_common)); /* Auto detect device base on machine info */ if (machine_is_msm8x60_surf() || machine_is_msm8x60_fusion()) { for (i = 0; i < ARRAY_SIZE(snd_devices_surf); i++) snd_devices_surf[i]->id = dev_id++; platform_add_devices(snd_devices_surf, ARRAY_SIZE(snd_devices_surf)); } else if (machine_is_msm8x60_ffa() || machine_is_msm8x60_fusn_ffa()) { for (i = 0; i < 
ARRAY_SIZE(snd_devices_ffa); i++) snd_devices_ffa[i]->id = dev_id++; platform_add_devices(snd_devices_ffa, ARRAY_SIZE(snd_devices_ffa)); } else if (machine_is_msm8x60_fluid()) { for (i = 0; i < ARRAY_SIZE(snd_devices_fluid); i++) snd_devices_fluid[i]->id = dev_id++; platform_add_devices(snd_devices_fluid, ARRAY_SIZE(snd_devices_fluid)); } if (machine_is_msm8x60_surf() || machine_is_msm8x60_ffa() || machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) { for (i = 0; i < ARRAY_SIZE(snd_devices_ftm); i++) snd_devices_ftm[i]->id = dev_id++; platform_add_devices(snd_devices_ftm, ARRAY_SIZE(snd_devices_ftm)); } #ifdef CONFIG_DEBUG_FS debugfs_hsed_config = debugfs_create_file("msm_hsed_config", S_IFREG | S_IRUGO, NULL, (void *) "msm_hsed_config", &snddev_hsed_config_debug_fops); #endif }
gpl-2.0
maz-1/android_kernel_lge_msm8974
arch/mips/cavium-octeon/smp.c
4270
10535
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

/*
 * Handshake cells polled by the secondary-core boot code: the boot CPU
 * publishes the target core id, stack pointer and global pointer here,
 * and the secondary clears octeon_processor_sp once it has picked them up
 * (see octeon_boot_secondary below).
 */
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

#ifdef CONFIG_HOTPLUG_CPU
/* Bootloader re-entry address, read from the LABI block at setup time. */
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

/*
 * IPI handler: read this core's mailbox, acknowledge it, and dispatch the
 * requested SMP actions (cross-call, reschedule, icache flush).
 */
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	const int coreid = cvmx_get_core_num();
	uint64_t action;

	/* Load the mailbox register to figure out what we're supposed to do */
	action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	/* Check if we've been told to flush the icache */
	if (action & SMP_ICACHE_FLUSH)
		asm volatile ("synci 0($0)\n");
	return IRQ_HANDLED;
}

/**
 * Cause the function described by call_data to be executed on the passed
 * cpu.  When the function has finished, increment the finished field of
 * call_data.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);
	/*
	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
	       coreid, action);
	*/
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

/* Send the IPI to every cpu in @mask, one mailbox write per core. */
static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, *mask)
		octeon_send_ipi_single(i, action);
}

/**
 * Detect available CPUs, populate cpu_possible_mask
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE)
		panic("The bootloader version on this board is incorrect.");

	/* Remember where the bootloader re-enters released cores. */
	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

/*
 * Build the cpu_possible/cpu_present masks and the logical<->physical core
 * maps.  The boot core becomes CPU 0; other cores in the boot coremask get
 * the following numbers.
 */
static void octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	int core_mask = octeon_get_boot_coremask();
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && (core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}

/**
 * Firmware CPU startup hook
 *
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int count;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	/* Publish SP/GP/core id, then wait for the secondary to consume them. */
	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();

	count = 10000;
	while (octeon_processor_sp && count) {
		/* Waiting for processor to get the SP and GP */
		udelay(1);
		count--;
	}
	if (count == 0)
		pr_err("Secondary boot timeout\n");
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void __cpuinit octeon_init_secondary(void)
{
	unsigned int sr;

	/* Point exceptions at the kernel's ebase before clearing BEV. */
	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
	raw_local_irq_enable();
}

/**
 * Callout to firmware before smp_init
 *
 */
void octeon_prepare_cpus(unsigned int max_cpus)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

	if (labi->labi_signature != LABI_SIGNATURE)
		panic("The bootloader version on this board is incorrect.");
#endif
	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
	}
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
#ifdef CONFIG_CAVIUM_GDB
	unsigned long tmp;
	/* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
	   to be not masked by this core so we know the signal is received by
	   someone */
	asm volatile ("dmfc0 %0, $22\n"
		      "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
#endif

	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
}

/**
 * Hook for after all CPUs are online
 */
static void octeon_cpus_done(void)
{
#ifdef CONFIG_CAVIUM_GDB
	unsigned long tmp;
	/* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
	   to be not masked by this core so we know the signal is received by
	   someone */
	asm volatile ("dmfc0 %0, $22\n"
		      "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
#endif
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

extern void fixup_irqs(void);

static DEFINE_SPINLOCK(smp_reserve_lock);

/*
 * Take this CPU offline: mark it not-online, migrate its interrupts away
 * and flush caches/TLB.  CPU 0 (the boot core) can never be removed.
 */
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	spin_lock(&smp_reserve_lock);

	set_cpu_online(cpu, false);
	cpu_clear(cpu, cpu_callin_map);
	local_irq_disable();
	fixup_irqs();
	local_irq_enable();

	flush_cache_all();
	local_flush_tlb_all();

	spin_unlock(&smp_reserve_lock);

	return 0;
}

/*
 * Called on a surviving CPU: wait until the dying CPU reaches CPU_DEAD,
 * return its core to the bootloader's available-core mask, then reset it.
 */
static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a somewhat complicated strategy for getting/setting the
	 * available cores mask, copied from the bootloader.
	 */
	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {		       /* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
			AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
	mb();
	/* Pulse the per-core reset bit to put the core back into reset. */
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

/*
 * Final code run by a CPU that is going offline: publish CPU_DEAD and
 * spin until octeon_cpu_die() resets this core.
 */
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}

extern void kernel_entry(unsigned long arg1, ...);

static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}

/*
 * Arrange for a hot-plugged core to re-enter the kernel: write the boot
 * vector entry for @cpu and (if the core is held by the bootloader) kick
 * it with an NMI so it picks the vector up.
 */
static int octeon_update_boot_vector(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {		       /* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available, assume it was caught by the simple-executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	mb();

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}

/* Hotplug notifier: prime the boot vector before a CPU is brought up. */
static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
	unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		octeon_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("Cpu %d online\n", cpu);
		break;
	case CPU_DEAD:
		break;
	}

	return NOTIFY_OK;
}

static int __cpuinit register_cavium_notifier(void)
{
	hotcpu_notifier(octeon_cpu_callback, 0);
	return 0;
}
late_initcall(register_cavium_notifier);

#endif  /* CONFIG_HOTPLUG_CPU */

struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.cpus_done		= octeon_cpus_done,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};
gpl-2.0
TeamExodus/kernel_xiaomi_cancro
arch/arm/mach-shmobile/clock-r8a7779.c
4782
5381
/*
 * r8a7779 clock framework support
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 * Copyright (C) 2011 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
#include <linux/clkdev.h>
#include <mach/common.h>

/* Clock Pulse Generator register addresses (physical). */
#define FRQMR		0xffc80014	/* frequency mode register (DIV4 fields) */
#define MSTPCR0		0xffc80030	/* module stop control registers */
#define MSTPCR1		0xffc80034
#define MSTPCR3		0xffc8003c
#define MSTPSR1		0xffc80044	/* module stop status registers */
#define MSTPSR4		0xffc80048
#define MSTPSR6		0xffc8004c
#define MSTPCR4		0xffc80050
#define MSTPCR5		0xffc80054
#define MSTPCR6		0xffc80058
#define MSTPCR7		0xffc80040	/* NOTE(review): out of sequence vs CR4-CR6 — confirm against datasheet */

/* ioremap() through clock mapping mandatory to avoid
 * collision with ARM coherent DMA virtual memory range.
 */
static struct clk_mapping cpg_mapping = {
	.phys	= 0xffc80000,
	.len	= 0x80,
};

/*
 * Default rate for the root input clock, reset this with clk_set_rate()
 * from the platform code.
 */
static struct clk plla_clk = {
	.rate		= 1500000000,
	.mapping	= &cpg_mapping,
};

static struct clk *main_clks[] = {
	&plla_clk,
};

/* DIV4 divisor encoding: index is the register field value, 0 = invalid. */
static int divisors[] = { 0, 0, 0, 6, 8, 12, 16, 0, 24, 32, 36, 0, 0, 0, 0, 0 };

static struct clk_div_mult_table div4_div_mult_table = {
	.divisors = divisors,
	.nr_divisors = ARRAY_SIZE(divisors),
};

static struct clk_div4_table div4_table = {
	.div_mult_table = &div4_div_mult_table,
};

enum { DIV4_S, DIV4_OUT, DIV4_S4, DIV4_S3, DIV4_S1, DIV4_P, DIV4_NR };

/* DIV4 clocks derived from PLLA via FRQMR bit fields. */
static struct clk div4_clks[DIV4_NR] = {
	[DIV4_S]	= SH_CLK_DIV4(&plla_clk, FRQMR, 20, 0x0018, CLK_ENABLE_ON_INIT),
	[DIV4_OUT]	= SH_CLK_DIV4(&plla_clk, FRQMR, 16, 0x0700, CLK_ENABLE_ON_INIT),
	[DIV4_S4]	= SH_CLK_DIV4(&plla_clk, FRQMR, 12, 0x0040, CLK_ENABLE_ON_INIT),
	[DIV4_S3]	= SH_CLK_DIV4(&plla_clk, FRQMR, 8, 0x0010, CLK_ENABLE_ON_INIT),
	[DIV4_S1]	= SH_CLK_DIV4(&plla_clk, FRQMR, 4, 0x0060, CLK_ENABLE_ON_INIT),
	[DIV4_P]	= SH_CLK_DIV4(&plla_clk, FRQMR, 0, 0x0300, CLK_ENABLE_ON_INIT),
};

enum { MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021,
	MSTP016, MSTP015, MSTP014,
	MSTP_NR };

/* Module stop (gate) clocks, all children of the peripheral clock. */
static struct clk mstp_clks[MSTP_NR] = {
	[MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0), /* SCIF0 */
	[MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0), /* SCIF1 */
	[MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0), /* SCIF2 */
	[MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0), /* SCIF3 */
	[MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0), /* SCIF4 */
	[MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0), /* SCIF5 */
	[MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0), /* TMU0 */
	[MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0), /* TMU1 */
	[MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0), /* TMU2 */
};

/* clkz runs at 4x its parent (the shyway clock). */
static unsigned long mul4_recalc(struct clk *clk)
{
	return clk->parent->rate * 4;
}

static struct sh_clk_ops mul4_clk_ops = {
	.recalc		= mul4_recalc,
};

struct clk clkz_clk = {
	.ops		= &mul4_clk_ops,
	.parent		= &div4_clks[DIV4_S],
};

struct clk clkzs_clk = {
	/* clks x 4 / 4 = clks */
	.parent		= &div4_clks[DIV4_S],
};

/* Registered after div4_clks so their parent rates are already known. */
static struct clk *late_main_clks[] = {
	&clkz_clk,
	&clkzs_clk,
};

static struct clk_lookup lookups[] = {
	/* main clocks */
	CLKDEV_CON_ID("plla_clk", &plla_clk),
	CLKDEV_CON_ID("clkz_clk", &clkz_clk),
	CLKDEV_CON_ID("clkzs_clk", &clkzs_clk),

	/* DIV4 clocks */
	CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_S]),
	CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_OUT]),
	CLKDEV_CON_ID("shyway4_clk", &div4_clks[DIV4_S4]),
	CLKDEV_CON_ID("shyway3_clk", &div4_clks[DIV4_S3]),
	CLKDEV_CON_ID("shyway1_clk", &div4_clks[DIV4_S1]),
	CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),

	/* MSTP32 clocks */
	CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP016]), /* TMU00 */
	CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP016]), /* TMU01 (shares the TMU0 gate) */
	CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP026]), /* SCIF0 */
	CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP025]), /* SCIF1 */
	CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP024]), /* SCIF2 */
	CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP023]), /* SCIF3 */
	CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP022]), /* SCIF4 */
	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP021]), /* SCIF5 */
};

/*
 * Register all r8a7779 clocks and their clkdev lookups; panic on failure
 * since nothing can run without working clocks.
 */
void __init r8a7779_clock_init(void)
{
	int k, ret = 0;

	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
		ret = clk_register(main_clks[k]);

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);

	for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
		ret = clk_register(late_main_clks[k]);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		shmobile_clk_init();
	else
		panic("failed to setup r8a7779 clocks\n");
}
gpl-2.0
omnirom/android_kernel_xiaomi_aries
arch/arm/mach-vexpress/ct-ca9x4.c
4782
5540
/*
 * Versatile Express Core Tile Cortex A9x4 Support
 */
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <linux/clkdev.h>

#include <asm/hardware/arm_timer.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/pmu.h>
#include <asm/smp_scu.h>
#include <asm/smp_twd.h>

#include <mach/ct-ca9x4.h>

#include <asm/hardware/timer-sp.h>

#include <asm/mach/map.h>
#include <asm/mach/time.h>

#include "core.h"

#include <mach/motherboard.h>

#include <plat/clcd.h>

/* Static mapping for the core tile's private peripheral region (MPIC). */
static struct map_desc ct_ca9x4_io_desc[] __initdata = {
	{
		.virtual	= V2T_PERIPH,
		.pfn		= __phys_to_pfn(CT_CA9X4_MPIC),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	},
};

static void __init ct_ca9x4_map_io(void)
{
	iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}

#ifdef CONFIG_HAVE_ARM_TWD
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);

/* Register the per-cpu TWD local timer; failure is logged but non-fatal. */
static void __init ca9x4_twd_init(void)
{
	int err = twd_local_timer_register(&twd_local_timer);
	if (err)
		pr_err("twd_local_timer_register failed %d\n", err);
}
#else
#define ca9x4_twd_init()	do {} while(0)
#endif

static void __init ct_ca9x4_init_irq(void)
{
	gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
		 ioremap(A9_MPCORE_GIC_CPU, SZ_256));
	ca9x4_twd_init();
}

/* Route the daughterboard video mux/DVI mode via the system config bus. */
static void ct_ca9x4_clcd_enable(struct clcd_fb *fb)
{
	v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE_DB1, 0);
	v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE_DB1, 2);
}

static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
{
	/* XVGA (1024x768) at 2 bytes per pixel. */
	unsigned long framesize = 1024 * 768 * 2;

	fb->panel = versatile_clcd_get_panel("XVGA");
	if (!fb->panel)
		return -EINVAL;

	return versatile_clcd_setup_dma(fb, framesize);
}

static struct clcd_board ct_ca9x4_clcd_data = {
	.name		= "CT-CA9X4",
	.caps		= CLCD_CAP_5551 | CLCD_CAP_565,
	.check		= clcdfb_check,
	.decode		= clcdfb_decode,
	.enable		= ct_ca9x4_clcd_enable,
	.setup		= ct_ca9x4_clcd_setup,
	.mmap		= versatile_clcd_mmap_dma,
	.remove		= versatile_clcd_remove_dma,
};

static AMBA_AHB_DEVICE(clcd, "ct:clcd", 0, CT_CA9X4_CLCDC, IRQ_CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data);
static AMBA_APB_DEVICE(dmc, "ct:dmc", 0, CT_CA9X4_DMC, IRQ_CT_CA9X4_DMC, NULL);
static AMBA_APB_DEVICE(smc, "ct:smc", 0, CT_CA9X4_SMC, IRQ_CT_CA9X4_SMC, NULL);
static AMBA_APB_DEVICE(gpio, "ct:gpio", 0, CT_CA9X4_GPIO, IRQ_CT_CA9X4_GPIO, NULL);

static struct amba_device *ct_ca9x4_amba_devs[] __initdata = {
	&clcd_device,
	&dmc_device,
	&smc_device,
	&gpio_device,
};

/* OSC1 rate changes go straight to the motherboard config controller. */
static long ct_round(struct clk *clk, unsigned long rate)
{
	return rate;
}

static int ct_set(struct clk *clk, unsigned long rate)
{
	return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_DB1 | 1, rate);
}

static const struct clk_ops osc1_clk_ops = {
	.round	= ct_round,
	.set	= ct_set,
};

static struct clk osc1_clk = {
	.ops	= &osc1_clk_ops,
	.rate	= 24000000,
};

static struct clk ct_sp804_clk = {
	.rate	= 1000000,
};

static struct clk_lookup lookups[] = {
	{	/* CLCD */
		.dev_id		= "ct:clcd",
		.clk		= &osc1_clk,
	}, {	/* SP804 timers */
		.dev_id		= "sp804",
		.con_id		= "ct-timer0",
		.clk		= &ct_sp804_clk,
	}, {	/* SP804 timers */
		.dev_id		= "sp804",
		.con_id		= "ct-timer1",
		.clk		= &ct_sp804_clk,
	},
};

/* One PMU interrupt per Cortex-A9 core. */
static struct resource pmu_resources[] = {
	[0] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU0,
		.end	= IRQ_CT_CA9X4_PMU_CPU0,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU1,
		.end	= IRQ_CT_CA9X4_PMU_CPU1,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU2,
		.end	= IRQ_CT_CA9X4_PMU_CPU2,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU3,
		.end	= IRQ_CT_CA9X4_PMU_CPU3,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device pmu_device = {
	.name		= "arm-pmu",
	.id		= ARM_PMU_DEVICE_CPU,
	.num_resources	= ARRAY_SIZE(pmu_resources),
	.resource	= pmu_resources,
};

static void __init ct_ca9x4_init_early(void)
{
	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
}

static void __init ct_ca9x4_init(void)
{
	int i;

#ifdef CONFIG_CACHE_L2X0
	void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);

	/* set RAM latencies to 1 cycle for this core tile. */
	writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
	writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);

	/* NOTE(review): aux value/mask presumably select way size & attrs — confirm against L2C-310 TRM */
	l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
#endif

	for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
		amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);

	platform_device_register(&pmu_device);
}

#ifdef CONFIG_SMP
static void *ct_ca9x4_scu_base __initdata;

/* Read the SCU core count and mark that many CPUs possible. */
static void __init ct_ca9x4_init_cpu_map(void)
{
	int i, ncores;

	ct_ca9x4_scu_base = ioremap(A9_MPCORE_SCU, SZ_128);
	if (WARN_ON(!ct_ca9x4_scu_base))
		return;

	ncores = scu_get_core_count(ct_ca9x4_scu_base);
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; ++i)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}

static void __init ct_ca9x4_smp_enable(unsigned int max_cpus)
{
	scu_enable(ct_ca9x4_scu_base);
}
#endif

/* Core tile descriptor consumed by the Versatile Express board code. */
struct ct_desc ct_ca9x4_desc __initdata = {
	.id		= V2M_CT_ID_CA9,
	.name		= "CA9x4",
	.map_io		= ct_ca9x4_map_io,
	.init_early	= ct_ca9x4_init_early,
	.init_irq	= ct_ca9x4_init_irq,
	.init_tile	= ct_ca9x4_init,
#ifdef CONFIG_SMP
	.init_cpu_map	= ct_ca9x4_init_cpu_map,
	.smp_enable	= ct_ca9x4_smp_enable,
#endif
};
gpl-2.0
archos-sa/archos-gpl-gen9-kernel-ics
arch/sh/kernel/sys_sh64.c
7854
1554
/* * arch/sh/kernel/sys_sh64.c * * Copyright (C) 2000, 2001 Paolo Alberelli * * This file contains various random system calls that * have a non-standard calling sequence on the Linux/SH5 * platform. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/errno.h> #include <linux/rwsem.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/syscalls.h> #include <linux/ipc.h> #include <asm/uaccess.h> #include <asm/ptrace.h> #include <asm/unistd.h> /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. */ int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]) { register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve); register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename; register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv; register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp; __asm__ __volatile__ ("trapa %1 !\t\t\t execve(%2,%3,%4)" : "=r" (__sc0) : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); __asm__ __volatile__ ("!dummy %0 %1 %2 %3" : : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory"); return __sc0; }
gpl-2.0
CallMeAldy/kernel_lge_mako
arch/sh/boot/romimage/mmcif-sh7724.c
11950
1997
/*
 * sh7724 MMCIF loader
 *
 * Copyright (C) 2010 Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/boot.h>
#include <mach/romimage.h>

/* MMCIF controller register window (fixed physical mapping at boot) */
#define MMCIF_BASE      (void __iomem *)0xa4ca0000

/* SH7724 system/pin-function controller registers (absolute addresses;
 * this code runs before any ioremap machinery exists) */
#define MSTPCR2 0xa4150038	/* module stop control (clock gating) */
#define PTWCR 0xa4050146	/* port W control: MMC data D7-D0 */
#define PTXCR 0xa4050148	/* port X control: MMC_CLK, MMC_CMD */
#define PSELA 0xa405014e	/* pin function select A */
#define PSELE 0xa4050156	/* pin function select E */
#define HIZCRC 0xa405015c	/* Hi-Z control */
#define DRVCRA 0xa405018a	/* pin drive-strength control */

/* SH7724 specific MMCIF loader
 *
 * loads the romImage from an MMC card starting from block 512
 * use the following line to write the romImage to an MMC card
 * # dd if=arch/sh/boot/romImage of=/dev/sdx bs=512 seek=512
 *
 * @buf:      destination buffer for the kernel image
 * @no_bytes: number of bytes to load (rounded up to SH_MMCIF_BBS blocks)
 */
asmlinkage void mmcif_loader(unsigned char *buf, unsigned long no_bytes)
{
	mmcif_update_progress(MMC_PROGRESS_ENTER);

	/* enable clock to the MMCIF hardware block */
	__raw_writel(__raw_readl(MSTPCR2) & ~0x20000000, MSTPCR2);

	/* setup pins D7-D0 */
	__raw_writew(0x0000, PTWCR);

	/* setup pins MMC_CLK, MMC_CMD */
	__raw_writew(__raw_readw(PTXCR) & ~0x000f, PTXCR);

	/* select D3-D0 pin function */
	__raw_writew(__raw_readw(PSELA) & ~0x2000, PSELA);

	/* select D7-D4 pin function */
	__raw_writew(__raw_readw(PSELE) & ~0x3000, PSELE);

	/* disable Hi-Z for the MMC pins */
	__raw_writew(__raw_readw(HIZCRC) & ~0x0620, HIZCRC);

	/* high drive capability for MMC pins */
	__raw_writew(__raw_readw(DRVCRA) | 0x3000, DRVCRA);

	mmcif_update_progress(MMC_PROGRESS_INIT);

	/* setup MMCIF hardware */
	sh_mmcif_boot_init(MMCIF_BASE);

	mmcif_update_progress(MMC_PROGRESS_LOAD);

	/* load kernel via MMCIF interface, starting at block 512 to match
	 * the dd invocation documented above */
	sh_mmcif_boot_do_read(MMCIF_BASE, 512,
			      (no_bytes + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS,
			      buf);

	/* disable clock to the MMCIF hardware block */
	__raw_writel(__raw_readl(MSTPCR2) | 0x20000000, MSTPCR2);

	mmcif_update_progress(MMC_PROGRESS_DONE);
}
gpl-2.0
arunthomas/linux
drivers/bus/brcmstb_gisb.c
431
10353
/*
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/pm.h>

#ifdef CONFIG_ARM
#include <asm/bug.h>
#include <asm/signal.h>
#endif

/* Bits in the ARB_ERR_CAP_CLR / ARB_ERR_CAP_STATUS registers */
#define ARB_ERR_CAP_CLEAR		(1 << 0)
#define ARB_ERR_CAP_STATUS_TIMEOUT	(1 << 12)
#define ARB_ERR_CAP_STATUS_TEA		(1 << 11)
#define ARB_ERR_CAP_STATUS_BS_SHIFT	(1 << 2)
#define ARB_ERR_CAP_STATUS_BS_MASK	0x3c
#define ARB_ERR_CAP_STATUS_WRITE	(1 << 1)
#define ARB_ERR_CAP_STATUS_VALID	(1 << 0)

/* Logical register identifiers; per-chip tables below map these to
 * actual register offsets (-1 == register not present on that chip). */
enum {
	ARB_TIMER,
	ARB_ERR_CAP_CLR,
	ARB_ERR_CAP_HI_ADDR,
	ARB_ERR_CAP_ADDR,
	ARB_ERR_CAP_DATA,
	ARB_ERR_CAP_STATUS,
	ARB_ERR_CAP_MASTER,
};

static const int gisb_offsets_bcm7038[] = {
	[ARB_TIMER]		= 0x00c,
	[ARB_ERR_CAP_CLR]	= 0x0c4,
	[ARB_ERR_CAP_HI_ADDR]	= -1,
	[ARB_ERR_CAP_ADDR]	= 0x0c8,
	[ARB_ERR_CAP_DATA]	= 0x0cc,
	[ARB_ERR_CAP_STATUS]	= 0x0d0,
	[ARB_ERR_CAP_MASTER]	= -1,
};

static const int gisb_offsets_bcm7400[] = {
	[ARB_TIMER]		= 0x00c,
	[ARB_ERR_CAP_CLR]	= 0x0c8,
	[ARB_ERR_CAP_HI_ADDR]	= -1,
	[ARB_ERR_CAP_ADDR]	= 0x0cc,
	[ARB_ERR_CAP_DATA]	= 0x0d0,
	[ARB_ERR_CAP_STATUS]	= 0x0d4,
	[ARB_ERR_CAP_MASTER]	= 0x0d8,
};

static const int gisb_offsets_bcm7435[] = {
	[ARB_TIMER]		= 0x00c,
	[ARB_ERR_CAP_CLR]	= 0x168,
	[ARB_ERR_CAP_HI_ADDR]	= -1,
	[ARB_ERR_CAP_ADDR]	= 0x16c,
	[ARB_ERR_CAP_DATA]	= 0x170,
	[ARB_ERR_CAP_STATUS]	= 0x174,
	[ARB_ERR_CAP_MASTER]	= 0x178,
};

static const int gisb_offsets_bcm7445[] = {
	[ARB_TIMER]		= 0x008,
	[ARB_ERR_CAP_CLR]	= 0x7e4,
	[ARB_ERR_CAP_HI_ADDR]	= 0x7e8,
	[ARB_ERR_CAP_ADDR]	= 0x7ec,
	[ARB_ERR_CAP_DATA]	= 0x7f0,
	[ARB_ERR_CAP_STATUS]	= 0x7f4,
	[ARB_ERR_CAP_MASTER]	= 0x7f8,
};

/*
 * One instance per GISB arbiter found in the device tree.
 *
 * @base:	 mapped register window
 * @gisb_offsets: per-chip logical-register -> offset table
 * @lock:	 serializes sysfs accesses to the timeout register
 * @next:	 linkage on brcmstb_gisb_arb_device_list
 * @valid_mask:	 bitmask of masters present on this arbiter
 * @master_names: human-readable name per master bit (may be NULL)
 * @saved_timeout: ARB_TIMER value preserved across suspend/resume
 */
struct brcmstb_gisb_arb_device {
	void __iomem	*base;
	const int	*gisb_offsets;
	struct mutex	lock;
	struct list_head next;
	u32		valid_mask;
	const char	*master_names[sizeof(u32) * BITS_PER_BYTE];
	u32		saved_timeout;
};

static LIST_HEAD(brcmstb_gisb_arb_device_list);

static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
{
	int offset = gdev->gisb_offsets[reg];

	/* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
	if (offset == -1)
		return 1;

	return ioread32(gdev->base + offset);
}

static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
{
	int offset = gdev->gisb_offsets[reg];

	if (offset == -1)
		return;

	/*
	 * Fix: write through the translated per-chip offset, not the raw
	 * enum index 'reg' (the previous code wrote to base + reg, which
	 * hit the wrong register on every chip).  Mirrors gisb_read().
	 */
	iowrite32(val, gdev->base + offset);
}

/* sysfs: show the current arbitration timeout (raw ARB_TIMER value) */
static ssize_t gisb_arb_get_timeout(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
	u32 timeout;

	mutex_lock(&gdev->lock);
	timeout = gisb_read(gdev, ARB_TIMER);
	mutex_unlock(&gdev->lock);

	/* Fix: sysfs values are newline-terminated by convention */
	return sprintf(buf, "%d\n", timeout);
}

/* sysfs: set the arbitration timeout; rejects 0 and all-ones */
static ssize_t gisb_arb_set_timeout(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val == 0 || val >= 0xffffffff)
		return -EINVAL;

	mutex_lock(&gdev->lock);
	gisb_write(gdev, val, ARB_TIMER);
	mutex_unlock(&gdev->lock);

	return count;
}

/*
 * Map a captured master bitmask to its name.  Returns NULL when zero or
 * more than one valid master bit is set (caller then prints the raw value).
 */
static const char *
brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev,
						u32 masters)
{
	u32 mask = gdev->valid_mask & masters;

	if (hweight_long(mask) != 1)
		return NULL;

	return gdev->master_names[ffs(mask) - 1];
}

/*
 * Decode and report a captured GISB error.  Returns 1 if the capture
 * registers held no valid error (nothing to report), 0 otherwise.
 */
static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
					const char *reason)
{
	u32 cap_status;
	u64 arb_addr;
	u32 master;
	const char *m_name;
	char m_fmt[11];

	cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);

	/* Invalid captured address, bail out */
	if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
		return 1;

	/* Read the address and master.  Fix: keep the address in a u64 so
	 * the high word is not truncated on 32-bit kernels with LPAE. */
	arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
	arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
#endif
	master = gisb_read(gdev, ARB_ERR_CAP_MASTER);

	m_name = brcmstb_gisb_master_to_str(gdev, master);
	if (!m_name) {
		snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
		m_name = m_fmt;
	}

	pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
		__func__, reason, arb_addr,
		cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
		cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
		m_name);

	/* clear the GISB error */
	gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);

	return 0;
}

#ifdef CONFIG_ARM
/* ARM fault hook: walk all registered arbiters and decode any capture */
static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
				     struct pt_regs *regs)
{
	int ret = 0;
	struct brcmstb_gisb_arb_device *gdev;

	/* iterate over each GISB arb registered handlers */
	list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
		ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
	/*
	 * If it was an imprecise abort, then we need to correct the
	 * return address to be _after_ the instruction.
	 */
	if (fsr & (1 << 10))
		regs->ARM_pc += 4;

	return ret;
}
#endif

static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
{
	brcmstb_gisb_arb_decode_addr(dev_id, "timeout");

	return IRQ_HANDLED;
}

static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
{
	brcmstb_gisb_arb_decode_addr(dev_id, "target abort");

	return IRQ_HANDLED;
}

static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO,
		gisb_arb_get_timeout, gisb_arb_set_timeout);

static struct attribute *gisb_arb_sysfs_attrs[] = {
	&dev_attr_gisb_arb_timeout.attr,
	NULL,
};

static struct attribute_group gisb_arb_sysfs_attr_group = {
	.attrs = gisb_arb_sysfs_attrs,
};

static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
	{ .compatible = "brcm,gisb-arb",         .data = gisb_offsets_bcm7445 },
	{ .compatible = "brcm,bcm7445-gisb-arb", .data = gisb_offsets_bcm7445 },
	{ .compatible = "brcm,bcm7435-gisb-arb", .data = gisb_offsets_bcm7435 },
	{ .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
	{ .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
	{ },
};

static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct brcmstb_gisb_arb_device *gdev;
	const struct of_device_id *of_id;
	struct resource *r;
	int err, timeout_irq, tea_irq;
	unsigned int num_masters, j = 0;
	int i, first, last;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	timeout_irq = platform_get_irq(pdev, 0);
	tea_irq = platform_get_irq(pdev, 1);

	gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	mutex_init(&gdev->lock);
	INIT_LIST_HEAD(&gdev->next);

	gdev->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(gdev->base))
		return PTR_ERR(gdev->base);

	of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
	if (!of_id) {
		pr_err("failed to look up compatible string\n");
		return -EINVAL;
	}
	gdev->gisb_offsets = of_id->data;

	err = devm_request_irq(&pdev->dev, timeout_irq,
				brcmstb_gisb_timeout_handler, 0, pdev->name,
				gdev);
	if (err < 0)
		return err;

	err = devm_request_irq(&pdev->dev, tea_irq,
				brcmstb_gisb_tea_handler, 0, pdev->name,
				gdev);
	if (err < 0)
		return err;

	/* If we do not have a valid mask, assume all masters are enabled */
	if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
				&gdev->valid_mask))
		gdev->valid_mask = 0xffffffff;

	/* Proceed with reading the litteral names if we agree on the
	 * number of masters
	 */
	num_masters = of_property_count_strings(dn,
			"brcm,gisb-arb-master-names");
	if (hweight_long(gdev->valid_mask) == num_masters) {
		first = ffs(gdev->valid_mask) - 1;
		last = fls(gdev->valid_mask) - 1;

		/* Fix: 'last' is the index of the highest set bit and must
		 * itself be visited, so the loop bound is inclusive. */
		for (i = first; i <= last; i++) {
			if (!(gdev->valid_mask & BIT(i)))
				continue;

			of_property_read_string_index(dn,
					"brcm,gisb-arb-master-names", j,
					&gdev->master_names[i]);
			j++;
		}
	}

	err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
	if (err)
		return err;

	platform_set_drvdata(pdev, gdev);

	list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);

#ifdef CONFIG_ARM
	hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
			"imprecise external abort");
#endif

	dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
			gdev->base, timeout_irq, tea_irq);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int brcmstb_gisb_arb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);

	gdev->saved_timeout = gisb_read(gdev, ARB_TIMER);

	return 0;
}

/* Make sure we provide the same timeout value that was configured before, and
 * do this before the GISB timeout interrupt handler has any chance to run.
 */
static int brcmstb_gisb_arb_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);

	gisb_write(gdev, gdev->saved_timeout, ARB_TIMER);

	return 0;
}
#else
#define brcmstb_gisb_arb_suspend       NULL
#define brcmstb_gisb_arb_resume_noirq  NULL
#endif

static const struct dev_pm_ops brcmstb_gisb_arb_pm_ops = {
	.suspend	= brcmstb_gisb_arb_suspend,
	.resume_noirq	= brcmstb_gisb_arb_resume_noirq,
};

static struct platform_driver brcmstb_gisb_arb_driver = {
	.driver = {
		.name	= "brcm-gisb-arb",
		.of_match_table = brcmstb_gisb_arb_of_match,
		.pm	= &brcmstb_gisb_arb_pm_ops,
	},
};

static int __init brcm_gisb_driver_init(void)
{
	return platform_driver_probe(&brcmstb_gisb_arb_driver,
				     brcmstb_gisb_arb_probe);
}

module_init(brcm_gisb_driver_init);
gpl-2.0
jiangliu/linux
drivers/staging/iio/light/isl29018.c
431
17285
/* * A iio driver for the light sensor ISL 29018. * * IIO driver for monitoring ambient light intensity in luxi, proximity * sensing and infrared sensing. * * Copyright (c) 2010, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #define CONVERSION_TIME_MS 100 #define ISL29018_REG_ADD_COMMAND1 0x00 #define COMMMAND1_OPMODE_SHIFT 5 #define COMMMAND1_OPMODE_MASK (7 << COMMMAND1_OPMODE_SHIFT) #define COMMMAND1_OPMODE_POWER_DOWN 0 #define COMMMAND1_OPMODE_ALS_ONCE 1 #define COMMMAND1_OPMODE_IR_ONCE 2 #define COMMMAND1_OPMODE_PROX_ONCE 3 #define ISL29018_REG_ADD_COMMANDII 0x01 #define COMMANDII_RESOLUTION_SHIFT 2 #define COMMANDII_RESOLUTION_MASK (0x3 << COMMANDII_RESOLUTION_SHIFT) #define COMMANDII_RANGE_SHIFT 0 #define COMMANDII_RANGE_MASK (0x3 << COMMANDII_RANGE_SHIFT) #define COMMANDII_SCHEME_SHIFT 7 #define COMMANDII_SCHEME_MASK (0x1 << COMMANDII_SCHEME_SHIFT) #define ISL29018_REG_ADD_DATA_LSB 0x02 #define ISL29018_REG_ADD_DATA_MSB 0x03 #define ISL29018_REG_TEST 0x08 #define ISL29018_TEST_SHIFT 0 #define ISL29018_TEST_MASK (0xFF << ISL29018_TEST_SHIFT) struct isl29018_chip { 
struct device *dev; struct regmap *regmap; struct mutex lock; unsigned int lux_scale; unsigned int lux_uscale; unsigned int range; unsigned int adc_bit; int prox_scheme; bool suspended; }; static int isl29018_set_range(struct isl29018_chip *chip, unsigned long range, unsigned int *new_range) { static const unsigned long supp_ranges[] = {1000, 4000, 16000, 64000}; int i; for (i = 0; i < ARRAY_SIZE(supp_ranges); ++i) { if (range <= supp_ranges[i]) { *new_range = (unsigned int)supp_ranges[i]; break; } } if (i >= ARRAY_SIZE(supp_ranges)) return -EINVAL; return regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII, COMMANDII_RANGE_MASK, i << COMMANDII_RANGE_SHIFT); } static int isl29018_set_resolution(struct isl29018_chip *chip, unsigned long adcbit, unsigned int *conf_adc_bit) { static const unsigned long supp_adcbit[] = {16, 12, 8, 4}; int i; for (i = 0; i < ARRAY_SIZE(supp_adcbit); ++i) { if (adcbit >= supp_adcbit[i]) { *conf_adc_bit = (unsigned int)supp_adcbit[i]; break; } } if (i >= ARRAY_SIZE(supp_adcbit)) return -EINVAL; return regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII, COMMANDII_RESOLUTION_MASK, i << COMMANDII_RESOLUTION_SHIFT); } static int isl29018_read_sensor_input(struct isl29018_chip *chip, int mode) { int status; unsigned int lsb; unsigned int msb; /* Set mode */ status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1, mode << COMMMAND1_OPMODE_SHIFT); if (status) { dev_err(chip->dev, "Error in setting operating mode err %d\n", status); return status; } msleep(CONVERSION_TIME_MS); status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_LSB, &lsb); if (status < 0) { dev_err(chip->dev, "Error in reading LSB DATA with err %d\n", status); return status; } status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_MSB, &msb); if (status < 0) { dev_err(chip->dev, "Error in reading MSB DATA with error %d\n", status); return status; } dev_vdbg(chip->dev, "MSB 0x%x and LSB 0x%x\n", msb, lsb); return (msb << 8) | lsb; } static int 
isl29018_read_lux(struct isl29018_chip *chip, int *lux) { int lux_data; unsigned int data_x_range, lux_unshifted; lux_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_ALS_ONCE); if (lux_data < 0) return lux_data; /* To support fractional scaling, separate the unshifted lux * into two calculations: int scaling and micro-scaling. * lux_uscale ranges from 0-999999, so about 20 bits. Split * the /1,000,000 in two to reduce the risk of over/underflow. */ data_x_range = lux_data * chip->range; lux_unshifted = data_x_range * chip->lux_scale; lux_unshifted += data_x_range / 1000 * chip->lux_uscale / 1000; *lux = lux_unshifted >> chip->adc_bit; return 0; } static int isl29018_read_ir(struct isl29018_chip *chip, int *ir) { int ir_data; ir_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_IR_ONCE); if (ir_data < 0) return ir_data; *ir = ir_data; return 0; } static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme, int *near_ir) { int status; int prox_data = -1; int ir_data = -1; /* Do proximity sensing with required scheme */ status = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII, COMMANDII_SCHEME_MASK, scheme << COMMANDII_SCHEME_SHIFT); if (status) { dev_err(chip->dev, "Error in setting operating mode\n"); return status; } prox_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_PROX_ONCE); if (prox_data < 0) return prox_data; if (scheme == 1) { *near_ir = prox_data; return 0; } ir_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_IR_ONCE); if (ir_data < 0) return ir_data; if (prox_data >= ir_data) *near_ir = prox_data - ir_data; else *near_ir = 0; return 0; } /* Sysfs interface */ /* range */ static ssize_t show_range(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); return sprintf(buf, "%u\n", chip->range); } static ssize_t store_range(struct device *dev, struct device_attribute *attr, const char *buf, 
size_t count) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); int status; unsigned long lval; unsigned int new_range; if (kstrtoul(buf, 10, &lval)) return -EINVAL; if (!(lval == 1000UL || lval == 4000UL || lval == 16000UL || lval == 64000UL)) { dev_err(dev, "The range is not supported\n"); return -EINVAL; } mutex_lock(&chip->lock); status = isl29018_set_range(chip, lval, &new_range); if (status < 0) { mutex_unlock(&chip->lock); dev_err(dev, "Error in setting max range with err %d\n", status); return status; } chip->range = new_range; mutex_unlock(&chip->lock); return count; } /* resolution */ static ssize_t show_resolution(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); return sprintf(buf, "%u\n", chip->adc_bit); } static ssize_t store_resolution(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); int status; unsigned int val; unsigned int new_adc_bit; if (kstrtouint(buf, 10, &val)) return -EINVAL; if (!(val == 4 || val == 8 || val == 12 || val == 16)) { dev_err(dev, "The resolution is not supported\n"); return -EINVAL; } mutex_lock(&chip->lock); status = isl29018_set_resolution(chip, val, &new_adc_bit); if (status < 0) { mutex_unlock(&chip->lock); dev_err(dev, "Error in setting resolution\n"); return status; } chip->adc_bit = new_adc_bit; mutex_unlock(&chip->lock); return count; } /* proximity scheme */ static ssize_t show_prox_infrared_suppression(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); /* return the "proximity scheme" i.e. 
if the chip does on chip infrared suppression (1 means perform on chip suppression) */ return sprintf(buf, "%d\n", chip->prox_scheme); } static ssize_t store_prox_infrared_suppression(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); int val; if (kstrtoint(buf, 10, &val)) return -EINVAL; if (!(val == 0 || val == 1)) { dev_err(dev, "The mode is not supported\n"); return -EINVAL; } /* get the "proximity scheme" i.e. if the chip does on chip infrared suppression (1 means perform on chip suppression) */ mutex_lock(&chip->lock); chip->prox_scheme = val; mutex_unlock(&chip->lock); return count; } /* Channel IO */ static int isl29018_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct isl29018_chip *chip = iio_priv(indio_dev); int ret = -EINVAL; mutex_lock(&chip->lock); if (mask == IIO_CHAN_INFO_CALIBSCALE && chan->type == IIO_LIGHT) { chip->lux_scale = val; /* With no write_raw_get_fmt(), val2 is a MICRO fraction. 
*/ chip->lux_uscale = val2; ret = 0; } mutex_unlock(&chip->lock); return ret; } static int isl29018_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { int ret = -EINVAL; struct isl29018_chip *chip = iio_priv(indio_dev); mutex_lock(&chip->lock); if (chip->suspended) { mutex_unlock(&chip->lock); return -EBUSY; } switch (mask) { case IIO_CHAN_INFO_RAW: case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { case IIO_LIGHT: ret = isl29018_read_lux(chip, val); break; case IIO_INTENSITY: ret = isl29018_read_ir(chip, val); break; case IIO_PROXIMITY: ret = isl29018_read_proximity_ir(chip, chip->prox_scheme, val); break; default: break; } if (!ret) ret = IIO_VAL_INT; break; case IIO_CHAN_INFO_CALIBSCALE: if (chan->type == IIO_LIGHT) { *val = chip->lux_scale; *val2 = chip->lux_uscale; ret = IIO_VAL_INT_PLUS_MICRO; } break; default: break; } mutex_unlock(&chip->lock); return ret; } static const struct iio_chan_spec isl29018_channels[] = { { .type = IIO_LIGHT, .indexed = 1, .channel = 0, .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | BIT(IIO_CHAN_INFO_CALIBSCALE), }, { .type = IIO_INTENSITY, .modified = 1, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), .channel2 = IIO_MOD_LIGHT_IR, }, { /* Unindexed in current ABI. But perhaps it should be. 
*/ .type = IIO_PROXIMITY, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), } }; static IIO_DEVICE_ATTR(range, S_IRUGO | S_IWUSR, show_range, store_range, 0); static IIO_CONST_ATTR(range_available, "1000 4000 16000 64000"); static IIO_CONST_ATTR(adc_resolution_available, "4 8 12 16"); static IIO_DEVICE_ATTR(adc_resolution, S_IRUGO | S_IWUSR, show_resolution, store_resolution, 0); static IIO_DEVICE_ATTR(proximity_on_chip_ambient_infrared_suppression, S_IRUGO | S_IWUSR, show_prox_infrared_suppression, store_prox_infrared_suppression, 0); #define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr) #define ISL29018_CONST_ATTR(name) (&iio_const_attr_##name.dev_attr.attr) static struct attribute *isl29018_attributes[] = { ISL29018_DEV_ATTR(range), ISL29018_CONST_ATTR(range_available), ISL29018_DEV_ATTR(adc_resolution), ISL29018_CONST_ATTR(adc_resolution_available), ISL29018_DEV_ATTR(proximity_on_chip_ambient_infrared_suppression), NULL }; static const struct attribute_group isl29108_group = { .attrs = isl29018_attributes, }; static int isl29018_chip_init(struct isl29018_chip *chip) { int status; int new_adc_bit; unsigned int new_range; /* Code added per Intersil Application Note 1534: * When VDD sinks to approximately 1.8V or below, some of * the part's registers may change their state. When VDD * recovers to 2.25V (or greater), the part may thus be in an * unknown mode of operation. The user can return the part to * a known mode of operation either by (a) setting VDD = 0V for * 1 second or more and then powering back up with a slew rate * of 0.5V/ms or greater, or (b) via I2C disable all ALS/PROX * conversions, clear the test registers, and then rewrite all * registers to the desired values. * ... * FOR ISL29011, ISL29018, ISL29021, ISL29023 * 1. Write 0x00 to register 0x08 (TEST) * 2. Write 0x00 to register 0x00 (CMD1) * 3. 
Rewrite all registers to the desired values * * ISL29018 Data Sheet (FN6619.1, Feb 11, 2010) essentially says * the same thing EXCEPT the data sheet asks for a 1ms delay after * writing the CMD1 register. */ status = regmap_write(chip->regmap, ISL29018_REG_TEST, 0x0); if (status < 0) { dev_err(chip->dev, "Failed to clear isl29018 TEST reg." "(%d)\n", status); return status; } /* See Intersil AN1534 comments above. * "Operating Mode" (COMMAND1) register is reprogrammed when * data is read from the device. */ status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1, 0); if (status < 0) { dev_err(chip->dev, "Failed to clear isl29018 CMD1 reg." "(%d)\n", status); return status; } msleep(1); /* per data sheet, page 10 */ /* set defaults */ status = isl29018_set_range(chip, chip->range, &new_range); if (status < 0) { dev_err(chip->dev, "Init of isl29018 fails\n"); return status; } status = isl29018_set_resolution(chip, chip->adc_bit, &new_adc_bit); return 0; } static const struct iio_info isl29108_info = { .attrs = &isl29108_group, .driver_module = THIS_MODULE, .read_raw = &isl29018_read_raw, .write_raw = &isl29018_write_raw, }; static bool is_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case ISL29018_REG_ADD_DATA_LSB: case ISL29018_REG_ADD_DATA_MSB: case ISL29018_REG_ADD_COMMAND1: case ISL29018_REG_TEST: return true; default: return false; } } /* * isl29018_regmap_config: regmap configuration. * Use RBTREE mechanism for caching. 
*/ static const struct regmap_config isl29018_regmap_config = { .reg_bits = 8, .val_bits = 8, .volatile_reg = is_volatile_reg, .max_register = ISL29018_REG_TEST, .num_reg_defaults_raw = ISL29018_REG_TEST + 1, .cache_type = REGCACHE_RBTREE, }; static int isl29018_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct isl29018_chip *chip; struct iio_dev *indio_dev; int err; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip)); if (indio_dev == NULL) { dev_err(&client->dev, "iio allocation fails\n"); return -ENOMEM; } chip = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); chip->dev = &client->dev; mutex_init(&chip->lock); chip->lux_scale = 1; chip->lux_uscale = 0; chip->range = 1000; chip->adc_bit = 16; chip->suspended = false; chip->regmap = devm_regmap_init_i2c(client, &isl29018_regmap_config); if (IS_ERR(chip->regmap)) { err = PTR_ERR(chip->regmap); dev_err(chip->dev, "regmap initialization failed: %d\n", err); return err; } err = isl29018_chip_init(chip); if (err) return err; indio_dev->info = &isl29108_info; indio_dev->channels = isl29018_channels; indio_dev->num_channels = ARRAY_SIZE(isl29018_channels); indio_dev->name = id->name; indio_dev->dev.parent = &client->dev; indio_dev->modes = INDIO_DIRECT_MODE; err = devm_iio_device_register(&client->dev, indio_dev); if (err) { dev_err(&client->dev, "iio registration fails\n"); return err; } return 0; } #ifdef CONFIG_PM_SLEEP static int isl29018_suspend(struct device *dev) { struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev)); mutex_lock(&chip->lock); /* Since this driver uses only polling commands, we are by default in * auto shutdown (ie, power-down) mode. * So we do not have much to do here. 
*/ chip->suspended = true; mutex_unlock(&chip->lock); return 0; } static int isl29018_resume(struct device *dev) { struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev)); int err; mutex_lock(&chip->lock); err = isl29018_chip_init(chip); if (!err) chip->suspended = false; mutex_unlock(&chip->lock); return err; } static SIMPLE_DEV_PM_OPS(isl29018_pm_ops, isl29018_suspend, isl29018_resume); #define ISL29018_PM_OPS (&isl29018_pm_ops) #else #define ISL29018_PM_OPS NULL #endif static const struct i2c_device_id isl29018_id[] = { {"isl29018", 0}, {} }; MODULE_DEVICE_TABLE(i2c, isl29018_id); static const struct of_device_id isl29018_of_match[] = { { .compatible = "isil,isl29018", }, { }, }; MODULE_DEVICE_TABLE(of, isl29018_of_match); static struct i2c_driver isl29018_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "isl29018", .pm = ISL29018_PM_OPS, .owner = THIS_MODULE, .of_match_table = isl29018_of_match, }, .probe = isl29018_probe, .id_table = isl29018_id, }; module_i2c_driver(isl29018_driver); MODULE_DESCRIPTION("ISL29018 Ambient Light Sensor driver"); MODULE_LICENSE("GPL");
gpl-2.0
pacificIT/linux-udoo
drivers/rtc/rtc-at32ap700x.c
431
6926
/* * An RTC driver for the AVR32 AT32AP700x processor series. * * Copyright (C) 2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/rtc.h> #include <linux/io.h> /* * This is a bare-bones RTC. It runs during most system sleep states, but has * no battery backup and gets reset during system restart. It must be * initialized from an external clock (network, I2C, etc) before it can be of * much use. * * The alarm functionality is limited by the hardware, not supporting * periodic interrupts. */ #define RTC_CTRL 0x00 #define RTC_CTRL_EN 0 #define RTC_CTRL_PCLR 1 #define RTC_CTRL_TOPEN 2 #define RTC_CTRL_PSEL 8 #define RTC_VAL 0x04 #define RTC_TOP 0x08 #define RTC_IER 0x10 #define RTC_IER_TOPI 0 #define RTC_IDR 0x14 #define RTC_IDR_TOPI 0 #define RTC_IMR 0x18 #define RTC_IMR_TOPI 0 #define RTC_ISR 0x1c #define RTC_ISR_TOPI 0 #define RTC_ICR 0x20 #define RTC_ICR_TOPI 0 #define RTC_BIT(name) (1 << RTC_##name) #define RTC_BF(name, value) ((value) << RTC_##name) #define rtc_readl(dev, reg) \ __raw_readl((dev)->regs + RTC_##reg) #define rtc_writel(dev, reg, value) \ __raw_writel((value), (dev)->regs + RTC_##reg) struct rtc_at32ap700x { struct rtc_device *rtc; void __iomem *regs; unsigned long alarm_time; unsigned long irq; /* Protect against concurrent register access. 
*/ spinlock_t lock; }; static int at32_rtc_readtime(struct device *dev, struct rtc_time *tm) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); unsigned long now; now = rtc_readl(rtc, VAL); rtc_time_to_tm(now, tm); return 0; } static int at32_rtc_settime(struct device *dev, struct rtc_time *tm) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); unsigned long now; int ret; ret = rtc_tm_to_time(tm, &now); if (ret == 0) rtc_writel(rtc, VAL, now); return ret; } static int at32_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); spin_lock_irq(&rtc->lock); rtc_time_to_tm(rtc->alarm_time, &alrm->time); alrm->enabled = rtc_readl(rtc, IMR) & RTC_BIT(IMR_TOPI) ? 1 : 0; alrm->pending = rtc_readl(rtc, ISR) & RTC_BIT(ISR_TOPI) ? 1 : 0; spin_unlock_irq(&rtc->lock); return 0; } static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); unsigned long rtc_unix_time; unsigned long alarm_unix_time; int ret; rtc_unix_time = rtc_readl(rtc, VAL); ret = rtc_tm_to_time(&alrm->time, &alarm_unix_time); if (ret) return ret; if (alarm_unix_time < rtc_unix_time) return -EINVAL; spin_lock_irq(&rtc->lock); rtc->alarm_time = alarm_unix_time; rtc_writel(rtc, TOP, rtc->alarm_time); if (alrm->enabled) rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) | RTC_BIT(CTRL_TOPEN)); else rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) & ~RTC_BIT(CTRL_TOPEN)); spin_unlock_irq(&rtc->lock); return ret; } static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); int ret = 0; spin_lock_irq(&rtc->lock); if (enabled) { if (rtc_readl(rtc, VAL) > rtc->alarm_time) { ret = -EINVAL; goto out; } rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) | RTC_BIT(CTRL_TOPEN)); rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); rtc_writel(rtc, IER, RTC_BIT(IER_TOPI)); } else { rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) & ~RTC_BIT(CTRL_TOPEN)); 
rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); } out: spin_unlock_irq(&rtc->lock); return ret; } static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id) { struct rtc_at32ap700x *rtc = (struct rtc_at32ap700x *)dev_id; unsigned long isr = rtc_readl(rtc, ISR); unsigned long events = 0; int ret = IRQ_NONE; spin_lock(&rtc->lock); if (isr & RTC_BIT(ISR_TOPI)) { rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) & ~RTC_BIT(CTRL_TOPEN)); rtc_writel(rtc, VAL, rtc->alarm_time); events = RTC_AF | RTC_IRQF; rtc_update_irq(rtc->rtc, 1, events); ret = IRQ_HANDLED; } spin_unlock(&rtc->lock); return ret; } static struct rtc_class_ops at32_rtc_ops = { .read_time = at32_rtc_readtime, .set_time = at32_rtc_settime, .read_alarm = at32_rtc_readalarm, .set_alarm = at32_rtc_setalarm, .alarm_irq_enable = at32_rtc_alarm_irq_enable, }; static int __init at32_rtc_probe(struct platform_device *pdev) { struct resource *regs; struct rtc_at32ap700x *rtc; int irq; int ret; rtc = devm_kzalloc(&pdev->dev, sizeof(struct rtc_at32ap700x), GFP_KERNEL); if (!rtc) return -ENOMEM; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_dbg(&pdev->dev, "no mmio resource defined\n"); return -ENXIO; } irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_dbg(&pdev->dev, "could not get irq\n"); return -ENXIO; } rtc->irq = irq; rtc->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); if (!rtc->regs) { dev_dbg(&pdev->dev, "could not map I/O memory\n"); return -ENOMEM; } spin_lock_init(&rtc->lock); /* * Maybe init RTC: count from zero at 1 Hz, disable wrap irq. * * Do not reset VAL register, as it can hold an old time * from last JTAG reset. 
*/ if (!(rtc_readl(rtc, CTRL) & RTC_BIT(CTRL_EN))) { rtc_writel(rtc, CTRL, RTC_BIT(CTRL_PCLR)); rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); rtc_writel(rtc, CTRL, RTC_BF(CTRL_PSEL, 0xe) | RTC_BIT(CTRL_EN)); } ret = devm_request_irq(&pdev->dev, irq, at32_rtc_interrupt, IRQF_SHARED, "rtc", rtc); if (ret) { dev_dbg(&pdev->dev, "could not request irq %d\n", irq); return ret; } platform_set_drvdata(pdev, rtc); rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &at32_rtc_ops, THIS_MODULE); if (IS_ERR(rtc->rtc)) { dev_dbg(&pdev->dev, "could not register rtc device\n"); return PTR_ERR(rtc->rtc); } device_init_wakeup(&pdev->dev, 1); dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n", (unsigned long)rtc->regs, rtc->irq); return 0; } static int __exit at32_rtc_remove(struct platform_device *pdev) { device_init_wakeup(&pdev->dev, 0); return 0; } MODULE_ALIAS("platform:at32ap700x_rtc"); static struct platform_driver at32_rtc_driver = { .remove = __exit_p(at32_rtc_remove), .driver = { .name = "at32ap700x_rtc", }, }; module_platform_driver_probe(at32_rtc_driver, at32_rtc_probe); MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); MODULE_DESCRIPTION("Real time clock for AVR32 AT32AP700x"); MODULE_LICENSE("GPL");
gpl-2.0
existz/htc-kernel-msm7x30
drivers/video/aty/aty128fb.c
943
67382
/* $Id: aty128fb.c,v 1.1.1.1.36.1 1999/12/11 09:03:05 Exp $ * linux/drivers/video/aty128fb.c -- Frame buffer device for ATI Rage128 * * Copyright (C) 1999-2003, Brad Douglas <brad@neruo.com> * Copyright (C) 1999, Anthony Tong <atong@uiuc.edu> * * Ani Joshi / Jeff Garzik * - Code cleanup * * Michel Danzer <michdaen@iiic.ethz.ch> * - 15/16 bit cleanup * - fix panning * * Benjamin Herrenschmidt * - pmac-specific PM stuff * - various fixes & cleanups * * Andreas Hundt <andi@convergence.de> * - FB_ACTIVATE fixes * * Paul Mackerras <paulus@samba.org> * - Convert to new framebuffer API, * fix colormap setting at 16 bits/pixel (565) * * Paul Mundt * - PCI hotplug * * Jon Smirl <jonsmirl@yahoo.com> * - PCI ID update * - replace ROM BIOS search * * Based off of Geert's atyfb.c and vfb.c. * * TODO: * - monitor sensing (DDC) * - virtual display * - other platform support (only ppc/x86 supported) * - hardware cursor support * * Please cc: your patches to brad@neruo.com. */ /* * A special note of gratitude to ATI's devrel for providing documentation, * example code and hardware. Thanks Nitya. -atong and brad */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/uaccess.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/console.h> #include <linux/backlight.h> #include <asm/io.h> #ifdef CONFIG_PPC_PMAC #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include "../macmodes.h" #endif #ifdef CONFIG_PMAC_BACKLIGHT #include <asm/backlight.h> #endif #ifdef CONFIG_BOOTX_TEXT #include <asm/btext.h> #endif /* CONFIG_BOOTX_TEXT */ #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif #include <video/aty128.h> /* Debug flag */ #undef DEBUG #ifdef DEBUG #define DBG(fmt, args...) 
printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args); #else #define DBG(fmt, args...) #endif #ifndef CONFIG_PPC_PMAC /* default mode */ static struct fb_var_screeninfo default_var __devinitdata = { /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */ 640, 480, 640, 480, 0, 0, 8, 0, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 39722, 48, 16, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED }; #else /* CONFIG_PPC_PMAC */ /* default to 1024x768 at 75Hz on PPC - this will work * on the iMac, the usual 640x480 @ 60Hz doesn't. */ static struct fb_var_screeninfo default_var = { /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ 1024, 768, 1024, 768, 0, 0, 8, 0, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 12699, 160, 32, 28, 1, 96, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }; #endif /* CONFIG_PPC_PMAC */ /* default modedb mode */ /* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */ static struct fb_videomode defaultmode __devinitdata = { .refresh = 60, .xres = 640, .yres = 480, .pixclock = 39722, .left_margin = 48, .right_margin = 16, .upper_margin = 33, .lower_margin = 10, .hsync_len = 96, .vsync_len = 2, .sync = 0, .vmode = FB_VMODE_NONINTERLACED }; /* Chip generations */ enum { rage_128, rage_128_pci, rage_128_pro, rage_128_pro_pci, rage_M3, rage_M3_pci, rage_M4, rage_128_ultra, }; /* Must match above enum */ static const char *r128_family[] __devinitdata = { "AGP", "PCI", "PRO AGP", "PRO PCI", "M3 AGP", "M3 PCI", "M4 AGP", "Ultra AGP", }; /* * PCI driver prototypes */ static int aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void aty128_remove(struct pci_dev *pdev); static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state); static int aty128_pci_resume(struct pci_dev *pdev); static int aty128_do_resume(struct pci_dev *pdev); /* supported Rage128 chipsets */ static struct pci_device_id aty128_pci_tbl[] = { { PCI_VENDOR_ID_ATI, 
PCI_DEVICE_ID_ATI_RAGE128_LE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M3_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M3 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_MF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M4 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_ML, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M4 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PH, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PJ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PK, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PN, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PQ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PS, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PU, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PV, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RK, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SH, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SK, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SN, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TF, PCI_ANY_ID, PCI_ANY_ID, 0, 
0, rage_128_ultra }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TS, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TU, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { 0, } }; MODULE_DEVICE_TABLE(pci, aty128_pci_tbl); static struct pci_driver aty128fb_driver = { .name = "aty128fb", .id_table = aty128_pci_tbl, .probe = aty128_probe, .remove = __devexit_p(aty128_remove), .suspend = aty128_pci_suspend, .resume = aty128_pci_resume, }; /* packed BIOS settings */ #ifndef CONFIG_PPC typedef struct { u8 clock_chip_type; u8 struct_size; u8 accelerator_entry; u8 VGA_entry; u16 VGA_table_offset; u16 POST_table_offset; u16 XCLK; u16 MCLK; u8 num_PLL_blocks; u8 size_PLL_blocks; u16 PCLK_ref_freq; u16 PCLK_ref_divider; u32 PCLK_min_freq; u32 PCLK_max_freq; u16 MCLK_ref_freq; u16 MCLK_ref_divider; u32 MCLK_min_freq; u32 MCLK_max_freq; u16 XCLK_ref_freq; u16 XCLK_ref_divider; u32 XCLK_min_freq; u32 XCLK_max_freq; } __attribute__ ((packed)) PLL_BLOCK; #endif /* !CONFIG_PPC */ /* onboard memory information */ struct aty128_meminfo { u8 ML; u8 MB; u8 Trcd; u8 Trp; u8 Twr; u8 CL; u8 Tr2w; u8 LoopLatency; u8 DspOn; u8 Rloop; const char *name; }; /* various memory configurations */ static const struct aty128_meminfo sdr_128 = { 4, 4, 3, 3, 1, 3, 1, 16, 30, 16, "128-bit SDR SGRAM (1:1)" }; static const struct aty128_meminfo sdr_64 = { 4, 8, 3, 3, 1, 3, 1, 17, 46, 17, "64-bit SDR SGRAM (1:1)" }; static const struct aty128_meminfo sdr_sgram = { 4, 4, 1, 2, 1, 2, 1, 16, 24, 16, "64-bit SDR SGRAM (2:1)" }; static const struct aty128_meminfo ddr_sgram = { 4, 4, 3, 3, 2, 3, 1, 16, 31, 16, "64-bit DDR SGRAM" }; static struct fb_fix_screeninfo 
aty128fb_fix __devinitdata = { .id = "ATY Rage128", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .xpanstep = 8, .ypanstep = 1, .mmio_len = 0x2000, .accel = FB_ACCEL_ATI_RAGE128, }; static char *mode_option __devinitdata = NULL; #ifdef CONFIG_PPC_PMAC static int default_vmode __devinitdata = VMODE_1024_768_60; static int default_cmode __devinitdata = CMODE_8; #endif static int default_crt_on __devinitdata = 0; static int default_lcd_on __devinitdata = 1; #ifdef CONFIG_MTRR static bool mtrr = true; #endif #ifdef CONFIG_PMAC_BACKLIGHT static int backlight __devinitdata = 1; #else static int backlight __devinitdata = 0; #endif /* PLL constants */ struct aty128_constants { u32 ref_clk; u32 ppll_min; u32 ppll_max; u32 ref_divider; u32 xclk; u32 fifo_width; u32 fifo_depth; }; struct aty128_crtc { u32 gen_cntl; u32 h_total, h_sync_strt_wid; u32 v_total, v_sync_strt_wid; u32 pitch; u32 offset, offset_cntl; u32 xoffset, yoffset; u32 vxres, vyres; u32 depth, bpp; }; struct aty128_pll { u32 post_divider; u32 feedback_divider; u32 vclk; }; struct aty128_ddafifo { u32 dda_config; u32 dda_on_off; }; /* register values for a specific mode */ struct aty128fb_par { struct aty128_crtc crtc; struct aty128_pll pll; struct aty128_ddafifo fifo_reg; u32 accel_flags; struct aty128_constants constants; /* PLL and others */ void __iomem *regbase; /* remapped mmio */ u32 vram_size; /* onboard video ram */ int chip_gen; const struct aty128_meminfo *mem; /* onboard mem info */ #ifdef CONFIG_MTRR struct { int vram; int vram_valid; } mtrr; #endif int blitter_may_be_busy; int fifo_slots; /* free slots in FIFO (64 max) */ int pm_reg; int crt_on, lcd_on; struct pci_dev *pdev; struct fb_info *next; int asleep; int lock_blank; u8 red[32]; /* see aty128fb_setcolreg */ u8 green[64]; u8 blue[32]; u32 pseudo_palette[16]; /* used for TRUECOLOR */ }; #define round_div(n, d) ((n+(d/2))/d) static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); static int 
aty128fb_set_par(struct fb_info *info); static int aty128fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fb); static int aty128fb_blank(int blank, struct fb_info *fb); static int aty128fb_ioctl(struct fb_info *info, u_int cmd, unsigned long arg); static int aty128fb_sync(struct fb_info *info); /* * Internal routines */ static int aty128_encode_var(struct fb_var_screeninfo *var, const struct aty128fb_par *par); static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par); #if 0 static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, void __iomem *bios); static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par); #endif static void aty128_timings(struct aty128fb_par *par); static void aty128_init_engine(struct aty128fb_par *par); static void aty128_reset_engine(const struct aty128fb_par *par); static void aty128_flush_pixel_cache(const struct aty128fb_par *par); static void do_wait_for_fifo(u16 entries, struct aty128fb_par *par); static void wait_for_fifo(u16 entries, struct aty128fb_par *par); static void wait_for_idle(struct aty128fb_par *par); static u32 depth_to_dst(u32 depth); #ifdef CONFIG_FB_ATY128_BACKLIGHT static void aty128_bl_set_power(struct fb_info *info, int power); #endif #define BIOS_IN8(v) (readb(bios + (v))) #define BIOS_IN16(v) (readb(bios + (v)) | \ (readb(bios + (v) + 1) << 8)) #define BIOS_IN32(v) (readb(bios + (v)) | \ (readb(bios + (v) + 1) << 8) | \ (readb(bios + (v) + 2) << 16) | \ (readb(bios + (v) + 3) << 24)) static struct fb_ops aty128fb_ops = { .owner = THIS_MODULE, .fb_check_var = aty128fb_check_var, .fb_set_par = aty128fb_set_par, .fb_setcolreg = aty128fb_setcolreg, .fb_pan_display = aty128fb_pan_display, .fb_blank = aty128fb_blank, .fb_ioctl = aty128fb_ioctl, .fb_sync = aty128fb_sync, .fb_fillrect = cfb_fillrect, .fb_copyarea = 
cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};

/*
 * Functions to read from/write to the mmio registers
 *	- endian conversions may possibly be avoided by
 *	  using the other register aperture. TODO.
 */
static inline u32 _aty_ld_le32(volatile unsigned int regindex,
			       const struct aty128fb_par *par)
{
	return readl (par->regbase + regindex);
}

static inline void _aty_st_le32(volatile unsigned int regindex, u32 val,
				const struct aty128fb_par *par)
{
	writel (val, par->regbase + regindex);
}

static inline u8 _aty_ld_8(unsigned int regindex,
			   const struct aty128fb_par *par)
{
	return readb (par->regbase + regindex);
}

static inline void _aty_st_8(unsigned int regindex, u8 val,
			     const struct aty128fb_par *par)
{
	writeb (val, par->regbase + regindex);
}

/* Shorthands that pick up the local 'par' variable of the caller. */
#define aty_ld_le32(regindex)		_aty_ld_le32(regindex, par)
#define aty_st_le32(regindex, val)	_aty_st_le32(regindex, val, par)
#define aty_ld_8(regindex)		_aty_ld_8(regindex, par)
#define aty_st_8(regindex, val)		_aty_st_8(regindex, val, par)

/*
 * Functions to read from/write to the pll registers
 */
#define aty_ld_pll(pll_index)		_aty_ld_pll(pll_index, par)
#define aty_st_pll(pll_index, val)	_aty_st_pll(pll_index, val, par)

/*
 * PLL registers are reached indirectly: the 6-bit index is written to
 * CLOCK_CNTL_INDEX, then the value is transferred via CLOCK_CNTL_DATA.
 * Writes additionally require the PLL_WR_EN bit in the index register.
 */
static u32 _aty_ld_pll(unsigned int pll_index,
		       const struct aty128fb_par *par)
{
	aty_st_8(CLOCK_CNTL_INDEX, pll_index & 0x3F);
	return aty_ld_le32(CLOCK_CNTL_DATA);
}

static void _aty_st_pll(unsigned int pll_index, u32 val,
			const struct aty128fb_par *par)
{
	aty_st_8(CLOCK_CNTL_INDEX, (pll_index & 0x3F) | PLL_WR_EN);
	aty_st_le32(CLOCK_CNTL_DATA, val);
}

/* return true when the PLL has completed an atomic update */
static int aty_pll_readupdate(const struct aty128fb_par *par)
{
	return !(aty_ld_pll(PPLL_REF_DIV) & PPLL_ATOMIC_UPDATE_R);
}

/*
 * Poll until the previous atomic PLL update finishes; gives up after
 * ~10ms and only logs, leaving the hardware state as-is.
 */
static void aty_pll_wait_readupdate(const struct aty128fb_par *par)
{
	unsigned long timeout = jiffies + HZ/100;	// should be more than enough
	int reset = 1;

	while (time_before(jiffies, timeout))
		if (aty_pll_readupdate(par)) {
			reset = 0;
			break;
		}

	if (reset)	/* reset engine?? */
		printk(KERN_DEBUG "aty128fb: PLL write timeout!\n");
}

/* tell PLL to update */
static void aty_pll_writeupdate(const struct aty128fb_par *par)
{
	aty_pll_wait_readupdate(par);

	aty_st_pll(PPLL_REF_DIV,
		   aty_ld_pll(PPLL_REF_DIV) | PPLL_ATOMIC_UPDATE_W);
}

/* write to the scratch register to test r/w functionality */
static int __devinit register_test(const struct aty128fb_par *par)
{
	u32 val;
	int flag = 0;

	/* The original scratch contents are saved and restored. */
	val = aty_ld_le32(BIOS_0_SCRATCH);

	aty_st_le32(BIOS_0_SCRATCH, 0x55555555);
	if (aty_ld_le32(BIOS_0_SCRATCH) == 0x55555555) {
		aty_st_le32(BIOS_0_SCRATCH, 0xAAAAAAAA);

		if (aty_ld_le32(BIOS_0_SCRATCH) == 0xAAAAAAAA)
			flag = 1;
	}

	aty_st_le32(BIOS_0_SCRATCH, val);	// restore value
	return flag;
}

/*
 * Accelerator engine functions
 */

/*
 * Busy-wait until at least 'entries' FIFO slots are free (low 12 bits
 * of GUI_STAT); if the bounded poll loop expires, the engine is assumed
 * wedged and gets reset, then polling restarts.
 */
static void do_wait_for_fifo(u16 entries, struct aty128fb_par *par)
{
	int i;

	for (;;) {
		for (i = 0; i < 2000000; i++) {
			par->fifo_slots = aty_ld_le32(GUI_STAT) & 0x0fff;
			if (par->fifo_slots >= entries)
				return;
		}
		aty128_reset_engine(par);
	}
}

/*
 * Drain the FIFO, then wait for the engine-busy bit (GUI_STAT bit 31)
 * to clear; flushes the pixel cache on success. Resets the engine and
 * retries if the poll loop expires.
 */
static void wait_for_idle(struct aty128fb_par *par)
{
	int i;

	do_wait_for_fifo(64, par);

	for (;;) {
		for (i = 0; i < 2000000; i++) {
			if (!(aty_ld_le32(GUI_STAT) & (1 << 31))) {
				aty128_flush_pixel_cache(par);
				par->blitter_may_be_busy = 0;
				return;
			}
		}
		aty128_reset_engine(par);
	}
}

/*
 * Reserve 'entries' FIFO slots from the cached count, refilling the
 * cache (by waiting for a full 64-slot FIFO) when it runs short.
 */
static void wait_for_fifo(u16 entries, struct aty128fb_par *par)
{
	if (par->fifo_slots < entries)
		do_wait_for_fifo(64, par);
	par->fifo_slots -= entries;
}

/* Kick off a pixel-cache flush and poll (bounded) until PC_BUSY clears. */
static void aty128_flush_pixel_cache(const struct aty128fb_par *par)
{
	int i;
	u32 tmp;

	tmp = aty_ld_le32(PC_NGUI_CTLSTAT);
	tmp &= ~(0x00ff);
	tmp |= 0x00ff;
	aty_st_le32(PC_NGUI_CTLSTAT, tmp);

	for (i = 0; i < 2000000; i++)
		if (!(aty_ld_le32(PC_NGUI_CTLSTAT) & PC_BUSY))
			break;
}

/*
 * Soft-reset the drawing engine. The memory clock is forced on around
 * the reset pulse, and CLOCK_CNTL_INDEX / MCLK_CNTL / GEN_RESET_CNTL
 * are restored afterwards — the register sequence (including the
 * read-backs used as flushes) is order-sensitive.
 */
static void aty128_reset_engine(const struct aty128fb_par *par)
{
	u32 gen_reset_cntl, clock_cntl_index, mclk_cntl;

	aty128_flush_pixel_cache(par);

	clock_cntl_index = aty_ld_le32(CLOCK_CNTL_INDEX);
	mclk_cntl = aty_ld_pll(MCLK_CNTL);

	aty_st_pll(MCLK_CNTL, mclk_cntl | 0x00030000);

	gen_reset_cntl = aty_ld_le32(GEN_RESET_CNTL);
	aty_st_le32(GEN_RESET_CNTL, gen_reset_cntl | SOFT_RESET_GUI);
	aty_ld_le32(GEN_RESET_CNTL);
	aty_st_le32(GEN_RESET_CNTL, gen_reset_cntl & ~(SOFT_RESET_GUI));
	aty_ld_le32(GEN_RESET_CNTL);

	aty_st_pll(MCLK_CNTL, mclk_cntl);
	aty_st_le32(CLOCK_CNTL_INDEX, clock_cntl_index);
	aty_st_le32(GEN_RESET_CNTL, gen_reset_cntl);

	/* use old pio mode */
	aty_st_le32(PM4_BUFFER_CNTL, PM4_BUFFER_CNTL_NONPM4);

	DBG("engine reset");
}

/*
 * Program the 2D engine with the mode-dependent defaults (offset,
 * pitch, scissor, master drawing controls, brush/source colors, write
 * mask). Must be called with the mode in par->crtc already decided,
 * since pitch and depth are taken from there.
 */
static void aty128_init_engine(struct aty128fb_par *par)
{
	u32 pitch_value;

	wait_for_idle(par);

	/* 3D scaler not spoken here */
	wait_for_fifo(1, par);
	aty_st_le32(SCALE_3D_CNTL, 0x00000000);

	aty128_reset_engine(par);

	pitch_value = par->crtc.pitch;
	if (par->crtc.bpp == 24) {
		/* 24bpp packs three bytes per pixel, so triple the pitch */
		pitch_value = pitch_value * 3;
	}

	wait_for_fifo(4, par);
	/* setup engine offset registers */
	aty_st_le32(DEFAULT_OFFSET, 0x00000000);

	/* setup engine pitch registers */
	aty_st_le32(DEFAULT_PITCH, pitch_value);

	/* set the default scissor register to max dimensions */
	aty_st_le32(DEFAULT_SC_BOTTOM_RIGHT, (0x1FFF << 16) | 0x1FFF);

	/* set the drawing controls registers */
	aty_st_le32(DP_GUI_MASTER_CNTL,
		    GMC_SRC_PITCH_OFFSET_DEFAULT	|
		    GMC_DST_PITCH_OFFSET_DEFAULT	|
		    GMC_SRC_CLIP_DEFAULT		|
		    GMC_DST_CLIP_DEFAULT		|
		    GMC_BRUSH_SOLIDCOLOR		|
		    (depth_to_dst(par->crtc.depth) << 8) |
		    GMC_SRC_DSTCOLOR			|
		    GMC_BYTE_ORDER_MSB_TO_LSB		|
		    GMC_DP_CONVERSION_TEMP_6500		|
		    ROP3_PATCOPY			|
		    GMC_DP_SRC_RECT			|
		    GMC_3D_FCN_EN_CLR			|
		    GMC_DST_CLR_CMP_FCN_CLEAR		|
		    GMC_AUX_CLIP_CLEAR			|
		    GMC_WRITE_MASK_SET);

	wait_for_fifo(8, par);
	/* clear the line drawing registers */
	aty_st_le32(DST_BRES_ERR, 0);
	aty_st_le32(DST_BRES_INC, 0);
	aty_st_le32(DST_BRES_DEC, 0);

	/* set brush color registers */
	aty_st_le32(DP_BRUSH_FRGD_CLR, 0xFFFFFFFF); /* white */
	aty_st_le32(DP_BRUSH_BKGD_CLR, 0x00000000); /* black */

	/* set source color registers */
	aty_st_le32(DP_SRC_FRGD_CLR, 0xFFFFFFFF); /* white */
	aty_st_le32(DP_SRC_BKGD_CLR, 0x00000000); /* black */

	/* default write mask */
	aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF);

	/* Wait
for all the writes to be completed before returning */ wait_for_idle(par); } /* convert depth values to their register representation */ static u32 depth_to_dst(u32 depth) { if (depth <= 8) return DST_8BPP; else if (depth <= 15) return DST_15BPP; else if (depth == 16) return DST_16BPP; else if (depth <= 24) return DST_24BPP; else if (depth <= 32) return DST_32BPP; return -EINVAL; } /* * PLL informations retreival */ #ifndef __sparc__ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev) { u16 dptr; u8 rom_type; void __iomem *bios; size_t rom_size; /* Fix from ATI for problem with Rage128 hardware not leaving ROM enabled */ unsigned int temp; temp = aty_ld_le32(RAGE128_MPP_TB_CONFIG); temp &= 0x00ffffffu; temp |= 0x04 << 24; aty_st_le32(RAGE128_MPP_TB_CONFIG, temp); temp = aty_ld_le32(RAGE128_MPP_TB_CONFIG); bios = pci_map_rom(dev, &rom_size); if (!bios) { printk(KERN_ERR "aty128fb: ROM failed to map\n"); return NULL; } /* Very simple test to make sure it appeared */ if (BIOS_IN16(0) != 0xaa55) { printk(KERN_DEBUG "aty128fb: Invalid ROM signature %x should " " be 0xaa55\n", BIOS_IN16(0)); goto failed; } /* Look for the PCI data to check the ROM type */ dptr = BIOS_IN16(0x18); /* Check the PCI data signature. If it's wrong, we still assume a normal x86 ROM * for now, until I've verified this works everywhere. The goal here is more * to phase out Open Firmware images. 
* * Currently, we only look at the first PCI data, we could iteratre and deal with * them all, and we should use fb_bios_start relative to start of image and not * relative start of ROM, but so far, I never found a dual-image ATI card * * typedef struct { * u32 signature; + 0x00 * u16 vendor; + 0x04 * u16 device; + 0x06 * u16 reserved_1; + 0x08 * u16 dlen; + 0x0a * u8 drevision; + 0x0c * u8 class_hi; + 0x0d * u16 class_lo; + 0x0e * u16 ilen; + 0x10 * u16 irevision; + 0x12 * u8 type; + 0x14 * u8 indicator; + 0x15 * u16 reserved_2; + 0x16 * } pci_data_t; */ if (BIOS_IN32(dptr) != (('R' << 24) | ('I' << 16) | ('C' << 8) | 'P')) { printk(KERN_WARNING "aty128fb: PCI DATA signature in ROM incorrect: %08x\n", BIOS_IN32(dptr)); goto anyway; } rom_type = BIOS_IN8(dptr + 0x14); switch(rom_type) { case 0: printk(KERN_INFO "aty128fb: Found Intel x86 BIOS ROM Image\n"); break; case 1: printk(KERN_INFO "aty128fb: Found Open Firmware ROM Image\n"); goto failed; case 2: printk(KERN_INFO "aty128fb: Found HP PA-RISC ROM Image\n"); goto failed; default: printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n", rom_type); goto failed; } anyway: return bios; failed: pci_unmap_rom(dev, bios); return NULL; } static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios) { unsigned int bios_hdr; unsigned int bios_pll; bios_hdr = BIOS_IN16(0x48); bios_pll = BIOS_IN16(bios_hdr + 0x30); par->constants.ppll_max = BIOS_IN32(bios_pll + 0x16); par->constants.ppll_min = BIOS_IN32(bios_pll + 0x12); par->constants.xclk = BIOS_IN16(bios_pll + 0x08); par->constants.ref_divider = BIOS_IN16(bios_pll + 0x10); par->constants.ref_clk = BIOS_IN16(bios_pll + 0x0e); DBG("ppll_max %d ppll_min %d xclk %d ref_divider %d ref clock %d\n", par->constants.ppll_max, par->constants.ppll_min, par->constants.xclk, par->constants.ref_divider, par->constants.ref_clk); } #ifdef CONFIG_X86 static void __iomem * __devinit aty128_find_mem_vbios(struct aty128fb_par *par) { /* I 
simplified this code as we used to miss the signatures in * a lot of case. It's now closer to XFree, we just don't check * for signatures at all... Something better will have to be done * if we end up having conflicts */ u32 segstart; unsigned char __iomem *rom_base = NULL; for (segstart=0x000c0000; segstart<0x000f0000; segstart+=0x00001000) { rom_base = ioremap(segstart, 0x10000); if (rom_base == NULL) return NULL; if (readb(rom_base) == 0x55 && readb(rom_base + 1) == 0xaa) break; iounmap(rom_base); rom_base = NULL; } return rom_base; } #endif #endif /* ndef(__sparc__) */ /* fill in known card constants if pll_block is not available */ static void __devinit aty128_timings(struct aty128fb_par *par) { #ifdef CONFIG_PPC_OF /* instead of a table lookup, assume OF has properly * setup the PLL registers and use their values * to set the XCLK values and reference divider values */ u32 x_mpll_ref_fb_div; u32 xclk_cntl; u32 Nx, M; unsigned PostDivSet[] = { 0, 1, 2, 4, 8, 3, 6, 12 }; #endif if (!par->constants.ref_clk) par->constants.ref_clk = 2950; #ifdef CONFIG_PPC_OF x_mpll_ref_fb_div = aty_ld_pll(X_MPLL_REF_FB_DIV); xclk_cntl = aty_ld_pll(XCLK_CNTL) & 0x7; Nx = (x_mpll_ref_fb_div & 0x00ff00) >> 8; M = x_mpll_ref_fb_div & 0x0000ff; par->constants.xclk = round_div((2 * Nx * par->constants.ref_clk), (M * PostDivSet[xclk_cntl])); par->constants.ref_divider = aty_ld_pll(PPLL_REF_DIV) & PPLL_REF_DIV_MASK; #endif if (!par->constants.ref_divider) { par->constants.ref_divider = 0x3b; aty_st_pll(X_MPLL_REF_FB_DIV, 0x004c4c1e); aty_pll_writeupdate(par); } aty_st_pll(PPLL_REF_DIV, par->constants.ref_divider); aty_pll_writeupdate(par); /* from documentation */ if (!par->constants.ppll_min) par->constants.ppll_min = 12500; if (!par->constants.ppll_max) par->constants.ppll_max = 25000; /* 23000 on some cards? 
*/ if (!par->constants.xclk) par->constants.xclk = 0x1d4d; /* same as mclk */ par->constants.fifo_width = 128; par->constants.fifo_depth = 32; switch (aty_ld_le32(MEM_CNTL) & 0x3) { case 0: par->mem = &sdr_128; break; case 1: par->mem = &sdr_sgram; break; case 2: par->mem = &ddr_sgram; break; default: par->mem = &sdr_sgram; } } /* * CRTC programming */ /* Program the CRTC registers */ static void aty128_set_crtc(const struct aty128_crtc *crtc, const struct aty128fb_par *par) { aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl); aty_st_le32(CRTC_H_TOTAL_DISP, crtc->h_total); aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->h_sync_strt_wid); aty_st_le32(CRTC_V_TOTAL_DISP, crtc->v_total); aty_st_le32(CRTC_V_SYNC_STRT_WID, crtc->v_sync_strt_wid); aty_st_le32(CRTC_PITCH, crtc->pitch); aty_st_le32(CRTC_OFFSET, crtc->offset); aty_st_le32(CRTC_OFFSET_CNTL, crtc->offset_cntl); /* Disable ATOMIC updating. Is this the right place? */ aty_st_pll(PPLL_CNTL, aty_ld_pll(PPLL_CNTL) & ~(0x00030000)); } static int aty128_var_to_crtc(const struct fb_var_screeninfo *var, struct aty128_crtc *crtc, const struct aty128fb_par *par) { u32 xres, yres, vxres, vyres, xoffset, yoffset, bpp, dst; u32 left, right, upper, lower, hslen, vslen, sync, vmode; u32 h_total, h_disp, h_sync_strt, h_sync_wid, h_sync_pol; u32 v_total, v_disp, v_sync_strt, v_sync_wid, v_sync_pol, c_sync; u32 depth, bytpp; u8 mode_bytpp[7] = { 0, 0, 1, 2, 2, 3, 4 }; /* input */ xres = var->xres; yres = var->yres; vxres = var->xres_virtual; vyres = var->yres_virtual; xoffset = var->xoffset; yoffset = var->yoffset; bpp = var->bits_per_pixel; left = var->left_margin; right = var->right_margin; upper = var->upper_margin; lower = var->lower_margin; hslen = var->hsync_len; vslen = var->vsync_len; sync = var->sync; vmode = var->vmode; if (bpp != 16) depth = bpp; else depth = (var->green.length == 6) ? 
16 : 15; /* check for mode eligibility * accept only non interlaced modes */ if ((vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED) return -EINVAL; /* convert (and round up) and validate */ xres = (xres + 7) & ~7; xoffset = (xoffset + 7) & ~7; if (vxres < xres + xoffset) vxres = xres + xoffset; if (vyres < yres + yoffset) vyres = yres + yoffset; /* convert depth into ATI register depth */ dst = depth_to_dst(depth); if (dst == -EINVAL) { printk(KERN_ERR "aty128fb: Invalid depth or RGBA\n"); return -EINVAL; } /* convert register depth to bytes per pixel */ bytpp = mode_bytpp[dst]; /* make sure there is enough video ram for the mode */ if ((u32)(vxres * vyres * bytpp) > par->vram_size) { printk(KERN_ERR "aty128fb: Not enough memory for mode\n"); return -EINVAL; } h_disp = (xres >> 3) - 1; h_total = (((xres + right + hslen + left) >> 3) - 1) & 0xFFFFL; v_disp = yres - 1; v_total = (yres + upper + vslen + lower - 1) & 0xFFFFL; /* check to make sure h_total and v_total are in range */ if (((h_total >> 3) - 1) > 0x1ff || (v_total - 1) > 0x7FF) { printk(KERN_ERR "aty128fb: invalid width ranges\n"); return -EINVAL; } h_sync_wid = (hslen + 7) >> 3; if (h_sync_wid == 0) h_sync_wid = 1; else if (h_sync_wid > 0x3f) /* 0x3f = max hwidth */ h_sync_wid = 0x3f; h_sync_strt = (h_disp << 3) + right; v_sync_wid = vslen; if (v_sync_wid == 0) v_sync_wid = 1; else if (v_sync_wid > 0x1f) /* 0x1f = max vwidth */ v_sync_wid = 0x1f; v_sync_strt = v_disp + lower; h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1; v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1; c_sync = sync & FB_SYNC_COMP_HIGH_ACT ? 
(1 << 4) : 0; crtc->gen_cntl = 0x3000000L | c_sync | (dst << 8); crtc->h_total = h_total | (h_disp << 16); crtc->v_total = v_total | (v_disp << 16); crtc->h_sync_strt_wid = h_sync_strt | (h_sync_wid << 16) | (h_sync_pol << 23); crtc->v_sync_strt_wid = v_sync_strt | (v_sync_wid << 16) | (v_sync_pol << 23); crtc->pitch = vxres >> 3; crtc->offset = 0; if ((var->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) crtc->offset_cntl = 0x00010000; else crtc->offset_cntl = 0; crtc->vxres = vxres; crtc->vyres = vyres; crtc->xoffset = xoffset; crtc->yoffset = yoffset; crtc->depth = depth; crtc->bpp = bpp; return 0; } static int aty128_pix_width_to_var(int pix_width, struct fb_var_screeninfo *var) { /* fill in pixel info */ var->red.msb_right = 0; var->green.msb_right = 0; var->blue.offset = 0; var->blue.msb_right = 0; var->transp.offset = 0; var->transp.length = 0; var->transp.msb_right = 0; switch (pix_width) { case CRTC_PIX_WIDTH_8BPP: var->bits_per_pixel = 8; var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.length = 8; break; case CRTC_PIX_WIDTH_15BPP: var->bits_per_pixel = 16; var->red.offset = 10; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.length = 5; break; case CRTC_PIX_WIDTH_16BPP: var->bits_per_pixel = 16; var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.length = 5; break; case CRTC_PIX_WIDTH_24BPP: var->bits_per_pixel = 24; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.length = 8; break; case CRTC_PIX_WIDTH_32BPP: var->bits_per_pixel = 32; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; default: printk(KERN_ERR "aty128fb: Invalid pixel width\n"); return -EINVAL; } return 0; } static int aty128_crtc_to_var(const struct aty128_crtc *crtc, struct fb_var_screeninfo *var) { 
u32 xres, yres, left, right, upper, lower, hslen, vslen, sync; u32 h_total, h_disp, h_sync_strt, h_sync_dly, h_sync_wid, h_sync_pol; u32 v_total, v_disp, v_sync_strt, v_sync_wid, v_sync_pol, c_sync; u32 pix_width; /* fun with masking */ h_total = crtc->h_total & 0x1ff; h_disp = (crtc->h_total >> 16) & 0xff; h_sync_strt = (crtc->h_sync_strt_wid >> 3) & 0x1ff; h_sync_dly = crtc->h_sync_strt_wid & 0x7; h_sync_wid = (crtc->h_sync_strt_wid >> 16) & 0x3f; h_sync_pol = (crtc->h_sync_strt_wid >> 23) & 0x1; v_total = crtc->v_total & 0x7ff; v_disp = (crtc->v_total >> 16) & 0x7ff; v_sync_strt = crtc->v_sync_strt_wid & 0x7ff; v_sync_wid = (crtc->v_sync_strt_wid >> 16) & 0x1f; v_sync_pol = (crtc->v_sync_strt_wid >> 23) & 0x1; c_sync = crtc->gen_cntl & CRTC_CSYNC_EN ? 1 : 0; pix_width = crtc->gen_cntl & CRTC_PIX_WIDTH_MASK; /* do conversions */ xres = (h_disp + 1) << 3; yres = v_disp + 1; left = ((h_total - h_sync_strt - h_sync_wid) << 3) - h_sync_dly; right = ((h_sync_strt - h_disp) << 3) + h_sync_dly; hslen = h_sync_wid << 3; upper = v_total - v_sync_strt - v_sync_wid; lower = v_sync_strt - v_disp; vslen = v_sync_wid; sync = (h_sync_pol ? 0 : FB_SYNC_HOR_HIGH_ACT) | (v_sync_pol ? 0 : FB_SYNC_VERT_HIGH_ACT) | (c_sync ? 
FB_SYNC_COMP_HIGH_ACT : 0); aty128_pix_width_to_var(pix_width, var); var->xres = xres; var->yres = yres; var->xres_virtual = crtc->vxres; var->yres_virtual = crtc->vyres; var->xoffset = crtc->xoffset; var->yoffset = crtc->yoffset; var->left_margin = left; var->right_margin = right; var->upper_margin = upper; var->lower_margin = lower; var->hsync_len = hslen; var->vsync_len = vslen; var->sync = sync; var->vmode = FB_VMODE_NONINTERLACED; return 0; } static void aty128_set_crt_enable(struct aty128fb_par *par, int on) { if (on) { aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) | CRT_CRTC_ON); aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) | DAC_PALETTE2_SNOOP_EN)); } else aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) & ~CRT_CRTC_ON); } static void aty128_set_lcd_enable(struct aty128fb_par *par, int on) { u32 reg; #ifdef CONFIG_FB_ATY128_BACKLIGHT struct fb_info *info = pci_get_drvdata(par->pdev); #endif if (on) { reg = aty_ld_le32(LVDS_GEN_CNTL); reg |= LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGION; reg &= ~LVDS_DISPLAY_DIS; aty_st_le32(LVDS_GEN_CNTL, reg); #ifdef CONFIG_FB_ATY128_BACKLIGHT aty128_bl_set_power(info, FB_BLANK_UNBLANK); #endif } else { #ifdef CONFIG_FB_ATY128_BACKLIGHT aty128_bl_set_power(info, FB_BLANK_POWERDOWN); #endif reg = aty_ld_le32(LVDS_GEN_CNTL); reg |= LVDS_DISPLAY_DIS; aty_st_le32(LVDS_GEN_CNTL, reg); mdelay(100); reg &= ~(LVDS_ON /*| LVDS_EN*/); aty_st_le32(LVDS_GEN_CNTL, reg); } } static void aty128_set_pll(struct aty128_pll *pll, const struct aty128fb_par *par) { u32 div3; unsigned char post_conv[] = /* register values for post dividers */ { 2, 0, 1, 4, 2, 2, 6, 2, 3, 2, 2, 2, 7 }; /* select PPLL_DIV_3 */ aty_st_le32(CLOCK_CNTL_INDEX, aty_ld_le32(CLOCK_CNTL_INDEX) | (3 << 8)); /* reset PLL */ aty_st_pll(PPLL_CNTL, aty_ld_pll(PPLL_CNTL) | PPLL_RESET | PPLL_ATOMIC_UPDATE_EN); /* write the reference divider */ aty_pll_wait_readupdate(par); aty_st_pll(PPLL_REF_DIV, par->constants.ref_divider & 0x3ff); aty_pll_writeupdate(par); div3 = 
aty_ld_pll(PPLL_DIV_3); div3 &= ~PPLL_FB3_DIV_MASK; div3 |= pll->feedback_divider; div3 &= ~PPLL_POST3_DIV_MASK; div3 |= post_conv[pll->post_divider] << 16; /* write feedback and post dividers */ aty_pll_wait_readupdate(par); aty_st_pll(PPLL_DIV_3, div3); aty_pll_writeupdate(par); aty_pll_wait_readupdate(par); aty_st_pll(HTOTAL_CNTL, 0); /* no horiz crtc adjustment */ aty_pll_writeupdate(par); /* clear the reset, just in case */ aty_st_pll(PPLL_CNTL, aty_ld_pll(PPLL_CNTL) & ~PPLL_RESET); } static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll, const struct aty128fb_par *par) { const struct aty128_constants c = par->constants; unsigned char post_dividers[] = {1,2,4,8,3,6,12}; u32 output_freq; u32 vclk; /* in .01 MHz */ int i = 0; u32 n, d; vclk = 100000000 / period_in_ps; /* convert units to 10 kHz */ /* adjust pixel clock if necessary */ if (vclk > c.ppll_max) vclk = c.ppll_max; if (vclk * 12 < c.ppll_min) vclk = c.ppll_min/12; /* now, find an acceptable divider */ for (i = 0; i < ARRAY_SIZE(post_dividers); i++) { output_freq = post_dividers[i] * vclk; if (output_freq >= c.ppll_min && output_freq <= c.ppll_max) { pll->post_divider = post_dividers[i]; break; } } if (i == ARRAY_SIZE(post_dividers)) return -EINVAL; /* calculate feedback divider */ n = c.ref_divider * output_freq; d = c.ref_clk; pll->feedback_divider = round_div(n, d); pll->vclk = vclk; DBG("post %d feedback %d vlck %d output %d ref_divider %d " "vclk_per: %d\n", pll->post_divider, pll->feedback_divider, vclk, output_freq, c.ref_divider, period_in_ps); return 0; } static int aty128_pll_to_var(const struct aty128_pll *pll, struct fb_var_screeninfo *var) { var->pixclock = 100000000 / pll->vclk; return 0; } static void aty128_set_fifo(const struct aty128_ddafifo *dsp, const struct aty128fb_par *par) { aty_st_le32(DDA_CONFIG, dsp->dda_config); aty_st_le32(DDA_ON_OFF, dsp->dda_on_off); } static int aty128_ddafifo(struct aty128_ddafifo *dsp, const struct aty128_pll *pll, u32 depth, const 
struct aty128fb_par *par) { const struct aty128_meminfo *m = par->mem; u32 xclk = par->constants.xclk; u32 fifo_width = par->constants.fifo_width; u32 fifo_depth = par->constants.fifo_depth; s32 x, b, p, ron, roff; u32 n, d, bpp; /* round up to multiple of 8 */ bpp = (depth+7) & ~7; n = xclk * fifo_width; d = pll->vclk * bpp; x = round_div(n, d); ron = 4 * m->MB + 3 * ((m->Trcd - 2 > 0) ? m->Trcd - 2 : 0) + 2 * m->Trp + m->Twr + m->CL + m->Tr2w + x; DBG("x %x\n", x); b = 0; while (x) { x >>= 1; b++; } p = b + 1; ron <<= (11 - p); n <<= (11 - p); x = round_div(n, d); roff = x * (fifo_depth - 4); if ((ron + m->Rloop) >= roff) { printk(KERN_ERR "aty128fb: Mode out of range!\n"); return -EINVAL; } DBG("p: %x rloop: %x x: %x ron: %x roff: %x\n", p, m->Rloop, x, ron, roff); dsp->dda_config = p << 16 | m->Rloop << 20 | x; dsp->dda_on_off = ron << 16 | roff; return 0; } /* * This actually sets the video mode. */ static int aty128fb_set_par(struct fb_info *info) { struct aty128fb_par *par = info->par; u32 config; int err; if ((err = aty128_decode_var(&info->var, par)) != 0) return err; if (par->blitter_may_be_busy) wait_for_idle(par); /* clear all registers that may interfere with mode setting */ aty_st_le32(OVR_CLR, 0); aty_st_le32(OVR_WID_LEFT_RIGHT, 0); aty_st_le32(OVR_WID_TOP_BOTTOM, 0); aty_st_le32(OV0_SCALE_CNTL, 0); aty_st_le32(MPP_TB_CONFIG, 0); aty_st_le32(MPP_GP_CONFIG, 0); aty_st_le32(SUBPIC_CNTL, 0); aty_st_le32(VIPH_CONTROL, 0); aty_st_le32(I2C_CNTL_1, 0); /* turn off i2c */ aty_st_le32(GEN_INT_CNTL, 0); /* turn off interrupts */ aty_st_le32(CAP0_TRIG_CNTL, 0); aty_st_le32(CAP1_TRIG_CNTL, 0); aty_st_8(CRTC_EXT_CNTL + 1, 4); /* turn video off */ aty128_set_crtc(&par->crtc, par); aty128_set_pll(&par->pll, par); aty128_set_fifo(&par->fifo_reg, par); config = aty_ld_le32(CNFG_CNTL) & ~3; #if defined(__BIG_ENDIAN) if (par->crtc.bpp == 32) config |= 2; /* make aperture do 32 bit swapping */ else if (par->crtc.bpp == 16) config |= 1; /* make aperture do 16 bit 
swapping */ #endif aty_st_le32(CNFG_CNTL, config); aty_st_8(CRTC_EXT_CNTL + 1, 0); /* turn the video back on */ info->fix.line_length = (par->crtc.vxres * par->crtc.bpp) >> 3; info->fix.visual = par->crtc.bpp == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; if (par->chip_gen == rage_M3) { aty128_set_crt_enable(par, par->crt_on); aty128_set_lcd_enable(par, par->lcd_on); } if (par->accel_flags & FB_ACCELF_TEXT) aty128_init_engine(par); #ifdef CONFIG_BOOTX_TEXT btext_update_display(info->fix.smem_start, (((par->crtc.h_total>>16) & 0xff)+1)*8, ((par->crtc.v_total>>16) & 0x7ff)+1, par->crtc.bpp, par->crtc.vxres*par->crtc.bpp/8); #endif /* CONFIG_BOOTX_TEXT */ return 0; } /* * encode/decode the User Defined Part of the Display */ static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par) { int err; struct aty128_crtc crtc; struct aty128_pll pll; struct aty128_ddafifo fifo_reg; if ((err = aty128_var_to_crtc(var, &crtc, par))) return err; if ((err = aty128_var_to_pll(var->pixclock, &pll, par))) return err; if ((err = aty128_ddafifo(&fifo_reg, &pll, crtc.depth, par))) return err; par->crtc = crtc; par->pll = pll; par->fifo_reg = fifo_reg; par->accel_flags = var->accel_flags; return 0; } static int aty128_encode_var(struct fb_var_screeninfo *var, const struct aty128fb_par *par) { int err; if ((err = aty128_crtc_to_var(&par->crtc, var))) return err; if ((err = aty128_pll_to_var(&par->pll, var))) return err; var->nonstd = 0; var->activate = 0; var->height = -1; var->width = -1; var->accel_flags = par->accel_flags; return 0; } static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct aty128fb_par par; int err; par = *(struct aty128fb_par *)info->par; if ((err = aty128_decode_var(var, &par)) != 0) return err; aty128_encode_var(var, &par); return 0; } /* * Pan or Wrap the Display */ static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fb) { struct aty128fb_par *par = fb->par; u32 xoffset, 
yoffset; u32 offset; u32 xres, yres; xres = (((par->crtc.h_total >> 16) & 0xff) + 1) << 3; yres = ((par->crtc.v_total >> 16) & 0x7ff) + 1; xoffset = (var->xoffset +7) & ~7; yoffset = var->yoffset; if (xoffset+xres > par->crtc.vxres || yoffset+yres > par->crtc.vyres) return -EINVAL; par->crtc.xoffset = xoffset; par->crtc.yoffset = yoffset; offset = ((yoffset * par->crtc.vxres + xoffset)*(par->crtc.bpp >> 3)) & ~7; if (par->crtc.bpp == 24) offset += 8 * (offset % 3); /* Must be multiple of 8 and 3 */ aty_st_le32(CRTC_OFFSET, offset); return 0; } /* * Helper function to store a single palette register */ static void aty128_st_pal(u_int regno, u_int red, u_int green, u_int blue, struct aty128fb_par *par) { if (par->chip_gen == rage_M3) { #if 0 /* Note: For now, on M3, we set palette on both heads, which may * be useless. Can someone with a M3 check this ? * * This code would still be useful if using the second CRTC to * do mirroring */ aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | DAC_PALETTE_ACCESS_CNTL); aty_st_8(PALETTE_INDEX, regno); aty_st_le32(PALETTE_DATA, (red<<16)|(green<<8)|blue); #endif aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & ~DAC_PALETTE_ACCESS_CNTL); } aty_st_8(PALETTE_INDEX, regno); aty_st_le32(PALETTE_DATA, (red<<16)|(green<<8)|blue); } static int aty128fb_sync(struct fb_info *info) { struct aty128fb_par *par = info->par; if (par->blitter_may_be_busy) wait_for_idle(par); return 0; } #ifndef MODULE static int __devinit aty128fb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!strncmp(this_opt, "lcd:", 4)) { default_lcd_on = simple_strtoul(this_opt+4, NULL, 0); continue; } else if (!strncmp(this_opt, "crt:", 4)) { default_crt_on = simple_strtoul(this_opt+4, NULL, 0); continue; } else if (!strncmp(this_opt, "backlight:", 10)) { backlight = simple_strtoul(this_opt+10, NULL, 0); continue; } #ifdef CONFIG_MTRR if(!strncmp(this_opt, "nomtrr", 6)) { mtrr = 0; continue; } 
#endif #ifdef CONFIG_PPC_PMAC /* vmode and cmode deprecated */ if (!strncmp(this_opt, "vmode:", 6)) { unsigned int vmode = simple_strtoul(this_opt+6, NULL, 0); if (vmode > 0 && vmode <= VMODE_MAX) default_vmode = vmode; continue; } else if (!strncmp(this_opt, "cmode:", 6)) { unsigned int cmode = simple_strtoul(this_opt+6, NULL, 0); switch (cmode) { case 0: case 8: default_cmode = CMODE_8; break; case 15: case 16: default_cmode = CMODE_16; break; case 24: case 32: default_cmode = CMODE_32; break; } continue; } #endif /* CONFIG_PPC_PMAC */ mode_option = this_opt; } return 0; } #endif /* MODULE */ /* Backlight */ #ifdef CONFIG_FB_ATY128_BACKLIGHT #define MAX_LEVEL 0xFF static int aty128_bl_get_level_brightness(struct aty128fb_par *par, int level) { struct fb_info *info = pci_get_drvdata(par->pdev); int atylevel; /* Get and convert the value */ /* No locking of bl_curve since we read a single value */ atylevel = MAX_LEVEL - (info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_LEVEL); if (atylevel < 0) atylevel = 0; else if (atylevel > MAX_LEVEL) atylevel = MAX_LEVEL; return atylevel; } /* We turn off the LCD completely instead of just dimming the backlight. 
* This provides greater power saving and the display is useless without * backlight anyway */ #define BACKLIGHT_LVDS_OFF /* That one prevents proper CRT output with LCD off */ #undef BACKLIGHT_DAC_OFF static int aty128_bl_update_status(struct backlight_device *bd) { struct aty128fb_par *par = bl_get_data(bd); unsigned int reg = aty_ld_le32(LVDS_GEN_CNTL); int level; if (bd->props.power != FB_BLANK_UNBLANK || bd->props.fb_blank != FB_BLANK_UNBLANK || !par->lcd_on) level = 0; else level = bd->props.brightness; reg |= LVDS_BL_MOD_EN | LVDS_BLON; if (level > 0) { reg |= LVDS_DIGION; if (!(reg & LVDS_ON)) { reg &= ~LVDS_BLON; aty_st_le32(LVDS_GEN_CNTL, reg); aty_ld_le32(LVDS_GEN_CNTL); mdelay(10); reg |= LVDS_BLON; aty_st_le32(LVDS_GEN_CNTL, reg); } reg &= ~LVDS_BL_MOD_LEVEL_MASK; reg |= (aty128_bl_get_level_brightness(par, level) << LVDS_BL_MOD_LEVEL_SHIFT); #ifdef BACKLIGHT_LVDS_OFF reg |= LVDS_ON | LVDS_EN; reg &= ~LVDS_DISPLAY_DIS; #endif aty_st_le32(LVDS_GEN_CNTL, reg); #ifdef BACKLIGHT_DAC_OFF aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & (~DAC_PDWN)); #endif } else { reg &= ~LVDS_BL_MOD_LEVEL_MASK; reg |= (aty128_bl_get_level_brightness(par, 0) << LVDS_BL_MOD_LEVEL_SHIFT); #ifdef BACKLIGHT_LVDS_OFF reg |= LVDS_DISPLAY_DIS; aty_st_le32(LVDS_GEN_CNTL, reg); aty_ld_le32(LVDS_GEN_CNTL); udelay(10); reg &= ~(LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGION); #endif aty_st_le32(LVDS_GEN_CNTL, reg); #ifdef BACKLIGHT_DAC_OFF aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | DAC_PDWN); #endif } return 0; } static int aty128_bl_get_brightness(struct backlight_device *bd) { return bd->props.brightness; } static struct backlight_ops aty128_bl_data = { .get_brightness = aty128_bl_get_brightness, .update_status = aty128_bl_update_status, }; static void aty128_bl_set_power(struct fb_info *info, int power) { if (info->bl_dev) { info->bl_dev->props.power = power; backlight_update_status(info->bl_dev); } } static void aty128_bl_init(struct aty128fb_par *par) { struct backlight_properties 
props; struct fb_info *info = pci_get_drvdata(par->pdev); struct backlight_device *bd; char name[12]; /* Could be extended to Rage128Pro LVDS output too */ if (par->chip_gen != rage_M3) return; #ifdef CONFIG_PMAC_BACKLIGHT if (!pmac_has_backlight_type("ati")) return; #endif snprintf(name, sizeof(name), "aty128bl%d", info->node); memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = FB_BACKLIGHT_LEVELS - 1; bd = backlight_device_register(name, info->dev, par, &aty128_bl_data, &props); if (IS_ERR(bd)) { info->bl_dev = NULL; printk(KERN_WARNING "aty128: Backlight registration failed\n"); goto error; } info->bl_dev = bd; fb_bl_default_curve(info, 0, 63 * FB_BACKLIGHT_MAX / MAX_LEVEL, 219 * FB_BACKLIGHT_MAX / MAX_LEVEL); bd->props.brightness = bd->props.max_brightness; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); printk("aty128: Backlight initialized (%s)\n", name); return; error: return; } static void aty128_bl_exit(struct backlight_device *bd) { backlight_device_unregister(bd); printk("aty128: Backlight unloaded\n"); } #endif /* CONFIG_FB_ATY128_BACKLIGHT */ /* * Initialisation */ #ifdef CONFIG_PPC_PMAC__disabled static void aty128_early_resume(void *data) { struct aty128fb_par *par = data; if (try_acquire_console_sem()) return; pci_restore_state(par->pdev); aty128_do_resume(par->pdev); release_console_sem(); } #endif /* CONFIG_PPC_PMAC */ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent) { struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par = info->par; struct fb_var_screeninfo var; char video_card[50]; u8 chip_rev; u32 dac; /* Get the chip revision */ chip_rev = (aty_ld_le32(CNFG_CNTL) >> 16) & 0x1F; strcpy(video_card, "Rage128 XX "); video_card[8] = ent->device >> 8; video_card[9] = ent->device & 0xFF; /* range check to make sure */ if (ent->driver_data < ARRAY_SIZE(r128_family)) strlcat(video_card, r128_family[ent->driver_data], sizeof(video_card)); 
printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev); if (par->vram_size % (1024 * 1024) == 0) printk("%dM %s\n", par->vram_size / (1024*1024), par->mem->name); else printk("%dk %s\n", par->vram_size / 1024, par->mem->name); par->chip_gen = ent->driver_data; /* fill in info */ info->fbops = &aty128fb_ops; info->flags = FBINFO_FLAG_DEFAULT; par->lcd_on = default_lcd_on; par->crt_on = default_crt_on; var = default_var; #ifdef CONFIG_PPC_PMAC if (machine_is(powermac)) { /* Indicate sleep capability */ if (par->chip_gen == rage_M3) { pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, NULL, 0, 1); #if 0 /* Disable the early video resume hack for now as it's causing problems, among * others we now rely on the PCI core restoring the config space for us, which * isn't the case with that hack, and that code path causes various things to * be called with interrupts off while they shouldn't. I'm leaving the code in * as it can be useful for debugging purposes */ pmac_set_early_video_resume(aty128_early_resume, par); #endif } /* Find default mode */ if (mode_option) { if (!mac_find_mode(&var, info, mode_option, 8)) var = default_var; } else { if (default_vmode <= 0 || default_vmode > VMODE_MAX) default_vmode = VMODE_1024_768_60; /* iMacs need that resolution * PowerMac2,1 first r128 iMacs * PowerMac2,2 summer 2000 iMacs * PowerMac4,1 january 2001 iMacs "flower power" */ if (of_machine_is_compatible("PowerMac2,1") || of_machine_is_compatible("PowerMac2,2") || of_machine_is_compatible("PowerMac4,1")) default_vmode = VMODE_1024_768_75; /* iBook SE */ if (of_machine_is_compatible("PowerBook2,2")) default_vmode = VMODE_800_600_60; /* PowerBook Firewire (Pismo), iBook Dual USB */ if (of_machine_is_compatible("PowerBook3,1") || of_machine_is_compatible("PowerBook4,1")) default_vmode = VMODE_1024_768_60; /* PowerBook Titanium */ if (of_machine_is_compatible("PowerBook3,2")) default_vmode = VMODE_1152_768_60; if (default_cmode > 16) default_cmode = CMODE_32; else if 
(default_cmode > 8) default_cmode = CMODE_16; else default_cmode = CMODE_8; if (mac_vmode_to_var(default_vmode, default_cmode, &var)) var = default_var; } } else #endif /* CONFIG_PPC_PMAC */ { if (mode_option) if (fb_find_mode(&var, info, mode_option, NULL, 0, &defaultmode, 8) == 0) var = default_var; } var.accel_flags &= ~FB_ACCELF_TEXT; // var.accel_flags |= FB_ACCELF_TEXT;/* FIXME Will add accel later */ if (aty128fb_check_var(&var, info)) { printk(KERN_ERR "aty128fb: Cannot set default mode.\n"); return 0; } /* setup the DAC the way we like it */ dac = aty_ld_le32(DAC_CNTL); dac |= (DAC_8BIT_EN | DAC_RANGE_CNTL); dac |= DAC_MASK; if (par->chip_gen == rage_M3) dac |= DAC_PALETTE2_SNOOP_EN; aty_st_le32(DAC_CNTL, dac); /* turn off bus mastering, just in case */ aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL) | BUS_MASTER_DIS); info->var = var; fb_alloc_cmap(&info->cmap, 256, 0); var.activate = FB_ACTIVATE_NOW; aty128_init_engine(par); par->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM); par->pdev = pdev; par->asleep = 0; par->lock_blank = 0; #ifdef CONFIG_FB_ATY128_BACKLIGHT if (backlight) aty128_bl_init(par); #endif if (register_framebuffer(info) < 0) return 0; printk(KERN_INFO "fb%d: %s frame buffer device on %s\n", info->node, info->fix.id, video_card); return 1; /* success! 
*/ } #ifdef CONFIG_PCI /* register a card ++ajoshi */ static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned long fb_addr, reg_addr; struct aty128fb_par *par; struct fb_info *info; int err; #ifndef __sparc__ void __iomem *bios = NULL; #endif /* Enable device in PCI config */ if ((err = pci_enable_device(pdev))) { printk(KERN_ERR "aty128fb: Cannot enable PCI device: %d\n", err); return -ENODEV; } fb_addr = pci_resource_start(pdev, 0); if (!request_mem_region(fb_addr, pci_resource_len(pdev, 0), "aty128fb FB")) { printk(KERN_ERR "aty128fb: cannot reserve frame " "buffer memory\n"); return -ENODEV; } reg_addr = pci_resource_start(pdev, 2); if (!request_mem_region(reg_addr, pci_resource_len(pdev, 2), "aty128fb MMIO")) { printk(KERN_ERR "aty128fb: cannot reserve MMIO region\n"); goto err_free_fb; } /* We have the resources. Now virtualize them */ info = framebuffer_alloc(sizeof(struct aty128fb_par), &pdev->dev); if (info == NULL) { printk(KERN_ERR "aty128fb: can't alloc fb_info_aty128\n"); goto err_free_mmio; } par = info->par; info->pseudo_palette = par->pseudo_palette; /* Virtualize mmio region */ info->fix.mmio_start = reg_addr; par->regbase = pci_ioremap_bar(pdev, 2); if (!par->regbase) goto err_free_info; /* Grab memory size from the card */ // How does this relate to the resource length from the PCI hardware? 
par->vram_size = aty_ld_le32(CNFG_MEMSIZE) & 0x03FFFFFF; /* Virtualize the framebuffer */ info->screen_base = ioremap(fb_addr, par->vram_size); if (!info->screen_base) goto err_unmap_out; /* Set up info->fix */ info->fix = aty128fb_fix; info->fix.smem_start = fb_addr; info->fix.smem_len = par->vram_size; info->fix.mmio_start = reg_addr; /* If we can't test scratch registers, something is seriously wrong */ if (!register_test(par)) { printk(KERN_ERR "aty128fb: Can't write to video register!\n"); goto err_out; } #ifndef __sparc__ bios = aty128_map_ROM(par, pdev); #ifdef CONFIG_X86 if (bios == NULL) bios = aty128_find_mem_vbios(par); #endif if (bios == NULL) printk(KERN_INFO "aty128fb: BIOS not located, guessing timings.\n"); else { printk(KERN_INFO "aty128fb: Rage128 BIOS located\n"); aty128_get_pllinfo(par, bios); pci_unmap_rom(pdev, bios); } #endif /* __sparc__ */ aty128_timings(par); pci_set_drvdata(pdev, info); if (!aty128_init(pdev, ent)) goto err_out; #ifdef CONFIG_MTRR if (mtrr) { par->mtrr.vram = mtrr_add(info->fix.smem_start, par->vram_size, MTRR_TYPE_WRCOMB, 1); par->mtrr.vram_valid = 1; /* let there be speed */ printk(KERN_INFO "aty128fb: Rage128 MTRR set to ON\n"); } #endif /* CONFIG_MTRR */ return 0; err_out: iounmap(info->screen_base); err_unmap_out: iounmap(par->regbase); err_free_info: framebuffer_release(info); err_free_mmio: release_mem_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); err_free_fb: release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); return -ENODEV; } static void __devexit aty128_remove(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par; if (!info) return; par = info->par; unregister_framebuffer(info); #ifdef CONFIG_FB_ATY128_BACKLIGHT aty128_bl_exit(info->bl_dev); #endif #ifdef CONFIG_MTRR if (par->mtrr.vram_valid) mtrr_del(par->mtrr.vram, info->fix.smem_start, par->vram_size); #endif /* CONFIG_MTRR */ iounmap(par->regbase); iounmap(info->screen_base); 
release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); release_mem_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); framebuffer_release(info); } #endif /* CONFIG_PCI */ /* * Blank the display. */ static int aty128fb_blank(int blank, struct fb_info *fb) { struct aty128fb_par *par = fb->par; u8 state; if (par->lock_blank || par->asleep) return 0; switch (blank) { case FB_BLANK_NORMAL: state = 4; break; case FB_BLANK_VSYNC_SUSPEND: state = 6; break; case FB_BLANK_HSYNC_SUSPEND: state = 5; break; case FB_BLANK_POWERDOWN: state = 7; break; case FB_BLANK_UNBLANK: default: state = 0; break; } aty_st_8(CRTC_EXT_CNTL+1, state); if (par->chip_gen == rage_M3) { aty128_set_crt_enable(par, par->crt_on && !blank); aty128_set_lcd_enable(par, par->lcd_on && !blank); } return 0; } /* * Set a single color register. The values supplied are already * rounded down to the hardware's capabilities (according to the * entries in the var structure). Return != 0 for invalid regno. */ static int aty128fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct aty128fb_par *par = info->par; if (regno > 255 || (par->crtc.depth == 16 && regno > 63) || (par->crtc.depth == 15 && regno > 31)) return 1; red >>= 8; green >>= 8; blue >>= 8; if (regno < 16) { int i; u32 *pal = info->pseudo_palette; switch (par->crtc.depth) { case 15: pal[regno] = (regno << 10) | (regno << 5) | regno; break; case 16: pal[regno] = (regno << 11) | (regno << 6) | regno; break; case 24: pal[regno] = (regno << 16) | (regno << 8) | regno; break; case 32: i = (regno << 8) | regno; pal[regno] = (i << 16) | i; break; } } if (par->crtc.depth == 16 && regno > 0) { /* * With the 5-6-5 split of bits for RGB at 16 bits/pixel, we * have 32 slots for R and B values but 64 slots for G values. * Thus the R and B values go in one slot but the G value * goes in a different slot, and we have to avoid disturbing * the other fields in the slots we touch. 
*/ par->green[regno] = green; if (regno < 32) { par->red[regno] = red; par->blue[regno] = blue; aty128_st_pal(regno * 8, red, par->green[regno*2], blue, par); } red = par->red[regno/2]; blue = par->blue[regno/2]; regno <<= 2; } else if (par->crtc.bpp == 16) regno <<= 3; aty128_st_pal(regno, red, green, blue, par); return 0; } #define ATY_MIRROR_LCD_ON 0x00000001 #define ATY_MIRROR_CRT_ON 0x00000002 /* out param: u32* backlight value: 0 to 15 */ #define FBIO_ATY128_GET_MIRROR _IOR('@', 1, __u32) /* in param: u32* backlight value: 0 to 15 */ #define FBIO_ATY128_SET_MIRROR _IOW('@', 2, __u32) static int aty128fb_ioctl(struct fb_info *info, u_int cmd, u_long arg) { struct aty128fb_par *par = info->par; u32 value; int rc; switch (cmd) { case FBIO_ATY128_SET_MIRROR: if (par->chip_gen != rage_M3) return -EINVAL; rc = get_user(value, (__u32 __user *)arg); if (rc) return rc; par->lcd_on = (value & 0x01) != 0; par->crt_on = (value & 0x02) != 0; if (!par->crt_on && !par->lcd_on) par->lcd_on = 1; aty128_set_crt_enable(par, par->crt_on); aty128_set_lcd_enable(par, par->lcd_on); return 0; case FBIO_ATY128_GET_MIRROR: if (par->chip_gen != rage_M3) return -EINVAL; value = (par->crt_on << 1) | par->lcd_on; return put_user(value, (__u32 __user *)arg); } return -EINVAL; } #if 0 /* * Accelerated functions */ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty, u_int width, u_int height, struct fb_info_aty128 *par) { u32 save_dp_datatype, save_dp_cntl, dstval; if (!width || !height) return; dstval = depth_to_dst(par->current_par.crtc.depth); if (dstval == DST_24BPP) { srcx *= 3; dstx *= 3; width *= 3; } else if (dstval == -EINVAL) { printk("aty128fb: invalid depth or RGBA\n"); return; } wait_for_fifo(2, par); save_dp_datatype = aty_ld_le32(DP_DATATYPE); save_dp_cntl = aty_ld_le32(DP_CNTL); wait_for_fifo(6, par); aty_st_le32(SRC_Y_X, (srcy << 16) | srcx); aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT); aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | 
DST_Y_TOP_TO_BOTTOM); aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR); aty_st_le32(DST_Y_X, (dsty << 16) | dstx); aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width); par->blitter_may_be_busy = 1; wait_for_fifo(2, par); aty_st_le32(DP_DATATYPE, save_dp_datatype); aty_st_le32(DP_CNTL, save_dp_cntl); } /* * Text mode accelerated functions */ static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy, int dx, int height, int width) { sx *= fontwidth(p); sy *= fontheight(p); dx *= fontwidth(p); dy *= fontheight(p); width *= fontwidth(p); height *= fontheight(p); aty128_rectcopy(sx, sy, dx, dy, width, height, (struct fb_info_aty128 *)p->fb_info); } #endif /* 0 */ static void aty128_set_suspend(struct aty128fb_par *par, int suspend) { u32 pmgt; struct pci_dev *pdev = par->pdev; if (!par->pm_reg) return; /* Set the chip into the appropriate suspend mode (we use D2, * D3 would require a complete re-initialisation of the chip, * including PCI config registers, clocks, AGP configuration, ...) * * For resume, the core will have already brought us back to D0 */ if (suspend) { /* Make sure CRTC2 is reset. 
Remove that the day we decide to * actually use CRTC2 and replace it with real code for disabling * the CRTC2 output during sleep */ aty_st_le32(CRTC2_GEN_CNTL, aty_ld_le32(CRTC2_GEN_CNTL) & ~(CRTC2_EN)); /* Set the power management mode to be PCI based */ /* Use this magic value for now */ pmgt = 0x0c005407; aty_st_pll(POWER_MANAGEMENT, pmgt); (void)aty_ld_pll(POWER_MANAGEMENT); aty_st_le32(BUS_CNTL1, 0x00000010); aty_st_le32(MEM_POWER_MISC, 0x0c830000); mdelay(100); /* Switch PCI power management to D2 */ pci_set_power_state(pdev, PCI_D2); } } static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par = info->par; /* Because we may change PCI D state ourselves, we need to * first save the config space content so the core can * restore it properly on resume. */ pci_save_state(pdev); /* We don't do anything but D2, for now we return 0, but * we may want to change that. How do we know if the BIOS * can properly take care of D3 ? Also, with swsusp, we * know we'll be rebooted, ... */ #ifndef CONFIG_PPC_PMAC /* HACK ALERT ! Once I find a proper way to say to each driver * individually what will happen with it's PCI slot, I'll change * that. On laptops, the AGP slot is just unclocked, so D2 is * expected, while on desktops, the card is powered off */ return 0; #endif /* CONFIG_PPC_PMAC */ if (state.event == pdev->dev.power.power_state.event) return 0; printk(KERN_DEBUG "aty128fb: suspending...\n"); acquire_console_sem(); fb_set_suspend(info, 1); /* Make sure engine is reset */ wait_for_idle(par); aty128_reset_engine(par); wait_for_idle(par); /* Blank display and LCD */ aty128fb_blank(FB_BLANK_POWERDOWN, info); /* Sleep */ par->asleep = 1; par->lock_blank = 1; #ifdef CONFIG_PPC_PMAC /* On powermac, we have hooks to properly suspend/resume AGP now, * use them here. 
We'll ultimately need some generic support here, * but the generic code isn't quite ready for that yet */ pmac_suspend_agp_for_card(pdev); #endif /* CONFIG_PPC_PMAC */ /* We need a way to make sure the fbdev layer will _not_ touch the * framebuffer before we put the chip to suspend state. On 2.4, I * used dummy fb ops, 2.5 need proper support for this at the * fbdev level */ if (state.event != PM_EVENT_ON) aty128_set_suspend(par, 1); release_console_sem(); pdev->dev.power.power_state = state; return 0; } static int aty128_do_resume(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par = info->par; if (pdev->dev.power.power_state.event == PM_EVENT_ON) return 0; /* PCI state will have been restored by the core, so * we should be in D0 now with our config space fully * restored */ /* Wakeup chip */ aty128_set_suspend(par, 0); par->asleep = 0; /* Restore display & engine */ aty128_reset_engine(par); wait_for_idle(par); aty128fb_set_par(info); fb_pan_display(info, &info->var); fb_set_cmap(&info->cmap, info); /* Refresh */ fb_set_suspend(info, 0); /* Unblank */ par->lock_blank = 0; aty128fb_blank(0, info); #ifdef CONFIG_PPC_PMAC /* On powermac, we have hooks to properly suspend/resume AGP now, * use them here. 
We'll ultimately need some generic support here, * but the generic code isn't quite ready for that yet */ pmac_resume_agp_for_card(pdev); #endif /* CONFIG_PPC_PMAC */ pdev->dev.power.power_state = PMSG_ON; printk(KERN_DEBUG "aty128fb: resumed !\n"); return 0; } static int aty128_pci_resume(struct pci_dev *pdev) { int rc; acquire_console_sem(); rc = aty128_do_resume(pdev); release_console_sem(); return rc; } static int __devinit aty128fb_init(void) { #ifndef MODULE char *option = NULL; if (fb_get_options("aty128fb", &option)) return -ENODEV; aty128fb_setup(option); #endif return pci_register_driver(&aty128fb_driver); } static void __exit aty128fb_exit(void) { pci_unregister_driver(&aty128fb_driver); } module_init(aty128fb_init); module_exit(aty128fb_exit); MODULE_AUTHOR("(c)1999-2003 Brad Douglas <brad@neruo.com>"); MODULE_DESCRIPTION("FBDev driver for ATI Rage128 / Pro cards"); MODULE_LICENSE("GPL"); module_param(mode_option, charp, 0); MODULE_PARM_DESC(mode_option, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" "); #ifdef CONFIG_MTRR module_param_named(nomtrr, mtrr, invbool, 0); MODULE_PARM_DESC(nomtrr, "bool: Disable MTRR support (0 or 1=disabled) (default=0)"); #endif
gpl-2.0
pocketbook-free/kernel_622
drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
1455
1061
/* dvb-usb-i2c.c is part of the DVB USB library. * * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) * see dvb-usb-init.c for copyright information. * * This file contains functions for (de-)initializing an I2C adapter. */ #include "dvb-usb-common.h" int dvb_usb_i2c_init(struct dvb_usb_device *d) { int ret = 0; if (!(d->props.caps & DVB_USB_IS_AN_I2C_ADAPTER)) return 0; if (d->props.i2c_algo == NULL) { err("no i2c algorithm specified"); return -EINVAL; } strlcpy(d->i2c_adap.name, d->desc->name, sizeof(d->i2c_adap.name)); d->i2c_adap.class = I2C_CLASS_TV_DIGITAL, d->i2c_adap.algo = d->props.i2c_algo; d->i2c_adap.algo_data = NULL; d->i2c_adap.dev.parent = &d->udev->dev; i2c_set_adapdata(&d->i2c_adap, d); if ((ret = i2c_add_adapter(&d->i2c_adap)) < 0) err("could not add i2c adapter"); d->state |= DVB_USB_STATE_I2C; return ret; } int dvb_usb_i2c_exit(struct dvb_usb_device *d) { if (d->state & DVB_USB_STATE_I2C) i2c_del_adapter(&d->i2c_adap); d->state &= ~DVB_USB_STATE_I2C; return 0; }
gpl-2.0
yangyang1989/linux-2.6.32.2-mini2440
drivers/pnp/card.c
1711
10228
/* * card.c - contains functions for managing groups of PnP devices * * Copyright 2002 Adam Belay <ambx1@neo.rr.com> */ #include <linux/module.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/pnp.h> #include <linux/dma-mapping.h> #include "base.h" LIST_HEAD(pnp_cards); static LIST_HEAD(pnp_card_drivers); static const struct pnp_card_device_id *match_card(struct pnp_card_driver *drv, struct pnp_card *card) { const struct pnp_card_device_id *drv_id = drv->id_table; while (*drv_id->id) { if (compare_pnp_id(card->id, drv_id->id)) { int i = 0; for (;;) { int found; struct pnp_dev *dev; if (i == PNP_MAX_DEVICES || !*drv_id->devs[i].id) return drv_id; found = 0; card_for_each_dev(card, dev) { if (compare_pnp_id(dev->id, drv_id->devs[i].id)) { found = 1; break; } } if (!found) break; i++; } } drv_id++; } return NULL; } static void card_remove(struct pnp_dev *dev) { dev->card_link = NULL; } static void card_remove_first(struct pnp_dev *dev) { struct pnp_card_driver *drv = to_pnp_card_driver(dev->driver); if (!dev->card || !drv) return; if (drv->remove) drv->remove(dev->card_link); drv->link.remove = &card_remove; kfree(dev->card_link); card_remove(dev); } static int card_probe(struct pnp_card *card, struct pnp_card_driver *drv) { const struct pnp_card_device_id *id; struct pnp_card_link *clink; struct pnp_dev *dev; if (!drv->probe) return 0; id = match_card(drv, card); if (!id) return 0; clink = pnp_alloc(sizeof(*clink)); if (!clink) return 0; clink->card = card; clink->driver = drv; clink->pm_state = PMSG_ON; if (drv->probe(clink, id) >= 0) return 1; /* Recovery */ card_for_each_dev(card, dev) { if (dev->card_link == clink) pnp_release_card_device(dev); } kfree(clink); return 0; } /** * pnp_add_card_id - adds an EISA id to the specified card * @id: pointer to a pnp_id structure * @card: pointer to the desired card */ static struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id) { struct pnp_id *dev_id, *ptr; dev_id = kzalloc(sizeof(struct 
pnp_id), GFP_KERNEL); if (!dev_id) return NULL; dev_id->id[0] = id[0]; dev_id->id[1] = id[1]; dev_id->id[2] = id[2]; dev_id->id[3] = tolower(id[3]); dev_id->id[4] = tolower(id[4]); dev_id->id[5] = tolower(id[5]); dev_id->id[6] = tolower(id[6]); dev_id->id[7] = '\0'; dev_id->next = NULL; ptr = card->id; while (ptr && ptr->next) ptr = ptr->next; if (ptr) ptr->next = dev_id; else card->id = dev_id; return dev_id; } static void pnp_free_card_ids(struct pnp_card *card) { struct pnp_id *id; struct pnp_id *next; id = card->id; while (id) { next = id->next; kfree(id); id = next; } } static void pnp_release_card(struct device *dmdev) { struct pnp_card *card = to_pnp_card(dmdev); pnp_free_card_ids(card); kfree(card); } struct pnp_card *pnp_alloc_card(struct pnp_protocol *protocol, int id, char *pnpid) { struct pnp_card *card; struct pnp_id *dev_id; card = kzalloc(sizeof(struct pnp_card), GFP_KERNEL); if (!card) return NULL; card->protocol = protocol; card->number = id; card->dev.parent = &card->protocol->dev; dev_set_name(&card->dev, "%02x:%02x", card->protocol->number, card->number); card->dev.coherent_dma_mask = DMA_BIT_MASK(24); card->dev.dma_mask = &card->dev.coherent_dma_mask; dev_id = pnp_add_card_id(card, pnpid); if (!dev_id) { kfree(card); return NULL; } return card; } static ssize_t pnp_show_card_name(struct device *dmdev, struct device_attribute *attr, char *buf) { char *str = buf; struct pnp_card *card = to_pnp_card(dmdev); str += sprintf(str, "%s\n", card->name); return (str - buf); } static DEVICE_ATTR(name, S_IRUGO, pnp_show_card_name, NULL); static ssize_t pnp_show_card_ids(struct device *dmdev, struct device_attribute *attr, char *buf) { char *str = buf; struct pnp_card *card = to_pnp_card(dmdev); struct pnp_id *pos = card->id; while (pos) { str += sprintf(str, "%s\n", pos->id); pos = pos->next; } return (str - buf); } static DEVICE_ATTR(card_id, S_IRUGO, pnp_show_card_ids, NULL); static int pnp_interface_attach_card(struct pnp_card *card) { int rc = 
device_create_file(&card->dev, &dev_attr_name); if (rc) return rc; rc = device_create_file(&card->dev, &dev_attr_card_id); if (rc) goto err_name; return 0; err_name: device_remove_file(&card->dev, &dev_attr_name); return rc; } /** * pnp_add_card - adds a PnP card to the PnP Layer * @card: pointer to the card to add */ int pnp_add_card(struct pnp_card *card) { int error; struct list_head *pos, *temp; card->dev.bus = NULL; card->dev.release = &pnp_release_card; error = device_register(&card->dev); if (error) { dev_err(&card->dev, "could not register (err=%d)\n", error); return error; } pnp_interface_attach_card(card); spin_lock(&pnp_lock); list_add_tail(&card->global_list, &pnp_cards); list_add_tail(&card->protocol_list, &card->protocol->cards); spin_unlock(&pnp_lock); /* we wait until now to add devices in order to ensure the drivers * will be able to use all of the related devices on the card * without waiting an unreasonable length of time */ list_for_each(pos, &card->devices) { struct pnp_dev *dev = card_to_pnp_dev(pos); __pnp_add_device(dev); } /* match with card drivers */ list_for_each_safe(pos, temp, &pnp_card_drivers) { struct pnp_card_driver *drv = list_entry(pos, struct pnp_card_driver, global_list); card_probe(card, drv); } return 0; } /** * pnp_remove_card - removes a PnP card from the PnP Layer * @card: pointer to the card to remove */ void pnp_remove_card(struct pnp_card *card) { struct list_head *pos, *temp; device_unregister(&card->dev); spin_lock(&pnp_lock); list_del(&card->global_list); list_del(&card->protocol_list); spin_unlock(&pnp_lock); list_for_each_safe(pos, temp, &card->devices) { struct pnp_dev *dev = card_to_pnp_dev(pos); pnp_remove_card_device(dev); } } /** * pnp_add_card_device - adds a device to the specified card * @card: pointer to the card to add to * @dev: pointer to the device to add */ int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev) { dev->dev.parent = &card->dev; dev->card_link = NULL; dev_set_name(&dev->dev, 
"%02x:%02x.%02x", dev->protocol->number, card->number, dev->number); spin_lock(&pnp_lock); dev->card = card; list_add_tail(&dev->card_list, &card->devices); spin_unlock(&pnp_lock); return 0; } /** * pnp_remove_card_device- removes a device from the specified card * @dev: pointer to the device to remove */ void pnp_remove_card_device(struct pnp_dev *dev) { spin_lock(&pnp_lock); dev->card = NULL; list_del(&dev->card_list); spin_unlock(&pnp_lock); __pnp_remove_device(dev); } /** * pnp_request_card_device - Searches for a PnP device under the specified card * @clink: pointer to the card link, cannot be NULL * @id: pointer to a PnP ID structure that explains the rules for finding the device * @from: Starting place to search from. If NULL it will start from the begining. */ struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink, const char *id, struct pnp_dev *from) { struct list_head *pos; struct pnp_dev *dev; struct pnp_card_driver *drv; struct pnp_card *card; if (!clink || !id) return NULL; card = clink->card; drv = clink->driver; if (!from) { pos = card->devices.next; } else { if (from->card != card) return NULL; pos = from->card_list.next; } while (pos != &card->devices) { dev = card_to_pnp_dev(pos); if ((!dev->card_link) && compare_pnp_id(dev->id, id)) goto found; pos = pos->next; } return NULL; found: dev->card_link = clink; dev->dev.driver = &drv->link.driver; if (pnp_bus_type.probe(&dev->dev)) goto err_out; if (device_bind_driver(&dev->dev)) goto err_out; return dev; err_out: dev->dev.driver = NULL; dev->card_link = NULL; return NULL; } /** * pnp_release_card_device - call this when the driver no longer needs the device * @dev: pointer to the PnP device stucture */ void pnp_release_card_device(struct pnp_dev *dev) { struct pnp_card_driver *drv = dev->card_link->driver; drv->link.remove = &card_remove; device_release_driver(&dev->dev); drv->link.remove = &card_remove_first; } /* * suspend/resume callbacks */ static int card_suspend(struct pnp_dev 
*dev, pm_message_t state) { struct pnp_card_link *link = dev->card_link; if (link->pm_state.event == state.event) return 0; link->pm_state = state; return link->driver->suspend(link, state); } static int card_resume(struct pnp_dev *dev) { struct pnp_card_link *link = dev->card_link; if (link->pm_state.event == PM_EVENT_ON) return 0; link->pm_state = PMSG_ON; link->driver->resume(link); return 0; } /** * pnp_register_card_driver - registers a PnP card driver with the PnP Layer * @drv: pointer to the driver to register */ int pnp_register_card_driver(struct pnp_card_driver *drv) { int error; struct list_head *pos, *temp; drv->link.name = drv->name; drv->link.id_table = NULL; /* this will disable auto matching */ drv->link.flags = drv->flags; drv->link.probe = NULL; drv->link.remove = &card_remove_first; drv->link.suspend = drv->suspend ? card_suspend : NULL; drv->link.resume = drv->resume ? card_resume : NULL; error = pnp_register_driver(&drv->link); if (error < 0) return error; spin_lock(&pnp_lock); list_add_tail(&drv->global_list, &pnp_card_drivers); spin_unlock(&pnp_lock); list_for_each_safe(pos, temp, &pnp_cards) { struct pnp_card *card = list_entry(pos, struct pnp_card, global_list); card_probe(card, drv); } return 0; } /** * pnp_unregister_card_driver - unregisters a PnP card driver from the PnP Layer * @drv: pointer to the driver to unregister */ void pnp_unregister_card_driver(struct pnp_card_driver *drv) { spin_lock(&pnp_lock); list_del(&drv->global_list); spin_unlock(&pnp_lock); pnp_unregister_driver(&drv->link); } EXPORT_SYMBOL(pnp_request_card_device); EXPORT_SYMBOL(pnp_release_card_device); EXPORT_SYMBOL(pnp_register_card_driver); EXPORT_SYMBOL(pnp_unregister_card_driver);
gpl-2.0
ruleless/linux
drivers/input/tablet/hanwang.c
1967
12263
/* * USB Hanwang tablet support * * Copyright (c) 2010 Xing Wei <weixing@hanwang.com.cn> * */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> #define DRIVER_AUTHOR "Xing Wei <weixing@hanwang.com.cn>" #define DRIVER_DESC "USB Hanwang tablet driver" #define DRIVER_LICENSE "GPL" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE); #define USB_VENDOR_ID_HANWANG 0x0b57 #define HANWANG_TABLET_INT_CLASS 0x0003 #define HANWANG_TABLET_INT_SUB_CLASS 0x0001 #define HANWANG_TABLET_INT_PROTOCOL 0x0002 #define ART_MASTER_PKGLEN_MAX 10 /* device IDs */ #define STYLUS_DEVICE_ID 0x02 #define TOUCH_DEVICE_ID 0x03 #define CURSOR_DEVICE_ID 0x06 #define ERASER_DEVICE_ID 0x0A #define PAD_DEVICE_ID 0x0F /* match vendor and interface info */ #define HANWANG_TABLET_DEVICE(vend, cl, sc, pr) \ .match_flags = USB_DEVICE_ID_MATCH_VENDOR \ | USB_DEVICE_ID_MATCH_INT_INFO, \ .idVendor = (vend), \ .bInterfaceClass = (cl), \ .bInterfaceSubClass = (sc), \ .bInterfaceProtocol = (pr) enum hanwang_tablet_type { HANWANG_ART_MASTER_III, HANWANG_ART_MASTER_HD, HANWANG_ART_MASTER_II, }; struct hanwang { unsigned char *data; dma_addr_t data_dma; struct input_dev *dev; struct usb_device *usbdev; 
struct urb *irq; const struct hanwang_features *features; unsigned int current_tool; unsigned int current_id; char name[64]; char phys[32]; }; struct hanwang_features { unsigned short pid; char *name; enum hanwang_tablet_type type; int pkg_len; int max_x; int max_y; int max_tilt_x; int max_tilt_y; int max_pressure; }; static const struct hanwang_features features_array[] = { { 0x8528, "Hanwang Art Master III 0906", HANWANG_ART_MASTER_III, ART_MASTER_PKGLEN_MAX, 0x5757, 0x3692, 0x3f, 0x7f, 2048 }, { 0x8529, "Hanwang Art Master III 0604", HANWANG_ART_MASTER_III, ART_MASTER_PKGLEN_MAX, 0x3d84, 0x2672, 0x3f, 0x7f, 2048 }, { 0x852a, "Hanwang Art Master III 1308", HANWANG_ART_MASTER_III, ART_MASTER_PKGLEN_MAX, 0x7f00, 0x4f60, 0x3f, 0x7f, 2048 }, { 0x8401, "Hanwang Art Master HD 5012", HANWANG_ART_MASTER_HD, ART_MASTER_PKGLEN_MAX, 0x678e, 0x4150, 0x3f, 0x7f, 1024 }, { 0x8503, "Hanwang Art Master II", HANWANG_ART_MASTER_II, ART_MASTER_PKGLEN_MAX, 0x27de, 0x1cfe, 0x3f, 0x7f, 1024 }, }; static const int hw_eventtypes[] = { EV_KEY, EV_ABS, EV_MSC, }; static const int hw_absevents[] = { ABS_X, ABS_Y, ABS_TILT_X, ABS_TILT_Y, ABS_WHEEL, ABS_RX, ABS_RY, ABS_PRESSURE, ABS_MISC, }; static const int hw_btnevents[] = { BTN_STYLUS, BTN_STYLUS2, BTN_TOOL_PEN, BTN_TOOL_RUBBER, BTN_TOOL_MOUSE, BTN_TOOL_FINGER, BTN_0, BTN_1, BTN_2, BTN_3, BTN_4, BTN_5, BTN_6, BTN_7, BTN_8, }; static const int hw_mscevents[] = { MSC_SERIAL, }; static void hanwang_parse_packet(struct hanwang *hanwang) { unsigned char *data = hanwang->data; struct input_dev *input_dev = hanwang->dev; struct usb_device *dev = hanwang->usbdev; enum hanwang_tablet_type type = hanwang->features->type; int i; u16 p; if (type == HANWANG_ART_MASTER_II) { hanwang->current_tool = BTN_TOOL_PEN; hanwang->current_id = STYLUS_DEVICE_ID; } switch (data[0]) { case 0x02: /* data packet */ switch (data[1]) { case 0x80: /* tool prox out */ if (type != HANWANG_ART_MASTER_II) { hanwang->current_id = 0; input_report_key(input_dev, 
hanwang->current_tool, 0); } break; case 0x00: /* artmaster ii pen leave */ if (type == HANWANG_ART_MASTER_II) { hanwang->current_id = 0; input_report_key(input_dev, hanwang->current_tool, 0); } break; case 0xc2: /* first time tool prox in */ switch (data[3] & 0xf0) { case 0x20: /* art_master III */ case 0x30: /* art_master_HD */ hanwang->current_id = STYLUS_DEVICE_ID; hanwang->current_tool = BTN_TOOL_PEN; input_report_key(input_dev, BTN_TOOL_PEN, 1); break; case 0xa0: /* art_master III */ case 0xb0: /* art_master_HD */ hanwang->current_id = ERASER_DEVICE_ID; hanwang->current_tool = BTN_TOOL_RUBBER; input_report_key(input_dev, BTN_TOOL_RUBBER, 1); break; default: hanwang->current_id = 0; dev_dbg(&dev->dev, "unknown tablet tool %02x\n", data[0]); break; } break; default: /* tool data packet */ switch (type) { case HANWANG_ART_MASTER_III: p = (data[6] << 3) | ((data[7] & 0xc0) >> 5) | (data[1] & 0x01); break; case HANWANG_ART_MASTER_HD: case HANWANG_ART_MASTER_II: p = (data[7] >> 6) | (data[6] << 2); break; default: p = 0; break; } input_report_abs(input_dev, ABS_X, be16_to_cpup((__be16 *)&data[2])); input_report_abs(input_dev, ABS_Y, be16_to_cpup((__be16 *)&data[4])); input_report_abs(input_dev, ABS_PRESSURE, p); input_report_abs(input_dev, ABS_TILT_X, data[7] & 0x3f); input_report_abs(input_dev, ABS_TILT_Y, data[8] & 0x7f); input_report_key(input_dev, BTN_STYLUS, data[1] & 0x02); if (type != HANWANG_ART_MASTER_II) input_report_key(input_dev, BTN_STYLUS2, data[1] & 0x04); else input_report_key(input_dev, BTN_TOOL_PEN, 1); break; } input_report_abs(input_dev, ABS_MISC, hanwang->current_id); input_event(input_dev, EV_MSC, MSC_SERIAL, hanwang->features->pid); break; case 0x0c: /* roll wheel */ hanwang->current_id = PAD_DEVICE_ID; switch (type) { case HANWANG_ART_MASTER_III: input_report_key(input_dev, BTN_TOOL_FINGER, data[1] || data[2] || data[3]); input_report_abs(input_dev, ABS_WHEEL, data[1]); input_report_key(input_dev, BTN_0, data[2]); for (i = 0; i < 8; i++) 
input_report_key(input_dev, BTN_1 + i, data[3] & (1 << i)); break; case HANWANG_ART_MASTER_HD: input_report_key(input_dev, BTN_TOOL_FINGER, data[1] || data[2] || data[3] || data[4] || data[5] || data[6]); input_report_abs(input_dev, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]); input_report_abs(input_dev, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]); input_report_key(input_dev, BTN_0, data[5] & 0x01); for (i = 0; i < 4; i++) { input_report_key(input_dev, BTN_1 + i, data[5] & (1 << i)); input_report_key(input_dev, BTN_5 + i, data[6] & (1 << i)); } break; case HANWANG_ART_MASTER_II: dev_dbg(&dev->dev, "error packet %02x\n", data[0]); return; } input_report_abs(input_dev, ABS_MISC, hanwang->current_id); input_event(input_dev, EV_MSC, MSC_SERIAL, 0xffffffff); break; default: dev_dbg(&dev->dev, "error packet %02x\n", data[0]); break; } input_sync(input_dev); } static void hanwang_irq(struct urb *urb) { struct hanwang *hanwang = urb->context; struct usb_device *dev = hanwang->usbdev; int retval; switch (urb->status) { case 0: /* success */; hanwang_parse_packet(hanwang); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_err(&dev->dev, "%s - urb shutting down with status: %d", __func__, urb->status); return; default: dev_err(&dev->dev, "%s - nonzero urb status received: %d", __func__, urb->status); break; } retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&dev->dev, "%s - usb_submit_urb failed with result %d", __func__, retval); } static int hanwang_open(struct input_dev *dev) { struct hanwang *hanwang = input_get_drvdata(dev); hanwang->irq->dev = hanwang->usbdev; if (usb_submit_urb(hanwang->irq, GFP_KERNEL)) return -EIO; return 0; } static void hanwang_close(struct input_dev *dev) { struct hanwang *hanwang = input_get_drvdata(dev); usb_kill_urb(hanwang->irq); } static bool get_features(struct usb_device *dev, struct hanwang *hanwang) { int i; for (i = 0; i < ARRAY_SIZE(features_array); i++) { if 
(le16_to_cpu(dev->descriptor.idProduct) == features_array[i].pid) { hanwang->features = &features_array[i]; return true; } } return false; } static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *endpoint; struct hanwang *hanwang; struct input_dev *input_dev; int error; int i; hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); input_dev = input_allocate_device(); if (!hanwang || !input_dev) { error = -ENOMEM; goto fail1; } if (!get_features(dev, hanwang)) { error = -ENXIO; goto fail1; } hanwang->data = usb_alloc_coherent(dev, hanwang->features->pkg_len, GFP_KERNEL, &hanwang->data_dma); if (!hanwang->data) { error = -ENOMEM; goto fail1; } hanwang->irq = usb_alloc_urb(0, GFP_KERNEL); if (!hanwang->irq) { error = -ENOMEM; goto fail2; } hanwang->usbdev = dev; hanwang->dev = input_dev; usb_make_path(dev, hanwang->phys, sizeof(hanwang->phys)); strlcat(hanwang->phys, "/input0", sizeof(hanwang->phys)); strlcpy(hanwang->name, hanwang->features->name, sizeof(hanwang->name)); input_dev->name = hanwang->name; input_dev->phys = hanwang->phys; usb_to_input_id(dev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, hanwang); input_dev->open = hanwang_open; input_dev->close = hanwang_close; for (i = 0; i < ARRAY_SIZE(hw_eventtypes); ++i) __set_bit(hw_eventtypes[i], input_dev->evbit); for (i = 0; i < ARRAY_SIZE(hw_absevents); ++i) __set_bit(hw_absevents[i], input_dev->absbit); for (i = 0; i < ARRAY_SIZE(hw_btnevents); ++i) __set_bit(hw_btnevents[i], input_dev->keybit); for (i = 0; i < ARRAY_SIZE(hw_mscevents); ++i) __set_bit(hw_mscevents[i], input_dev->mscbit); input_set_abs_params(input_dev, ABS_X, 0, hanwang->features->max_x, 4, 0); input_set_abs_params(input_dev, ABS_Y, 0, hanwang->features->max_y, 4, 0); input_set_abs_params(input_dev, ABS_TILT_X, 0, hanwang->features->max_tilt_x, 0, 0); input_set_abs_params(input_dev, 
ABS_TILT_Y, 0, hanwang->features->max_tilt_y, 0, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, hanwang->features->max_pressure, 0, 0); endpoint = &intf->cur_altsetting->endpoint[0].desc; usb_fill_int_urb(hanwang->irq, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), hanwang->data, hanwang->features->pkg_len, hanwang_irq, hanwang, endpoint->bInterval); hanwang->irq->transfer_dma = hanwang->data_dma; hanwang->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; error = input_register_device(hanwang->dev); if (error) goto fail3; usb_set_intfdata(intf, hanwang); return 0; fail3: usb_free_urb(hanwang->irq); fail2: usb_free_coherent(dev, hanwang->features->pkg_len, hanwang->data, hanwang->data_dma); fail1: input_free_device(input_dev); kfree(hanwang); return error; } static void hanwang_disconnect(struct usb_interface *intf) { struct hanwang *hanwang = usb_get_intfdata(intf); input_unregister_device(hanwang->dev); usb_free_urb(hanwang->irq); usb_free_coherent(interface_to_usbdev(intf), hanwang->features->pkg_len, hanwang->data, hanwang->data_dma); kfree(hanwang); usb_set_intfdata(intf, NULL); } static const struct usb_device_id hanwang_ids[] = { { HANWANG_TABLET_DEVICE(USB_VENDOR_ID_HANWANG, HANWANG_TABLET_INT_CLASS, HANWANG_TABLET_INT_SUB_CLASS, HANWANG_TABLET_INT_PROTOCOL) }, {} }; MODULE_DEVICE_TABLE(usb, hanwang_ids); static struct usb_driver hanwang_driver = { .name = "hanwang", .probe = hanwang_probe, .disconnect = hanwang_disconnect, .id_table = hanwang_ids, }; module_usb_driver(hanwang_driver);
gpl-2.0
rootfs/vzkernel
drivers/sbus/char/uctrl.c
2479
10936
/* uctrl.c: TS102 Microcontroller interface on Tadpole Sparcbook 3 * * Copyright 1999 Derrick J Brashear (shadow@dementia.org) * Copyright 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/pgtable.h> #define UCTRL_MINOR 174 #define DEBUG 1 #ifdef DEBUG #define dprintk(x) printk x #else #define dprintk(x) #endif struct uctrl_regs { u32 uctrl_intr; u32 uctrl_data; u32 uctrl_stat; u32 uctrl_xxx[5]; }; struct ts102_regs { u32 card_a_intr; u32 card_a_stat; u32 card_a_ctrl; u32 card_a_xxx; u32 card_b_intr; u32 card_b_stat; u32 card_b_ctrl; u32 card_b_xxx; u32 uctrl_intr; u32 uctrl_data; u32 uctrl_stat; u32 uctrl_xxx; u32 ts102_xxx[4]; }; /* Bits for uctrl_intr register */ #define UCTRL_INTR_TXE_REQ 0x01 /* transmit FIFO empty int req */ #define UCTRL_INTR_TXNF_REQ 0x02 /* transmit FIFO not full int req */ #define UCTRL_INTR_RXNE_REQ 0x04 /* receive FIFO not empty int req */ #define UCTRL_INTR_RXO_REQ 0x08 /* receive FIFO overflow int req */ #define UCTRL_INTR_TXE_MSK 0x10 /* transmit FIFO empty mask */ #define UCTRL_INTR_TXNF_MSK 0x20 /* transmit FIFO not full mask */ #define UCTRL_INTR_RXNE_MSK 0x40 /* receive FIFO not empty mask */ #define UCTRL_INTR_RXO_MSK 0x80 /* receive FIFO overflow mask */ /* Bits for uctrl_stat register */ #define UCTRL_STAT_TXE_STA 0x01 /* transmit FIFO empty status */ #define UCTRL_STAT_TXNF_STA 0x02 /* transmit FIFO not full status */ #define UCTRL_STAT_RXNE_STA 0x04 /* receive FIFO not empty status */ #define UCTRL_STAT_RXO_STA 0x08 /* receive FIFO overflow status */ static DEFINE_MUTEX(uctrl_mutex); static const char *uctrl_extstatus[16] = { 
"main power available", "internal battery attached", "external battery attached", "external VGA attached", "external keyboard attached", "external mouse attached", "lid down", "internal battery currently charging", "external battery currently charging", "internal battery currently discharging", "external battery currently discharging", }; /* Everything required for one transaction with the uctrl */ struct uctrl_txn { u8 opcode; u8 inbits; u8 outbits; u8 *inbuf; u8 *outbuf; }; struct uctrl_status { u8 current_temp; /* 0x07 */ u8 reset_status; /* 0x0b */ u16 event_status; /* 0x0c */ u16 error_status; /* 0x10 */ u16 external_status; /* 0x11, 0x1b */ u8 internal_charge; /* 0x18 */ u8 external_charge; /* 0x19 */ u16 control_lcd; /* 0x20 */ u8 control_bitport; /* 0x21 */ u8 speaker_volume; /* 0x23 */ u8 control_tft_brightness; /* 0x24 */ u8 control_kbd_repeat_delay; /* 0x28 */ u8 control_kbd_repeat_period; /* 0x29 */ u8 control_screen_contrast; /* 0x2F */ }; enum uctrl_opcode { READ_SERIAL_NUMBER=0x1, READ_ETHERNET_ADDRESS=0x2, READ_HARDWARE_VERSION=0x3, READ_MICROCONTROLLER_VERSION=0x4, READ_MAX_TEMPERATURE=0x5, READ_MIN_TEMPERATURE=0x6, READ_CURRENT_TEMPERATURE=0x7, READ_SYSTEM_VARIANT=0x8, READ_POWERON_CYCLES=0x9, READ_POWERON_SECONDS=0xA, READ_RESET_STATUS=0xB, READ_EVENT_STATUS=0xC, READ_REAL_TIME_CLOCK=0xD, READ_EXTERNAL_VGA_PORT=0xE, READ_MICROCONTROLLER_ROM_CHECKSUM=0xF, READ_ERROR_STATUS=0x10, READ_EXTERNAL_STATUS=0x11, READ_USER_CONFIGURATION_AREA=0x12, READ_MICROCONTROLLER_VOLTAGE=0x13, READ_INTERNAL_BATTERY_VOLTAGE=0x14, READ_DCIN_VOLTAGE=0x15, READ_HORIZONTAL_POINTER_VOLTAGE=0x16, READ_VERTICAL_POINTER_VOLTAGE=0x17, READ_INTERNAL_BATTERY_CHARGE_LEVEL=0x18, READ_EXTERNAL_BATTERY_CHARGE_LEVEL=0x19, READ_REAL_TIME_CLOCK_ALARM=0x1A, READ_EVENT_STATUS_NO_RESET=0x1B, READ_INTERNAL_KEYBOARD_LAYOUT=0x1C, READ_EXTERNAL_KEYBOARD_LAYOUT=0x1D, READ_EEPROM_STATUS=0x1E, CONTROL_LCD=0x20, CONTROL_BITPORT=0x21, SPEAKER_VOLUME=0x23, CONTROL_TFT_BRIGHTNESS=0x24, 
CONTROL_WATCHDOG=0x25, CONTROL_FACTORY_EEPROM_AREA=0x26, CONTROL_KBD_TIME_UNTIL_REPEAT=0x28, CONTROL_KBD_TIME_BETWEEN_REPEATS=0x29, CONTROL_TIMEZONE=0x2A, CONTROL_MARK_SPACE_RATIO=0x2B, CONTROL_DIAGNOSTIC_MODE=0x2E, CONTROL_SCREEN_CONTRAST=0x2F, RING_BELL=0x30, SET_DIAGNOSTIC_STATUS=0x32, CLEAR_KEY_COMBINATION_TABLE=0x33, PERFORM_SOFTWARE_RESET=0x34, SET_REAL_TIME_CLOCK=0x35, RECALIBRATE_POINTING_STICK=0x36, SET_BELL_FREQUENCY=0x37, SET_INTERNAL_BATTERY_CHARGE_RATE=0x39, SET_EXTERNAL_BATTERY_CHARGE_RATE=0x3A, SET_REAL_TIME_CLOCK_ALARM=0x3B, READ_EEPROM=0x40, WRITE_EEPROM=0x41, WRITE_TO_STATUS_DISPLAY=0x42, DEFINE_SPECIAL_CHARACTER=0x43, DEFINE_KEY_COMBINATION_ENTRY=0x50, DEFINE_STRING_TABLE_ENTRY=0x51, DEFINE_STATUS_SCREEN_DISPLAY=0x52, PERFORM_EMU_COMMANDS=0x64, READ_EMU_REGISTER=0x65, WRITE_EMU_REGISTER=0x66, READ_EMU_RAM=0x67, WRITE_EMU_RAM=0x68, READ_BQ_REGISTER=0x69, WRITE_BQ_REGISTER=0x6A, SET_USER_PASSWORD=0x70, VERIFY_USER_PASSWORD=0x71, GET_SYSTEM_PASSWORD_KEY=0x72, VERIFY_SYSTEM_PASSWORD=0x73, POWER_OFF=0x82, POWER_RESTART=0x83, }; static struct uctrl_driver { struct uctrl_regs __iomem *regs; int irq; int pending; struct uctrl_status status; } *global_driver; static void uctrl_get_event_status(struct uctrl_driver *); static void uctrl_get_external_status(struct uctrl_driver *); static long uctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { default: return -EINVAL; } return 0; } static int uctrl_open(struct inode *inode, struct file *file) { mutex_lock(&uctrl_mutex); uctrl_get_event_status(global_driver); uctrl_get_external_status(global_driver); mutex_unlock(&uctrl_mutex); return 0; } static irqreturn_t uctrl_interrupt(int irq, void *dev_id) { return IRQ_HANDLED; } static const struct file_operations uctrl_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .unlocked_ioctl = uctrl_ioctl, .open = uctrl_open, }; static struct miscdevice uctrl_dev = { UCTRL_MINOR, "uctrl", &uctrl_fops }; /* Wait for space to write, then 
write to it */ #define WRITEUCTLDATA(value) \ { \ unsigned int i; \ for (i = 0; i < 10000; i++) { \ if (UCTRL_STAT_TXNF_STA & sbus_readl(&driver->regs->uctrl_stat)) \ break; \ } \ dprintk(("write data 0x%02x\n", value)); \ sbus_writel(value, &driver->regs->uctrl_data); \ } /* Wait for something to read, read it, then clear the bit */ #define READUCTLDATA(value) \ { \ unsigned int i; \ value = 0; \ for (i = 0; i < 10000; i++) { \ if ((UCTRL_STAT_RXNE_STA & sbus_readl(&driver->regs->uctrl_stat)) == 0) \ break; \ udelay(1); \ } \ value = sbus_readl(&driver->regs->uctrl_data); \ dprintk(("read data 0x%02x\n", value)); \ sbus_writel(UCTRL_STAT_RXNE_STA, &driver->regs->uctrl_stat); \ } static void uctrl_do_txn(struct uctrl_driver *driver, struct uctrl_txn *txn) { int stat, incnt, outcnt, bytecnt, intr; u32 byte; stat = sbus_readl(&driver->regs->uctrl_stat); intr = sbus_readl(&driver->regs->uctrl_intr); sbus_writel(stat, &driver->regs->uctrl_stat); dprintk(("interrupt stat 0x%x int 0x%x\n", stat, intr)); incnt = txn->inbits; outcnt = txn->outbits; byte = (txn->opcode << 8); WRITEUCTLDATA(byte); bytecnt = 0; while (incnt > 0) { byte = (txn->inbuf[bytecnt] << 8); WRITEUCTLDATA(byte); incnt--; bytecnt++; } /* Get the ack */ READUCTLDATA(byte); dprintk(("ack was %x\n", (byte >> 8))); bytecnt = 0; while (outcnt > 0) { READUCTLDATA(byte); txn->outbuf[bytecnt] = (byte >> 8); dprintk(("set byte to %02x\n", byte)); outcnt--; bytecnt++; } } static void uctrl_get_event_status(struct uctrl_driver *driver) { struct uctrl_txn txn; u8 outbits[2]; txn.opcode = READ_EVENT_STATUS; txn.inbits = 0; txn.outbits = 2; txn.inbuf = NULL; txn.outbuf = outbits; uctrl_do_txn(driver, &txn); dprintk(("bytes %x %x\n", (outbits[0] & 0xff), (outbits[1] & 0xff))); driver->status.event_status = ((outbits[0] & 0xff) << 8) | (outbits[1] & 0xff); dprintk(("ev is %x\n", driver->status.event_status)); } static void uctrl_get_external_status(struct uctrl_driver *driver) { struct uctrl_txn txn; u8 outbits[2]; int 
i, v; txn.opcode = READ_EXTERNAL_STATUS; txn.inbits = 0; txn.outbits = 2; txn.inbuf = NULL; txn.outbuf = outbits; uctrl_do_txn(driver, &txn); dprintk(("bytes %x %x\n", (outbits[0] & 0xff), (outbits[1] & 0xff))); driver->status.external_status = ((outbits[0] * 256) + (outbits[1])); dprintk(("ex is %x\n", driver->status.external_status)); v = driver->status.external_status; for (i = 0; v != 0; i++, v >>= 1) { if (v & 1) { dprintk(("%s%s", " ", uctrl_extstatus[i])); } } dprintk(("\n")); } static int uctrl_probe(struct platform_device *op) { struct uctrl_driver *p; int err = -ENOMEM; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR "uctrl: Unable to allocate device struct.\n"); goto out; } p->regs = of_ioremap(&op->resource[0], 0, resource_size(&op->resource[0]), "uctrl"); if (!p->regs) { printk(KERN_ERR "uctrl: Unable to map registers.\n"); goto out_free; } p->irq = op->archdata.irqs[0]; err = request_irq(p->irq, uctrl_interrupt, 0, "uctrl", p); if (err) { printk(KERN_ERR "uctrl: Unable to register irq.\n"); goto out_iounmap; } err = misc_register(&uctrl_dev); if (err) { printk(KERN_ERR "uctrl: Unable to register misc device.\n"); goto out_free_irq; } sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr); printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n", op->dev.of_node->full_name, p->regs, p->irq); uctrl_get_event_status(p); uctrl_get_external_status(p); dev_set_drvdata(&op->dev, p); global_driver = p; out: return err; out_free_irq: free_irq(p->irq, p); out_iounmap: of_iounmap(&op->resource[0], p->regs, resource_size(&op->resource[0])); out_free: kfree(p); goto out; } static int uctrl_remove(struct platform_device *op) { struct uctrl_driver *p = dev_get_drvdata(&op->dev); if (p) { misc_deregister(&uctrl_dev); free_irq(p->irq, p); of_iounmap(&op->resource[0], p->regs, resource_size(&op->resource[0])); kfree(p); } return 0; } static const struct of_device_id uctrl_match[] = { { .name = "uctrl", }, {}, }; MODULE_DEVICE_TABLE(of, 
uctrl_match); static struct platform_driver uctrl_driver = { .driver = { .name = "uctrl", .owner = THIS_MODULE, .of_match_table = uctrl_match, }, .probe = uctrl_probe, .remove = uctrl_remove, }; module_platform_driver(uctrl_driver); MODULE_LICENSE("GPL");
gpl-2.0
leyarx/android_kernel_wexler_qc750
drivers/staging/vt6656/key.c
2479
29439
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: key.c * * Purpose: Implement functions for 802.11i Key management * * Author: Jerry Chen * * Date: May 29, 2003 * * Functions: * KeyvInitTable - Init Key management table * KeybGetKey - Get Key from table * KeybSetKey - Set Key to table * KeybRemoveKey - Remove Key from table * KeybGetTransmitKey - Get Transmit Key from table * * Revision History: * */ #include "tmacro.h" #include "key.h" #include "mac.h" #include "rndis.h" #include "control.h" /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ static int msglevel =MSG_LEVEL_INFO; //static int msglevel =MSG_LEVEL_DEBUG; /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ static void 
s_vCheckKeyTableValid(void *pDeviceHandler, PSKeyManagement pTable) { PSDevice pDevice = (PSDevice) pDeviceHandler; int i; WORD wLength = 0; BYTE pbyData[MAX_KEY_TABLE]; for (i=0;i<MAX_KEY_TABLE;i++) { if ((pTable->KeyTable[i].bInUse == TRUE) && (pTable->KeyTable[i].PairwiseKey.bKeyValid == FALSE) && (pTable->KeyTable[i].GroupKey[0].bKeyValid == FALSE) && (pTable->KeyTable[i].GroupKey[1].bKeyValid == FALSE) && (pTable->KeyTable[i].GroupKey[2].bKeyValid == FALSE) && (pTable->KeyTable[i].GroupKey[3].bKeyValid == FALSE) ) { pTable->KeyTable[i].bInUse = FALSE; pTable->KeyTable[i].wKeyCtl = 0; pTable->KeyTable[i].bSoftWEP = FALSE; pbyData[wLength++] = (BYTE) i; //MACvDisableKeyEntry(pDevice, i); } } if ( wLength != 0 ) { CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_CLRKEYENTRY, 0, 0, wLength, pbyData ); } } /*--------------------- Export Functions --------------------------*/ /* * Description: Init Key management table * * Parameters: * In: * pTable - Pointer to Key table * Out: * none * * Return Value: none * */ void KeyvInitTable(void *pDeviceHandler, PSKeyManagement pTable) { PSDevice pDevice = (PSDevice) pDeviceHandler; int i; int jj; BYTE pbyData[MAX_KEY_TABLE+1]; spin_lock_irq(&pDevice->lock); for (i=0;i<MAX_KEY_TABLE;i++) { pTable->KeyTable[i].bInUse = FALSE; pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE; pTable->KeyTable[i].PairwiseKey.pvKeyTable = (void *)&pTable->KeyTable[i]; for (jj=0; jj < MAX_GROUP_KEY; jj++) { pTable->KeyTable[i].GroupKey[jj].bKeyValid = FALSE; pTable->KeyTable[i].GroupKey[jj].pvKeyTable = (void *) &(pTable->KeyTable[i]); } pTable->KeyTable[i].wKeyCtl = 0; pTable->KeyTable[i].dwGTKeyIndex = 0; pTable->KeyTable[i].bSoftWEP = FALSE; pbyData[i] = (BYTE) i; } pbyData[i] = (BYTE) i; CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_CLRKEYENTRY, 0, 0, 11, pbyData ); spin_unlock_irq(&pDevice->lock); return; } /* * Description: Get Key from table * * Parameters: * In: * pTable - Pointer to Key table * pbyBSSID - BSSID of Key * dwKeyIndex - Key Index 
(0xFFFFFFFF means pairwise key) * Out: * pKey - Key return * * Return Value: TRUE if found otherwise FALSE * */ BOOL KeybGetKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex, PSKeyItem *pKey) { int i; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybGetKey() \n"); *pKey = NULL; for (i=0;i<MAX_KEY_TABLE;i++) { if ((pTable->KeyTable[i].bInUse == TRUE) && !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) { if (dwKeyIndex == 0xFFFFFFFF) { if (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE) { *pKey = &(pTable->KeyTable[i].PairwiseKey); return (TRUE); } else { return (FALSE); } } else if (dwKeyIndex < MAX_GROUP_KEY) { if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid == TRUE) { *pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex]); return (TRUE); } else { return (FALSE); } } else { return (FALSE); } } } return (FALSE); } /* * Description: Set Key to table * * Parameters: * In: * pTable - Pointer to Key table * pbyBSSID - BSSID of Key * dwKeyIndex - Key index (reference to NDIS DDK) * uKeyLength - Key length * KeyRSC - Key RSC * pbyKey - Pointer to key * Out: * none * * Return Value: TRUE if success otherwise FALSE * */ BOOL KeybSetKey( void *pDeviceHandler, PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex, unsigned long uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode ) { PSDevice pDevice = (PSDevice) pDeviceHandler; int i,j; unsigned int ii; PSKeyItem pKey; unsigned int uKeyIdx; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex); j = (MAX_KEY_TABLE-1); for (i=0;i<(MAX_KEY_TABLE-1);i++) { if ((pTable->KeyTable[i].bInUse == FALSE) && (j == (MAX_KEY_TABLE-1))) { // found empty table j = i; } if ((pTable->KeyTable[i].bInUse == TRUE) && !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) { // found table already exist if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key pKey = &(pTable->KeyTable[i].PairwiseKey); pTable->KeyTable[i].wKeyCtl &= 0xFFF0; // clear pairwise key control filed 
pTable->KeyTable[i].wKeyCtl |= byKeyDecMode; uKeyIdx = 4; // use HW key entry 4 for pairwise key } else { // Group key if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY) return (FALSE); pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]); if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i); } pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4); pTable->KeyTable[i].wKeyCtl |= 0x0040; // use group key for group address uKeyIdx = (dwKeyIndex & 0x000000FF); } pTable->KeyTable[i].wKeyCtl |= 0x8000; // enable on-fly pKey->bKeyValid = TRUE; pKey->uKeyLength = uKeyLength; pKey->dwKeyIndex = dwKeyIndex; pKey->byCipherSuite = byKeyDecMode; memcpy(pKey->abyKey, pbyKey, uKeyLength); if (byKeyDecMode == KEY_CTL_WEP) { if (uKeyLength == WLAN_WEP40_KEYLEN) pKey->abyKey[15] &= 0x7F; if (uKeyLength == WLAN_WEP104_KEYLEN) pKey->abyKey[15] |= 0x80; } MACvSetKeyEntry(pDevice, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pbyBSSID, (PDWORD)pKey->abyKey); if ((dwKeyIndex & USE_KEYRSC) == 0) { // RSC set by NIC memset(&(pKey->KeyRSC), 0, sizeof(QWORD)); } else { memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD)); } pKey->dwTSC47_16 = 0; pKey->wTSC15_0 = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybSetKey(R): \n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->bKeyValid: %d\n ", pKey->bKeyValid); //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->uKeyLength: %d\n ", pKey->uKeyLength); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->abyKey: "); for (ii = 0; ii < pKey->uKeyLength; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", pKey->abyKey[ii]); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0); DBG_PRT(MSG_LEVEL_DEBUG, 
KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex); return (TRUE); } } if (j < (MAX_KEY_TABLE-1)) { memcpy(pTable->KeyTable[j].abyBSSID, pbyBSSID, ETH_ALEN); pTable->KeyTable[j].bInUse = TRUE; if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key pKey = &(pTable->KeyTable[j].PairwiseKey); pTable->KeyTable[j].wKeyCtl &= 0xFFF0; // clear pairwise key control filed pTable->KeyTable[j].wKeyCtl |= byKeyDecMode; uKeyIdx = 4; // use HW key entry 4 for pairwise key } else { // Group key if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY) return (FALSE); pKey = &(pTable->KeyTable[j].GroupKey[dwKeyIndex & 0x000000FF]); if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(N)[%lX]: %d\n", pTable->KeyTable[j].dwGTKeyIndex, j); } pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control filed pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4); pTable->KeyTable[j].wKeyCtl |= 0x0040; // use group key for group address uKeyIdx = (dwKeyIndex & 0x000000FF); } pTable->KeyTable[j].wKeyCtl |= 0x8000; // enable on-fly pKey->bKeyValid = TRUE; pKey->uKeyLength = uKeyLength; pKey->dwKeyIndex = dwKeyIndex; pKey->byCipherSuite = byKeyDecMode; memcpy(pKey->abyKey, pbyKey, uKeyLength); if (byKeyDecMode == KEY_CTL_WEP) { if (uKeyLength == WLAN_WEP40_KEYLEN) pKey->abyKey[15] &= 0x7F; if (uKeyLength == WLAN_WEP104_KEYLEN) pKey->abyKey[15] |= 0x80; } MACvSetKeyEntry(pDevice, pTable->KeyTable[j].wKeyCtl, j, uKeyIdx, pbyBSSID, (PDWORD)pKey->abyKey); if ((dwKeyIndex & USE_KEYRSC) == 0) { // RSC set by NIC memset(&(pKey->KeyRSC), 0, sizeof(QWORD)); } else { memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD)); } pKey->dwTSC47_16 = 0; pKey->wTSC15_0 = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybSetKey(N): \n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->bKeyValid: %d\n ", pKey->bKeyValid); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->uKeyLength: %d\n ", (int)pKey->uKeyLength); 
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->abyKey: "); for (ii = 0; ii < pKey->uKeyLength; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", pKey->abyKey[ii]); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex); return (TRUE); } return (FALSE); } /* * Description: Remove Key from table * * Parameters: * In: * pTable - Pointer to Key table * pbyBSSID - BSSID of Key * dwKeyIndex - Key Index (reference to NDIS DDK) * Out: * none * * Return Value: TRUE if success otherwise FALSE * */ BOOL KeybRemoveKey( void *pDeviceHandler, PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex ) { PSDevice pDevice = (PSDevice) pDeviceHandler; int i; BOOL bReturnValue = FALSE; if (is_broadcast_ether_addr(pbyBSSID)) { // dealte all key if ((dwKeyIndex & PAIRWISE_KEY) != 0) { for (i=0;i<MAX_KEY_TABLE;i++) { pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE; } bReturnValue = TRUE; } else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) { for (i=0;i<MAX_KEY_TABLE;i++) { pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE; if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) { // remove Group transmit key pTable->KeyTable[i].dwGTKeyIndex = 0; } } bReturnValue = TRUE; } else { bReturnValue = FALSE; } } else { for (i=0;i<MAX_KEY_TABLE;i++) { if ( (pTable->KeyTable[i].bInUse == TRUE) && !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) { if ((dwKeyIndex & PAIRWISE_KEY) != 0) { pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE; bReturnValue = TRUE; break; } else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) { pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE; if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) { // remove Group transmit key 
pTable->KeyTable[i].dwGTKeyIndex = 0; } bReturnValue = TRUE; break; } else { bReturnValue = FALSE; break; } } //pTable->KeyTable[i].bInUse == TRUE } //for bReturnValue = TRUE; } s_vCheckKeyTableValid(pDevice,pTable); return bReturnValue; } /* * Description: Remove Key from table * * Parameters: * In: * pTable - Pointer to Key table * pbyBSSID - BSSID of Key * Out: * none * * Return Value: TRUE if success otherwise FALSE * */ BOOL KeybRemoveAllKey( void *pDeviceHandler, PSKeyManagement pTable, PBYTE pbyBSSID ) { PSDevice pDevice = (PSDevice) pDeviceHandler; int i,u; for (i=0;i<MAX_KEY_TABLE;i++) { if ((pTable->KeyTable[i].bInUse == TRUE) && !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) { pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE; for (u = 0; u < MAX_GROUP_KEY; u++) pTable->KeyTable[i].GroupKey[u].bKeyValid = FALSE; pTable->KeyTable[i].dwGTKeyIndex = 0; s_vCheckKeyTableValid(pDevice, pTable); return (TRUE); } } return (FALSE); } /* * Description: Remove WEP Key from table * * Parameters: * In: * pTable - Pointer to Key table * Out: * none * * Return Value: TRUE if success otherwise FALSE * */ void KeyvRemoveWEPKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex ) { PSDevice pDevice = (PSDevice) pDeviceHandler; if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) { if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse == TRUE) { if (pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].byCipherSuite == KEY_CTL_WEP) { pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE; if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex & 0x7FFFFFFF)) { // remove Group transmit key pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = 0; } } } s_vCheckKeyTableValid(pDevice, pTable); } return; } void KeyvRemoveAllWEPKey(void *pDeviceHandler, PSKeyManagement pTable) { PSDevice pDevice = (PSDevice) pDeviceHandler; int i; for (i = 0; i < MAX_GROUP_KEY; i++) KeyvRemoveWEPKey(pDevice, pTable, i); 
} /* * Description: Get Transmit Key from table * * Parameters: * In: * pTable - Pointer to Key table * pbyBSSID - BSSID of Key * Out: * pKey - Key return * * Return Value: TRUE if found otherwise FALSE * */ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType, PSKeyItem *pKey) { int i, ii; *pKey = NULL; for (i = 0; i < MAX_KEY_TABLE; i++) { if ((pTable->KeyTable[i].bInUse == TRUE) && !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) { if (dwKeyType == PAIRWISE_KEY) { if (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE) { *pKey = &(pTable->KeyTable[i].PairwiseKey); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybGetTransmitKey:"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"PAIRWISE_KEY: KeyTable.abyBSSID: "); for (ii = 0; ii < 6; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x ", pTable->KeyTable[i].abyBSSID[ii]); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); return (TRUE); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"PairwiseKey.bKeyValid == FALSE\n"); return (FALSE); } } // End of Type == PAIRWISE else { if (pTable->KeyTable[i].dwGTKeyIndex == 0) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ERROR: dwGTKeyIndex == 0 !!!\n"); return FALSE; } if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid == TRUE) { *pKey = &(pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)]); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybGetTransmitKey:"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GROUP_KEY: KeyTable.abyBSSID\n"); for (ii = 0; ii < 6; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x ", pTable->KeyTable[i].abyBSSID[ii]); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %lX\n", pTable->KeyTable[i].dwGTKeyIndex); return (TRUE); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GroupKey.bKeyValid == FALSE\n"); return (FALSE); } } // End of Type = GROUP } // BSSID match } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ERROR: NO Match BSSID !!! 
"); for (ii = 0; ii < 6; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *(pbyBSSID+ii)); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); return (FALSE); } /* * Description: Check Pairewise Key * * Parameters: * In: * pTable - Pointer to Key table * Out: * none * * Return Value: TRUE if found otherwise FALSE * */ BOOL KeybCheckPairewiseKey(PSKeyManagement pTable, PSKeyItem *pKey) { int i; *pKey = NULL; for (i=0;i<MAX_KEY_TABLE;i++) { if ((pTable->KeyTable[i].bInUse == TRUE) && (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE)) { *pKey = &(pTable->KeyTable[i].PairwiseKey); return (TRUE); } } return (FALSE); } /* * Description: Set Key to table * * Parameters: * In: * pTable - Pointer to Key table * dwKeyIndex - Key index (reference to NDIS DDK) * uKeyLength - Key length * KeyRSC - Key RSC * pbyKey - Pointer to key * Out: * none * * Return Value: TRUE if success otherwise FALSE * */ BOOL KeybSetDefaultKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, unsigned long uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode ) { PSDevice pDevice = (PSDevice) pDeviceHandler; unsigned int ii; PSKeyItem pKey; unsigned int uKeyIdx; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enter KeybSetDefaultKey: %1x, %d\n", (int) dwKeyIndex, (int) uKeyLength); if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key return (FALSE); } else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY) { return (FALSE); } pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = TRUE; for (ii = 0; ii < ETH_ALEN; ii++) pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF; // Group key pKey = &(pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF]); if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, MAX_KEY_TABLE-1); } pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control filed 
pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= (byKeyDecMode << 4); pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= (byKeyDecMode); pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x0044; // use group key for all address uKeyIdx = (dwKeyIndex & 0x000000FF); if ((uKeyLength == WLAN_WEP232_KEYLEN) && (byKeyDecMode == KEY_CTL_WEP)) { pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x4000; // disable on-fly disable address match pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP = TRUE; } else { if (pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP == FALSE) pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0xC000; // enable on-fly disable address match } pKey->bKeyValid = TRUE; pKey->uKeyLength = uKeyLength; pKey->dwKeyIndex = dwKeyIndex; pKey->byCipherSuite = byKeyDecMode; memcpy(pKey->abyKey, pbyKey, uKeyLength); if (byKeyDecMode == KEY_CTL_WEP) { if (uKeyLength == WLAN_WEP40_KEYLEN) pKey->abyKey[15] &= 0x7F; if (uKeyLength == WLAN_WEP104_KEYLEN) pKey->abyKey[15] |= 0x80; } MACvSetKeyEntry(pDevice, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl, MAX_KEY_TABLE-1, uKeyIdx, pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID, (PDWORD) pKey->abyKey); if ((dwKeyIndex & USE_KEYRSC) == 0) { // RSC set by NIC memset(&(pKey->KeyRSC), 0, sizeof(QWORD)); } else { memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD)); } pKey->dwTSC47_16 = 0; pKey->wTSC15_0 = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybSetKey(R): \n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->bKeyValid: %d\n", pKey->bKeyValid); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->uKeyLength: %d\n", (int)pKey->uKeyLength); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->abyKey: \n"); for (ii = 0; ii < pKey->uKeyLength; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x", pKey->abyKey[ii]); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n", pKey->wTSC15_0); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex); return (TRUE); } /* * 
Description: Set Key to table * * Parameters: * In: * pTable - Pointer to Key table * dwKeyIndex - Key index (reference to NDIS DDK) * uKeyLength - Key length * KeyRSC - Key RSC * pbyKey - Pointer to key * Out: * none * * Return Value: TRUE if success otherwise FALSE * */ BOOL KeybSetAllGroupKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, unsigned long uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode ) { PSDevice pDevice = (PSDevice) pDeviceHandler; int i; unsigned int ii; PSKeyItem pKey; unsigned int uKeyIdx; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex); if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key return (FALSE); } else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY) { return (FALSE); } for (i=0; i < MAX_KEY_TABLE-1; i++) { if (pTable->KeyTable[i].bInUse == TRUE) { // found table already exist // Group key pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]); if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i); } pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4); pTable->KeyTable[i].wKeyCtl |= 0x0040; // use group key for group address uKeyIdx = (dwKeyIndex & 0x000000FF); pTable->KeyTable[i].wKeyCtl |= 0x8000; // enable on-fly pKey->bKeyValid = TRUE; pKey->uKeyLength = uKeyLength; pKey->dwKeyIndex = dwKeyIndex; pKey->byCipherSuite = byKeyDecMode; memcpy(pKey->abyKey, pbyKey, uKeyLength); if (byKeyDecMode == KEY_CTL_WEP) { if (uKeyLength == WLAN_WEP40_KEYLEN) pKey->abyKey[15] &= 0x7F; if (uKeyLength == WLAN_WEP104_KEYLEN) pKey->abyKey[15] |= 0x80; } MACvSetKeyEntry(pDevice, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pTable->KeyTable[i].abyBSSID, (PDWORD) pKey->abyKey); if ((dwKeyIndex & USE_KEYRSC) == 0) { // RSC set by NIC 
memset(&(pKey->KeyRSC), 0, sizeof(QWORD)); } else { memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD)); } pKey->dwTSC47_16 = 0; pKey->wTSC15_0 = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybSetKey(R): \n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->bKeyValid: %d\n ", pKey->bKeyValid); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->uKeyLength: %d\n ", (int)pKey->uKeyLength); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->abyKey: "); for (ii = 0; ii < pKey->uKeyLength; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", pKey->abyKey[ii]); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); //DBG_PRN_GRP12(("pKey->dwTSC47_16: %lX\n ", pKey->dwTSC47_16)); //DBG_PRN_GRP12(("pKey->wTSC15_0: %X\n ", pKey->wTSC15_0)); //DBG_PRN_GRP12(("pKey->dwKeyIndex: %lX\n ", pKey->dwKeyIndex)); } // (pTable->KeyTable[i].bInUse == TRUE) } return (TRUE); }
gpl-2.0
MikeC84/android_kernel_motorola_shamu
drivers/sbus/char/flash.c
2479
4867
/* flash.c: Allow mmap access to the OBP Flash, for OBP updates. * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/miscdevice.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/upa.h> static DEFINE_MUTEX(flash_mutex); static DEFINE_SPINLOCK(flash_lock); static struct { unsigned long read_base; /* Physical read address */ unsigned long write_base; /* Physical write address */ unsigned long read_size; /* Size of read area */ unsigned long write_size; /* Size of write area */ unsigned long busy; /* In use? */ } flash; #define FLASH_MINOR 152 static int flash_mmap(struct file *file, struct vm_area_struct *vma) { unsigned long addr; unsigned long size; spin_lock(&flash_lock); if (flash.read_base == flash.write_base) { addr = flash.read_base; size = flash.read_size; } else { if ((vma->vm_flags & VM_READ) && (vma->vm_flags & VM_WRITE)) { spin_unlock(&flash_lock); return -EINVAL; } if (vma->vm_flags & VM_READ) { addr = flash.read_base; size = flash.read_size; } else if (vma->vm_flags & VM_WRITE) { addr = flash.write_base; size = flash.write_size; } else { spin_unlock(&flash_lock); return -ENXIO; } } spin_unlock(&flash_lock); if ((vma->vm_pgoff << PAGE_SHIFT) > size) return -ENXIO; addr = vma->vm_pgoff + (addr >> PAGE_SHIFT); if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size) size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot)) return -EAGAIN; return 0; } static long long flash_llseek(struct file *file, long long offset, int origin) { mutex_lock(&flash_mutex); switch (origin) { 
case 0: file->f_pos = offset; break; case 1: file->f_pos += offset; if (file->f_pos > flash.read_size) file->f_pos = flash.read_size; break; case 2: file->f_pos = flash.read_size; break; default: mutex_unlock(&flash_mutex); return -EINVAL; } mutex_unlock(&flash_mutex); return file->f_pos; } static ssize_t flash_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { loff_t p = *ppos; int i; if (count > flash.read_size - p) count = flash.read_size - p; for (i = 0; i < count; i++) { u8 data = upa_readb(flash.read_base + p + i); if (put_user(data, buf)) return -EFAULT; buf++; } *ppos += count; return count; } static int flash_open(struct inode *inode, struct file *file) { mutex_lock(&flash_mutex); if (test_and_set_bit(0, (void *)&flash.busy) != 0) { mutex_unlock(&flash_mutex); return -EBUSY; } mutex_unlock(&flash_mutex); return 0; } static int flash_release(struct inode *inode, struct file *file) { spin_lock(&flash_lock); flash.busy = 0; spin_unlock(&flash_lock); return 0; } static const struct file_operations flash_fops = { /* no write to the Flash, use mmap * and play flash dependent tricks. 
*/ .owner = THIS_MODULE, .llseek = flash_llseek, .read = flash_read, .mmap = flash_mmap, .open = flash_open, .release = flash_release, }; static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops }; static int flash_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; struct device_node *parent; parent = dp->parent; if (strcmp(parent->name, "sbus") && strcmp(parent->name, "sbi") && strcmp(parent->name, "ebus")) return -ENODEV; flash.read_base = op->resource[0].start; flash.read_size = resource_size(&op->resource[0]); if (op->resource[1].flags) { flash.write_base = op->resource[1].start; flash.write_size = resource_size(&op->resource[1]); } else { flash.write_base = op->resource[0].start; flash.write_size = resource_size(&op->resource[0]); } flash.busy = 0; printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n", op->dev.of_node->full_name, flash.read_base, flash.read_size, flash.write_base, flash.write_size); return misc_register(&flash_dev); } static int flash_remove(struct platform_device *op) { misc_deregister(&flash_dev); return 0; } static const struct of_device_id flash_match[] = { { .name = "flashprom", }, {}, }; MODULE_DEVICE_TABLE(of, flash_match); static struct platform_driver flash_driver = { .driver = { .name = "flash", .owner = THIS_MODULE, .of_match_table = flash_match, }, .probe = flash_probe, .remove = flash_remove, }; module_platform_driver(flash_driver); MODULE_LICENSE("GPL");
gpl-2.0
xperiasailors/android_kernel_sony_msm8974
drivers/net/ethernet/xscale/ixp2000/enp2611.c
9647
6201
/* * IXP2400 MSF network device driver for the Radisys ENP2611 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> * Dedicated to Marija Kulikova. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <asm/hardware/uengine.h> #include <asm/mach-types.h> #include <asm/io.h> #include "ixpdev.h" #include "caleb.h" #include "ixp2400-msf.h" #include "pm3386.h" /*********************************************************************** * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA * to the IXP2400. * * +-------------+ * SFP GBIC #0 ---+ | +---------+ * | PM3386 #0 +-------+ | * SFP GBIC #1 ---+ | | "Caleb" | +---------+ * +-------------+ | | | | * | SPI-3 +---------+ IXP2400 | * +-------------+ | bridge | | | * SFP GBIC #2 ---+ | | FPGA | +---------+ * | PM3386 #1 +-------+ | * | | +---------+ * +-------------+ * ^ ^ ^ * | 1.25Gbaud | 104MHz | 104MHz * | SERDES ea. | SPI-3 ea. 
 *                          | SPI-3
 ***********************************************************************/

/*
 * MSF (Media and Switch Fabric) configuration for the ENP-2611 board:
 * 32-bit MPHY POS-PHY level 3 in both directions, polling 3 ports,
 * with 64-byte RBUF/TBUF elements and a 16x PLL clock multiplier.
 */
static struct ixp2400_msf_parameters enp2611_msf_parameters =
{
	.rx_mode =		IXP2400_RX_MODE_UTOPIA_POS |
				IXP2400_RX_MODE_1x32 |
				IXP2400_RX_MODE_MPHY |
				IXP2400_RX_MODE_MPHY_32 |
				IXP2400_RX_MODE_MPHY_POLLED_STATUS |
				IXP2400_RX_MODE_MPHY_LEVEL3 |
				IXP2400_RX_MODE_RBUF_SIZE_64,

	.rxclk01_multiplier =	IXP2400_PLL_MULTIPLIER_16,

	.rx_poll_ports =	3,

	/* One identical mode word per RX channel (all four channels). */
	.rx_channel_mode = {
		IXP2400_PORT_RX_MODE_MASTER |
		IXP2400_PORT_RX_MODE_POS_PHY |
		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
		IXP2400_PORT_RX_MODE_ODD_PARITY |
		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_RX_MODE_MASTER |
		IXP2400_PORT_RX_MODE_POS_PHY |
		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
		IXP2400_PORT_RX_MODE_ODD_PARITY |
		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_RX_MODE_MASTER |
		IXP2400_PORT_RX_MODE_POS_PHY |
		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
		IXP2400_PORT_RX_MODE_ODD_PARITY |
		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_RX_MODE_MASTER |
		IXP2400_PORT_RX_MODE_POS_PHY |
		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
		IXP2400_PORT_RX_MODE_ODD_PARITY |
		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
	},

	.tx_mode =		IXP2400_TX_MODE_UTOPIA_POS |
				IXP2400_TX_MODE_1x32 |
				IXP2400_TX_MODE_MPHY |
				IXP2400_TX_MODE_MPHY_32 |
				IXP2400_TX_MODE_MPHY_POLLED_STATUS |
				IXP2400_TX_MODE_MPHY_LEVEL3 |
				IXP2400_TX_MODE_TBUF_SIZE_64,

	.txclk01_multiplier =	IXP2400_PLL_MULTIPLIER_16,

	.tx_poll_ports =	3,

	/* TX channels use the same mode but without the L3 framing bit. */
	.tx_channel_mode = {
		IXP2400_PORT_TX_MODE_MASTER |
		IXP2400_PORT_TX_MODE_POS_PHY |
		IXP2400_PORT_TX_MODE_ODD_PARITY |
		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_TX_MODE_MASTER |
		IXP2400_PORT_TX_MODE_POS_PHY |
		IXP2400_PORT_TX_MODE_ODD_PARITY |
		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_TX_MODE_MASTER |
		IXP2400_PORT_TX_MODE_POS_PHY |
		IXP2400_PORT_TX_MODE_ODD_PARITY |
		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,

		IXP2400_PORT_TX_MODE_MASTER |
		IXP2400_PORT_TX_MODE_POS_PHY |
		IXP2400_PORT_TX_MODE_ODD_PARITY |
		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
	}
};

/* Net devices for the (up to) 3 ports; NULL entries are unpopulated. */
static struct net_device *nds[3];
/* Self-rearming 100ms poll timer (old-style timer API, pre-timer_setup). */
static struct timer_list link_check_timer;

/* @@@ Poll the SFP moddef0 line too. */
/* @@@ Try to use the pm3386 DOOL interrupt as well. */
/*
 * Timer callback: poll PHY link state for each port and propagate
 * transitions to the net stack, enabling/disabling the TX path in the
 * PM3386 gigabit MAC and the Caleb SPI-3 bridge on the way.
 * Re-arms itself every HZ/10 jiffies.
 */
static void enp2611_check_link_status(unsigned long __dummy)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct net_device *dev;
		int status;

		dev = nds[i];
		if (dev == NULL)
			continue;

		status = pm3386_is_link_up(i);
		if (status && !netif_carrier_ok(dev)) {
			/* @@@ Should report autonegotiation status. */
			printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);

			/* Enable the MAC before the fabric bridge. */
			pm3386_enable_tx(i);
			caleb_enable_tx(i);
			netif_carrier_on(dev);
		} else if (!status && netif_carrier_ok(dev)) {
			printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);

			/* Reverse order of the enable path. */
			netif_carrier_off(dev);
			caleb_disable_tx(i);
			pm3386_disable_tx(i);
		}
	}

	link_check_timer.expires = jiffies + HZ / 10;
	add_timer(&link_check_timer);
}

/*
 * Admin up/down hook passed to ixpdev_init(): brings the RX path of a
 * single port up or down (TX is driven by link state, see above).
 */
static void enp2611_set_port_admin_status(int port, int up)
{
	if (up) {
		caleb_enable_rx(port);

		pm3386_set_carrier(port, 1);
		pm3386_enable_rx(port);
	} else {
		/* Stop TX first so nothing is queued into a dead RX path. */
		caleb_disable_tx(port);
		pm3386_disable_tx(port);
		/* @@@ Flush out pending packets. */
		pm3386_set_carrier(port, 0);

		pm3386_disable_rx(port);
		caleb_disable_rx(port);
	}
}

/*
 * Module init: reset the fabric bridge and MAC, allocate one net device
 * per physical port, program the MSF, register with the ixpdev core and
 * start the link poll timer.  Returns 0 or a negative errno.
 */
static int __init enp2611_init_module(void)
{
	int ports;
	int i;

	if (!machine_is_enp2611())
		return -ENODEV;

	caleb_reset();
	pm3386_reset();

	ports = pm3386_port_count();
	for (i = 0; i < ports; i++) {
		nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
		if (nds[i] == NULL) {
			/* Unwind the devices allocated so far. */
			while (--i >= 0)
				free_netdev(nds[i]);

			return -ENOMEM;
		}

		pm3386_init_port(i);
		pm3386_get_mac(i, nds[i]->dev_addr);
	}

	ixp2400_msf_init(&enp2611_msf_parameters);

	if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
		for (i = 0; i < ports; i++)
			if (nds[i])
				free_netdev(nds[i]);

		return -EINVAL;
	}

	init_timer(&link_check_timer);
	link_check_timer.function = enp2611_check_link_status;
	link_check_timer.expires = jiffies;
	add_timer(&link_check_timer);

	return 0;
}

/*
 * Module exit: stop the poll timer, unregister from ixpdev and free all
 * net devices.
 * NOTE(review): this frees nds[0..2] unconditionally, which assumes
 * pm3386_port_count() always returned 3 at init time (free_netdev(NULL)
 * would oops) — presumably true on this board; confirm against pm3386.
 */
static void __exit enp2611_cleanup_module(void)
{
	int i;

	del_timer_sync(&link_check_timer);

	ixpdev_deinit();
	for (i = 0; i < 3; i++)
		free_netdev(nds[i]);
}

module_init(enp2611_init_module);
module_exit(enp2611_cleanup_module);
MODULE_LICENSE("GPL");
gpl-2.0
myjang0507/Polaris-slte-
net/bridge/netfilter/ebt_dnat.c
11951
1797
/* * ebt_dnat * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * June, 2002 * */ #include <linux/module.h> #include <net/sock.h> #include <linux/netfilter.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_nat.h> static unsigned int ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct ebt_nat_info *info = par->targinfo; if (!skb_make_writable(skb, 0)) return EBT_DROP; memcpy(eth_hdr(skb)->h_dest, info->mac, ETH_ALEN); return info->target; } static int ebt_dnat_tg_check(const struct xt_tgchk_param *par) { const struct ebt_nat_info *info = par->targinfo; unsigned int hook_mask; if (BASE_CHAIN && info->target == EBT_RETURN) return -EINVAL; hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS); if ((strcmp(par->table, "nat") != 0 || (hook_mask & ~((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT)))) && (strcmp(par->table, "broute") != 0 || hook_mask & ~(1 << NF_BR_BROUTING))) return -EINVAL; if (INVALID_TARGET) return -EINVAL; return 0; } static struct xt_target ebt_dnat_tg_reg __read_mostly = { .name = "dnat", .revision = 0, .family = NFPROTO_BRIDGE, .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING), .target = ebt_dnat_tg, .checkentry = ebt_dnat_tg_check, .targetsize = sizeof(struct ebt_nat_info), .me = THIS_MODULE, }; static int __init ebt_dnat_init(void) { return xt_register_target(&ebt_dnat_tg_reg); } static void __exit ebt_dnat_fini(void) { xt_unregister_target(&ebt_dnat_tg_reg); } module_init(ebt_dnat_init); module_exit(ebt_dnat_fini); MODULE_DESCRIPTION("Ebtables: Destination MAC address translation"); MODULE_LICENSE("GPL");
gpl-2.0
tripleoxygen/kernel_zeebo
ipc/ipcns_notifier.c
13231
2265
/* * linux/ipc/ipcns_notifier.c * Copyright (C) 2007 BULL SA. Nadia Derbey * * Notification mechanism for ipc namespaces: * The callback routine registered in the memory chain invokes the ipcns * notifier chain with the IPCNS_MEMCHANGED event. * Each callback routine registered in the ipcns namespace recomputes msgmni * for the owning namespace. */ #include <linux/msg.h> #include <linux/rcupdate.h> #include <linux/notifier.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> #include "util.h" static BLOCKING_NOTIFIER_HEAD(ipcns_chain); static int ipcns_callback(struct notifier_block *self, unsigned long action, void *arg) { struct ipc_namespace *ns; switch (action) { case IPCNS_MEMCHANGED: /* amount of lowmem has changed */ case IPCNS_CREATED: case IPCNS_REMOVED: /* * It's time to recompute msgmni */ ns = container_of(self, struct ipc_namespace, ipcns_nb); /* * No need to get a reference on the ns: the 1st job of * free_ipc_ns() is to unregister the callback routine. * blocking_notifier_chain_unregister takes the wr lock to do * it. * When this callback routine is called the rd lock is held by * blocking_notifier_call_chain. * So the ipc ns cannot be freed while we are here. 
*/ recompute_msgmni(ns); break; default: break; } return NOTIFY_OK; } int register_ipcns_notifier(struct ipc_namespace *ns) { int rc; memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); ns->ipcns_nb.notifier_call = ipcns_callback; ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; rc = blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb); if (!rc) ns->auto_msgmni = 1; return rc; } int cond_register_ipcns_notifier(struct ipc_namespace *ns) { int rc; memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); ns->ipcns_nb.notifier_call = ipcns_callback; ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; rc = blocking_notifier_chain_cond_register(&ipcns_chain, &ns->ipcns_nb); if (!rc) ns->auto_msgmni = 1; return rc; } void unregister_ipcns_notifier(struct ipc_namespace *ns) { blocking_notifier_chain_unregister(&ipcns_chain, &ns->ipcns_nb); ns->auto_msgmni = 0; } int ipcns_notify(unsigned long val) { return blocking_notifier_call_chain(&ipcns_chain, val, NULL); }
gpl-2.0
q-li/linux-sunxi
lib/zlib_deflate/deftree.c
13743
40510
/* +++ trees.c */ /* trees.c -- output deflated data using Huffman coding * Copyright (C) 1995-1996 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* * ALGORITHM * * The "deflation" process uses several Huffman trees. The more * common source values are represented by shorter bit sequences. * * Each code tree is stored in a compressed form which is itself * a Huffman encoding of the lengths of all the code strings (in * ascending order by source values). The actual code strings are * reconstructed from the lengths in the inflate process, as described * in the deflate specification. * * REFERENCES * * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification". * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc * * Storer, James A. * Data Compression: Methods and Theory, pp. 49-50. * Computer Science Press, 1988. ISBN 0-7167-8156-5. * * Sedgewick, R. * Algorithms, p290. * Addison-Wesley, 1983. ISBN 0-201-06672-6. */ /* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */ /* #include "deflate.h" */ #include <linux/zutil.h> #include "defutil.h" #ifdef DEBUG_ZLIB # include <ctype.h> #endif /* =========================================================================== * Constants */ #define MAX_BL_BITS 7 /* Bit length codes must not exceed MAX_BL_BITS bits */ #define END_BLOCK 256 /* end of block literal code */ #define REP_3_6 16 /* repeat previous bit length 3-6 times (2 bits of repeat count) */ #define REPZ_3_10 17 /* repeat a zero length 3-10 times (3 bits of repeat count) */ #define REPZ_11_138 18 /* repeat a zero length 11-138 times (7 bits of repeat count) */ static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; static const int extra_dbits[D_CODES] /* extra bits for each distance code */ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; static const int extra_blbits[BL_CODES]/* extra bits 
for each bit length code */ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; static const uch bl_order[BL_CODES] = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; /* The lengths of the bit length codes are sent in order of decreasing * probability, to avoid transmitting the lengths for unused bit length codes. */ #define Buf_size (8 * 2*sizeof(char)) /* Number of bits used within bi_buf. (bi_buf might be implemented on * more than 16 bits on some systems.) */ /* =========================================================================== * Local data. These are initialized only once. */ static ct_data static_ltree[L_CODES+2]; /* The static literal tree. Since the bit lengths are imposed, there is no * need for the L_CODES extra codes used during heap construction. However * The codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init * below). */ static ct_data static_dtree[D_CODES]; /* The static distance tree. (Actually a trivial tree since all codes use * 5 bits.) */ static uch dist_code[512]; /* distance codes. The first 256 values correspond to the distances * 3 .. 258, the last 256 values correspond to the top 8 bits of * the 15 bit distances. 
*/ static uch length_code[MAX_MATCH-MIN_MATCH+1]; /* length code for each normalized match length (0 == MIN_MATCH) */ static int base_length[LENGTH_CODES]; /* First normalized length for each code (0 = MIN_MATCH) */ static int base_dist[D_CODES]; /* First normalized distance for each code (0 = distance of 1) */ struct static_tree_desc_s { const ct_data *static_tree; /* static tree or NULL */ const int *extra_bits; /* extra bits for each code or NULL */ int extra_base; /* base index for extra_bits */ int elems; /* max number of elements in the tree */ int max_length; /* max bit length for the codes */ }; static static_tree_desc static_l_desc = {static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; static static_tree_desc static_d_desc = {static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; static static_tree_desc static_bl_desc = {(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; /* =========================================================================== * Local (static) routines in this file. 
*/ static void tr_static_init (void); static void init_block (deflate_state *s); static void pqdownheap (deflate_state *s, ct_data *tree, int k); static void gen_bitlen (deflate_state *s, tree_desc *desc); static void gen_codes (ct_data *tree, int max_code, ush *bl_count); static void build_tree (deflate_state *s, tree_desc *desc); static void scan_tree (deflate_state *s, ct_data *tree, int max_code); static void send_tree (deflate_state *s, ct_data *tree, int max_code); static int build_bl_tree (deflate_state *s); static void send_all_trees (deflate_state *s, int lcodes, int dcodes, int blcodes); static void compress_block (deflate_state *s, ct_data *ltree, ct_data *dtree); static void set_data_type (deflate_state *s); static unsigned bi_reverse (unsigned value, int length); static void bi_windup (deflate_state *s); static void bi_flush (deflate_state *s); static void copy_block (deflate_state *s, char *buf, unsigned len, int header); #ifndef DEBUG_ZLIB # define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) /* Send a code of the given tree. c and tree must not have side effects */ #else /* DEBUG_ZLIB */ # define send_code(s, c, tree) \ { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ send_bits(s, tree[c].Code, tree[c].Len); } #endif #define d_code(dist) \ ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)]) /* Mapping from a distance to a distance code. dist is the distance - 1 and * must not have side effects. dist_code[256] and dist_code[257] are never * used. */ /* =========================================================================== * Send a value on a given number of bits. * IN assertion: length <= 16 and value fits in length bits. 
*/ #ifdef DEBUG_ZLIB static void send_bits (deflate_state *s, int value, int length); static void send_bits( deflate_state *s, int value, /* value to send */ int length /* number of bits */ ) { Tracevv((stderr," l %2d v %4x ", length, value)); Assert(length > 0 && length <= 15, "invalid length"); s->bits_sent += (ulg)length; /* If not enough room in bi_buf, use (valid) bits from bi_buf and * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) * unused bits in value. */ if (s->bi_valid > (int)Buf_size - length) { s->bi_buf |= (value << s->bi_valid); put_short(s, s->bi_buf); s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); s->bi_valid += length - Buf_size; } else { s->bi_buf |= value << s->bi_valid; s->bi_valid += length; } } #else /* !DEBUG_ZLIB */ #define send_bits(s, value, length) \ { int len = length;\ if (s->bi_valid > (int)Buf_size - len) {\ int val = value;\ s->bi_buf |= (val << s->bi_valid);\ put_short(s, s->bi_buf);\ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ s->bi_valid += len - Buf_size;\ } else {\ s->bi_buf |= (value) << s->bi_valid;\ s->bi_valid += len;\ }\ } #endif /* DEBUG_ZLIB */ /* =========================================================================== * Initialize the various 'constant' tables. In a multi-threaded environment, * this function may be called by two threads concurrently, but this is * harmless since both invocations do exactly the same thing. 
 */
static void tr_static_init(void)
{
    static int static_init_done;
    int n;        /* iterates over tree elements */
    int bits;     /* bit counter */
    int length;   /* length value */
    int code;     /* code value */
    int dist;     /* distance index */
    ush bl_count[MAX_BITS+1];
    /* number of codes at each bit length for an optimal tree */

    if (static_init_done)
	return;

    /* Initialize the mapping length (0..255) -> length code (0..28) */
    length = 0;
    for (code = 0; code < LENGTH_CODES-1; code++) {
        base_length[code] = length;
        for (n = 0; n < (1<<extra_lbits[code]); n++) {
            length_code[length++] = (uch)code;
        }
    }
    Assert (length == 256, "tr_static_init: length != 256");
    /* Note that the length 255 (match length 258) can be represented
     * in two different ways: code 284 + 5 bits or code 285, so we
     * overwrite length_code[255] to use the best encoding:
     */
    length_code[length-1] = (uch)code;

    /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
    dist = 0;
    for (code = 0 ; code < 16; code++) {
        base_dist[code] = dist;
        for (n = 0; n < (1<<extra_dbits[code]); n++) {
            dist_code[dist++] = (uch)code;
        }
    }
    Assert (dist == 256, "tr_static_init: dist != 256");
    dist >>= 7; /* from now on, all distances are divided by 128 */
    for ( ; code < D_CODES; code++) {
        base_dist[code] = dist << 7;
        for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
            dist_code[256 + dist++] = (uch)code;
        }
    }
    Assert (dist == 256, "tr_static_init: 256+dist != 512");

    /* Construct the codes of the static literal tree */
    for (bits = 0; bits <= MAX_BITS; bits++)
	bl_count[bits] = 0;
    n = 0;
    while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
    while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
    while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
    while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
    /* Codes 286 and 287 do not exist, but we must include them in the
     * tree construction to get a canonical Huffman tree (longest code
     * all ones)
     */
    gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);

    /* The static distance tree is trivial: */
    for (n = 0; n < D_CODES; n++) {
        static_dtree[n].Len = 5;
        static_dtree[n].Code = bi_reverse((unsigned)n, 5);
    }
    static_init_done = 1;
}

/* ===========================================================================
 * Initialize the tree data structures for a new zlib stream.
 */
void zlib_tr_init(
	deflate_state *s
)
{
    tr_static_init();

    s->compressed_len = 0L;

    /* Point the three tree descriptors at their dynamic trees and at the
     * corresponding immutable static descriptors.
     */
    s->l_desc.dyn_tree = s->dyn_ltree;
    s->l_desc.stat_desc = &static_l_desc;

    s->d_desc.dyn_tree = s->dyn_dtree;
    s->d_desc.stat_desc = &static_d_desc;

    s->bl_desc.dyn_tree = s->bl_tree;
    s->bl_desc.stat_desc = &static_bl_desc;

    s->bi_buf = 0;
    s->bi_valid = 0;
    s->last_eob_len = 8; /* enough lookahead for inflate */
#ifdef DEBUG_ZLIB
    s->bits_sent = 0L;
#endif

    /* Initialize the first block of the first file: */
    init_block(s);
}

/* ===========================================================================
 * Initialize a new block.
 */
static void init_block(
	deflate_state *s
)
{
    int n; /* iterates over tree elements */

    /* Initialize the trees. */
    for (n = 0; n < L_CODES;  n++) s->dyn_ltree[n].Freq = 0;
    for (n = 0; n < D_CODES;  n++) s->dyn_dtree[n].Freq = 0;
    for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;

    /* Every block ends with exactly one END_BLOCK code. */
    s->dyn_ltree[END_BLOCK].Freq = 1;
    s->opt_len = s->static_len = 0L;
    s->last_lit = s->matches = 0;
}

#define SMALLEST 1
/* Index within the heap array of least frequent node in the Huffman tree */

/* ===========================================================================
 * Remove the smallest element from the heap and recreate the heap with
 * one less element. Updates heap and heap_len.
 */
#define pqremove(s, tree, top) \
{\
    top = s->heap[SMALLEST]; \
    s->heap[SMALLEST] = s->heap[s->heap_len--]; \
    pqdownheap(s, tree, SMALLEST); \
}

/* ===========================================================================
 * Compares to subtrees, using the tree depth as tie breaker when
 * the subtrees have equal frequency. This minimizes the worst case length.
 */
#define smaller(tree, n, m, depth) \
   (tree[n].Freq < tree[m].Freq || \
   (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))

/* ===========================================================================
 * Restore the heap property by moving down the tree starting at node k,
 * exchanging a node with the smallest of its two sons if necessary, stopping
 * when the heap property is re-established (each father smaller than its
 * two sons).
 */
static void pqdownheap(
	deflate_state *s,
	ct_data *tree,	/* the tree to restore */
	int k		/* node to move down */
)
{
    int v = s->heap[k];
    int j = k << 1;  /* left son of k */
    while (j <= s->heap_len) {
        /* Set j to the smallest of the two sons: */
        if (j < s->heap_len &&
            smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
            j++;
        }
        /* Exit if v is smaller than both sons */
        if (smaller(tree, v, s->heap[j], s->depth)) break;

        /* Exchange v with the smallest son */
        s->heap[k] = s->heap[j];  k = j;

        /* And continue down the tree, setting j to the left son of k */
        j <<= 1;
    }
    s->heap[k] = v;
}

/* ===========================================================================
 * Compute the optimal bit lengths for a tree and update the total bit length
 * for the current block.
 * IN assertion: the fields freq and dad are set, heap[heap_max] and
 *    above are the tree nodes sorted by increasing frequency.
 * OUT assertions: the field len is set to the optimal bit length, the
 *     array bl_count contains the frequencies for each bit length.
 *     The length opt_len is updated; static_len is also updated if stree is
 *     not null.
 */
static void gen_bitlen(
	deflate_state *s,
	tree_desc *desc    /* the tree descriptor */
)
{
    ct_data *tree        = desc->dyn_tree;
    int max_code         = desc->max_code;
    const ct_data *stree = desc->stat_desc->static_tree;
    const int *extra     = desc->stat_desc->extra_bits;
    int base             = desc->stat_desc->extra_base;
    int max_length       = desc->stat_desc->max_length;
    int h;              /* heap index */
    int n, m;           /* iterate over the tree elements */
    int bits;           /* bit length */
    int xbits;          /* extra bits */
    ush f;              /* frequency */
    int overflow = 0;   /* number of elements with bit length too large */

    for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;

    /* In a first pass, compute the optimal bit lengths (which may
     * overflow in the case of the bit length tree).
     */
    tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */

    for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
        n = s->heap[h];
        bits = tree[tree[n].Dad].Len + 1;
        if (bits > max_length) bits = max_length, overflow++;
        tree[n].Len = (ush)bits;
        /* We overwrite tree[n].Dad which is no longer needed */

        if (n > max_code) continue; /* not a leaf node */

        s->bl_count[bits]++;
        xbits = 0;
        if (n >= base) xbits = extra[n-base];
        f = tree[n].Freq;
        s->opt_len += (ulg)f * (bits + xbits);
        if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
    }
    if (overflow == 0) return;

    Trace((stderr,"\nbit length overflow\n"));
    /* This happens for example on obj2 and pic of the Calgary corpus */

    /* Find the first bit length which could increase: */
    do {
        bits = max_length-1;
        while (s->bl_count[bits] == 0) bits--;
        s->bl_count[bits]--;      /* move one leaf down the tree */
        s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
        s->bl_count[max_length]--;
        /* The brother of the overflow item also moves one step up,
         * but this does not affect bl_count[max_length]
         */
        overflow -= 2;
    } while (overflow > 0);

    /* Now recompute all bit lengths, scanning in increasing frequency.
     * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
     * lengths instead of fixing only the wrong ones. This idea is taken
     * from 'ar' written by Haruhiko Okumura.)
     */
    for (bits = max_length; bits != 0; bits--) {
        n = s->bl_count[bits];
        while (n != 0) {
            m = s->heap[--h];
            if (m > max_code) continue;
            if (tree[m].Len != (unsigned) bits) {
                Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
                s->opt_len += ((long)bits - (long)tree[m].Len)
                              *(long)tree[m].Freq;
                tree[m].Len = (ush)bits;
            }
            n--;
        }
    }
}

/* ===========================================================================
 * Generate the codes for a given tree and bit counts (which need not be
 * optimal).
 * IN assertion: the array bl_count contains the bit length statistics for
 * the given tree and the field len is set for all tree elements.
 * OUT assertion: the field code is set for all tree elements of non
 *     zero code length.
 */
static void gen_codes(
	ct_data *tree,             /* the tree to decorate */
	int max_code,              /* largest code with non zero frequency */
	ush *bl_count              /* number of codes at each bit length */
)
{
    ush next_code[MAX_BITS+1]; /* next code value for each bit length */
    ush code = 0;              /* running code value */
    int bits;                  /* bit index */
    int n;                     /* code index */

    /* The distribution counts are first used to generate the code values
     * without bit reversal.
     */
    for (bits = 1; bits <= MAX_BITS; bits++) {
        next_code[bits] = code = (code + bl_count[bits-1]) << 1;
    }
    /* Check that the bit counts in bl_count are consistent. The last code
     * must be all ones.
     */
    Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
            "inconsistent bit counts");
    Tracev((stderr,"\ngen_codes: max_code %d ", max_code));

    for (n = 0;  n <= max_code; n++) {
        int len = tree[n].Len;
        if (len == 0) continue;
        /* Now reverse the bits */
        tree[n].Code = bi_reverse(next_code[len]++, len);

        Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
             n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
    }
}

/* ===========================================================================
 * Construct one Huffman tree and assigns the code bit strings and lengths.
 * Update the total bit length for the current block.
 * IN assertion: the field freq is set for all tree elements.
 * OUT assertions: the fields len and code are set to the optimal bit length
 *     and corresponding code. The length opt_len is updated; static_len is
 *     also updated if stree is not null. The field max_code is set.
 */
static void build_tree(
	deflate_state *s,
	tree_desc *desc	 /* the tree descriptor */
)
{
    ct_data *tree         = desc->dyn_tree;
    const ct_data *stree  = desc->stat_desc->static_tree;
    int elems             = desc->stat_desc->elems;
    int n, m;          /* iterate over heap elements */
    int max_code = -1; /* largest code with non zero frequency */
    int node;          /* new node being created */

    /* Construct the initial heap, with least frequent element in
     * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
     * heap[0] is not used.
     */
    s->heap_len = 0, s->heap_max = HEAP_SIZE;

    for (n = 0; n < elems; n++) {
        if (tree[n].Freq != 0) {
            s->heap[++(s->heap_len)] = max_code = n;
            s->depth[n] = 0;
        } else {
            tree[n].Len = 0;
        }
    }

    /* The pkzip format requires that at least one distance code exists,
     * and that at least one bit should be sent even if there is only one
     * possible code. So to avoid special checks later on we force at least
     * two codes of non zero frequency.
     */
    while (s->heap_len < 2) {
        node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
        tree[node].Freq = 1;
        s->depth[node] = 0;
        s->opt_len--; if (stree) s->static_len -= stree[node].Len;
        /* node is 0 or 1 so it does not have extra bits */
    }
    desc->max_code = max_code;

    /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
     * establish sub-heaps of increasing lengths:
     */
    for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);

    /* Construct the Huffman tree by repeatedly combining the least two
     * frequent nodes.
     */
    node = elems;              /* next internal node of the tree */
    do {
        pqremove(s, tree, n);  /* n = node of least frequency */
        m = s->heap[SMALLEST]; /* m = node of next least frequency */

        s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
        s->heap[--(s->heap_max)] = m;

        /* Create a new node father of n and m */
        tree[node].Freq = tree[n].Freq + tree[m].Freq;
        s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1);
        tree[n].Dad = tree[m].Dad = (ush)node;
#ifdef DUMP_BL_TREE
        if (tree == s->bl_tree) {
            fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
                    node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
        }
#endif
        /* and insert the new node in the heap */
        s->heap[SMALLEST] = node++;
        pqdownheap(s, tree, SMALLEST);

    } while (s->heap_len >= 2);

    s->heap[--(s->heap_max)] = s->heap[SMALLEST];

    /* At this point, the fields freq and dad are set. We can now
     * generate the bit lengths.
     */
    gen_bitlen(s, (tree_desc *)desc);

    /* The field len is now set, we can generate the bit codes */
    gen_codes ((ct_data *)tree, max_code, s->bl_count);
}

/* ===========================================================================
 * Scan a literal or distance tree to determine the frequencies of the codes
 * in the bit length tree.
 */
static void scan_tree(
	deflate_state *s,
	ct_data *tree,	/* the tree to be scanned */
	int max_code	/* and its largest code of non zero frequency */
)
{
    int n;                     /* iterates over all tree elements */
    int prevlen = -1;          /* last emitted length */
    int curlen;                /* length of current code */
    int nextlen = tree[0].Len; /* length of next code */
    int count = 0;             /* repeat count of the current code */
    int max_count = 7;         /* max repeat count */
    int min_count = 4;         /* min repeat count */

    if (nextlen == 0) max_count = 138, min_count = 3;
    tree[max_code+1].Len = (ush)0xffff; /* guard */

    for (n = 0; n <= max_code; n++) {
        curlen = nextlen; nextlen = tree[n+1].Len;
        if (++count < max_count && curlen == nextlen) {
            continue;
        } else if (count < min_count) {
            /* Run too short for a repeat code: count each length itself. */
            s->bl_tree[curlen].Freq += count;
        } else if (curlen != 0) {
            if (curlen != prevlen) s->bl_tree[curlen].Freq++;
            s->bl_tree[REP_3_6].Freq++;
        } else if (count <= 10) {
            s->bl_tree[REPZ_3_10].Freq++;
        } else {
            s->bl_tree[REPZ_11_138].Freq++;
        }
        count = 0; prevlen = curlen;
        /* Pick repeat-count limits for the next run based on its length. */
        if (nextlen == 0) {
            max_count = 138, min_count = 3;
        } else if (curlen == nextlen) {
            max_count = 6, min_count = 3;
        } else {
            max_count = 7, min_count = 4;
        }
    }
}

/* ===========================================================================
 * Send a literal or distance tree in compressed form, using the codes in
 * bl_tree.  Mirrors the run-length logic of scan_tree(), but emits codes
 * instead of counting frequencies.
 */
static void send_tree(
	deflate_state *s,
	ct_data *tree, /* the tree to be scanned */
	int max_code   /* and its largest code of non zero frequency */
)
{
    int n;                     /* iterates over all tree elements */
    int prevlen = -1;          /* last emitted length */
    int curlen;                /* length of current code */
    int nextlen = tree[0].Len; /* length of next code */
    int count = 0;             /* repeat count of the current code */
    int max_count = 7;         /* max repeat count */
    int min_count = 4;         /* min repeat count */

    /* tree[max_code+1].Len = -1; */  /* guard already set */
    if (nextlen == 0) max_count = 138, min_count = 3;

    for (n = 0; n <= max_code; n++) {
        curlen = nextlen; nextlen = tree[n+1].Len;
        if (++count < max_count && curlen == nextlen) {
            continue;
        } else if (count < min_count) {
            do { send_code(s, curlen, s->bl_tree); } while (--count != 0);

        } else if (curlen != 0) {
            if (curlen != prevlen) {
                send_code(s, curlen, s->bl_tree); count--;
            }
            Assert(count >= 3 && count <= 6, " 3_6?");
            send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);

        } else if (count <= 10) {
            send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);

        } else {
            send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
        }
        count = 0; prevlen = curlen;
        if (nextlen == 0) {
            max_count = 138, min_count = 3;
        } else if (curlen == nextlen) {
            max_count = 6, min_count = 3;
        } else {
            max_count = 7, min_count = 4;
        }
    }
}

/* ===========================================================================
 * Construct the Huffman tree for the bit lengths and return the index in
 * bl_order of the last bit length code to send.
 */
static int build_bl_tree(
	deflate_state *s
)
{
    int max_blindex;  /* index of last bit length code of non zero freq */

    /* Determine the bit length frequencies for literal and distance trees */
    scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
    scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);

    /* Build the bit length tree: */
    build_tree(s, (tree_desc *)(&(s->bl_desc)));
    /* opt_len now includes the length of the tree representations, except
     * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
     */

    /* Determine the number of bit length codes to send. The pkzip format
     * requires that at least 4 bit length codes be sent. (appnote.txt says
     * 3 but the actual value used is 4.)
     */
    for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
        if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
    }
    /* Update opt_len to include the bit length tree and counts */
    s->opt_len += 3*(max_blindex+1) + 5+5+4;
    Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
            s->opt_len, s->static_len));

    return max_blindex;
}

/* ===========================================================================
 * Send the header for a block using dynamic Huffman trees: the counts, the
 * lengths of the bit length codes, the literal tree and the distance tree.
 * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
 */
static void send_all_trees(
	deflate_state *s,
	int lcodes,  /* number of codes for each tree */
	int dcodes,  /* number of codes for each tree */
	int blcodes  /* number of codes for each tree */
)
{
    int rank;                    /* index in bl_order */

    Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
    Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
            "too many codes");
    Tracev((stderr, "\nbl counts: "));
    send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
    send_bits(s, dcodes-1,   5);
    send_bits(s, blcodes-4,  4); /* not -3 as stated in appnote.txt */
    /* Bit-length-code lengths are sent in bl_order (decreasing probability). */
    for (rank = 0; rank < blcodes; rank++) {
        Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
        send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
    }
    Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));

    send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
    Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));

    send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
    Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
}

/* ===========================================================================
 * Send a stored block
 */
void zlib_tr_stored_block(
	deflate_state *s,
	char *buf,      /* input block */
	ulg stored_len, /* length of input block */
	int eof         /* true if this is the last block for a file */
)
{
    send_bits(s, (STORED_BLOCK<<1)+eof, 3);  /* send block type */
    /* Round up to a byte boundary before the LEN/NLEN words. */
    s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
    s->compressed_len += (stored_len + 4) << 3;

    copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
}

/* Send just the `stored block' type code without any length bytes or data.
 */
void zlib_tr_stored_type_only(
	deflate_state *s
)
{
    send_bits(s, (STORED_BLOCK << 1), 3);
    bi_windup(s);
    s->compressed_len = (s->compressed_len + 3) & ~7L;
}

/* ===========================================================================
 * Send one empty static block to give enough lookahead for inflate.
 * This takes 10 bits, of which 7 may remain in the bit buffer.
 * The current inflate code requires 9 bits of lookahead. If the
 * last two codes for the previous block (real code plus EOB) were coded
 * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
 * the last real code. In this case we send two empty static blocks instead
 * of one. (There are no problems if the previous block is stored or fixed.)
 * To simplify the code, we assume the worst case of last real code encoded
 * on one bit only.
 */
void zlib_tr_align(
	deflate_state *s
)
{
    send_bits(s, STATIC_TREES<<1, 3);
    send_code(s, END_BLOCK, static_ltree);
    s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
    bi_flush(s);
    /* Of the 10 bits for the empty block, we have already sent
     * (10 - bi_valid) bits. The lookahead for the last real code (before
     * the EOB of the previous block) was thus at least one plus the length
     * of the EOB plus what we have just sent of the empty static block.
     */
    if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
        send_bits(s, STATIC_TREES<<1, 3);
        send_code(s, END_BLOCK, static_ltree);
        s->compressed_len += 10L;
        bi_flush(s);
    }
    s->last_eob_len = 7;
}

/* ===========================================================================
 * Determine the best encoding for the current block: dynamic trees, static
 * trees or store, and output the encoded block to the zip file. This function
 * returns the total compressed length for the file so far.
 */
ulg zlib_tr_flush_block(
	deflate_state *s,
	char *buf,       /* input block, or NULL if too old */
	ulg stored_len,  /* length of input block */
	int eof          /* true if this is the last block for a file */
)
{
	ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
	int max_blindex = 0;  /* index of last bit length code of non zero freq */

	/* Build the Huffman trees unless a stored block is forced */
	if (s->level > 0) {
		/* Check if the file is ascii or binary */
		if (s->data_type == Z_UNKNOWN)
			set_data_type(s);

		/* Construct the literal and distance trees */
		build_tree(s, (tree_desc *)(&(s->l_desc)));
		Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
			s->static_len));

		build_tree(s, (tree_desc *)(&(s->d_desc)));
		Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
			s->static_len));
		/* At this point, opt_len and static_len are the total bit lengths of
		 * the compressed block data, excluding the tree representations.
		 */

		/* Build the bit length tree for the above two trees, and get the index
		 * in bl_order of the last bit length code to send.
		 */
		max_blindex = build_bl_tree(s);

		/* Determine the best encoding. Compute first the block length
		 * in bytes (+3 for the block-type bits, +7 to round up).
		 */
		opt_lenb = (s->opt_len+3+7)>>3;
		static_lenb = (s->static_len+3+7)>>3;

		Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
			opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
			s->last_lit));

		if (static_lenb <= opt_lenb) opt_lenb = static_lenb;

	} else {
		Assert(buf != (char*)0, "lost buf");
		opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
	}

	/* If compression failed and this is the first and last block,
	 * and if the .zip file can be seeked (to rewrite the local header),
	 * the whole file is transformed into a stored file:
	 */
#ifdef STORED_FILE_OK
#  ifdef FORCE_STORED_FILE
	if (eof && s->compressed_len == 0L) { /* force stored file */
#  else
	if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
#  endif
		/* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
		if (buf == (char*)0) error ("block vanished");

		copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
		s->compressed_len = stored_len << 3;
		s->method = STORED;
	} else
#endif /* STORED_FILE_OK */

#ifdef FORCE_STORED
	if (buf != (char*)0) { /* force stored block */
#else
	if (stored_len+4 <= opt_lenb && buf != (char*)0) {
		/* 4: two words for the lengths */
#endif
		/* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
		 * Otherwise we can't have processed more than WSIZE input bytes since
		 * the last block flush, because compression would have been
		 * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
		 * transform a block into a stored block.
		 */
		zlib_tr_stored_block(s, buf, stored_len, eof);

#ifdef FORCE_STATIC
	} else if (static_lenb >= 0) { /* force static trees */
#else
	} else if (static_lenb == opt_lenb) {
#endif
		/* Static trees are no worse: cheaper because no tree is sent. */
		send_bits(s, (STATIC_TREES<<1)+eof, 3);
		compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
		s->compressed_len += 3 + s->static_len;
	} else {
		send_bits(s, (DYN_TREES<<1)+eof, 3);
		send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
			       max_blindex+1);
		compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
		s->compressed_len += 3 + s->opt_len;
	}
	Assert (s->compressed_len == s->bits_sent, "bad compressed size");
	init_block(s);

	if (eof) {
		bi_windup(s);
		s->compressed_len += 7;  /* align on byte boundary */
	}
	Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
	       s->compressed_len-7*eof));

	return s->compressed_len >> 3;
}

/* ===========================================================================
 * Save the match info and tally the frequency counts. Return true if
 * the current block must be flushed.
 */
int zlib_tr_tally(
	deflate_state *s,
	unsigned dist,  /* distance of matched string */
	unsigned lc     /* match length-MIN_MATCH or unmatched char (if dist==0) */
)
{
	/* Record the literal/match in the pending buffers. */
	s->d_buf[s->last_lit] = (ush)dist;
	s->l_buf[s->last_lit++] = (uch)lc;
	if (dist == 0) {
		/* lc is the unmatched char */
		s->dyn_ltree[lc].Freq++;
	} else {
		s->matches++;
		/* Here, lc is the match length - MIN_MATCH */
		dist--;             /* dist = match distance - 1 */
		Assert((ush)dist < (ush)MAX_DIST(s) &&
		       (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
		       (ush)d_code(dist) < (ush)D_CODES,  "zlib_tr_tally: bad match");

		s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
		s->dyn_dtree[d_code(dist)].Freq++;
	}

	/* Try to guess if it is profitable to stop the current block here */
	if ((s->last_lit & 0xfff) == 0 && s->level > 2) {
		/* Compute an upper bound for the compressed length */
		ulg out_length = (ulg)s->last_lit*8L;
		ulg in_length = (ulg)((long)s->strstart - s->block_start);
		int dcode;
		for (dcode = 0; dcode < D_CODES; dcode++) {
			out_length += (ulg)s->dyn_dtree[dcode].Freq *
				(5L+extra_dbits[dcode]);
		}
		out_length >>= 3;
		Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
		       s->last_lit, in_length, out_length,
		       100L - out_length*100L/in_length));
		/* Flush early if matches are rare and expansion is likely. */
		if (s->matches < s->last_lit/2 && out_length < in_length/2)
			return 1;
	}
	return (s->last_lit == s->lit_bufsize-1);
	/* We avoid equality with lit_bufsize because of wraparound at 64K
	 * on 16 bit machines and because stored blocks are restricted to
	 * 64K-1 bytes.
	 */
}

/* ===========================================================================
 * Send the block data compressed using the given Huffman trees
 */
static void compress_block(
	deflate_state *s,
	ct_data *ltree, /* literal tree */
	ct_data *dtree  /* distance tree */
)
{
	unsigned dist;      /* distance of matched string */
	int lc;             /* match length or unmatched char (if dist == 0) */
	unsigned lx = 0;    /* running index in l_buf */
	unsigned code;      /* the code to send */
	int extra;          /* number of extra bits to send */

	if (s->last_lit != 0) do {
		dist = s->d_buf[lx];
		lc = s->l_buf[lx++];
		if (dist == 0) {
			send_code(s, lc, ltree); /* send a literal byte */
			Tracecv(isgraph(lc), (stderr," '%c' ", lc));
		} else {
			/* Here, lc is the match length - MIN_MATCH */
			code = length_code[lc];
			send_code(s, code+LITERALS+1, ltree); /* send the length code */
			extra = extra_lbits[code];
			if (extra != 0) {
				lc -= base_length[code];
				send_bits(s, lc, extra); /* send the extra length bits */
			}
			dist--; /* dist is now the match distance - 1 */
			code = d_code(dist);
			Assert (code < D_CODES, "bad d_code");

			send_code(s, code, dtree);       /* send the distance code */
			extra = extra_dbits[code];
			if (extra != 0) {
				dist -= base_dist[code];
				send_bits(s, dist, extra); /* send the extra distance bits */
			}
		} /* literal or match pair ? */

		/* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
		Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");

	} while (lx < s->last_lit);

	send_code(s, END_BLOCK, ltree);
	s->last_eob_len = ltree[END_BLOCK].Len;
}

/* ===========================================================================
 * Set the data type to ASCII or BINARY, using a crude approximation:
 * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
 * IN assertion: the fields freq of dyn_ltree are set and the total of all
 * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
 */
static void set_data_type(
	deflate_state *s
)
{
	int n = 0;
	unsigned ascii_freq = 0;
	unsigned bin_freq = 0;

	/* Bucket literal frequencies: control chars, printable ASCII, high bytes. */
	while (n < 7)        bin_freq += s->dyn_ltree[n++].Freq;
	while (n < 128)    ascii_freq += s->dyn_ltree[n++].Freq;
	while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
	s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
}

/* ===========================================================================
 * Copy a stored block, storing first the length and its
 * one's complement if requested.
 */
static void copy_block(
	deflate_state *s,
	char *buf,    /* the input data */
	unsigned len, /* its length */
	int header    /* true if block header must be written */
)
{
	bi_windup(s);        /* align on byte boundary */
	s->last_eob_len = 8; /* enough lookahead for inflate */

	if (header) {
		/* LEN and its one's complement NLEN, per the deflate spec. */
		put_short(s, (ush)len);
		put_short(s, (ush)~len);
#ifdef DEBUG_ZLIB
		s->bits_sent += 2*16;
#endif
	}
#ifdef DEBUG_ZLIB
	s->bits_sent += (ulg)len<<3;
#endif
	/* bundle up the put_byte(s, *buf++) calls */
	memcpy(&s->pending_buf[s->pending], buf, len);
	s->pending += len;
}
gpl-2.0
narantech/linux-pc64
drivers/parport/daisy.c
13743
12562
/* * IEEE 1284.3 Parallel port daisy chain and multiplexor code * * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * ??-12-1998: Initial implementation. * 31-01-1999: Make port-cloning transparent. * 13-02-1999: Move DeviceID technique from parport_probe. * 13-03-1999: Get DeviceID from non-IEEE 1284.3 devices too. * 22-02-2000: Count devices that are actually detected. * * Any part of this program may be used in documents licensed under * the GNU Free Documentation License, Version 1.1 or any later version * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/parport.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/sched.h> #include <asm/current.h> #include <asm/uaccess.h> #undef DEBUG #ifdef DEBUG #define DPRINTK(stuff...) printk(stuff) #else #define DPRINTK(stuff...) #endif static struct daisydev { struct daisydev *next; struct parport *port; int daisy; int devnum; } *topology = NULL; static DEFINE_SPINLOCK(topology_lock); static int numdevs = 0; /* Forward-declaration of lower-level functions. */ static int mux_present(struct parport *port); static int num_mux_ports(struct parport *port); static int select_port(struct parport *port); static int assign_addrs(struct parport *port); /* Add a device to the discovered topology. 
 */
static void add_dev(int devnum, struct parport *port, int daisy)
{
	struct daisydev *newdev, **p;
	newdev = kmalloc(sizeof(struct daisydev), GFP_KERNEL);
	if (newdev) {
		newdev->port = port;
		newdev->daisy = daisy;
		newdev->devnum = devnum;
		spin_lock(&topology_lock);
		/* Insert sorted by devnum so parport_open's scan can stop early. */
		for (p = &topology; *p && (*p)->devnum<devnum; p = &(*p)->next)
			;
		newdev->next = *p;
		*p = newdev;
		spin_unlock(&topology_lock);
	}
}

/* Clone a parport (actually, make an alias).  The alias shares the
 * physical resources of @real and represents mux port @muxport. */
static struct parport *clone_parport(struct parport *real, int muxport)
{
	struct parport *extra = parport_register_port(real->base,
						       real->irq,
						       real->dma,
						       real->ops);
	if (extra) {
		extra->portnum = real->portnum;
		extra->physport = real;
		extra->muxport = muxport;
		real->slaves[muxport-1] = extra;
	}

	return extra;
}

/* Discover the IEEE1284.3 topology on a port -- muxes and daisy chains.
 * Return value is number of devices actually detected. */
int parport_daisy_init(struct parport *port)
{
	int detected = 0;
	char *deviceid;
	static const char *th[] = { /*0*/"th", "st", "nd", "rd", "th" };
	int num_ports;
	int i;
	int last_try = 0;

again:
	/* Because this is called before any other devices exist,
	 * we don't have to claim exclusive access.  */

	/* If mux present on normal port, need to create new
	 * parports for each extra port. */
	if (port->muxport < 0 && mux_present(port) &&
	    /* don't be fooled: a mux must have 2 or 4 ports. */
	    ((num_ports = num_mux_ports(port)) == 2 || num_ports == 4)) {
		/* Leave original as port zero. */
		port->muxport = 0;
		printk(KERN_INFO
			"%s: 1st (default) port of %d-way multiplexor\n",
			port->name, num_ports);
		for (i = 1; i < num_ports; i++) {
			/* Clone the port. */
			struct parport *extra = clone_parport(port, i);
			if (!extra) {
				/* Registration failed: retry unless interrupted. */
				if (signal_pending(current))
					break;

				schedule();
				continue;
			}

			printk(KERN_INFO
				"%s: %d%s port of %d-way multiplexor on %s\n",
				extra->name, i + 1, th[i + 1], num_ports,
				port->name);

			/* Analyse that port too.  We won't recurse
			   forever because of the 'port->muxport < 0'
			   test above. */
			parport_daisy_init(extra);
		}
	}

	if (port->muxport >= 0)
		select_port(port);

	parport_daisy_deselect_all(port);
	detected += assign_addrs(port);

	/* Count the potential legacy device at the end. */
	add_dev(numdevs++, port, -1);

	/* Find out the legacy device's IEEE 1284 device ID. */
	deviceid = kmalloc(1024, GFP_KERNEL);
	if (deviceid) {
		if (parport_device_id(numdevs - 1, deviceid, 1024) > 2)
			detected++;

		kfree(deviceid);
	}

	if (!detected && !last_try) {
		/* No devices were detected.  Perhaps they are in some
		   funny state; let's try to reset them and see if they
		   wake up. */
		parport_daisy_fini(port);
		parport_write_control(port, PARPORT_CONTROL_SELECT);
		udelay(50);
		parport_write_control(port,
				       PARPORT_CONTROL_SELECT |
				       PARPORT_CONTROL_INIT);
		udelay(50);
		last_try = 1;
		goto again;
	}

	return detected;
}

/* Forget about devices on a physical port. */
void parport_daisy_fini(struct parport *port)
{
	struct daisydev **p;

	spin_lock(&topology_lock);
	p = &topology;
	while (*p) {
		struct daisydev *dev = *p;
		if (dev->port != port) {
			p = &dev->next;
			continue;
		}
		*p = dev->next;
		kfree(dev);
	}

	/* Gaps in the numbering could be handled better.  How should
	   someone enumerate through all IEEE1284.3 devices in the
	   topology?. */
	if (!topology) numdevs = 0;
	spin_unlock(&topology_lock);
	return;
}

/**
 *	parport_open - find a device by canonical device number
 *	@devnum: canonical device number
 *	@name: name to associate with the device
 *
 *	This function is similar to parport_register_device(), except
 *	that it locates a device by its number rather than by the port
 *	it is attached to.
 *
 *	All parameters except for @devnum are the same as for
 *	parport_register_device().  The return value is the same as
 *	for parport_register_device().
**/ struct pardevice *parport_open(int devnum, const char *name) { struct daisydev *p = topology; struct parport *port; struct pardevice *dev; int daisy; spin_lock(&topology_lock); while (p && p->devnum != devnum) p = p->next; if (!p) { spin_unlock(&topology_lock); return NULL; } daisy = p->daisy; port = parport_get_port(p->port); spin_unlock(&topology_lock); dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL); parport_put_port(port); if (!dev) return NULL; dev->daisy = daisy; /* Check that there really is a device to select. */ if (daisy >= 0) { int selected; parport_claim_or_block(dev); selected = port->daisy; parport_release(dev); if (selected != daisy) { /* No corresponding device. */ parport_unregister_device(dev); return NULL; } } return dev; } /** * parport_close - close a device opened with parport_open() * @dev: device to close * * This is to parport_open() as parport_unregister_device() is to * parport_register_device(). **/ void parport_close(struct pardevice *dev) { parport_unregister_device(dev); } /* Send a daisy-chain-style CPP command packet. 
 */
static int cpp_daisy(struct parport *port, int cmd)
{
	unsigned char s;

	/* CPP preamble: aa 55 00 ff on the data lines, 2us apart. */
	parport_data_forward(port);
	parport_write_data(port, 0xaa); udelay(2);
	parport_write_data(port, 0x55); udelay(2);
	parport_write_data(port, 0x00); udelay(2);
	parport_write_data(port, 0xff); udelay(2);
	s = parport_read_status(port) & (PARPORT_STATUS_BUSY
					  | PARPORT_STATUS_PAPEROUT
					  | PARPORT_STATUS_SELECT
					  | PARPORT_STATUS_ERROR);
	if (s != (PARPORT_STATUS_BUSY
		   | PARPORT_STATUS_PAPEROUT
		   | PARPORT_STATUS_SELECT
		   | PARPORT_STATUS_ERROR)) {
		DPRINTK(KERN_DEBUG "%s: cpp_daisy: aa5500ff(%02x)\n",
			 port->name, s);
		return -ENXIO;
	}

	parport_write_data(port, 0x87); udelay(2);
	s = parport_read_status(port) & (PARPORT_STATUS_BUSY
					  | PARPORT_STATUS_PAPEROUT
					  | PARPORT_STATUS_SELECT
					  | PARPORT_STATUS_ERROR);
	if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
		DPRINTK(KERN_DEBUG "%s: cpp_daisy: aa5500ff87(%02x)\n",
			 port->name, s);
		return -ENXIO;
	}

	/* 78 <cmd>, strobed; the status byte read back is the result. */
	parport_write_data(port, 0x78); udelay(2);
	parport_write_data(port, cmd); udelay(2);
	parport_frob_control(port,
			      PARPORT_CONTROL_STROBE,
			      PARPORT_CONTROL_STROBE);
	udelay(1);
	s = parport_read_status(port);
	parport_frob_control(port, PARPORT_CONTROL_STROBE, 0);
	udelay(1);
	parport_write_data(port, 0xff); udelay(2);

	return s;
}

/* Send a mux-style CPP command packet. */
static int cpp_mux(struct parport *port, int cmd)
{
	unsigned char s;
	int rc;

	/* Mux preamble: aa 55 f0 0f 52 ad, then the command byte. */
	parport_data_forward(port);
	parport_write_data(port, 0xaa); udelay(2);
	parport_write_data(port, 0x55); udelay(2);
	parport_write_data(port, 0xf0); udelay(2);
	parport_write_data(port, 0x0f); udelay(2);
	parport_write_data(port, 0x52); udelay(2);
	parport_write_data(port, 0xad); udelay(2);
	parport_write_data(port, cmd); udelay(2);

	s = parport_read_status(port);
	if (!(s & PARPORT_STATUS_ACK)) {
		DPRINTK(KERN_DEBUG "%s: cpp_mux: aa55f00f52ad%02x(%02x)\n",
			 port->name, cmd, s);
		return -EIO;
	}

	/* Decode the 4-bit reply from the status lines (BUSY and ERROR
	 * are active-low). */
	rc = (((s & PARPORT_STATUS_SELECT   ? 1 : 0) << 0) |
	      ((s & PARPORT_STATUS_PAPEROUT ? 1 : 0) << 1) |
	      ((s & PARPORT_STATUS_BUSY     ? 0 : 1) << 2) |
	      ((s & PARPORT_STATUS_ERROR    ? 0 : 1) << 3));

	return rc;
}

void parport_daisy_deselect_all(struct parport *port)
{
	cpp_daisy(port, 0x30);
}

int parport_daisy_select(struct parport *port, int daisy, int mode)
{
	switch (mode)
	{
		// For these modes we should switch to EPP mode:
		case IEEE1284_MODE_EPP:
		case IEEE1284_MODE_EPPSL:
		case IEEE1284_MODE_EPPSWE:
			return !(cpp_daisy(port, 0x20 + daisy) &
				  PARPORT_STATUS_ERROR);

		// For these modes we should switch to ECP mode:
		case IEEE1284_MODE_ECP:
		case IEEE1284_MODE_ECPRLE:
		case IEEE1284_MODE_ECPSWE:
			return !(cpp_daisy(port, 0xd0 + daisy) &
				  PARPORT_STATUS_ERROR);

		// Nothing was told for BECP in Daisy chain specification.
		// May be it's wise to use ECP?
		case IEEE1284_MODE_BECP:
		// Others use compat mode
		case IEEE1284_MODE_NIBBLE:
		case IEEE1284_MODE_BYTE:
		case IEEE1284_MODE_COMPAT:
		default:
			return !(cpp_daisy(port, 0xe0 + daisy) &
				  PARPORT_STATUS_ERROR);
	}
}

static int mux_present(struct parport *port)
{
	return cpp_mux(port, 0x51) == 3;
}

static int num_mux_ports(struct parport *port)
{
	return cpp_mux(port, 0x58);
}

static int select_port(struct parport *port)
{
	int muxport = port->muxport;
	return cpp_mux(port, 0x60 + muxport) == muxport;
}

/* Assign daisy-chain addresses to devices on @port and register them
 * in the topology.  Returns the number of devices found. */
static int assign_addrs(struct parport *port)
{
	unsigned char s;
	unsigned char daisy;
	int thisdev = numdevs;
	int detected;
	char *deviceid;

	parport_data_forward(port);
	parport_write_data(port, 0xaa); udelay(2);
	parport_write_data(port, 0x55); udelay(2);
	parport_write_data(port, 0x00); udelay(2);
	parport_write_data(port, 0xff); udelay(2);
	s = parport_read_status(port) & (PARPORT_STATUS_BUSY
					  | PARPORT_STATUS_PAPEROUT
					  | PARPORT_STATUS_SELECT
					  | PARPORT_STATUS_ERROR);
	if (s != (PARPORT_STATUS_BUSY
		   | PARPORT_STATUS_PAPEROUT
		   | PARPORT_STATUS_SELECT
		   | PARPORT_STATUS_ERROR)) {
		DPRINTK(KERN_DEBUG "%s: assign_addrs: aa5500ff(%02x)\n",
			 port->name, s);
		return 0;
	}

	parport_write_data(port, 0x87); udelay(2);
	s = parport_read_status(port) & (PARPORT_STATUS_BUSY
					  | PARPORT_STATUS_PAPEROUT
					  | PARPORT_STATUS_SELECT
					  | PARPORT_STATUS_ERROR);
	if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
		DPRINTK(KERN_DEBUG "%s: assign_addrs: aa5500ff87(%02x)\n",
			 port->name, s);
		return 0;
	}

	parport_write_data(port, 0x78); udelay(2);
	s = parport_read_status(port);

	/* Hand out addresses 0..3 while the chain signals more devices. */
	for (daisy = 0;
	     (s & (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT))
		     == (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT)
		     && daisy < 4;
	     ++daisy) {
		parport_write_data(port, daisy);
		udelay(2);
		parport_frob_control(port,
				      PARPORT_CONTROL_STROBE,
				      PARPORT_CONTROL_STROBE);
		udelay(1);
		parport_frob_control(port, PARPORT_CONTROL_STROBE, 0);
		udelay(1);

		add_dev(numdevs++, port, daisy);

		/* See if this device thought it was the last in the
		 * chain. */
		if (!(s & PARPORT_STATUS_BUSY))
			break;

		/* We are seeing pass through status now. We see
		   last_dev from next device or if last_dev does not
		   work status lines from some non-daisy chain
		   device. */
		s = parport_read_status(port);
	}

	parport_write_data(port, 0xff); udelay(2);
	detected = numdevs - thisdev;
	DPRINTK(KERN_DEBUG "%s: Found %d daisy-chained devices\n", port->name,
		 detected);

	/* Ask the new devices to introduce themselves. */
	deviceid = kmalloc(1024, GFP_KERNEL);
	if (!deviceid) return 0;

	for (daisy = 0; thisdev < numdevs; thisdev++, daisy++)
		parport_device_id(thisdev, deviceid, 1024);

	kfree(deviceid);
	return detected;
}
gpl-2.0
nadanomics/linux
kernel/irq/msi.c
176
9126
/* * linux/kernel/irq/msi.c * * Copyright (C) 2014 Intel Corp. * Author: Jiang Liu <jiang.liu@linux.intel.com> * * This file is licensed under GPLv2. * * This file contains common code to support Message Signalled Interrupt for * PCI compatible and non PCI compatible devices. */ #include <linux/types.h> #include <linux/device.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/msi.h> /* Temparory solution for building, will be removed later */ #include <linux/pci.h> struct msi_desc *alloc_msi_entry(struct device *dev) { struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); if (!desc) return NULL; INIT_LIST_HEAD(&desc->list); desc->dev = dev; return desc; } void free_msi_entry(struct msi_desc *entry) { kfree(entry); } void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { *msg = entry->msg; } void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { struct msi_desc *entry = irq_get_msi_desc(irq); __get_cached_msi_msg(entry, msg); } EXPORT_SYMBOL_GPL(get_cached_msi_msg); #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN static inline void irq_chip_write_msi_msg(struct irq_data *data, struct msi_msg *msg) { data->chip->irq_write_msi_msg(data, msg); } /** * msi_domain_set_affinity - Generic affinity setter function for MSI domains * @irq_data: The irq data associated to the interrupt * @mask: The affinity mask to set * @force: Flag to enforce setting (disable online checks) * * Intended to be used by MSI interrupt controllers which are * implemented with hierarchical domains. 
*/ int msi_domain_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { struct irq_data *parent = irq_data->parent_data; struct msi_msg msg; int ret; ret = parent->chip->irq_set_affinity(parent, mask, force); if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) { BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); irq_chip_write_msi_msg(irq_data, &msg); } return ret; } static void msi_domain_activate(struct irq_domain *domain, struct irq_data *irq_data) { struct msi_msg msg; BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); irq_chip_write_msi_msg(irq_data, &msg); } static void msi_domain_deactivate(struct irq_domain *domain, struct irq_data *irq_data) { struct msi_msg msg; memset(&msg, 0, sizeof(msg)); irq_chip_write_msi_msg(irq_data, &msg); } static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { struct msi_domain_info *info = domain->host_data; struct msi_domain_ops *ops = info->ops; irq_hw_number_t hwirq = ops->get_hwirq(info, arg); int i, ret; if (irq_find_mapping(domain, hwirq) > 0) return -EEXIST; ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); if (ret < 0) return ret; for (i = 0; i < nr_irqs; i++) { ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); if (ret < 0) { if (ops->msi_free) { for (i--; i > 0; i--) ops->msi_free(domain, info, virq + i); } irq_domain_free_irqs_top(domain, virq, nr_irqs); return ret; } } return 0; } static void msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct msi_domain_info *info = domain->host_data; int i; if (info->ops->msi_free) { for (i = 0; i < nr_irqs; i++) info->ops->msi_free(domain, info, virq + i); } irq_domain_free_irqs_top(domain, virq, nr_irqs); } static const struct irq_domain_ops msi_domain_ops = { .alloc = msi_domain_alloc, .free = msi_domain_free, .activate = msi_domain_activate, .deactivate = msi_domain_deactivate, }; #ifdef GENERIC_MSI_DOMAIN_OPS static irq_hw_number_t 
msi_domain_ops_get_hwirq(struct msi_domain_info *info, msi_alloc_info_t *arg) { return arg->hwirq; } static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *arg) { memset(arg, 0, sizeof(*arg)); return 0; } static void msi_domain_ops_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) { arg->desc = desc; } #else #define msi_domain_ops_get_hwirq NULL #define msi_domain_ops_prepare NULL #define msi_domain_ops_set_desc NULL #endif /* !GENERIC_MSI_DOMAIN_OPS */ static int msi_domain_ops_init(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, info->chip_data); if (info->handler && info->handler_name) { __irq_set_handler(virq, info->handler, 0, info->handler_name); if (info->handler_data) irq_set_handler_data(virq, info->handler_data); } return 0; } static int msi_domain_ops_check(struct irq_domain *domain, struct msi_domain_info *info, struct device *dev) { return 0; } static struct msi_domain_ops msi_domain_ops_default = { .get_hwirq = msi_domain_ops_get_hwirq, .msi_init = msi_domain_ops_init, .msi_check = msi_domain_ops_check, .msi_prepare = msi_domain_ops_prepare, .set_desc = msi_domain_ops_set_desc, }; static void msi_domain_update_dom_ops(struct msi_domain_info *info) { struct msi_domain_ops *ops = info->ops; if (ops == NULL) { info->ops = &msi_domain_ops_default; return; } if (ops->get_hwirq == NULL) ops->get_hwirq = msi_domain_ops_default.get_hwirq; if (ops->msi_init == NULL) ops->msi_init = msi_domain_ops_default.msi_init; if (ops->msi_check == NULL) ops->msi_check = msi_domain_ops_default.msi_check; if (ops->msi_prepare == NULL) ops->msi_prepare = msi_domain_ops_default.msi_prepare; if (ops->set_desc == NULL) ops->set_desc = msi_domain_ops_default.set_desc; } static void msi_domain_update_chip_ops(struct msi_domain_info *info) { struct irq_chip *chip = info->chip; 
BUG_ON(!chip); if (!chip->irq_mask) chip->irq_mask = pci_msi_mask_irq; if (!chip->irq_unmask) chip->irq_unmask = pci_msi_unmask_irq; if (!chip->irq_set_affinity) chip->irq_set_affinity = msi_domain_set_affinity; } /** * msi_create_irq_domain - Create a MSI interrupt domain * @of_node: Optional device-tree node of the interrupt controller * @info: MSI domain info * @parent: Parent irq domain */ struct irq_domain *msi_create_irq_domain(struct device_node *node, struct msi_domain_info *info, struct irq_domain *parent) { if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) msi_domain_update_dom_ops(info); if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) msi_domain_update_chip_ops(info); return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops, info); } /** * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain * @domain: The domain to allocate from * @dev: Pointer to device struct of the device for which the interrupts * are allocated * @nvec: The number of interrupts to allocate * * Returns 0 on success or an error code. 
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg;
	struct msi_desc *desc;
	int i, ret, virq = -1;

	/* Let the controller validate the device and prepare alloc info. */
	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);
		if (info->flags & MSI_FLAG_IDENTITY_MAP)
			virq = (int)ops->get_hwirq(info, &arg);
		else
			virq = -1;

		virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used,
					       dev_to_node(dev), &arg, false);
		if (virq < 0) {
			ret = -ENOSPC;
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		for (i = 0; i < desc->nvec_used; i++)
			irq_set_msi_desc_off(virq, i, desc);
	}

	if (ops->msi_finish)
		ops->msi_finish(&arg, 0);

	for_each_msi_entry(desc, dev) {
		if (desc->nvec_used == 1)
			dev_dbg(dev, "irq %d for MSI\n", virq);
		else
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
	}

	return 0;
}

/**
 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_desc *desc;

	for_each_msi_entry(desc, dev) {
		/*
		 * We might have failed to allocate an MSI early
		 * enough that there is no IRQ associated to this
		 * entry. If that's the case, don't do anything.
		 */
		if (desc->irq) {
			irq_domain_free_irqs(desc->irq, desc->nvec_used);
			desc->irq = 0;
		}
	}
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Returns the pointer to the msi_domain_info stored in
 * @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
gpl-2.0
zihongli/linux-3.6.0-MIAT
drivers/input/gameport/fm801-gp.c
176
4013
/*
 * FM801 gameport driver for Linux
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gameport.h>

#define PCI_VENDOR_ID_FORTEMEDIA	0x1319
#define PCI_DEVICE_ID_FM801_GP	0x0802

#define HAVE_COOKED

/* Per-device state: the registered gameport and its claimed I/O region. */
struct fm801_gp {
	struct gameport *gameport;
	struct resource *res_port;
};

#ifdef HAVE_COOKED
/* Read axes/buttons directly from the FM801 registers ("cooked" mode).
 * Each 16-bit register packs two button bits (inverted, top bits) and a
 * 13-bit axis value; 0xffff reads as "no axis present" (-1). */
static int fm801_gp_cooked_read(struct gameport *gameport, int *axes, int *buttons)
{
	unsigned short w;

	w = inw(gameport->io + 2);
	*buttons = (~w >> 14) & 0x03;
	axes[0] = (w == 0xffff) ? -1 : ((w & 0x1fff) << 5);
	w = inw(gameport->io + 4);
	axes[1] = (w == 0xffff) ? -1 : ((w & 0x1fff) << 5);
	w = inw(gameport->io + 6);
	*buttons |= ((~w >> 14) & 0x03) << 2;
	axes[2] = (w == 0xffff) ? -1 : ((w & 0x1fff) << 5);
	w = inw(gameport->io + 8);
	axes[3] = (w == 0xffff) ? -1 : ((w & 0x1fff) << 5);
	outw(0xff, gameport->io); /* reset */

	return 0;
}
#endif

/* Accept cooked (if compiled in) and raw modes; reject anything else. */
static int fm801_gp_open(struct gameport *gameport, int mode)
{
	switch (mode) {
#ifdef HAVE_COOKED
	case GAMEPORT_MODE_COOKED:
		return 0;
#endif
	case GAMEPORT_MODE_RAW:
		return 0;
	default:
		return -1;
	}
	return 0;
}

static int __devinit fm801_gp_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct fm801_gp *gp;
	struct gameport *port;
	int error;

	gp = kzalloc(sizeof(struct fm801_gp), GFP_KERNEL);
	port = gameport_allocate_port();
	if (!gp || !port) {
		printk(KERN_ERR "fm801-gp: Memory allocation failed\n");
		error = -ENOMEM;
		goto err_out_free;
	}

	error = pci_enable_device(pci);
	if (error)
		goto err_out_free;

	port->open = fm801_gp_open;
#ifdef HAVE_COOKED
	port->cooked_read = fm801_gp_cooked_read;
#endif
	gameport_set_name(port, "FM801");
	gameport_set_phys(port, "pci%s/gameport0", pci_name(pci));
	port->dev.parent = &pci->dev;
	port->io = pci_resource_start(pci, 0);

	gp->gameport = port;
	gp->res_port = request_region(port->io, 0x10, "FM801 GP");
	if (!gp->res_port) {
		printk(KERN_DEBUG "fm801-gp: unable to grab region 0x%x-0x%x\n",
			port->io, port->io + 0x0f);
		error = -EBUSY;
		goto err_out_disable_dev;
	}

	pci_set_drvdata(pci, gp);

	outb(0x60, port->io + 0x0d); /* enable joystick 1 and 2 */

	gameport_register_port(port);

	return 0;

 err_out_disable_dev:
	pci_disable_device(pci);
 err_out_free:
	gameport_free_port(port);
	kfree(gp);
	return error;
}

static void __devexit fm801_gp_remove(struct pci_dev *pci)
{
	struct fm801_gp *gp = pci_get_drvdata(pci);

	gameport_unregister_port(gp->gameport);
	/* NOTE(review): release_resource() removes the region from the
	 * resource tree but does not free the struct resource that
	 * request_region() allocated — looks like a small leak; confirm
	 * against release_region()/release_and_free_resource(). */
	release_resource(gp->res_port);
	kfree(gp);

	pci_disable_device(pci);
}

static const struct pci_device_id fm801_gp_id_table[] = {
	{ PCI_VENDOR_ID_FORTEMEDIA, PCI_DEVICE_ID_FM801_GP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0  },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, fm801_gp_id_table);

static struct pci_driver fm801_gp_driver = {
	.name		= "FM801_gameport",
	.id_table	= fm801_gp_id_table,
	.probe		= fm801_gp_probe,
	.remove		= __devexit_p(fm801_gp_remove),
};

module_pci_driver(fm801_gp_driver);

MODULE_DESCRIPTION("FM801 gameport driver");
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_LICENSE("GPL");
gpl-2.0
Perferom/android_kernel_htc_msm7x27
drivers/staging/wlags49_h2/wl_sysfs.c
944
4034
/* * ex: sw=4 */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <net/sock.h> #include <linux/rtnetlink.h> #include <linux/wireless.h> #include <net/iw_handler.h> #include <linux/sysfs.h> #include <debug.h> #include <hcf.h> #include <hcfdef.h> #include <wl_if.h> #include <wl_internal.h> #include <wl_util.h> #include <wl_main.h> #include <wl_wext.h> #include <wl_priv.h> static inline int dev_isalive(const struct net_device *dev) { return dev->reg_state == NETREG_REGISTERED; } /* * empirically even if tallies are defined as 32 bits entities, only * high 16 bits are relevant; low half is always zero. It means tallies * are pretty much useless for traffic counting but at least give overview * about where error come from */ static ssize_t show_tallies(struct device *d, struct device_attribute *attr, char *buf) { struct net_device *dev = to_net_dev(d); struct wl_private *lp = wl_priv(dev); unsigned long flags; CFG_HERMES_TALLIES_STRCT tallies; ssize_t ret = -EINVAL; read_lock(&dev_base_lock); if (dev_isalive(dev)) { wl_lock(lp, &flags); ret = wl_get_tallies(lp, &tallies); if (ret == 0) { wl_unlock(lp, &flags); ret = snprintf(buf, PAGE_SIZE, "TxUnicastFrames: %u\n" "TxMulticastFrames: %u\n" "TxFragments: %u\n" "TxUnicastOctets: %u\n" "TxMulticastOctets: %u\n" "TxDeferredTransmissions: %u\n" "TxSingleRetryFrames: %u\n" "TxMultipleRetryFrames: %u\n" "TxRetryLimitExceeded: %u\n" "TxDiscards: %u\n" "RxUnicastFrames: %u\n" "RxMulticastFrames: %u\n" "RxFragments: %u\n" "RxUnicastOctets: %u\n" "RxMulticastOctets: %u\n" "RxFCSErrors: %u\n" "RxDiscardsNoBuffer: %u\n" "TxDiscardsWrongSA: %u\n" "RxWEPUndecryptable: %u\n" "RxMsgInMsgFragments: %u\n" "RxMsgInBadMsgFragments: %u\n" "RxDiscardsWEPICVError: %u\n" "RxDiscardsWEPExcluded: %u\n" , (unsigned int)tallies.TxUnicastFrames, (unsigned int)tallies.TxMulticastFrames, (unsigned int)tallies.TxFragments, (unsigned int)tallies.TxUnicastOctets, (unsigned int)tallies.TxMulticastOctets, (unsigned 
int)tallies.TxDeferredTransmissions, (unsigned int)tallies.TxSingleRetryFrames, (unsigned int)tallies.TxMultipleRetryFrames, (unsigned int)tallies.TxRetryLimitExceeded, (unsigned int)tallies.TxDiscards, (unsigned int)tallies.RxUnicastFrames, (unsigned int)tallies.RxMulticastFrames, (unsigned int)tallies.RxFragments, (unsigned int)tallies.RxUnicastOctets, (unsigned int)tallies.RxMulticastOctets, (unsigned int)tallies.RxFCSErrors, (unsigned int)tallies.RxDiscardsNoBuffer, (unsigned int)tallies.TxDiscardsWrongSA, (unsigned int)tallies.RxWEPUndecryptable, (unsigned int)tallies.RxMsgInMsgFragments, (unsigned int)tallies.RxMsgInBadMsgFragments, (unsigned int)tallies.RxDiscardsWEPICVError, (unsigned int)tallies.RxDiscardsWEPExcluded); } else { wl_unlock( lp, &flags ); } } read_unlock(&dev_base_lock); return ret; } static DEVICE_ATTR(tallies, S_IRUGO, show_tallies, NULL); static struct attribute *wlags_attrs[] = { &dev_attr_tallies.attr, NULL }; static struct attribute_group wlags_group = { .name = "wlags", .attrs = wlags_attrs, }; void register_wlags_sysfs(struct net_device *net) { struct device *dev = &(net->dev); struct wl_private *lp = wl_priv(net); lp->sysfsCreated = sysfs_create_group(&dev->kobj, &wlags_group); } void unregister_wlags_sysfs(struct net_device *net) { struct device *dev = &(net->dev); struct wl_private *lp = wl_priv(net); if (lp->sysfsCreated) sysfs_remove_group(&dev->kobj, &wlags_group); }
gpl-2.0
milaq/android_kernel_htc_kovsky
drivers/net/usb/catc.c
1200
23977
/* * Copyright (c) 2001 Vojtech Pavlik * * CATC EL1210A NetMate USB Ethernet driver * * Sponsored by SuSE * * Based on the work of * Donald Becker * * Old chipset support added by Simon Evans <spse@secret.org.uk> 2002 * - adds support for Belkin F5U011 */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <asm/uaccess.h> #undef DEBUG #include <linux/usb.h> /* * Version information. */ #define DRIVER_VERSION "v2.8" #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>" #define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver" #define SHORT_DRIVER_DESC "EL1210A NetMate USB Ethernet" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static const char driver_name[] = "catc"; /* * Some defines. 
*/ #define STATS_UPDATE (HZ) /* Time between stats updates */ #define TX_TIMEOUT (5*HZ) /* Max time the queue can be stopped */ #define PKT_SZ 1536 /* Max Ethernet packet size */ #define RX_MAX_BURST 15 /* Max packets per rx buffer (> 0, < 16) */ #define TX_MAX_BURST 15 /* Max full sized packets per tx buffer (> 0) */ #define CTRL_QUEUE 16 /* Max control requests in flight (power of two) */ #define RX_PKT_SZ 1600 /* Max size of receive packet for F5U011 */ /* * Control requests. */ enum control_requests { ReadMem = 0xf1, GetMac = 0xf2, Reset = 0xf4, SetMac = 0xf5, SetRxMode = 0xf5, /* F5U011 only */ WriteROM = 0xf8, SetReg = 0xfa, GetReg = 0xfb, WriteMem = 0xfc, ReadROM = 0xfd, }; /* * Registers. */ enum register_offsets { TxBufCount = 0x20, RxBufCount = 0x21, OpModes = 0x22, TxQed = 0x23, RxQed = 0x24, MaxBurst = 0x25, RxUnit = 0x60, EthStatus = 0x61, StationAddr0 = 0x67, EthStats = 0x69, LEDCtrl = 0x81, }; enum eth_stats { TxSingleColl = 0x00, TxMultiColl = 0x02, TxExcessColl = 0x04, RxFramErr = 0x06, }; enum op_mode_bits { Op3MemWaits = 0x03, OpLenInclude = 0x08, OpRxMerge = 0x10, OpTxMerge = 0x20, OpWin95bugfix = 0x40, OpLoopback = 0x80, }; enum rx_filter_bits { RxEnable = 0x01, RxPolarity = 0x02, RxForceOK = 0x04, RxMultiCast = 0x08, RxPromisc = 0x10, AltRxPromisc = 0x20, /* F5U011 uses different bit */ }; enum led_values { LEDFast = 0x01, LEDSlow = 0x02, LEDFlash = 0x03, LEDPulse = 0x04, LEDLink = 0x08, }; enum link_status { LinkNoChange = 0, LinkGood = 1, LinkBad = 2 }; /* * The catc struct. 
*/ #define CTRL_RUNNING 0 #define RX_RUNNING 1 #define TX_RUNNING 2 struct catc { struct net_device *netdev; struct usb_device *usbdev; unsigned long flags; unsigned int tx_ptr, tx_idx; unsigned int ctrl_head, ctrl_tail; spinlock_t tx_lock, ctrl_lock; u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)]; u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)]; u8 irq_buf[2]; u8 ctrl_buf[64]; struct usb_ctrlrequest ctrl_dr; struct timer_list timer; u8 stats_buf[8]; u16 stats_vals[4]; unsigned long last_stats; u8 multicast[64]; struct ctrl_queue { u8 dir; u8 request; u16 value; u16 index; void *buf; int len; void (*callback)(struct catc *catc, struct ctrl_queue *q); } ctrl_queue[CTRL_QUEUE]; struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb; u8 is_f5u011; /* Set if device is an F5U011 */ u8 rxmode[2]; /* Used for F5U011 */ atomic_t recq_sz; /* Used for F5U011 - counter of waiting rx packets */ }; /* * Useful macros. */ #define catc_get_mac(catc, mac) catc_ctrl_msg(catc, USB_DIR_IN, GetMac, 0, 0, mac, 6) #define catc_reset(catc) catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0) #define catc_set_reg(catc, reg, val) catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0) #define catc_get_reg(catc, reg, buf) catc_ctrl_msg(catc, USB_DIR_IN, GetReg, 0, reg, buf, 1) #define catc_write_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size) #define catc_read_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_IN, ReadMem, 0, addr, buf, size) #define f5u011_rxmode(catc, rxmode) catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2) #define f5u011_rxmode_async(catc, rxmode) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL) #define f5u011_mchash_async(catc, hash) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL) #define catc_set_reg_async(catc, reg, val) catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL) #define catc_get_reg_async(catc, reg, cb) catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 
1, cb) #define catc_write_mem_async(catc, addr, buf, size) catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL) /* * Receive routines. */ static void catc_rx_done(struct urb *urb) { struct catc *catc = urb->context; u8 *pkt_start = urb->transfer_buffer; struct sk_buff *skb; int pkt_len, pkt_offset = 0; int status = urb->status; if (!catc->is_f5u011) { clear_bit(RX_RUNNING, &catc->flags); pkt_offset = 2; } if (status) { dbg("rx_done, status %d, length %d", status, urb->actual_length); return; } do { if(!catc->is_f5u011) { pkt_len = le16_to_cpup((__le16*)pkt_start); if (pkt_len > urb->actual_length) { catc->netdev->stats.rx_length_errors++; catc->netdev->stats.rx_errors++; break; } } else { pkt_len = urb->actual_length; } if (!(skb = dev_alloc_skb(pkt_len))) return; skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, catc->netdev); netif_rx(skb); catc->netdev->stats.rx_packets++; catc->netdev->stats.rx_bytes += pkt_len; /* F5U011 only does one packet per RX */ if (catc->is_f5u011) break; pkt_start += (((pkt_len + 1) >> 6) + 1) << 6; } while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length); if (catc->is_f5u011) { if (atomic_read(&catc->recq_sz)) { int state; atomic_dec(&catc->recq_sz); dbg("getting extra packet"); urb->dev = catc->usbdev; if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { dbg("submit(rx_urb) status %d", state); } } else { clear_bit(RX_RUNNING, &catc->flags); } } } static void catc_irq_done(struct urb *urb) { struct catc *catc = urb->context; u8 *data = urb->transfer_buffer; int status = urb->status; unsigned int hasdata = 0, linksts = LinkNoChange; int res; if (!catc->is_f5u011) { hasdata = data[1] & 0x80; if (data[1] & 0x40) linksts = LinkGood; else if (data[1] & 0x20) linksts = LinkBad; } else { hasdata = (unsigned int)(be16_to_cpup((__be16*)data) & 0x0fff); if (data[0] == 0x90) linksts = LinkGood; else if (data[0] == 0xA0) linksts = LinkBad; } 
switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ dbg("irq_done, status %d, data %02x %02x.", status, data[0], data[1]); goto resubmit; } if (linksts == LinkGood) { netif_carrier_on(catc->netdev); dbg("link ok"); } if (linksts == LinkBad) { netif_carrier_off(catc->netdev); dbg("link bad"); } if (hasdata) { if (test_and_set_bit(RX_RUNNING, &catc->flags)) { if (catc->is_f5u011) atomic_inc(&catc->recq_sz); } else { catc->rx_urb->dev = catc->usbdev; if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) { err("submit(rx_urb) status %d", res); } } } resubmit: res = usb_submit_urb (urb, GFP_ATOMIC); if (res) err ("can't resubmit intr, %s-%s, status %d", catc->usbdev->bus->bus_name, catc->usbdev->devpath, res); } /* * Transmit routines. */ static int catc_tx_run(struct catc *catc) { int status; if (catc->is_f5u011) catc->tx_ptr = (catc->tx_ptr + 63) & ~63; catc->tx_urb->transfer_buffer_length = catc->tx_ptr; catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx]; catc->tx_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0) err("submit(tx_urb), status %d", status); catc->tx_idx = !catc->tx_idx; catc->tx_ptr = 0; catc->netdev->trans_start = jiffies; return status; } static void catc_tx_done(struct urb *urb) { struct catc *catc = urb->context; unsigned long flags; int r, status = urb->status; if (status == -ECONNRESET) { dbg("Tx Reset."); urb->status = 0; catc->netdev->trans_start = jiffies; catc->netdev->stats.tx_errors++; clear_bit(TX_RUNNING, &catc->flags); netif_wake_queue(catc->netdev); return; } if (status) { dbg("tx_done, status %d, length %d", status, urb->actual_length); return; } spin_lock_irqsave(&catc->tx_lock, flags); if (catc->tx_ptr) { r = catc_tx_run(catc); if (unlikely(r < 0)) clear_bit(TX_RUNNING, &catc->flags); } else { clear_bit(TX_RUNNING, &catc->flags); } netif_wake_queue(catc->netdev); 
spin_unlock_irqrestore(&catc->tx_lock, flags); } static netdev_tx_t catc_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); unsigned long flags; int r = 0; char *tx_buf; spin_lock_irqsave(&catc->tx_lock, flags); catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; if (catc->is_f5u011) *(__be16 *)tx_buf = cpu_to_be16(skb->len); else *(__le16 *)tx_buf = cpu_to_le16(skb->len); skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); catc->tx_ptr += skb->len + 2; if (!test_and_set_bit(TX_RUNNING, &catc->flags)) { r = catc_tx_run(catc); if (r < 0) clear_bit(TX_RUNNING, &catc->flags); } if ((catc->is_f5u011 && catc->tx_ptr) || (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2)))) netif_stop_queue(netdev); spin_unlock_irqrestore(&catc->tx_lock, flags); if (r >= 0) { catc->netdev->stats.tx_bytes += skb->len; catc->netdev->stats.tx_packets++; } dev_kfree_skb(skb); return NETDEV_TX_OK; } static void catc_tx_timeout(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); dev_warn(&netdev->dev, "Transmit timed out.\n"); usb_unlink_urb(catc->tx_urb); } /* * Control messages. */ static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len) { int retval = usb_control_msg(catc->usbdev, dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0), request, 0x40 | dir, value, index, buf, len, 1000); return retval < 0 ? retval : 0; } static void catc_ctrl_run(struct catc *catc) { struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail; struct usb_device *usbdev = catc->usbdev; struct urb *urb = catc->ctrl_urb; struct usb_ctrlrequest *dr = &catc->ctrl_dr; int status; dr->bRequest = q->request; dr->bRequestType = 0x40 | q->dir; dr->wValue = cpu_to_le16(q->value); dr->wIndex = cpu_to_le16(q->index); dr->wLength = cpu_to_le16(q->len); urb->pipe = q->dir ? 
usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0); urb->transfer_buffer_length = q->len; urb->transfer_buffer = catc->ctrl_buf; urb->setup_packet = (void *) dr; urb->dev = usbdev; if (!q->dir && q->buf && q->len) memcpy(catc->ctrl_buf, q->buf, q->len); if ((status = usb_submit_urb(catc->ctrl_urb, GFP_KERNEL))) err("submit(ctrl_urb) status %d", status); } static void catc_ctrl_done(struct urb *urb) { struct catc *catc = urb->context; struct ctrl_queue *q; unsigned long flags; int status = urb->status; if (status) dbg("ctrl_done, status %d, len %d.", status, urb->actual_length); spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_tail; if (q->dir) { if (q->buf && q->len) memcpy(q->buf, catc->ctrl_buf, q->len); else q->buf = catc->ctrl_buf; } if (q->callback) q->callback(catc, q); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head != catc->ctrl_tail) catc_ctrl_run(catc); else clear_bit(CTRL_RUNNING, &catc->flags); spin_unlock_irqrestore(&catc->ctrl_lock, flags); } static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q)) { struct ctrl_queue *q; int retval = 0; unsigned long flags; spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_head; q->dir = dir; q->request = request; q->value = value; q->index = index; q->buf = buf; q->len = len; q->callback = callback; catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head == catc->ctrl_tail) { err("ctrl queue full"); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); retval = -1; } if (!test_and_set_bit(CTRL_RUNNING, &catc->flags)) catc_ctrl_run(catc); spin_unlock_irqrestore(&catc->ctrl_lock, flags); return retval; } /* * Statistics. 
*/ static void catc_stats_done(struct catc *catc, struct ctrl_queue *q) { int index = q->index - EthStats; u16 data, last; catc->stats_buf[index] = *((char *)q->buf); if (index & 1) return; data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1]; last = catc->stats_vals[index >> 1]; switch (index) { case TxSingleColl: case TxMultiColl: catc->netdev->stats.collisions += data - last; break; case TxExcessColl: catc->netdev->stats.tx_aborted_errors += data - last; catc->netdev->stats.tx_errors += data - last; break; case RxFramErr: catc->netdev->stats.rx_frame_errors += data - last; catc->netdev->stats.rx_errors += data - last; break; } catc->stats_vals[index >> 1] = data; } static void catc_stats_timer(unsigned long data) { struct catc *catc = (void *) data; int i; for (i = 0; i < 8; i++) catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done); mod_timer(&catc->timer, jiffies + STATS_UPDATE); } /* * Receive modes. Broadcast, Multicast, Promisc. */ static void catc_multicast(unsigned char *addr, u8 *multicast) { u32 crc; crc = ether_crc_le(6, addr); multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } static void catc_set_multicast_list(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); struct netdev_hw_addr *ha; u8 broadcast[6]; u8 rx = RxEnable | RxPolarity | RxMultiCast; memset(broadcast, 0xff, 6); memset(catc->multicast, 0, 64); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); if (netdev->flags & IFF_PROMISC) { memset(catc->multicast, 0xff, 64); rx |= (!catc->is_f5u011) ? 
RxPromisc : AltRxPromisc; } if (netdev->flags & IFF_ALLMULTI) { memset(catc->multicast, 0xff, 64); } else { netdev_for_each_mc_addr(ha, netdev) { u32 crc = ether_crc_le(6, ha->addr); if (!catc->is_f5u011) { catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } else { catc->multicast[7-(crc >> 29)] |= 1 << ((crc >> 26) & 7); } } } if (!catc->is_f5u011) { catc_set_reg_async(catc, RxUnit, rx); catc_write_mem_async(catc, 0xfa80, catc->multicast, 64); } else { f5u011_mchash_async(catc, catc->multicast); if (catc->rxmode[0] != rx) { catc->rxmode[0] = rx; dbg("Setting RX mode to %2.2X %2.2X", catc->rxmode[0], catc->rxmode[1]); f5u011_rxmode_async(catc, catc->rxmode); } } } static void catc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct catc *catc = netdev_priv(dev); strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN); strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); usb_make_path (catc->usbdev, info->bus_info, sizeof info->bus_info); } static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct catc *catc = netdev_priv(dev); if (!catc->is_f5u011) return -EOPNOTSUPP; cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP; cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP; cmd->speed = SPEED_10; cmd->duplex = DUPLEX_HALF; cmd->port = PORT_TP; cmd->phy_address = 0; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = AUTONEG_DISABLE; cmd->maxtxpkt = 1; cmd->maxrxpkt = 1; return 0; } static const struct ethtool_ops ops = { .get_drvinfo = catc_get_drvinfo, .get_settings = catc_get_settings, .get_link = ethtool_op_get_link }; /* * Open, close. 
*/ static int catc_open(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); int status; catc->irq_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) { err("submit(irq_urb) status %d", status); return -1; } netif_start_queue(netdev); if (!catc->is_f5u011) mod_timer(&catc->timer, jiffies + STATS_UPDATE); return 0; } static int catc_stop(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); netif_stop_queue(netdev); if (!catc->is_f5u011) del_timer_sync(&catc->timer); usb_kill_urb(catc->rx_urb); usb_kill_urb(catc->tx_urb); usb_kill_urb(catc->irq_urb); usb_kill_urb(catc->ctrl_urb); return 0; } static const struct net_device_ops catc_netdev_ops = { .ndo_open = catc_open, .ndo_stop = catc_stop, .ndo_start_xmit = catc_start_xmit, .ndo_tx_timeout = catc_tx_timeout, .ndo_set_multicast_list = catc_set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /* * USB probe, disconnect. 
*/ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *netdev; struct catc *catc; u8 broadcast[6]; int i, pktsz; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { err("Can't set altsetting 1."); return -EIO; } netdev = alloc_etherdev(sizeof(struct catc)); if (!netdev) return -ENOMEM; catc = netdev_priv(netdev); netdev->netdev_ops = &catc_netdev_ops; netdev->watchdog_timeo = TX_TIMEOUT; SET_ETHTOOL_OPS(netdev, &ops); catc->usbdev = usbdev; catc->netdev = netdev; spin_lock_init(&catc->tx_lock); spin_lock_init(&catc->ctrl_lock); init_timer(&catc->timer); catc->timer.data = (long) catc; catc->timer.function = catc_stats_timer; catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if ((!catc->ctrl_urb) || (!catc->tx_urb) || (!catc->rx_urb) || (!catc->irq_urb)) { err("No free urbs available."); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); return -ENOMEM; } /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { dbg("Testing for f5u011"); catc->is_f5u011 = 1; atomic_set(&catc->recq_sz, 0); pktsz = RX_PKT_SZ; } else { pktsz = RX_MAX_BURST * (PKT_SZ + 2); } usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0), NULL, NULL, 0, catc_ctrl_done, catc); usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1), NULL, 0, catc_tx_done, catc); usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1), catc->rx_buf, pktsz, catc_rx_done, catc); usb_fill_int_urb(catc->irq_urb, usbdev, 
usb_rcvintpipe(usbdev, 2), catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { dbg("Checking memory size\n"); i = 0x12345678; catc_write_mem(catc, 0x7a80, &i, 4); i = 0x87654321; catc_write_mem(catc, 0xfa80, &i, 4); catc_read_mem(catc, 0x7a80, &i, 4); switch (i) { case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); dbg("64k Memory\n"); break; default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); dbg("32k Memory\n"); break; } dbg("Getting MAC from SEEROM."); catc_get_mac(catc, netdev->dev_addr); dbg("Setting MAC into registers."); for (i = 0; i < 6; i++) catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); dbg("Filling the multicast list."); memset(broadcast, 0xff, 6); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); catc_write_mem(catc, 0xfa80, catc->multicast, 64); dbg("Clearing error counters."); for (i = 0; i < 8; i++) catc_set_reg(catc, EthStats + i, 0); catc->last_stats = jiffies; dbg("Enabling."); catc_set_reg(catc, MaxBurst, RX_MAX_BURST); catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); catc_set_reg(catc, LEDCtrl, LEDLink); catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast); } else { dbg("Performing reset\n"); catc_reset(catc); catc_get_mac(catc, netdev->dev_addr); dbg("Setting RX Mode"); catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast; catc->rxmode[1] = 0; f5u011_rxmode(catc, catc->rxmode); } dbg("Init done."); printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", netdev->name, (catc->is_f5u011) ? 
"Belkin F5U011" : "CATC EL1210A NetMate", usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); usb_set_intfdata(intf, catc); SET_NETDEV_DEV(netdev, &intf->dev); if (register_netdev(netdev) != 0) { usb_set_intfdata(intf, NULL); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); return -EIO; } return 0; } static void catc_disconnect(struct usb_interface *intf) { struct catc *catc = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (catc) { unregister_netdev(catc->netdev); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(catc->netdev); } } /* * Module functions and tables. */ static struct usb_device_id catc_id_table [] = { { USB_DEVICE(0x0423, 0xa) }, /* CATC Netmate, Belkin F5U011 */ { USB_DEVICE(0x0423, 0xc) }, /* CATC Netmate II, Belkin F5U111 */ { USB_DEVICE(0x08d1, 0x1) }, /* smartBridges smartNIC */ { } }; MODULE_DEVICE_TABLE(usb, catc_id_table); static struct usb_driver catc_driver = { .name = driver_name, .probe = catc_probe, .disconnect = catc_disconnect, .id_table = catc_id_table, }; static int __init catc_init(void) { int result = usb_register(&catc_driver); if (result == 0) printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return result; } static void __exit catc_exit(void) { usb_deregister(&catc_driver); } module_init(catc_init); module_exit(catc_exit);
gpl-2.0
goodwin/android_kernel_lge_hammerhead-1
drivers/gpu/drm/i915/intel_sdvo.c
2736
80067
/* * Copyright 2006 Dave Airlie <airlied@linux.ie> * Copyright © 2006-2007 Intel Corporation * Jesse Barnes <jesse.barnes@intel.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/export.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" #include "drm_edid.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_sdvo_regs.h" #define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) #define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) #define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) #define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ SDVO_TV_MASK) #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) #define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK)) static const char *tv_format_names[] = { "NTSC_M" , "NTSC_J" , "NTSC_443", "PAL_B" , "PAL_D" , "PAL_G" , "PAL_H" , "PAL_I" , "PAL_M" , "PAL_N" , "PAL_NC" , "PAL_60" , "SECAM_B" , "SECAM_D" , "SECAM_G" , "SECAM_K" , "SECAM_K1", "SECAM_L" , "SECAM_60" }; #define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) struct intel_sdvo { struct intel_encoder base; struct i2c_adapter *i2c; u8 slave_addr; struct i2c_adapter ddc; /* Register for the SDVO device: SDVOB or SDVOC */ int sdvo_reg; /* Active outputs controlled by this SDVO output */ uint16_t controlled_output; /* * Capabilities of the SDVO device returned by * i830_sdvo_get_capabilities() */ struct intel_sdvo_caps caps; /* Pixel clock limitations reported by the SDVO device, in kHz */ int pixel_clock_min, pixel_clock_max; /* * For multiple function SDVO device, * this is for current attached outputs. 
*/ uint16_t attached_output; /* * Hotplug activation bits for this device */ uint8_t hotplug_active[2]; /** * This is used to select the color range of RBG outputs in HDMI mode. * It is only valid when using TMDS encoding and 8 bit per color mode. */ uint32_t color_range; /** * This is set if we're going to treat the device as TV-out. * * While we have these nice friendly flags for output types that ought * to decide this for us, the S-Video output on our HDMI+S-Video card * shows up as RGB1 (VGA). */ bool is_tv; /* This is for current tv format name */ int tv_format_index; /** * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; bool has_hdmi_monitor; bool has_hdmi_audio; /** * This is set if we detect output of sdvo device as LVDS and * have a valid fixed mode to use with the panel. */ bool is_lvds; /** * This is sdvo fixed pannel mode pointer */ struct drm_display_mode *sdvo_lvds_fixed_mode; /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; /* Input timings for adjusted_mode */ struct intel_sdvo_dtd input_dtd; }; struct intel_sdvo_connector { struct intel_connector base; /* Mark the type of connector */ uint16_t output_flag; enum hdmi_force_audio force_audio; /* This contains all current supported TV format */ u8 tv_format_supported[TV_FORMAT_NUM]; int format_supported_num; struct drm_property *tv_format; /* add the property for the SDVO-TV */ struct drm_property *left; struct drm_property *right; struct drm_property *top; struct drm_property *bottom; struct drm_property *hpos; struct drm_property *vpos; struct drm_property *contrast; struct drm_property *saturation; struct drm_property *hue; struct drm_property *sharpness; struct drm_property *flicker_filter; struct drm_property *flicker_filter_adaptive; struct drm_property *flicker_filter_2d; struct drm_property *tv_chroma_filter; struct drm_property *tv_luma_filter; struct drm_property *dot_crawl; /* add the property for the SDVO-TV/LVDS */ struct drm_property *brightness; /* 
Add variable to record current setting for the above property */ u32 left_margin, right_margin, top_margin, bottom_margin; /* this is to get the range of margin.*/ u32 max_hscan, max_vscan; u32 max_hpos, cur_hpos; u32 max_vpos, cur_vpos; u32 cur_brightness, max_brightness; u32 cur_contrast, max_contrast; u32 cur_saturation, max_saturation; u32 cur_hue, max_hue; u32 cur_sharpness, max_sharpness; u32 cur_flicker_filter, max_flicker_filter; u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive; u32 cur_flicker_filter_2d, max_flicker_filter_2d; u32 cur_tv_chroma_filter, max_tv_chroma_filter; u32 cur_tv_luma_filter, max_tv_luma_filter; u32 cur_dot_crawl, max_dot_crawl; }; static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) { return container_of(encoder, struct intel_sdvo, base.base); } static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) { return container_of(intel_attached_encoder(connector), struct intel_sdvo, base); } static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) { return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base); } static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags); static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type); static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector); /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). 
*/ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 bval = val, cval = val; int i; if (intel_sdvo->sdvo_reg == PCH_SDVOB) { I915_WRITE(intel_sdvo->sdvo_reg, val); I915_READ(intel_sdvo->sdvo_reg); return; } if (intel_sdvo->sdvo_reg == SDVOB) { cval = I915_READ(SDVOC); } else { bval = I915_READ(SDVOB); } /* * Write the registers twice for luck. Sometimes, * writing them only once doesn't appear to 'stick'. * The BIOS does this too. Yay, magic */ for (i = 0; i < 2; i++) { I915_WRITE(SDVOB, bval); I915_READ(SDVOB); I915_WRITE(SDVOC, cval); I915_READ(SDVOC); } } static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) { struct i2c_msg msgs[] = { { .addr = intel_sdvo->slave_addr, .flags = 0, .len = 1, .buf = &addr, }, { .addr = intel_sdvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = ch, } }; int ret; if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2) return true; DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); return false; } #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} /** Mapping of command numbers to names, for debug output */ static const struct _sdvo_cmd_name { u8 cmd; const char *name; } sdvo_cmd_names[] = { SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), /* Add the op code for SDVO enhancements */ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), /* HDMI op code */ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), }; #define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) #define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC") static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { int i; DRM_DEBUG_KMS("%s: W: %02X ", SDVO_NAME(intel_sdvo), cmd); for (i = 0; i < args_len; i++) DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); for (; i < 8; i++) DRM_LOG_KMS(" "); for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { if (cmd == sdvo_cmd_names[i].cmd) { DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name); break; } } if (i == ARRAY_SIZE(sdvo_cmd_names)) DRM_LOG_KMS("(%02X)", cmd); DRM_LOG_KMS("\n"); } static const char *cmd_status_names[] = { "Power on", "Success", "Not supported", "Invalid arg", "Pending", "Target not specified", "Scaling not supported" }; static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { u8 buf[args_len*2 + 2], status; struct i2c_msg msgs[args_len + 3]; int i, ret; intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); for (i = 0; i < args_len; i++) { msgs[i].addr = intel_sdvo->slave_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2 *i; buf[2*i + 0] = 
				SDVO_I2C_ARG_0 - i;
		buf[2*i + 1] = ((u8*)args)[i];
	}
	msgs[i].addr = intel_sdvo->slave_addr;
	msgs[i].flags = 0;
	msgs[i].len = 2;
	msgs[i].buf = buf + 2*i;
	buf[2*i + 0] = SDVO_I2C_OPCODE;
	buf[2*i + 1] = cmd;

	/* the following two are to read the response */
	status = SDVO_I2C_CMD_STATUS;
	msgs[i+1].addr = intel_sdvo->slave_addr;
	msgs[i+1].flags = 0;
	msgs[i+1].len = 1;
	msgs[i+1].buf = &status;

	msgs[i+2].addr = intel_sdvo->slave_addr;
	msgs[i+2].flags = I2C_M_RD;
	msgs[i+2].len = 1;
	msgs[i+2].buf = &status;

	ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
	if (ret < 0) {
		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
		return false;
	}
	if (ret != i+3) {
		/* failure in I2C transfer */
		DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
		return false;
	}

	return true;
}

/* Poll the device status byte until no longer pending, then, on success,
 * read @response_len reply bytes into @response. */
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
				     void *response, int response_len)
{
	u8 retry = 5;
	u8 status;
	int i;

	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));

	/*
	 * The documentation states that all commands will be
	 * processed within 15µs, and that we need only poll
	 * the status byte a maximum of 3 times in order for the
	 * command to be complete.
	 *
	 * Check 5 times in case the hardware failed to read the docs.
	 */
	if (!intel_sdvo_read_byte(intel_sdvo,
				  SDVO_I2C_CMD_STATUS,
				  &status))
		goto log_fail;

	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
		udelay(15);
		if (!intel_sdvo_read_byte(intel_sdvo,
					  SDVO_I2C_CMD_STATUS,
					  &status))
			goto log_fail;
	}

	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
	else
		DRM_LOG_KMS("(??? %d)", status);

	if (status != SDVO_CMD_STATUS_SUCCESS)
		goto log_fail;

	/* Read the command response */
	for (i = 0; i < response_len; i++) {
		if (!intel_sdvo_read_byte(intel_sdvo,
					  SDVO_I2C_RETURN_0 + i,
					  &((u8 *)response)[i]))
			goto log_fail;
		DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
	}
	DRM_LOG_KMS("\n");
	return true;

log_fail:
	DRM_LOG_KMS("... failed\n");
	return false;
}

/* Pixel multiplier that keeps the SDVO input clock in its legal range:
 * 1x above 100 MHz, 2x above 50 MHz, else 4x. */
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
{
	if (mode->clock >= 100000)
		return 1;
	else if (mode->clock >= 50000)
		return 2;
	else
		return 4;
}

static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
					      u8 ddc_bus)
{
	/* This must be the immediately preceding write before the i2c xfer */
	return intel_sdvo_write_cmd(intel_sdvo,
				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
				    &ddc_bus, 1);
}

/* Issue a command carrying data and consume the (empty) acknowledgement. */
static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
{
	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
		return false;

	return intel_sdvo_read_response(intel_sdvo, NULL, 0);
}

/* Issue an argument-less query and read back @len reply bytes. */
static bool
intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
{
	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
		return false;

	return intel_sdvo_read_response(intel_sdvo, value, len);
}

/* Select input 0 as the target for subsequent input-related commands. */
static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
{
	struct intel_sdvo_set_target_input_args targets = {0};
	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_TARGET_INPUT,
				    &targets, sizeof(targets));
}

/**
 * Return whether each input is trained.
 *
 * This function is making an assumption about the layout of the response,
 * which should be checked against the docs.
 */
static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
{
	struct intel_sdvo_get_trained_inputs_response response;

	BUILD_BUG_ON(sizeof(response) != 1);
	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
				  &response, sizeof(response)))
		return false;

	*input_1 = response.input0_trained;
	*input_2 = response.input1_trained;
	return true;
}

static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
					  u16 outputs)
{
	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ACTIVE_OUTPUTS,
				    &outputs, sizeof(outputs));
}

/* Map a DRM DPMS mode onto the SDVO encoder power state and apply it. */
static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
					       int mode)
{
	u8 state = SDVO_ENCODER_STATE_ON;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		state = SDVO_ENCODER_STATE_ON;
		break;
	case DRM_MODE_DPMS_STANDBY:
		state = SDVO_ENCODER_STATE_STANDBY;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		state = SDVO_ENCODER_STATE_SUSPEND;
		break;
	case DRM_MODE_DPMS_OFF:
		state = SDVO_ENCODER_STATE_OFF;
		break;
	}

	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_ENCODER_POWER_STATE,
				    &state, sizeof(state));
}

/* Query the legal input pixel clock range; results reported in kHz. */
static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
						   int *clock_min,
						   int *clock_max)
{
	struct intel_sdvo_pixel_clock_range clocks;

	BUILD_BUG_ON(sizeof(clocks) != 4);
	if (!intel_sdvo_get_value(intel_sdvo,
				  SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
				  &clocks, sizeof(clocks)))
		return false;

	/* Convert the values from units of 10 kHz to kHz. */
	*clock_min = clocks.min * 10;
	*clock_max = clocks.max * 10;
	return true;
}

static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo, u16 outputs)
{
	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_TARGET_OUTPUT,
				    &outputs, sizeof(outputs));
}

/* Write one DTD as the two consecutive 8-byte part1/part2 commands
 * (cmd and cmd + 1). */
static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
				  struct intel_sdvo_dtd *dtd)
{
	return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
		intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}

static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
					struct intel_sdvo_dtd *dtd)
{
	return intel_sdvo_set_timing(intel_sdvo,
				     SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}

static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
					 struct intel_sdvo_dtd *dtd)
{
	return intel_sdvo_set_timing(intel_sdvo,
				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}

/* Ask the encoder to compute its preferred input timing for the given
 * clock (in 10 kHz units) and active size. */
static bool
intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
					 uint16_t clock,
					 uint16_t width,
					 uint16_t height)
{
	struct intel_sdvo_preferred_input_timing_args args;

	memset(&args, 0, sizeof(args));
	args.clock = clock;
	args.width = width;
	args.height = height;
	args.interlace = 0;

	/* The encoder must scale when the requested size differs from the
	 * fixed LVDS panel mode. */
	if (intel_sdvo->is_lvds &&
	    (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
	     intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
		args.scaled = 1;

	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
				    &args, sizeof(args));
}

static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
						  struct intel_sdvo_dtd *dtd)
{
	BUILD_BUG_ON(sizeof(dtd->part1) != 8);
	BUILD_BUG_ON(sizeof(dtd->part2) != 8);
	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
				    &dtd->part1, sizeof(dtd->part1)) &&
		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
				     &dtd->part2, sizeof(dtd->part2));
}

static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
{
	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
}

/* Pack a DRM display mode into the wire-format SDVO DTD (clock is stored
 * in 10 kHz units, sizes split into low byte plus high nibble). */
static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
					 const struct drm_display_mode *mode)
{
	uint16_t width, height;
	uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
	uint16_t h_sync_offset, v_sync_offset;
	int mode_clock;

	width = mode->crtc_hdisplay;
	height = mode->crtc_vdisplay;

	/* do some mode translations */
	h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
	h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;

	v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
	v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;

	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;

	/* Factor out the pixel multiplier, then convert kHz -> 10 kHz. */
	mode_clock = mode->clock;
	mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
	mode_clock /= 10;
	dtd->part1.clock = mode_clock;

	dtd->part1.h_active = width & 0xff;
	dtd->part1.h_blank = h_blank_len & 0xff;
	dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
		((h_blank_len >> 8) & 0xf);

	dtd->part1.v_active = height & 0xff;
	dtd->part1.v_blank = v_blank_len & 0xff;
	dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
		((v_blank_len >> 8) & 0xf);

	dtd->part2.h_sync_off = h_sync_offset & 0xff;
	dtd->part2.h_sync_width = h_sync_len & 0xff;
	dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
		(v_sync_len & 0xf);
	dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
		((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
		((v_sync_len & 0x30) >> 4);

	dtd->part2.dtd_flags = 0x18;
	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
		dtd->part2.dtd_flags |= 0x2;
	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
		dtd->part2.dtd_flags |= 0x4;

	dtd->part2.sdvo_flags = 0;
	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
	dtd->part2.reserved = 0;
}

/* Inverse of intel_sdvo_get_dtd_from_mode: unpack a DTD into a DRM mode. */
static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
					 const struct intel_sdvo_dtd *dtd)
{
	mode->hdisplay = dtd->part1.h_active;
	mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
	mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
	mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
	mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
	mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
	mode->htotal = mode->hdisplay + dtd->part1.h_blank;
	mode->htotal += (dtd->part1.h_high & 0xf) << 8;

	mode->vdisplay = dtd->part1.v_active;
	mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
	mode->vsync_start = mode->vdisplay;
	mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
	mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
	mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
	mode->vsync_end = mode->vsync_start +
		(dtd->part2.v_sync_off_width & 0xf);
	mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
	mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
	mode->vtotal += (dtd->part1.v_high & 0xf) << 8;

	mode->clock = dtd->part1.clock * 10;

	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
	if (dtd->part2.dtd_flags & 0x2)
		mode->flags |= DRM_MODE_FLAG_PHSYNC;
	if (dtd->part2.dtd_flags & 0x4)
		mode->flags |= DRM_MODE_FLAG_PVSYNC;
}

static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
{
	struct intel_sdvo_encode encode;

	BUILD_BUG_ON(sizeof(encode) != 2);
	return intel_sdvo_get_value(intel_sdvo,
				    SDVO_CMD_GET_SUPP_ENCODE,
				    &encode, sizeof(encode));
}

static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
				  uint8_t mode)
{
	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
}

static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
				       uint8_t mode)
{
	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}

#if 0
/* Debug-only helper (compiled out): dump all HDMI HBUF buffers. */
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
	int i, j;
	uint8_t set_buf_index[2];
	uint8_t av_split;
	uint8_t buf_size;
	uint8_t buf[48];
	uint8_t *pos;

	intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);

	for (i = 0; i <= av_split; i++) {
		set_buf_index[0] = i; set_buf_index[1] = 0;
		intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
				     set_buf_index, 2);
		intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
		intel_sdvo_read_response(encoder, &buf_size, 1);

		pos = buf;
		for (j = 0; j <= buf_size; j += 8) {
			intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
					     NULL, 0);
			intel_sdvo_read_response(encoder, pos, 8);
			pos += 8;
		}
	}
}
#endif

/* Upload the AVI infoframe into HBUF index 1, 8 bytes at a time, and
 * program the device to transmit it each vsync. */
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
{
	struct dip_infoframe avi_if = {
		.type = DIP_TYPE_AVI,
		.ver = DIP_VERSION_AVI,
		.len = DIP_LEN_AVI,
	};
	uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
	uint8_t set_buf_index[2] = { 1, 0 };
	uint64_t *data = (uint64_t *)&avi_if;
	unsigned i;

	intel_dip_infoframe_csum(&avi_if);

	if (!intel_sdvo_set_value(intel_sdvo,
				  SDVO_CMD_SET_HBUF_INDEX,
				  set_buf_index, 2))
		return false;

	for (i = 0; i < sizeof(avi_if); i += 8) {
		if (!intel_sdvo_set_value(intel_sdvo,
					  SDVO_CMD_SET_HBUF_DATA,
					  data, 8))
			return false;
		data++;
	}

	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_HBUF_TXRATE,
				    &tx_rate, 1);
}

/* Program the TV format selected by tv_format_index as a one-hot bitmask
 * in the 6-byte format structure. */
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
{
	struct intel_sdvo_tv_format format;
	uint32_t format_map;

	format_map = 1 << intel_sdvo->tv_format_index;
	memset(&format, 0, sizeof(format));
	memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));

	BUILD_BUG_ON(sizeof(format) != 6);
	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_TV_FORMAT,
				    &format, sizeof(format));
}

/* Target the attached output and program its output timing from @mode. */
static bool
intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
					struct drm_display_mode *mode)
{
	struct intel_sdvo_dtd output_dtd;

	if (!intel_sdvo_set_target_output(intel_sdvo,
					  intel_sdvo->attached_output))
		return false;

	intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
		return false;

	return true;
}

/* Ask the encoder for its preferred input timing for @mode and copy it
 * into @adjusted_mode (also cached in intel_sdvo->input_dtd). */
static bool
intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
				      struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	/* Reset the input timing to the screen. Assume always input 0. */
	if (!intel_sdvo_set_target_input(intel_sdvo))
		return false;

	if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
						      mode->clock / 10,
						      mode->hdisplay,
						      mode->vdisplay))
		return false;

	if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
						   &intel_sdvo->input_dtd))
		return false;

	intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);

	return true;
}

static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
	int multiplier;

	/* We need to construct preferred input timings based on our
	 * output timings. To do that, we have to set the output
	 * timings, even though this isn't really the right place in
	 * the sequence to do it. Oh well.
	 */
	if (intel_sdvo->is_tv) {
		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
			return false;

		(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
							     mode,
							     adjusted_mode);
	} else if (intel_sdvo->is_lvds) {
		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
							     intel_sdvo->sdvo_lvds_fixed_mode))
			return false;

		(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
							     mode,
							     adjusted_mode);
	}

	/* Make the CRTC code factor in the SDVO pixel multiplier. The
	 * SDVO device will factor out the multiplier during mode_set.
	 */
	multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
	intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);

	return true;
}

/* Program the full output path for a mode set: in/out map, output and
 * input timings, encode mode (HDMI/DVI), TV format, clock-rate multiplier
 * and finally the SDVOB/SDVOC control register. */
static void intel_sdvo_mode_set(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
	u32 sdvox;
	struct intel_sdvo_in_out_map in_out;
	struct intel_sdvo_dtd input_dtd, output_dtd;
	int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	int rate;

	if (!mode)
		return;

	/* First, set the input mapping for the first input to our controlled
	 * output. This is only correct if we're a single-input device, in
	 * which case the first input is the output from the appropriate SDVO
	 * channel on the motherboard. In a two-input device, the first input
	 * will be SDVOB and the second SDVOC.
	 */
	in_out.in0 = intel_sdvo->attached_output;
	in_out.in1 = 0;

	intel_sdvo_set_value(intel_sdvo,
			     SDVO_CMD_SET_IN_OUT_MAP,
			     &in_out, sizeof(in_out));

	/* Set the output timings to the screen */
	if (!intel_sdvo_set_target_output(intel_sdvo,
					  intel_sdvo->attached_output))
		return;

	/* lvds has a special fixed output timing. */
	if (intel_sdvo->is_lvds)
		intel_sdvo_get_dtd_from_mode(&output_dtd,
					     intel_sdvo->sdvo_lvds_fixed_mode);
	else
		intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
	(void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);

	/* Set the input timing to the screen. Assume always input 0. */
	if (!intel_sdvo_set_target_input(intel_sdvo))
		return;

	if (intel_sdvo->has_hdmi_monitor) {
		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
		intel_sdvo_set_colorimetry(intel_sdvo,
					   SDVO_COLORIMETRY_RGB256);
		intel_sdvo_set_avi_infoframe(intel_sdvo);
	} else
		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);

	if (intel_sdvo->is_tv &&
	    !intel_sdvo_set_tv_format(intel_sdvo))
		return;

	/* We have tried to get input timing in mode_fixup, and filled into
	 * adjusted_mode.
	 */
	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
	(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);

	switch (pixel_multiplier) {
	default:
	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
	case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
	}
	if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
		return;

	/* Set the SDVO control regs. */
	if (INTEL_INFO(dev)->gen >= 4) {
		/* The real mode polarity is set by the SDVO commands, using
		 * struct intel_sdvo_dtd. */
		sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
		if (intel_sdvo->is_hdmi)
			sdvox |= intel_sdvo->color_range;
		if (INTEL_INFO(dev)->gen < 5)
			sdvox |= SDVO_BORDER_ENABLE;
	} else {
		sdvox = I915_READ(intel_sdvo->sdvo_reg);
		switch (intel_sdvo->sdvo_reg) {
		case SDVOB:
			sdvox &= SDVOB_PRESERVE_MASK;
			break;
		case SDVOC:
			sdvox &= SDVOC_PRESERVE_MASK;
			break;
		}
		sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
	}

	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
		sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
	else
		sdvox |= TRANSCODER(intel_crtc->pipe);

	if (intel_sdvo->has_hdmi_audio)
		sdvox |= SDVO_AUDIO_ENABLE;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* done in crtc_mode_set as the dpll_md reg must be written early */
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		/* done in crtc_mode_set as it lives inside the dpll register */
	} else {
		sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
	}

	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
	    INTEL_INFO(dev)->gen < 5)
		sdvox |= SDVO_STALL_SELECT;
	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}

/* DPMS hook: gate the active outputs and the SDVO port-enable bit; on
 * power-up, wait two vblanks and verify the input trained. */
static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	u32 temp;

	if (mode != DRM_MODE_DPMS_ON) {
		intel_sdvo_set_active_outputs(intel_sdvo, 0);
		if (0)
			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);

		if (mode == DRM_MODE_DPMS_OFF) {
			temp = I915_READ(intel_sdvo->sdvo_reg);
			if ((temp & SDVO_ENABLE) != 0) {
				intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
			}
		}
	} else {
		bool input1, input2;
		int i;
		u8 status;

		temp = I915_READ(intel_sdvo->sdvo_reg);
		if ((temp & SDVO_ENABLE) == 0)
			intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
		for (i = 0; i < 2; i++)
			intel_wait_for_vblank(dev, intel_crtc->pipe);

		status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
		/* Warn if the device reported failure to sync.
		 * A lot of SDVO devices fail to notify of sync, but it's
		 * a given it the status is a success, we succeeded.
		 */
		if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
			DRM_DEBUG_KMS("First %s output reported failure to "
					"sync\n", SDVO_NAME(intel_sdvo));
		}

		if (0)
			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
		intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
	}
	return;
}

/* Validate a mode against the encoder's pixel-clock limits and, for LVDS,
 * the fixed panel size. */
static int intel_sdvo_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
{
	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (intel_sdvo->pixel_clock_min > mode->clock)
		return MODE_CLOCK_LOW;

	if (intel_sdvo->pixel_clock_max < mode->clock)
		return MODE_CLOCK_HIGH;

	if (intel_sdvo->is_lvds) {
		if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
			return MODE_PANEL;
	}

	return MODE_OK;
}

/* Query and log the 8-byte device-capabilities block. */
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
	BUILD_BUG_ON(sizeof(*caps) != 8);
	if (!intel_sdvo_get_value(intel_sdvo,
				  SDVO_CMD_GET_DEVICE_CAPS,
				  caps, sizeof(*caps)))
		return false;

	DRM_DEBUG_KMS("SDVO capabilities:\n"
		      " vendor_id: %d\n"
		      " device_id: %d\n"
		      " device_rev_id: %d\n"
		      " sdvo_version_major: %d\n"
		      " sdvo_version_minor: %d\n"
		      " sdvo_inputs_mask: %d\n"
		      " smooth_scaling: %d\n"
		      " sharp_scaling: %d\n"
		      " up_scaling: %d\n"
		      " down_scaling: %d\n"
		      " stall_support: %d\n"
		      " output_flags: %d\n",
		      caps->vendor_id,
		      caps->device_id,
		      caps->device_rev_id,
		      caps->sdvo_version_major,
		      caps->sdvo_version_minor,
		      caps->sdvo_inputs_mask,
		      caps->smooth_scaling,
		      caps->sharp_scaling,
		      caps->up_scaling,
		      caps->down_scaling,
		      caps->stall_support,
		      caps->output_flags);

	return true;
}

static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
{
	struct drm_device *dev = intel_sdvo->base.base.dev;
	u8 response[2];

	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
	 * on the line.
	 */
	if (IS_I945G(dev) || IS_I945GM(dev))
		return false;

	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
				    &response, 2) && response[0];
}

static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);

	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
}

static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
	/* Is there more than one type of output? */
	return hweight16(intel_sdvo->caps.output_flags) > 1;
}

static struct edid *
intel_sdvo_get_edid(struct drm_connector *connector)
{
	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
	return drm_get_edid(connector, &sdvo->ddc);
}

/* Mac mini hack -- use the same DDC as the analog connector */
static struct edid *
intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;

	return drm_get_edid(connector,
			    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
}

/* Detect a TMDS (DVI/HDMI) sink via EDID, trying alternate DDC buses and
 * the analog DDC before giving up; updates the HDMI monitor/audio flags. */
enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
	enum drm_connector_status status;
	struct edid *edid;

	edid = intel_sdvo_get_edid(connector);

	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
		u8 ddc, saved_ddc = intel_sdvo->ddc_bus;

		/*
		 * Don't use the 1 as the argument of DDC bus switch to get
		 * the EDID. It is used for SDVO SPD ROM.
		 */
		for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
			intel_sdvo->ddc_bus = ddc;
			edid = intel_sdvo_get_edid(connector);
			if (edid)
				break;
		}
		/*
		 * If we found the EDID on the other bus,
		 * assume that is the correct DDC bus.
		 */
		if (edid == NULL)
			intel_sdvo->ddc_bus = saved_ddc;
	}

	/*
	 * When there is no edid and no monitor is connected with VGA
	 * port, try to use the CRT ddc to read the EDID for DVI-connector.
	 */
	if (edid == NULL)
		edid = intel_sdvo_get_analog_edid(connector);

	status = connector_status_unknown;
	if (edid != NULL) {
		/* DDC bus is shared, match EDID to connector type */
		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
			status = connector_status_connected;
			if (intel_sdvo->is_hdmi) {
				intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
				intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
			}
		} else
			status = connector_status_disconnected;
		connector->display_info.raw_edid = NULL;
		kfree(edid);
	}

	/* A user-forced audio property overrides what the EDID advertised. */
	if (status == connector_status_connected) {
		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
		if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
			intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
	}

	return status;
}

/* True when the EDID's digital/analog bit matches the connector type. */
static bool
intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
				  struct edid *edid)
{
	bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
	bool connector_is_digital = !!IS_DIGITAL(sdvo);

	DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
		      connector_is_digital, monitor_is_digital);
	return connector_is_digital == monitor_is_digital;
}

/* Connector detect hook: ask the device which displays are attached and
 * cross-check the answer against this connector's output flag and EDID. */
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
	uint16_t response;
	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
	enum drm_connector_status ret;

	if (!intel_sdvo_write_cmd(intel_sdvo,
				  SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
		return connector_status_unknown;

	/* add 30ms delay when the output type might be TV */
	if (intel_sdvo->caps.output_flags &
	    (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
		mdelay(30);

	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
		return connector_status_unknown;

	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
		      response & 0xff, response >> 8,
		      intel_sdvo_connector->output_flag);

	if (response == 0)
		return connector_status_disconnected;

	intel_sdvo->attached_output = response;

	intel_sdvo->has_hdmi_monitor = false;
	intel_sdvo->has_hdmi_audio = false;

	if ((intel_sdvo_connector->output_flag & response) == 0)
		ret = connector_status_disconnected;
	else if (IS_TMDS(intel_sdvo_connector))
		ret = intel_sdvo_tmds_sink_detect(connector);
	else {
		struct edid *edid;

		/* if we have an edid check it matches the connection */
		edid = intel_sdvo_get_edid(connector);
		if (edid == NULL)
			edid = intel_sdvo_get_analog_edid(connector);
		if (edid != NULL) {
			if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
							      edid))
				ret = connector_status_connected;
			else
				ret = connector_status_disconnected;

			connector->display_info.raw_edid = NULL;
			kfree(edid);
		} else
			ret = connector_status_connected;
	}

	/* May update encoder flag for like clock for SDVO TV, etc.*/
	if (ret == connector_status_connected) {
		intel_sdvo->is_tv = false;
		intel_sdvo->is_lvds = false;
		intel_sdvo->base.needs_tv_clock = false;

		if (response & SDVO_TV_MASK) {
			intel_sdvo->is_tv = true;
			intel_sdvo->base.needs_tv_clock = true;
		}
		if (response & SDVO_LVDS_MASK)
			/* Only LVDS when a fixed panel mode was found earlier. */
			intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
	}

	return ret;
}

/*
 * Fill the connector's probed-mode list from the EDID read over the
 * (possibly shared) DDC bus.
 */
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
	struct edid *edid;

	/* set the bus switch and get the modes */
	edid = intel_sdvo_get_edid(connector);

	/*
	 * Mac mini hack.  On this device, the DVI-I connector shares one DDC
	 * link between analog and digital outputs. So, if the regular SDVO
	 * DDC fails, check to see if the analog output is disconnected, in
	 * which case we'll look there for the digital DDC data.
	 */
	if (edid == NULL)
		edid = intel_sdvo_get_analog_edid(connector);

	if (edid != NULL) {
		/* Reject an EDID whose input type contradicts the connector. */
		if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
						      edid)) {
			drm_mode_connector_update_edid_property(connector, edid);
			drm_add_edid_modes(connector, edid);
		}

		connector->display_info.raw_edid = NULL;
		kfree(edid);
	}
}

/*
 * Set of SDVO TV modes.
 * Note!  This is in reply order (see loop in get_tv_modes) — index i in
 * this table corresponds to bit i of the device's resolution-support
 * reply, so entries must not be reordered.
 * XXX: all 60Hz refresh?
 */
static const struct drm_display_mode sdvo_tv_modes[] = {
	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
		   416, 0, 200, 201, 232, 233, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
		   416, 0, 240, 241, 272, 273, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
		   496, 0, 300, 301, 332, 333, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
		   736, 0, 350, 351, 382, 383, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
		   736, 0, 400, 401, 432, 433, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
		   736, 0, 480, 481, 512, 513, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
		   800, 0, 480, 481, 512, 513, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
		   800, 0, 576, 577, 608, 609, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
		   816, 0, 350, 351, 382, 383, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
		   816, 0, 400, 401, 432, 433, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
		   816, 0, 480, 481, 512, 513, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
		   816, 0, 540, 541, 572, 573, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
		   816, 0, 576, 577, 608, 609, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
		   864, 0, 576, 577, 608, 609, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
		   896, 0, 600, 601, 632, 633, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
		   928, 0, 624, 625, 656, 657, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
		   1016, 0, 766, 767, 798, 799, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
		   1120, 0, 768, 769, 800, 801, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
		   1376, 0, 1024, 1025, 1056, 1057, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};

/*
 * Ask the SDVO device which of the sdvo_tv_modes[] resolutions it
 * supports for the currently selected TV format, and add those to the
 * connector's probed-mode list.
 */
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
	struct intel_sdvo_sdtv_resolution_request tv_res;
	uint32_t reply = 0, format_map
		 = 0;
	int i;

	/* Read the list of supported input resolutions for the selected TV
	 * format.
	 */
	format_map = 1 << intel_sdvo->tv_format_index;
	memcpy(&tv_res, &format_map,
	       min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));

	if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
		return;

	/* The resolution request block is defined to be 3 bytes on the wire. */
	BUILD_BUG_ON(sizeof(tv_res) != 3);
	if (!intel_sdvo_write_cmd(intel_sdvo,
				  SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
				  &tv_res, sizeof(tv_res)))
		return;
	if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
		return;

	/* Bit i of the reply corresponds to sdvo_tv_modes[i]. */
	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
		if (reply & (1 << i)) {
			struct drm_display_mode *nmode;
			nmode = drm_mode_duplicate(connector->dev,
						   &sdvo_tv_modes[i]);
			if (nmode)
				drm_mode_probed_add(connector, nmode);
		}
}

/*
 * Build the mode list for an SDVO LVDS panel: prefer DDC-probed modes,
 * fall back to the VBT panel mode, then latch the preferred mode as the
 * fixed panel mode used by mode_fixup.
 */
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct drm_display_mode *newmode;

	/*
	 * Attempt to get the mode list from DDC.
	 * Assume that the preferred modes are
	 * arranged in priority order.
	 */
	intel_ddc_get_modes(connector, intel_sdvo->i2c);
	if (list_empty(&connector->probed_modes) == false)
		goto end;

	/* Fetch modes from VBT */
	if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
		newmode = drm_mode_duplicate(connector->dev,
					     dev_priv->sdvo_lvds_vbt_mode);
		if (newmode != NULL) {
			/* Guarantee the mode is preferred */
			newmode->type = (DRM_MODE_TYPE_PREFERRED |
					 DRM_MODE_TYPE_DRIVER);
			drm_mode_probed_add(connector, newmode);
		}
	}

end:
	/* Remember the first preferred mode as the fixed LVDS panel mode. */
	list_for_each_entry(newmode, &connector->probed_modes, head) {
		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
			intel_sdvo->sdvo_lvds_fixed_mode =
				drm_mode_duplicate(connector->dev, newmode);

			drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
					      0);

			intel_sdvo->is_lvds = true;
			break;
		}
	}
}

/*
 * Connector ->get_modes() hook: dispatch on connector flavour (TV,
 * LVDS, or DDC-probed digital/analog).
 */
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
	struct intel_sdvo_connector *intel_sdvo_connector =
		to_intel_sdvo_connector(connector);

	if (IS_TV(intel_sdvo_connector))
		intel_sdvo_get_tv_modes(connector);
	else if (IS_LVDS(intel_sdvo_connector))
		intel_sdvo_get_lvds_modes(connector);
	else
		intel_sdvo_get_ddc_modes(connector);

	return !list_empty(&connector->probed_modes);
}

/*
 * Tear down every enhancement property that was created for this
 * connector; NULL checks guard properties that were never created.
 */
static void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
	struct intel_sdvo_connector *intel_sdvo_connector =
		to_intel_sdvo_connector(connector);
	struct drm_device *dev = connector->dev;

	if (intel_sdvo_connector->left)
		drm_property_destroy(dev, intel_sdvo_connector->left);
	if (intel_sdvo_connector->right)
		drm_property_destroy(dev, intel_sdvo_connector->right);
	if (intel_sdvo_connector->top)
		drm_property_destroy(dev, intel_sdvo_connector->top);
	if (intel_sdvo_connector->bottom)
		drm_property_destroy(dev, intel_sdvo_connector->bottom);
	if (intel_sdvo_connector->hpos)
		drm_property_destroy(dev, intel_sdvo_connector->hpos);
	if (intel_sdvo_connector->vpos)
		drm_property_destroy(dev, intel_sdvo_connector->vpos);
	if (intel_sdvo_connector->saturation)
		drm_property_destroy(dev, intel_sdvo_connector->saturation);
	if (intel_sdvo_connector->contrast)
drm_property_destroy(dev, intel_sdvo_connector->contrast); if (intel_sdvo_connector->hue) drm_property_destroy(dev, intel_sdvo_connector->hue); if (intel_sdvo_connector->sharpness) drm_property_destroy(dev, intel_sdvo_connector->sharpness); if (intel_sdvo_connector->flicker_filter) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter); if (intel_sdvo_connector->flicker_filter_2d) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d); if (intel_sdvo_connector->flicker_filter_adaptive) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive); if (intel_sdvo_connector->tv_luma_filter) drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter); if (intel_sdvo_connector->tv_chroma_filter) drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter); if (intel_sdvo_connector->dot_crawl) drm_property_destroy(dev, intel_sdvo_connector->dot_crawl); if (intel_sdvo_connector->brightness) drm_property_destroy(dev, intel_sdvo_connector->brightness); } static void intel_sdvo_destroy(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (intel_sdvo_connector->tv_format) drm_property_destroy(connector->dev, intel_sdvo_connector->tv_format); intel_sdvo_destroy_enhance_property(connector); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct edid *edid; bool has_audio = false; if (!intel_sdvo->is_hdmi) return false; edid = intel_sdvo_get_edid(connector); if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) has_audio = drm_detect_monitor_audio(edid); return has_audio; } static int intel_sdvo_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector 
*intel_sdvo_connector = to_intel_sdvo_connector(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; uint16_t temp_value; uint8_t cmd; int ret; ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; if (property == dev_priv->force_audio_property) { int i = val; bool has_audio; if (i == intel_sdvo_connector->force_audio) return 0; intel_sdvo_connector->force_audio = i; if (i == HDMI_AUDIO_AUTO) has_audio = intel_sdvo_detect_hdmi_audio(connector); else has_audio = (i == HDMI_AUDIO_ON); if (has_audio == intel_sdvo->has_hdmi_audio) return 0; intel_sdvo->has_hdmi_audio = has_audio; goto done; } if (property == dev_priv->broadcast_rgb_property) { if (val == !!intel_sdvo->color_range) return 0; intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; goto done; } #define CHECK_PROPERTY(name, NAME) \ if (intel_sdvo_connector->name == property) { \ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \ cmd = SDVO_CMD_SET_##NAME; \ intel_sdvo_connector->cur_##name = temp_value; \ goto set_value; \ } if (property == intel_sdvo_connector->tv_format) { if (val >= TV_FORMAT_NUM) return -EINVAL; if (intel_sdvo->tv_format_index == intel_sdvo_connector->tv_format_supported[val]) return 0; intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val]; goto done; } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { temp_value = val; if (intel_sdvo_connector->left == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->right, val); if (intel_sdvo_connector->left_margin == temp_value) return 0; intel_sdvo_connector->left_margin = temp_value; intel_sdvo_connector->right_margin = temp_value; temp_value = intel_sdvo_connector->max_hscan - intel_sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->right == property) { drm_connector_property_set_value(connector, 
intel_sdvo_connector->left, val); if (intel_sdvo_connector->right_margin == temp_value) return 0; intel_sdvo_connector->left_margin = temp_value; intel_sdvo_connector->right_margin = temp_value; temp_value = intel_sdvo_connector->max_hscan - intel_sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->top == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->bottom, val); if (intel_sdvo_connector->top_margin == temp_value) return 0; intel_sdvo_connector->top_margin = temp_value; intel_sdvo_connector->bottom_margin = temp_value; temp_value = intel_sdvo_connector->max_vscan - intel_sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } else if (intel_sdvo_connector->bottom == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->top, val); if (intel_sdvo_connector->bottom_margin == temp_value) return 0; intel_sdvo_connector->top_margin = temp_value; intel_sdvo_connector->bottom_margin = temp_value; temp_value = intel_sdvo_connector->max_vscan - intel_sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } CHECK_PROPERTY(hpos, HPOS) CHECK_PROPERTY(vpos, VPOS) CHECK_PROPERTY(saturation, SATURATION) CHECK_PROPERTY(contrast, CONTRAST) CHECK_PROPERTY(hue, HUE) CHECK_PROPERTY(brightness, BRIGHTNESS) CHECK_PROPERTY(sharpness, SHARPNESS) CHECK_PROPERTY(flicker_filter, FLICKER_FILTER) CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D) CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE) CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER) CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER) CHECK_PROPERTY(dot_crawl, DOT_CRAWL) } return -EINVAL; /* unknown property */ set_value: if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2)) return -EIO; done: if (intel_sdvo->base.base.crtc) { struct drm_crtc *crtc = intel_sdvo->base.base.crtc; drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } return 0; 
#undef CHECK_PROPERTY
}

static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
	.dpms = intel_sdvo_dpms,
	.mode_fixup = intel_sdvo_mode_fixup,
	.prepare = intel_encoder_prepare,
	.mode_set = intel_sdvo_mode_set,
	.commit = intel_encoder_commit,
};

static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = intel_sdvo_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_sdvo_set_property,
	.destroy = intel_sdvo_destroy,
};

static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
	.get_modes = intel_sdvo_get_modes,
	.mode_valid = intel_sdvo_mode_valid,
	.best_encoder = intel_best_encoder,
};

/* Encoder ->destroy() hook: free the cached LVDS fixed mode and the DDC
 * proxy adapter before the generic encoder teardown.
 */
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);

	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
		drm_mode_destroy(encoder->dev,
				 intel_sdvo->sdvo_lvds_fixed_mode);

	i2c_del_adapter(&intel_sdvo->ddc);
	intel_encoder_destroy(encoder);
}

static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
	.destroy = intel_sdvo_enc_destroy,
};

/*
 * Pick a DDC bus for this output when the BIOS did not tell us which one
 * to use, based on the output's position in the fixed priority order.
 */
static void
intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
{
	uint16_t mask = 0;
	unsigned int num_bits;

	/* Make a mask of outputs less than or equal to our own priority in the
	 * list.  The case fallthroughs below are intentional: each case adds
	 * its own flag and falls into the next (lower-priority) one.
	 */
	switch (sdvo->controlled_output) {
	case SDVO_OUTPUT_LVDS1:
		mask |= SDVO_OUTPUT_LVDS1;
	case SDVO_OUTPUT_LVDS0:
		mask |= SDVO_OUTPUT_LVDS0;
	case SDVO_OUTPUT_TMDS1:
		mask |= SDVO_OUTPUT_TMDS1;
	case SDVO_OUTPUT_TMDS0:
		mask |= SDVO_OUTPUT_TMDS0;
	case SDVO_OUTPUT_RGB1:
		mask |= SDVO_OUTPUT_RGB1;
	case SDVO_OUTPUT_RGB0:
		mask |= SDVO_OUTPUT_RGB0;
		break;
	}

	/* Count bits to find what number we are in the priority list. */
	mask &= sdvo->caps.output_flags;
	num_bits = hweight16(mask);
	/* If more than 3 outputs, default to DDC bus 3 for now. */
	if (num_bits > 3)
		num_bits = 3;

	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
	sdvo->ddc_bus = 1 << num_bits;
}

/**
 * Choose the appropriate DDC bus for control bus switch command for this
 * SDVO output based on the controlled output.
 *
 * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
 * outputs, then LVDS outputs.
 */
static void
intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
			  struct intel_sdvo *sdvo, u32 reg)
{
	struct sdvo_device_mapping *mapping;

	if (IS_SDVOB(reg))
		mapping = &(dev_priv->sdvo_mappings[0]);
	else
		mapping = &(dev_priv->sdvo_mappings[1]);

	/* Prefer the VBT-provided pin; otherwise fall back to guessing. */
	if (mapping->initialized)
		sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
	else
		intel_sdvo_guess_ddc_bus(sdvo);
}

/*
 * Pick the GMBUS pin used to talk to the SDVO device itself (not the
 * monitor DDC), preferring the VBT mapping, and force bit-banging at
 * 1MHz on that bus.
 */
static void
intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
			  struct intel_sdvo *sdvo, u32 reg)
{
	struct sdvo_device_mapping *mapping;
	u8 pin;

	if (IS_SDVOB(reg))
		mapping = &dev_priv->sdvo_mappings[0];
	else
		mapping = &dev_priv->sdvo_mappings[1];

	pin = GMBUS_PORT_DPB;
	if (mapping->initialized)
		pin = mapping->i2c_pin;

	if (pin < GMBUS_NUM_PORTS) {
		sdvo->i2c = &dev_priv->gmbus[pin].adapter;
		intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
		intel_gmbus_force_bit(sdvo->i2c, true);
	} else {
		sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
	}
}

/* NOTE(review): the 'device' argument is currently unused; HDMI support
 * is decided purely from the encode capability query.
 */
static bool
intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
{
	return intel_sdvo_check_supp_encode(intel_sdvo);
}

/*
 * Work out the 7-bit-shifted i2c slave address of the SDVO device,
 * preferring BIOS/VBT information over the historical defaults.
 */
static u8
intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sdvo_device_mapping *my_mapping, *other_mapping;

	if (IS_SDVOB(sdvo_reg)) {
		my_mapping = &dev_priv->sdvo_mappings[0];
		other_mapping = &dev_priv->sdvo_mappings[1];
	} else {
		my_mapping = &dev_priv->sdvo_mappings[1];
		other_mapping = &dev_priv->sdvo_mappings[0];
	}

	/* If the BIOS described our SDVO device, take advantage of it.
	 */
	if (my_mapping->slave_addr)
		return my_mapping->slave_addr;

	/* If the BIOS only described a different SDVO device, use the
	 * address that it isn't using.
	 */
	if (other_mapping->slave_addr) {
		if (other_mapping->slave_addr == 0x70)
			return 0x72;
		else
			return 0x70;
	}

	/* No SDVO device info is found for another DVO port,
	 * so use mapping assumption we had before BIOS parsing.
	 */
	if (IS_SDVOB(sdvo_reg))
		return 0x70;
	else
		return 0x72;
}

/*
 * Common drm connector registration shared by all SDVO connector
 * flavours; the caller sets connector_type before calling.
 */
static void
intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
			  struct intel_sdvo *encoder)
{
	drm_connector_init(encoder->base.base.dev,
			   &connector->base.base,
			   &intel_sdvo_connector_funcs,
			   connector->base.base.connector_type);

	drm_connector_helper_add(&connector->base.base,
				 &intel_sdvo_connector_helper_funcs);

	connector->base.base.interlace_allowed = 1;
	connector->base.base.doublescan_allowed = 0;
	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;

	intel_connector_attach_encoder(&connector->base, &encoder->base);
	drm_sysfs_connector_add(&connector->base.base);
}

/* Attach the HDMI-specific connector properties (force-audio always;
 * broadcast-RGB only on gen4+ mobile).
 */
static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
{
	struct drm_device *dev = connector->base.base.dev;

	intel_attach_force_audio_property(&connector->base.base);
	if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
		intel_attach_broadcast_rgb_property(&connector->base.base);
}

/*
 * Create and register the connector for a TMDS (DVI/HDMI) output.
 * 'device' selects TMDS0 or TMDS1.  Returns false on allocation failure.
 */
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
	struct drm_encoder *encoder = &intel_sdvo->base.base;
	struct drm_connector *connector;
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
	struct intel_connector *intel_connector;
	struct intel_sdvo_connector *intel_sdvo_connector;

	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
	if (!intel_sdvo_connector)
		return false;

	if (device == 0) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
	} else if (device == 1) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
	}

	intel_connector = &intel_sdvo_connector->base;
	connector = &intel_connector->base;
	if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
		connector->polled = DRM_CONNECTOR_POLL_HPD;
		intel_sdvo->hotplug_active[0] |= 1 << device;
		/* Some SDVO devices have one-shot hotplug interrupts.
		 * Ensure that they get re-enabled when an interrupt happens.
		 */
		intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
		intel_sdvo_enable_hotplug(intel_encoder);
	} else
		connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
	connector->connector_type = DRM_MODE_CONNECTOR_DVID;

	/* Upgrade the connector type when the device can encode HDMI. */
	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
		intel_sdvo->is_hdmi = true;
	}
	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
				       (1 << INTEL_ANALOG_CLONE_BIT));

	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
	if (intel_sdvo->is_hdmi)
		intel_sdvo_add_hdmi_properties(intel_sdvo_connector);

	return true;
}

/*
 * Create and register the connector for a TV output (SVID or CVBS);
 * also creates the TV format and enhancement properties.
 */
static bool
intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
{
	struct drm_encoder *encoder = &intel_sdvo->base.base;
	struct drm_connector *connector;
	struct intel_connector *intel_connector;
	struct intel_sdvo_connector *intel_sdvo_connector;

	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
	if (!intel_sdvo_connector)
		return false;

	intel_connector = &intel_sdvo_connector->base;
	connector = &intel_connector->base;
	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;

	intel_sdvo->controlled_output |= type;
	intel_sdvo_connector->output_flag = type;

	intel_sdvo->is_tv = true;
	intel_sdvo->base.needs_tv_clock = true;
	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;

	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);

	if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
		goto err;

	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
		goto err;

	return true;

err:
	intel_sdvo_destroy(connector);
	return false;
}

/*
 * Create and register the connector for an analog (VGA) output.
 * 'device' selects RGB0 or RGB1.
 */
static bool
intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
{
	struct drm_encoder *encoder = &intel_sdvo->base.base;
	struct drm_connector *connector;
	struct intel_connector *intel_connector;
	struct intel_sdvo_connector *intel_sdvo_connector;

	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
	if (!intel_sdvo_connector)
		return false;

	intel_connector = &intel_sdvo_connector->base;
	connector = &intel_connector->base;
	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
	connector->connector_type = DRM_MODE_CONNECTOR_VGA;

	if (device == 0) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
	} else if (device == 1) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
	}

	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
				       (1 << INTEL_ANALOG_CLONE_BIT));

	intel_sdvo_connector_init(intel_sdvo_connector,
				  intel_sdvo);
	return true;
}

/*
 * Create and register the connector for an LVDS output; also creates
 * the enhancement properties.  'device' selects LVDS0 or LVDS1.
 */
static bool
intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
{
	struct drm_encoder *encoder = &intel_sdvo->base.base;
	struct drm_connector *connector;
	struct intel_connector *intel_connector;
	struct intel_sdvo_connector *intel_sdvo_connector;

	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
	if (!intel_sdvo_connector)
		return false;

	intel_connector = &intel_sdvo_connector->base;
	connector = &intel_connector->base;
	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;

	if (device == 0) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
	} else if (device == 1) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
		intel_sdvo_connector->output_flag =
			SDVO_OUTPUT_LVDS1;
	}

	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));

	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
		goto err;

	return true;

err:
	intel_sdvo_destroy(connector);
	return false;
}

/*
 * Create connectors for every output type advertised in the device's
 * capability flags.  Returns false on any init failure or when no
 * output flag is recognised.
 */
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
{
	intel_sdvo->is_tv = false;
	intel_sdvo->base.needs_tv_clock = false;
	intel_sdvo->is_lvds = false;

	/* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/

	if (flags & SDVO_OUTPUT_TMDS0)
		if (!intel_sdvo_dvi_init(intel_sdvo, 0))
			return false;

	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
		if (!intel_sdvo_dvi_init(intel_sdvo, 1))
			return false;

	/* TV has no XXX1 function block */
	if (flags & SDVO_OUTPUT_SVID0)
		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
			return false;

	if (flags & SDVO_OUTPUT_CVBS0)
		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
			return false;

	if (flags & SDVO_OUTPUT_RGB0)
		if (!intel_sdvo_analog_init(intel_sdvo, 0))
			return false;

	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
		if (!intel_sdvo_analog_init(intel_sdvo, 1))
			return false;

	if (flags & SDVO_OUTPUT_LVDS0)
		if (!intel_sdvo_lvds_init(intel_sdvo, 0))
			return false;

	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
		if (!intel_sdvo_lvds_init(intel_sdvo, 1))
			return false;

	if ((flags & SDVO_OUTPUT_MASK) == 0) {
		unsigned char bytes[2];

		intel_sdvo->controlled_output = 0;
		memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
			      SDVO_NAME(intel_sdvo),
			      bytes[0], bytes[1]);
		return false;
	}
	intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

	return true;
}

/*
 * Query the device for its supported TV formats and expose them as the
 * connector's "mode" enum property.
 */
static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
					  struct intel_sdvo_connector *intel_sdvo_connector,
					  int type)
{
	struct drm_device *dev = intel_sdvo->base.base.dev;
	struct intel_sdvo_tv_format format;
	uint32_t format_map, i;

	if (!intel_sdvo_set_target_output(intel_sdvo, type))
		return false;

	BUILD_BUG_ON(sizeof(format) != 6);
	if (!intel_sdvo_get_value(intel_sdvo,
				  SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
				  &format, sizeof(format)))
		return false;

	/* Only the low 32 format bits are considered. */
	memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));

	if (format_map == 0)
		return false;

	intel_sdvo_connector->format_supported_num = 0;
	for (i = 0 ; i < TV_FORMAT_NUM; i++)
		if (format_map & (1 << i))
			intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;


	intel_sdvo_connector->tv_format =
			drm_property_create(dev, DRM_MODE_PROP_ENUM,
					    "mode", intel_sdvo_connector->format_supported_num);
	if (!intel_sdvo_connector->tv_format)
		return false;

	for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
		drm_property_add_enum(
				intel_sdvo_connector->tv_format, i,
				i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);

	intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
	drm_connector_attach_property(&intel_sdvo_connector->base.base,
				      intel_sdvo_connector->tv_format, 0);
	return true;

}

/* Create one scalar enhancement property: query its max and current
 * value from the device, build a 0..max range property and attach it.
 * Bails out of the calling function with false on any query failure.
 */
#define ENHANCEMENT(name, NAME) do { \
	if (enhancements.name) { \
		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
		    !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
			return false; \
		intel_sdvo_connector->max_##name = data_value[0]; \
		intel_sdvo_connector->cur_##name = response; \
		intel_sdvo_connector->name = \
			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
		if (!intel_sdvo_connector->name) return false; \
		drm_connector_attach_property(connector, \
					      intel_sdvo_connector->name, \
					      intel_sdvo_connector->cur_##name); \
		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
			      data_value[0], data_value[1], response); \
	} \
} while (0)

/*
 * Create all TV enhancement properties advertised by the device:
 * symmetric overscan margins, the ENHANCEMENT() scalar controls, and
 * the boolean dot-crawl toggle.
 */
static bool
intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
				      struct intel_sdvo_connector *intel_sdvo_connector,
				      struct intel_sdvo_enhancements_reply enhancements)
{
	struct drm_device *dev = intel_sdvo->base.base.dev;
	struct drm_connector *connector = &intel_sdvo_connector->base.base;
	uint16_t response, data_value[2];

	/* when horizontal overscan is supported, Add the left/right  property */
	if (enhancements.overscan_h) {
		if (!intel_sdvo_get_value(intel_sdvo,
					  SDVO_CMD_GET_MAX_OVERSCAN_H,
					  &data_value, 4))
			return false;

		if (!intel_sdvo_get_value(intel_sdvo,
					  SDVO_CMD_GET_OVERSCAN_H,
					  &response, 2))
			return false;

		intel_sdvo_connector->max_hscan = data_value[0];
		intel_sdvo_connector->left_margin = data_value[0] - response;
		intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
		intel_sdvo_connector->left =
			drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
		if (!intel_sdvo_connector->left)
			return false;

		drm_connector_attach_property(connector,
					      intel_sdvo_connector->left,
					      intel_sdvo_connector->left_margin);

		intel_sdvo_connector->right =
			drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
		if (!intel_sdvo_connector->right)
			return false;

		drm_connector_attach_property(connector,
					      intel_sdvo_connector->right,
					      intel_sdvo_connector->right_margin);
		DRM_DEBUG_KMS("h_overscan: max %d, "
			      "default %d, current %d\n",
			      data_value[0], data_value[1], response);
	}

	if (enhancements.overscan_v) {
		if (!intel_sdvo_get_value(intel_sdvo,
					  SDVO_CMD_GET_MAX_OVERSCAN_V,
					  &data_value, 4))
			return false;

		if (!intel_sdvo_get_value(intel_sdvo,
					  SDVO_CMD_GET_OVERSCAN_V,
					  &response, 2))
			return false;

		intel_sdvo_connector->max_vscan = data_value[0];
		intel_sdvo_connector->top_margin = data_value[0] - response;
		intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
		intel_sdvo_connector->top =
			drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]);
		if (!intel_sdvo_connector->top)
			return false;

		drm_connector_attach_property(connector,
					      intel_sdvo_connector->top,
					      intel_sdvo_connector->top_margin);

		intel_sdvo_connector->bottom =
			drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]);
		if
			     SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
			     &enhancements, sizeof(enhancements));
	if (enhancements.response == 0) {
		DRM_DEBUG_KMS("No enhancement is supported\n");
		return true;
	}

	if (IS_TV(intel_sdvo_connector))
		return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
	else if (IS_LVDS(intel_sdvo_connector))
		return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
	else
		return true;
}

/*
 * DDC proxy i2c transfer: switch the device's shared control bus to the
 * selected DDC bus, then forward the transaction to the real adapter.
 */
static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
				     struct i2c_msg *msgs,
				     int num)
{
	struct intel_sdvo *sdvo = adapter->algo_data;

	if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
		return -EIO;

	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
}

/* Proxy functionality query straight through to the real adapter. */
static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
{
	struct intel_sdvo *sdvo = adapter->algo_data;
	return sdvo->i2c->algo->functionality(sdvo->i2c);
}

static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
	.master_xfer	= intel_sdvo_ddc_proxy_xfer,
	.functionality	= intel_sdvo_ddc_proxy_func
};

/* Register the proxy i2c adapter used for monitor DDC on this SDVO
 * device.  Returns true on success.
 */
static bool
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
			  struct drm_device *dev)
{
	sdvo->ddc.owner = THIS_MODULE;
	sdvo->ddc.class = I2C_CLASS_DDC;
	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
	sdvo->ddc.dev.parent = &dev->pdev->dev;
	sdvo->ddc.algo_data = sdvo;
	sdvo->ddc.algo = &intel_sdvo_ddc_proxy;

	return i2c_add_adapter(&sdvo->ddc) == 0;
}

/*
 * Probe and initialise the SDVO encoder behind 'sdvo_reg': pick the i2c
 * bus and slave address, verify the device responds, read its
 * capabilities and create connectors for each advertised output.
 * Returns false (after cleaning up) when no working device is found.
 */
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	struct intel_sdvo *intel_sdvo;
	int i;

	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
	if (!intel_sdvo)
		return false;

	intel_sdvo->sdvo_reg = sdvo_reg;
	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
		kfree(intel_sdvo);
		return false;
	}

	/* encoder type will be decided later */
	intel_encoder = &intel_sdvo->base;
	intel_encoder->type = INTEL_OUTPUT_SDVO;
	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);

	/* Read the regs to test if we can talk to the device */
	for (i = 0; i < 0x40; i++) {
		u8 byte;

		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
			goto err;
		}
	}

	if (IS_SDVOB(sdvo_reg))
		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
	else
		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;

	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);

	/* In default case sdvo lvds is false */
	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
		goto err;

	/* Set up hotplug command - note paranoia about contents of reply.
	 * We assume that the hardware is in a sane state, and only touch
	 * the bits we think we understand.
	 */
	intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
			     &intel_sdvo->hotplug_active, 2);
	intel_sdvo->hotplug_active[0] &= ~0x3;

	/* NOTE(review): connectors created by a partially successful
	 * output_setup are not torn down on this error path — verify
	 * against later upstream fixes.
	 */
	if (intel_sdvo_output_setup(intel_sdvo,
				    intel_sdvo->caps.output_flags) != true) {
		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
		goto err;
	}

	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);

	/* Set the input timing to the screen. Assume always input 0. */
	if (!intel_sdvo_set_target_input(intel_sdvo))
		goto err;

	if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
						    &intel_sdvo->pixel_clock_min,
						    &intel_sdvo->pixel_clock_max))
		goto err;

	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
			"clock range %dMHz - %dMHz, "
			"input 1: %c, input 2: %c, "
			"output 1: %c, output 2: %c\n",
			SDVO_NAME(intel_sdvo),
			intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
			intel_sdvo->caps.device_rev_id,
			intel_sdvo->pixel_clock_min / 1000,
			intel_sdvo->pixel_clock_max / 1000,
			(intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
			(intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
			/* check currently supported outputs */
			intel_sdvo->caps.output_flags &
			(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
			intel_sdvo->caps.output_flags &
			(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
	return true;

err:
	drm_encoder_cleanup(&intel_encoder->base);
	i2c_del_adapter(&intel_sdvo->ddc);
	kfree(intel_sdvo);

	return false;
}
gpl-2.0
Fusion-Devices/android_kernel_lge_mako
drivers/video/omap2/dss/display.c
4784
13131
/* * linux/drivers/video/omap2/dss/display.c * * Copyright (C) 2009 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * Some code and ideas taken from drivers/video/omap/ driver * by Imre Deak. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #define DSS_SUBSYS_NAME "DISPLAY" #include <linux/kernel.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <video/omapdss.h> #include "dss.h" #include "dss_features.h" static ssize_t display_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); bool enabled = dssdev->state != OMAP_DSS_DISPLAY_DISABLED; return snprintf(buf, PAGE_SIZE, "%d\n", enabled); } static ssize_t display_enabled_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); int r; bool enabled; r = strtobool(buf, &enabled); if (r) return r; if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) { if (enabled) { r = dssdev->driver->enable(dssdev); if (r) return r; } else { dssdev->driver->disable(dssdev); } } return size; } static ssize_t display_tear_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); return snprintf(buf, PAGE_SIZE, "%d\n", dssdev->driver->get_te ? 
dssdev->driver->get_te(dssdev) : 0); } static ssize_t display_tear_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); int r; bool te; if (!dssdev->driver->enable_te || !dssdev->driver->get_te) return -ENOENT; r = strtobool(buf, &te); if (r) return r; r = dssdev->driver->enable_te(dssdev, te); if (r) return r; return size; } static ssize_t display_timings_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct omap_video_timings t; if (!dssdev->driver->get_timings) return -ENOENT; dssdev->driver->get_timings(dssdev, &t); return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n", t.pixel_clock, t.x_res, t.hfp, t.hbp, t.hsw, t.y_res, t.vfp, t.vbp, t.vsw); } static ssize_t display_timings_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); struct omap_video_timings t; int r, found; if (!dssdev->driver->set_timings || !dssdev->driver->check_timings) return -ENOENT; found = 0; #ifdef CONFIG_OMAP2_DSS_VENC if (strncmp("pal", buf, 3) == 0) { t = omap_dss_pal_timings; found = 1; } else if (strncmp("ntsc", buf, 4) == 0) { t = omap_dss_ntsc_timings; found = 1; } #endif if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu", &t.pixel_clock, &t.x_res, &t.hfp, &t.hbp, &t.hsw, &t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9) return -EINVAL; r = dssdev->driver->check_timings(dssdev, &t); if (r) return r; dssdev->driver->set_timings(dssdev, &t); return size; } static ssize_t display_rotate_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); int rotate; if (!dssdev->driver->get_rotate) return -ENOENT; rotate = dssdev->driver->get_rotate(dssdev); return snprintf(buf, PAGE_SIZE, "%u\n", rotate); } static ssize_t display_rotate_store(struct device *dev, struct 
device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); int rot, r; if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) return -ENOENT; r = kstrtoint(buf, 0, &rot); if (r) return r; r = dssdev->driver->set_rotate(dssdev, rot); if (r) return r; return size; } static ssize_t display_mirror_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); int mirror; if (!dssdev->driver->get_mirror) return -ENOENT; mirror = dssdev->driver->get_mirror(dssdev); return snprintf(buf, PAGE_SIZE, "%u\n", mirror); } static ssize_t display_mirror_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); int r; bool mirror; if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror) return -ENOENT; r = strtobool(buf, &mirror); if (r) return r; r = dssdev->driver->set_mirror(dssdev, mirror); if (r) return r; return size; } static ssize_t display_wss_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); unsigned int wss; if (!dssdev->driver->get_wss) return -ENOENT; wss = dssdev->driver->get_wss(dssdev); return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); } static ssize_t display_wss_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); u32 wss; int r; if (!dssdev->driver->get_wss || !dssdev->driver->set_wss) return -ENOENT; r = kstrtou32(buf, 0, &wss); if (r) return r; if (wss > 0xfffff) return -EINVAL; r = dssdev->driver->set_wss(dssdev, wss); if (r) return r; return size; } static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, display_enabled_show, display_enabled_store); static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, display_tear_show, display_tear_store); static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, display_timings_show, 
display_timings_store); static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR, display_rotate_show, display_rotate_store); static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR, display_mirror_show, display_mirror_store); static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, display_wss_show, display_wss_store); static struct device_attribute *display_sysfs_attrs[] = { &dev_attr_enabled, &dev_attr_tear_elim, &dev_attr_timings, &dev_attr_rotate, &dev_attr_mirror, &dev_attr_wss, NULL }; void omapdss_default_get_resolution(struct omap_dss_device *dssdev, u16 *xres, u16 *yres) { *xres = dssdev->panel.timings.x_res; *yres = dssdev->panel.timings.y_res; } EXPORT_SYMBOL(omapdss_default_get_resolution); int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev) { switch (dssdev->type) { case OMAP_DISPLAY_TYPE_DPI: if (dssdev->phy.dpi.data_lines == 24) return 24; else return 16; case OMAP_DISPLAY_TYPE_DBI: if (dssdev->ctrl.pixel_size == 24) return 24; else return 16; case OMAP_DISPLAY_TYPE_DSI: if (dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) > 16) return 24; else return 16; case OMAP_DISPLAY_TYPE_VENC: case OMAP_DISPLAY_TYPE_SDI: case OMAP_DISPLAY_TYPE_HDMI: return 24; default: BUG(); } } EXPORT_SYMBOL(omapdss_default_get_recommended_bpp); /* Checks if replication logic should be used. 
Only use for active matrix, * when overlay is in RGB12U or RGB16 mode, and LCD interface is * 18bpp or 24bpp */ bool dss_use_replication(struct omap_dss_device *dssdev, enum omap_color_mode mode) { int bpp; if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16) return false; if (dssdev->type == OMAP_DISPLAY_TYPE_DPI && (dssdev->panel.config & OMAP_DSS_LCD_TFT) == 0) return false; switch (dssdev->type) { case OMAP_DISPLAY_TYPE_DPI: bpp = dssdev->phy.dpi.data_lines; break; case OMAP_DISPLAY_TYPE_HDMI: case OMAP_DISPLAY_TYPE_VENC: case OMAP_DISPLAY_TYPE_SDI: bpp = 24; break; case OMAP_DISPLAY_TYPE_DBI: bpp = dssdev->ctrl.pixel_size; break; case OMAP_DISPLAY_TYPE_DSI: bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); break; default: BUG(); } return bpp > 16; } void dss_init_device(struct platform_device *pdev, struct omap_dss_device *dssdev) { struct device_attribute *attr; int i; int r; switch (dssdev->type) { #ifdef CONFIG_OMAP2_DSS_DPI case OMAP_DISPLAY_TYPE_DPI: r = dpi_init_display(dssdev); break; #endif #ifdef CONFIG_OMAP2_DSS_RFBI case OMAP_DISPLAY_TYPE_DBI: r = rfbi_init_display(dssdev); break; #endif #ifdef CONFIG_OMAP2_DSS_VENC case OMAP_DISPLAY_TYPE_VENC: r = venc_init_display(dssdev); break; #endif #ifdef CONFIG_OMAP2_DSS_SDI case OMAP_DISPLAY_TYPE_SDI: r = sdi_init_display(dssdev); break; #endif #ifdef CONFIG_OMAP2_DSS_DSI case OMAP_DISPLAY_TYPE_DSI: r = dsi_init_display(dssdev); break; #endif case OMAP_DISPLAY_TYPE_HDMI: r = hdmi_init_display(dssdev); break; default: DSSERR("Support for display '%s' not compiled in.\n", dssdev->name); return; } if (r) { DSSERR("failed to init display %s\n", dssdev->name); return; } /* create device sysfs files */ i = 0; while ((attr = display_sysfs_attrs[i++]) != NULL) { r = device_create_file(&dssdev->dev, attr); if (r) DSSERR("failed to create sysfs file\n"); } /* create display? 
sysfs links */ r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj, dev_name(&dssdev->dev)); if (r) DSSERR("failed to create sysfs display link\n"); } void dss_uninit_device(struct platform_device *pdev, struct omap_dss_device *dssdev) { struct device_attribute *attr; int i = 0; sysfs_remove_link(&pdev->dev.kobj, dev_name(&dssdev->dev)); while ((attr = display_sysfs_attrs[i++]) != NULL) device_remove_file(&dssdev->dev, attr); if (dssdev->manager) dssdev->manager->unset_device(dssdev->manager); } static int dss_suspend_device(struct device *dev, void *data) { int r; struct omap_dss_device *dssdev = to_dss_device(dev); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { dssdev->activate_after_resume = false; return 0; } if (!dssdev->driver->suspend) { DSSERR("display '%s' doesn't implement suspend\n", dssdev->name); return -ENOSYS; } r = dssdev->driver->suspend(dssdev); if (r) return r; dssdev->activate_after_resume = true; return 0; } int dss_suspend_all_devices(void) { int r; struct bus_type *bus = dss_get_bus(); r = bus_for_each_dev(bus, NULL, NULL, dss_suspend_device); if (r) { /* resume all displays that were suspended */ dss_resume_all_devices(); return r; } return 0; } static int dss_resume_device(struct device *dev, void *data) { int r; struct omap_dss_device *dssdev = to_dss_device(dev); if (dssdev->activate_after_resume && dssdev->driver->resume) { r = dssdev->driver->resume(dssdev); if (r) return r; } dssdev->activate_after_resume = false; return 0; } int dss_resume_all_devices(void) { struct bus_type *bus = dss_get_bus(); return bus_for_each_dev(bus, NULL, NULL, dss_resume_device); } static int dss_disable_device(struct device *dev, void *data) { struct omap_dss_device *dssdev = to_dss_device(dev); if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) dssdev->driver->disable(dssdev); return 0; } void dss_disable_all_devices(void) { struct bus_type *bus = dss_get_bus(); bus_for_each_dev(bus, NULL, NULL, dss_disable_device); } void omap_dss_get_device(struct 
omap_dss_device *dssdev) { get_device(&dssdev->dev); } EXPORT_SYMBOL(omap_dss_get_device); void omap_dss_put_device(struct omap_dss_device *dssdev) { put_device(&dssdev->dev); } EXPORT_SYMBOL(omap_dss_put_device); /* ref count of the found device is incremented. ref count * of from-device is decremented. */ struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from) { struct device *dev; struct device *dev_start = NULL; struct omap_dss_device *dssdev = NULL; int match(struct device *dev, void *data) { return 1; } if (from) dev_start = &from->dev; dev = bus_find_device(dss_get_bus(), dev_start, NULL, match); if (dev) dssdev = to_dss_device(dev); if (from) put_device(&from->dev); return dssdev; } EXPORT_SYMBOL(omap_dss_get_next_device); struct omap_dss_device *omap_dss_find_device(void *data, int (*match)(struct omap_dss_device *dssdev, void *data)) { struct omap_dss_device *dssdev = NULL; while ((dssdev = omap_dss_get_next_device(dssdev)) != NULL) { if (match(dssdev, data)) return dssdev; } return NULL; } EXPORT_SYMBOL(omap_dss_find_device); int omap_dss_start_device(struct omap_dss_device *dssdev) { if (!dssdev->driver) { DSSDBG("no driver\n"); return -ENODEV; } if (!try_module_get(dssdev->dev.driver->owner)) { return -ENODEV; } return 0; } EXPORT_SYMBOL(omap_dss_start_device); void omap_dss_stop_device(struct omap_dss_device *dssdev) { module_put(dssdev->dev.driver->owner); } EXPORT_SYMBOL(omap_dss_stop_device);
gpl-2.0
AOSParadox/android_kernel_motorola_msm8226
arch/arm/mach-shmobile/setup-sh7377.c
4784
10886
/* * sh7377 processor support * * Copyright (C) 2010 Magnus Damm * Copyright (C) 2008 Yoshihiro Shimoda * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/uio_driver.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/io.h> #include <linux/serial_sci.h> #include <linux/sh_intc.h> #include <linux/sh_timer.h> #include <mach/hardware.h> #include <mach/common.h> #include <asm/mach/map.h> #include <mach/irqs.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> static struct map_desc sh7377_io_desc[] __initdata = { /* create a 1:1 entity map for 0xe6xxxxxx * used by CPGA, INTC and PFC. 
*/ { .virtual = 0xe6000000, .pfn = __phys_to_pfn(0xe6000000), .length = 256 << 20, .type = MT_DEVICE_NONSHARED }, }; void __init sh7377_map_io(void) { iotable_init(sh7377_io_desc, ARRAY_SIZE(sh7377_io_desc)); } /* SCIFA0 */ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xe6c40000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0xc00), evt2irq(0xc00), evt2irq(0xc00), evt2irq(0xc00) }, }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; /* SCIFA1 */ static struct plat_sci_port scif1_platform_data = { .mapbase = 0xe6c50000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0xc20), evt2irq(0xc20), evt2irq(0xc20), evt2irq(0xc20) }, }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; /* SCIFA2 */ static struct plat_sci_port scif2_platform_data = { .mapbase = 0xe6c60000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0xc40), evt2irq(0xc40), evt2irq(0xc40), evt2irq(0xc40) }, }; static struct platform_device scif2_device = { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; /* SCIFA3 */ static struct plat_sci_port scif3_platform_data = { .mapbase = 0xe6c70000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0xc60), evt2irq(0xc60), evt2irq(0xc60), evt2irq(0xc60) }, }; static struct platform_device scif3_device = { .name = "sh-sci", .id = 3, .dev = { .platform_data = &scif3_platform_data, }, }; /* SCIFA4 */ static struct plat_sci_port scif4_platform_data = { .mapbase = 0xe6c80000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = 
SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0xd20), evt2irq(0xd20), evt2irq(0xd20), evt2irq(0xd20) }, }; static struct platform_device scif4_device = { .name = "sh-sci", .id = 4, .dev = { .platform_data = &scif4_platform_data, }, }; /* SCIFA5 */ static struct plat_sci_port scif5_platform_data = { .mapbase = 0xe6cb0000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { evt2irq(0xd40), evt2irq(0xd40), evt2irq(0xd40), evt2irq(0xd40) }, }; static struct platform_device scif5_device = { .name = "sh-sci", .id = 5, .dev = { .platform_data = &scif5_platform_data, }, }; /* SCIFA6 */ static struct plat_sci_port scif6_platform_data = { .mapbase = 0xe6cc0000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFA, .irqs = { intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80) }, }; static struct platform_device scif6_device = { .name = "sh-sci", .id = 6, .dev = { .platform_data = &scif6_platform_data, }, }; /* SCIFB */ static struct plat_sci_port scif7_platform_data = { .mapbase = 0xe6c30000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, .type = PORT_SCIFB, .irqs = { evt2irq(0xd60), evt2irq(0xd60), evt2irq(0xd60), evt2irq(0xd60) }, }; static struct platform_device scif7_device = { .name = "sh-sci", .id = 7, .dev = { .platform_data = &scif7_platform_data, }, }; static struct sh_timer_config cmt10_platform_data = { .name = "CMT10", .channel_offset = 0x10, .timer_bit = 0, .clockevent_rating = 125, .clocksource_rating = 125, }; static struct resource cmt10_resources[] = { [0] = { .name = "CMT10", .start = 0xe6138010, .end = 0xe613801b, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xb00), /* CMT1_CMT10 */ .flags = IORESOURCE_IRQ, }, }; static struct platform_device cmt10_device = { .name = "sh_cmt", .id = 10, .dev = { .platform_data = &cmt10_platform_data, }, 
.resource = cmt10_resources, .num_resources = ARRAY_SIZE(cmt10_resources), }; /* VPU */ static struct uio_info vpu_platform_data = { .name = "VPU5HG", .version = "0", .irq = intcs_evt2irq(0x980), }; static struct resource vpu_resources[] = { [0] = { .name = "VPU", .start = 0xfe900000, .end = 0xfe900157, .flags = IORESOURCE_MEM, }, }; static struct platform_device vpu_device = { .name = "uio_pdrv_genirq", .id = 0, .dev = { .platform_data = &vpu_platform_data, }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), }; /* VEU0 */ static struct uio_info veu0_platform_data = { .name = "VEU0", .version = "0", .irq = intcs_evt2irq(0x700), }; static struct resource veu0_resources[] = { [0] = { .name = "VEU0", .start = 0xfe920000, .end = 0xfe9200cb, .flags = IORESOURCE_MEM, }, }; static struct platform_device veu0_device = { .name = "uio_pdrv_genirq", .id = 1, .dev = { .platform_data = &veu0_platform_data, }, .resource = veu0_resources, .num_resources = ARRAY_SIZE(veu0_resources), }; /* VEU1 */ static struct uio_info veu1_platform_data = { .name = "VEU1", .version = "0", .irq = intcs_evt2irq(0x720), }; static struct resource veu1_resources[] = { [0] = { .name = "VEU1", .start = 0xfe924000, .end = 0xfe9240cb, .flags = IORESOURCE_MEM, }, }; static struct platform_device veu1_device = { .name = "uio_pdrv_genirq", .id = 2, .dev = { .platform_data = &veu1_platform_data, }, .resource = veu1_resources, .num_resources = ARRAY_SIZE(veu1_resources), }; /* VEU2 */ static struct uio_info veu2_platform_data = { .name = "VEU2", .version = "0", .irq = intcs_evt2irq(0x740), }; static struct resource veu2_resources[] = { [0] = { .name = "VEU2", .start = 0xfe928000, .end = 0xfe928307, .flags = IORESOURCE_MEM, }, }; static struct platform_device veu2_device = { .name = "uio_pdrv_genirq", .id = 3, .dev = { .platform_data = &veu2_platform_data, }, .resource = veu2_resources, .num_resources = ARRAY_SIZE(veu2_resources), }; /* VEU3 */ static struct uio_info veu3_platform_data = 
{ .name = "VEU3", .version = "0", .irq = intcs_evt2irq(0x760), }; static struct resource veu3_resources[] = { [0] = { .name = "VEU3", .start = 0xfe92c000, .end = 0xfe92c307, .flags = IORESOURCE_MEM, }, }; static struct platform_device veu3_device = { .name = "uio_pdrv_genirq", .id = 4, .dev = { .platform_data = &veu3_platform_data, }, .resource = veu3_resources, .num_resources = ARRAY_SIZE(veu3_resources), }; /* JPU */ static struct uio_info jpu_platform_data = { .name = "JPU", .version = "0", .irq = intcs_evt2irq(0x560), }; static struct resource jpu_resources[] = { [0] = { .name = "JPU", .start = 0xfe980000, .end = 0xfe9902d3, .flags = IORESOURCE_MEM, }, }; static struct platform_device jpu_device = { .name = "uio_pdrv_genirq", .id = 5, .dev = { .platform_data = &jpu_platform_data, }, .resource = jpu_resources, .num_resources = ARRAY_SIZE(jpu_resources), }; /* SPU2DSP0 */ static struct uio_info spu0_platform_data = { .name = "SPU2DSP0", .version = "0", .irq = evt2irq(0x1800), }; static struct resource spu0_resources[] = { [0] = { .name = "SPU2DSP0", .start = 0xfe200000, .end = 0xfe2fffff, .flags = IORESOURCE_MEM, }, }; static struct platform_device spu0_device = { .name = "uio_pdrv_genirq", .id = 6, .dev = { .platform_data = &spu0_platform_data, }, .resource = spu0_resources, .num_resources = ARRAY_SIZE(spu0_resources), }; /* SPU2DSP1 */ static struct uio_info spu1_platform_data = { .name = "SPU2DSP1", .version = "0", .irq = evt2irq(0x1820), }; static struct resource spu1_resources[] = { [0] = { .name = "SPU2DSP1", .start = 0xfe300000, .end = 0xfe3fffff, .flags = IORESOURCE_MEM, }, }; static struct platform_device spu1_device = { .name = "uio_pdrv_genirq", .id = 7, .dev = { .platform_data = &spu1_platform_data, }, .resource = spu1_resources, .num_resources = ARRAY_SIZE(spu1_resources), }; static struct platform_device *sh7377_early_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &scif4_device, &scif5_device, &scif6_device, 
&scif7_device, &cmt10_device, }; static struct platform_device *sh7377_devices[] __initdata = { &vpu_device, &veu0_device, &veu1_device, &veu2_device, &veu3_device, &jpu_device, &spu0_device, &spu1_device, }; void __init sh7377_add_standard_devices(void) { platform_add_devices(sh7377_early_devices, ARRAY_SIZE(sh7377_early_devices)); platform_add_devices(sh7377_devices, ARRAY_SIZE(sh7377_devices)); } static void __init sh7377_earlytimer_init(void) { sh7377_clock_init(); shmobile_earlytimer_init(); } #define SMSTPCR3 0xe615013c #define SMSTPCR3_CMT1 (1 << 29) void __init sh7377_add_early_devices(void) { /* enable clock to CMT1 */ __raw_writel(__raw_readl(SMSTPCR3) & ~SMSTPCR3_CMT1, SMSTPCR3); early_platform_add_devices(sh7377_early_devices, ARRAY_SIZE(sh7377_early_devices)); /* setup early console here as well */ shmobile_setup_console(); /* override timer setup with soc-specific code */ shmobile_timer.init = sh7377_earlytimer_init; }
gpl-2.0
RenderBroken/msm8974_motox2014_render_kernel
drivers/scsi/lpfc/lpfc_hbadisc.c
4784
182773
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. 
* *******************************************************************/ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" /* AlpaArray for assignment of scsid for scan-down and bind_method */ static uint8_t lpfcAlpaArray[] = { 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6, 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA, 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97, 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79, 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 }; static void lpfc_disc_timeout_handler(struct lpfc_vport *); static void lpfc_disc_flush_list(struct lpfc_vport *vport); static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); static int lpfc_fcf_inuse(struct lpfc_hba *); void lpfc_terminate_rport_io(struct fc_rport *rport) { struct lpfc_rport_data *rdata; struct lpfc_nodelist * ndlp; struct lpfc_hba *phba; rdata = rport->dd_data; ndlp = rdata->pnode; if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) printk(KERN_ERR 
"Cannot find remote node" " to terminate I/O Data x%x\n", rport->port_id); return; } phba = ndlp->phba; lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, "rport terminate: sid:x%x did:x%x flg:x%x", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); if (ndlp->nlp_sid != NLP_NO_SID) { lpfc_sli_abort_iocb(ndlp->vport, &phba->sli.ring[phba->sli.fcp_ring], ndlp->nlp_sid, 0, LPFC_CTX_TGT); } } /* * This function will be called when dev_loss_tmo fire. */ void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) { struct lpfc_rport_data *rdata; struct lpfc_nodelist * ndlp; struct lpfc_vport *vport; struct lpfc_hba *phba; struct lpfc_work_evt *evtp; int put_node; int put_rport; rdata = rport->dd_data; ndlp = rdata->pnode; if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) return; vport = ndlp->vport; phba = vport->phba; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport devlosscb: sid:x%x did:x%x flg:x%x", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); /* Don't defer this if we are in the process of deleting the vport * or unloading the driver. The unload will cleanup the node * appropriately we just need to cleanup the ndlp rport info here. */ if (vport->load_flag & FC_UNLOADING) { put_node = rdata->pnode != NULL; put_rport = ndlp->rport != NULL; rdata->pnode = NULL; ndlp->rport = NULL; if (put_node) lpfc_nlp_put(ndlp); if (put_rport) put_device(&rport->dev); return; } if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) return; evtp = &ndlp->dev_loss_evt; if (!list_empty(&evtp->evt_listp)) return; spin_lock_irq(&phba->hbalock); /* We need to hold the node by incrementing the reference * count until this queued work is done */ evtp->evt_arg1 = lpfc_nlp_get(ndlp); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); lpfc_worker_wake_up(phba); } spin_unlock_irq(&phba->hbalock); return; } /** * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler * @ndlp: Pointer to remote node object. 
 *
 * This function is called from the worker thread when devloss timeout timer
 * expires. For SLI4 host, this routine shall return 1 when at least one
 * remote node, including this @ndlp, is still in use of FCF; otherwise, this
 * routine shall return 0 when there is no remote node still in use of FCF
 * when devloss timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
    struct lpfc_rport_data *rdata;
    struct fc_rport *rport;
    struct lpfc_vport *vport;
    struct lpfc_hba *phba;
    uint8_t *name;
    int put_node;
    int put_rport;
    int warn_on = 0;
    int fcf_inuse = 0;

    rport = ndlp->rport;

    /* No rport attached: nothing to tear down, FCF cannot be in use here */
    if (!rport)
        return fcf_inuse;

    rdata = rport->dd_data;
    name = (uint8_t *) &ndlp->nlp_portname;
    vport = ndlp->vport;
    phba = vport->phba;

    /* Only SLI4 tracks FCF usage; SLI3 always reports 0 */
    if (phba->sli_rev == LPFC_SLI_REV4)
        fcf_inuse = lpfc_fcf_inuse(phba);

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
        "rport devlosstmo:did:x%x type:x%x id:x%x",
        ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

    /* Don't defer this if we are in the process of deleting the vport
     * or unloading the driver. The unload will cleanup the node
     * appropriately we just need to cleanup the ndlp rport info here.
     */
    if (vport->load_flag & FC_UNLOADING) {
        if (ndlp->nlp_sid != NLP_NO_SID) {
            /* flush the target */
            lpfc_sli_abort_iocb(vport,
                    &phba->sli.ring[phba->sli.fcp_ring],
                    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
        /* Break the ndlp<->rport linkage and drop the references
         * each side held on the other.
         */
        put_node = rdata->pnode != NULL;
        put_rport = ndlp->rport != NULL;
        rdata->pnode = NULL;
        ndlp->rport = NULL;
        if (put_node)
            lpfc_nlp_put(ndlp);
        if (put_rport)
            put_device(&rport->dev);
        return fcf_inuse;
    }

    /* Node is still mapped (I/O capable): ignore the devloss timeout */
    if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                 "0284 Devloss timeout Ignored on "
                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                 "NPort x%x\n",
                 *name, *(name+1), *(name+2), *(name+3),
                 *(name+4), *(name+5), *(name+6), *(name+7),
                 ndlp->nlp_DID);
        return fcf_inuse;
    }

    if (ndlp->nlp_type & NLP_FABRIC) {
        /* We will clean up these Nodes in linkup */
        put_node = rdata->pnode != NULL;
        put_rport = ndlp->rport != NULL;
        rdata->pnode = NULL;
        ndlp->rport = NULL;
        if (put_node)
            lpfc_nlp_put(ndlp);
        if (put_rport)
            put_device(&rport->dev);
        return fcf_inuse;
    }

    if (ndlp->nlp_sid != NLP_NO_SID) {
        /* Node had a SCSI target id: warn loudly below */
        warn_on = 1;
        /* flush the target */
        lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
    }

    if (warn_on) {
        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                 "0203 Devloss timeout on "
                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                 "NPort x%06x Data: x%x x%x x%x\n",
                 *name, *(name+1), *(name+2), *(name+3),
                 *(name+4), *(name+5), *(name+6), *(name+7),
                 ndlp->nlp_DID, ndlp->nlp_flag,
                 ndlp->nlp_state, ndlp->nlp_rpi);
    } else {
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                 "0204 Devloss timeout on "
                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                 "NPort x%06x Data: x%x x%x x%x\n",
                 *name, *(name+1), *(name+2), *(name+3),
                 *(name+4), *(name+5), *(name+6), *(name+7),
                 ndlp->nlp_DID, ndlp->nlp_flag,
                 ndlp->nlp_state, ndlp->nlp_rpi);
    }

    /* Sever the ndlp<->rport linkage and drop the paired references */
    put_node = rdata->pnode != NULL;
    put_rport = ndlp->rport != NULL;
    rdata->pnode = NULL;
    ndlp->rport = NULL;
    if (put_node)
        lpfc_nlp_put(ndlp);
    if (put_rport)
        put_device(&rport->dev);

    /* Remove the node from the discovery state machine unless it is
     * unloading, delayed, queued for rediscovery, or in a state where
     * removal would race with an in-flight login.
     */
    if (!(vport->load_flag & FC_UNLOADING) &&
        !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
        !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
        (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
        (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
        (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
        lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

    return fcf_inuse;
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of FCF, when this
 * routine is invoked, it shall be guaranteed that none of the remote are
 * in-use of FCF. When devloss timeout to the last remote using the FCF,
 * if the FIP engine is neither in FCF table scan process nor roundrobin
 * failover process, the in-use FCF shall be unregistered. If the FIP
 * engine is in FCF discovery process, the devloss timeout state shall
 * be set for either the FCF table scan process or roundrobin failover
 * process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
                    uint32_t nlp_did)
{
    /* If devloss timeout happened to a remote node when FCF had no
     * longer been in-use, do nothing.
     */
    if (!fcf_inuse)
        return;

    if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
        spin_lock_irq(&phba->hbalock);
        if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
            /* Only record the devloss-tmo state once */
            if (phba->hba_flag & HBA_DEVLOSS_TMO) {
                spin_unlock_irq(&phba->hbalock);
                return;
            }
            phba->hba_flag |= HBA_DEVLOSS_TMO;
            lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                    "2847 Last remote node (x%x) using "
                    "FCF devloss tmo\n", nlp_did);
        }
        if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
            spin_unlock_irq(&phba->hbalock);
            lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                    "2868 Devloss tmo to FCF rediscovery "
                    "in progress\n");
            return;
        }
        if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
            spin_unlock_irq(&phba->hbalock);
            lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                    "2869 Devloss tmo to idle FIP engine, "
                    "unreg in-use FCF and rescan.\n");
            /* Unregister in-use FCF and rescan */
            lpfc_unregister_fcf_rescan(phba);
            return;
        }
        spin_unlock_irq(&phba->hbalock);
        if (phba->hba_flag & FCF_TS_INPROG)
            lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                    "2870 FCF table scan in progress\n");
        if (phba->hba_flag & FCF_RR_INPROG)
            lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                    "2871 FLOGI roundrobin FCF failover "
                    "in progress\n");
    }
    lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates data
 * structure required for posting event. It also keeps track of
 * number of events pending and prevent event storm when there are
 * too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba)
{
    struct lpfc_fast_path_event *ret;

    /* If there are lot of fast event do not exhaust memory due to this */
    if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
        return NULL;

    /* GFP_ATOMIC: callers may be in interrupt context */
    ret = kzalloc(sizeof(struct lpfc_fast_path_event),
            GFP_ATOMIC);
    if (ret) {
        atomic_inc(&phba->fast_event_count);
        INIT_LIST_HEAD(&ret->work_evt.evt_listp);
        ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
    }
    return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which need to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
        struct lpfc_fast_path_event *evt)
{
    /* Balance the counter incremented in lpfc_alloc_fast_evt() */
    atomic_dec(&phba->fast_event_count);
    kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from worker thread, when the interrupt
 * context need to post an event. This function posts the event
 * to fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
        struct lpfc_work_evt *evtp)
{
    unsigned long evt_category, evt_sub_category;
    struct lpfc_fast_path_event *fast_evt_data;
    char *evt_data;
    uint32_t evt_data_size;
    struct Scsi_Host *shost;

    fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
        work_evt);

    evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
    evt_sub_category = (unsigned long) fast_evt_data->un.
            fabric_evt.subcategory;
    shost = lpfc_shost_from_vport(fast_evt_data->vport);
    /* Pick the payload and its size based on the event taxonomy;
     * anything unrecognized is dropped (event freed, nothing posted).
     */
    if (evt_category == FC_REG_FABRIC_EVENT) {
        if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
            evt_data = (char *) &fast_evt_data->un.read_check_error;
            evt_data_size = sizeof(fast_evt_data->un.
                read_check_error);
        } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
            (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
            evt_data = (char *) &fast_evt_data->un.fabric_evt;
            evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
        } else {
            lpfc_free_fast_evt(phba, fast_evt_data);
            return;
        }
    } else if (evt_category == FC_REG_SCSI_EVENT) {
        switch (evt_sub_category) {
        case LPFC_EVENT_QFULL:
        case LPFC_EVENT_DEVBSY:
            evt_data = (char *) &fast_evt_data->un.scsi_evt;
            evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
            break;
        case LPFC_EVENT_CHECK_COND:
            evt_data = (char *) &fast_evt_data->un.check_cond_evt;
            evt_data_size = sizeof(fast_evt_data->un.
                check_cond_evt);
            break;
        case LPFC_EVENT_VARQUEDEPTH:
            evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
            evt_data_size = sizeof(fast_evt_data->un.
                queue_depth_evt);
            break;
        default:
            lpfc_free_fast_evt(phba, fast_evt_data);
            return;
        }
    } else {
        lpfc_free_fast_evt(phba, fast_evt_data);
        return;
    }

    fc_host_post_vendor_event(shost,
        fc_get_event_number(),
        evt_data_size,
        evt_data,
        LPFC_NL_VENDOR_ID);

    lpfc_free_fast_evt(phba, fast_evt_data);
    return;
}

/**
 * lpfc_work_list_done - Drain and dispatch the HBA worker event list
 * @phba: Pointer to hba context object.
 *
 * Removes every queued lpfc_work_evt from phba->work_list (under
 * hbalock, dropped around each handler) and dispatches on evt type.
 * Events embedded in other objects (ELS retry, dev-loss, fastpath)
 * are not kfree'd here; all others are.
 **/
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
    struct lpfc_work_evt *evtp = NULL;
    struct lpfc_nodelist *ndlp;
    int free_evt;
    int fcf_inuse;
    uint32_t nlp_did;

    spin_lock_irq(&phba->hbalock);
    while (!list_empty(&phba->work_list)) {
        list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                 evt_listp);
        /* Handlers may sleep / take other locks: drop hbalock */
        spin_unlock_irq(&phba->hbalock);
        free_evt = 1;
        switch (evtp->evt) {
        case LPFC_EVT_ELS_RETRY:
            ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
            lpfc_els_retry_delay_handler(ndlp);
            free_evt = 0; /* evt is part of ndlp */
            /* decrement the node reference count held
             * for this queued work
             */
            lpfc_nlp_put(ndlp);
            break;
        case LPFC_EVT_DEV_LOSS:
            ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
            fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
            free_evt = 0;
            /* decrement the node reference count held for
             * this queued work
             */
            /* Snapshot the DID before the put may release ndlp */
            nlp_did = ndlp->nlp_DID;
            lpfc_nlp_put(ndlp);
            if (phba->sli_rev == LPFC_SLI_REV4)
                lpfc_sli4_post_dev_loss_tmo_handler(phba,
                                    fcf_inuse,
                                    nlp_did);
            break;
        case LPFC_EVT_ONLINE:
            if (phba->link_state < LPFC_LINK_DOWN)
                *(int *) (evtp->evt_arg1) = lpfc_online(phba);
            else
                *(int *) (evtp->evt_arg1) = 0;
            complete((struct completion *)(evtp->evt_arg2));
            break;
        case LPFC_EVT_OFFLINE_PREP:
            if (phba->link_state >= LPFC_LINK_DOWN)
                lpfc_offline_prep(phba);
            *(int *)(evtp->evt_arg1) = 0;
            complete((struct completion *)(evtp->evt_arg2));
            break;
        case LPFC_EVT_OFFLINE:
            lpfc_offline(phba);
            lpfc_sli_brdrestart(phba);
            *(int *)(evtp->evt_arg1) =
                lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
            lpfc_unblock_mgmt_io(phba);
            complete((struct completion *)(evtp->evt_arg2));
            break;
        case LPFC_EVT_WARM_START:
            lpfc_offline(phba);
            lpfc_reset_barrier(phba);
            lpfc_sli_brdreset(phba);
            lpfc_hba_down_post(phba);
            *(int *)(evtp->evt_arg1) =
                lpfc_sli_brdready(phba, HS_MBRDY);
            lpfc_unblock_mgmt_io(phba);
            complete((struct completion *)(evtp->evt_arg2));
            break;
        case LPFC_EVT_KILL:
            lpfc_offline(phba);
            *(int *)(evtp->evt_arg1) = (phba->pport->stopped) ?
                0 : lpfc_sli_brdkill(phba);
            lpfc_unblock_mgmt_io(phba);
            complete((struct completion *)(evtp->evt_arg2));
            break;
        case LPFC_EVT_FASTPATH_MGMT_EVT:
            lpfc_send_fastpath_evt(phba, evtp);
            free_evt = 0;
            break;
        case LPFC_EVT_RESET_HBA:
            if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_reset_hba(phba);
            break;
        }
        if (free_evt)
            kfree(evtp);
        spin_lock_irq(&phba->hbalock);
    }
    spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_work_done - Worker-thread main processing routine
 * @phba: Pointer to hba context object.
 *
 * Consumes the latched host-attention bits (work_ha), runs SLI4
 * deferred event processing, handles per-vport timer/queue work,
 * services the slow (ELS) ring, and finally drains the work list.
 **/
static void
lpfc_work_done(struct lpfc_hba *phba)
{
    struct lpfc_sli_ring *pring;
    uint32_t ha_copy, status, control, work_port_events;
    struct lpfc_vport **vports;
    struct lpfc_vport *vport;
    int i;

    /* Atomically take ownership of the pending HA bits */
    spin_lock_irq(&phba->hbalock);
    ha_copy = phba->work_ha;
    phba->work_ha = 0;
    spin_unlock_irq(&phba->hbalock);

    /* First, try to post the next mailbox command to SLI4 device */
    if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
        lpfc_sli4_post_async_mbox(phba);

    if (ha_copy & HA_ERATT)
        /* Handle the error attention event */
        lpfc_handle_eratt(phba);

    if (ha_copy & HA_MBATT)
        lpfc_sli_handle_mb_event(phba);

    if (ha_copy & HA_LATT)
        lpfc_handle_latt(phba);

    /* Process SLI4 events */
    if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
        if (phba->hba_flag & HBA_RRQ_ACTIVE)
            lpfc_handle_rrq_active(phba);
        if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
            lpfc_sli4_fcp_xri_abort_event_proc(phba);
        if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
            lpfc_sli4_els_xri_abort_event_proc(phba);
        if (phba->hba_flag & ASYNC_EVENT)
            lpfc_sli4_async_event_proc(phba);
        if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
            spin_lock_irq(&phba->hbalock);
            phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
            spin_unlock_irq(&phba->hbalock);
            lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
        }
        if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
            lpfc_sli4_fcf_redisc_event_proc(phba);
    }

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 0; i <= phba->max_vports; i++) {
            /*
             * We could have no vports in array if unloading, so if
             * this happens then just use the pport
             */
            if (vports[i] == NULL && i == 0)
                vport = phba->pport;
            else
                vport = vports[i];
            if (vport == NULL)
                break;
            /* Atomically claim this vport's pending work bits */
            spin_lock_irq(&vport->work_port_lock);
            work_port_events = vport->work_port_events;
            vport->work_port_events &= ~work_port_events;
            spin_unlock_irq(&vport->work_port_lock);
            if (work_port_events & WORKER_DISC_TMO)
                lpfc_disc_timeout_handler(vport);
            if (work_port_events & WORKER_ELS_TMO)
                lpfc_els_timeout_handler(vport);
            if (work_port_events & WORKER_HB_TMO)
                lpfc_hb_timeout_handler(phba);
            if (work_port_events & WORKER_MBOX_TMO)
                lpfc_mbox_timeout_handler(phba);
            if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
                lpfc_unblock_fabric_iocbs(phba);
            if (work_port_events & WORKER_FDMI_TMO)
                lpfc_fdmi_timeout_handler(vport);
            if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
                lpfc_ramp_down_queue_handler(phba);
            if (work_port_events & WORKER_RAMP_UP_QUEUE)
                lpfc_ramp_up_queue_handler(phba);
            if (work_port_events & WORKER_DELAYED_DISC_TMO)
                lpfc_delayed_disc_timeout_handler(vport);
        }
    lpfc_destroy_vport_work_array(phba, vports);

    /* Service the slow (ELS) ring: 4 HA bits per ring */
    pring = &phba->sli.ring[LPFC_ELS_RING];
    status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
    status >>= (4*LPFC_ELS_RING);
    if ((status & HA_RXMASK) ||
        (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
        (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
        if (pring->flag & LPFC_STOP_IOCB_EVENT) {
            /* Ring stopped: defer and re-arm the worker */
            pring->flag |= LPFC_DEFERRED_RING_EVENT;
            /* Set the lpfc data pending flag */
            set_bit(LPFC_DATA_READY, &phba->data_flags);
        } else {
            pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
            lpfc_sli_handle_slow_ring_event(phba, pring,
                            (status &
                            HA_RXMASK));
        }
        if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
            lpfc_drain_txq(phba);
        /*
         * Turn on Ring interrupts
         */
        if (phba->sli_rev <= LPFC_SLI_REV3) {
            spin_lock_irq(&phba->hbalock);
            control = readl(phba->HCregaddr);
            if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
                lpfc_debugfs_slow_ring_trc(phba,
                    "WRK Enable ring: cntl:x%x hacopy:x%x",
                    control, ha_copy, 0);

                control |= (HC_R0INT_ENA << LPFC_ELS_RING);
                writel(control, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
            } else {
                lpfc_debugfs_slow_ring_trc(phba,
                    "WRK Ring ok: cntl:x%x hacopy:x%x",
                    control, ha_copy, 0);
            }
            spin_unlock_irq(&phba->hbalock);
        }
    }
    lpfc_work_list_done(phba);
}

/**
 * lpfc_do_work - lpfc worker thread entry point
 * @p: Pointer to hba context object (passed as void * by kthread).
 *
 * Sleeps until LPFC_DATA_READY is set (or the thread is asked to
 * stop), then runs lpfc_work_done(). A signal wakeup terminates
 * the thread. Always returns 0.
 **/
int
lpfc_do_work(void *p)
{
    struct lpfc_hba *phba = p;
    int rc;

    set_user_nice(current, -20);
    phba->data_flags = 0;

    while (!kthread_should_stop()) {
        /* wait and check worker queue activities */
        rc = wait_event_interruptible(phba->work_waitq,
                    (test_and_clear_bit(LPFC_DATA_READY,
                                &phba->data_flags)
                     || kthread_should_stop()));
        /* Signal wakeup shall terminate the worker thread */
        if (rc) {
            lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
                    "0433 Wakeup on signal: rc=x%x\n", rc);
            break;
        }

        /* Attend pending lpfc data processing */
        lpfc_work_done(phba);
    }
    phba->worker_thread = NULL;
    lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
            "0432 Worker thread stopped.\n");
    return 0;
}

/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
              uint32_t evt)
{
    struct lpfc_work_evt *evtp;
    unsigned long flags;

    /*
     * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
     * be queued to worker thread for processing
     */
    /* GFP_ATOMIC: may be called from interrupt context */
    evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
    if (!evtp)
        return 0;

    evtp->evt_arg1 = arg1;
    evtp->evt_arg2 = arg2;
    evtp->evt = evt;

    spin_lock_irqsave(&phba->hbalock, flags);
    list_add_tail(&evtp->evt_listp, &phba->work_list);
    spin_unlock_irqrestore(&phba->hbalock, flags);

    lpfc_worker_wake_up(phba);

    return 1;
}

/**
 * lpfc_cleanup_rpis - Recover or remove all nodes of a vport
 * @vport: Pointer to virtual port object.
 * @remove: nonzero to remove nodes, zero to put them into recovery.
 *
 * Walks the vport node list, unregistering RPIs where required, and
 * feeds each node DEVICE_RM or DEVICE_RECOVERY to the discovery state
 * machine. On vport teardown, also unregisters the VPI and flags it
 * for re-registration.
 **/
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_nodelist *ndlp, *next_ndlp;
    int rc;

    list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
        if (!NLP_CHK_NODE_ACT(ndlp))
            continue;
        if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
            continue;
        if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
            ((vport->port_type == LPFC_NPIV_PORT) &&
            (ndlp->nlp_DID == NameServer_DID)))
            lpfc_unreg_rpi(vport, ndlp);

        /* Leave Fabric nodes alone on link down */
        if ((phba->sli_rev < LPFC_SLI_REV4) &&
            (!remove && ndlp->nlp_type & NLP_FABRIC))
            continue;
        /* NOTE(review): rc is assigned but never used here */
        rc = lpfc_disc_state_machine(vport, ndlp, NULL,
                         remove
                         ? NLP_EVT_DEVICE_RM
                         : NLP_EVT_DEVICE_RECOVERY);
    }
    if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
        if (phba->sli_rev == LPFC_SLI_REV4)
            lpfc_sli4_unreg_all_rpis(vport);
        lpfc_mbx_unreg_vpi(vport);
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
        spin_unlock_irq(shost->host_lock);
    }
}

/**
 * lpfc_port_link_failure - Per-vport cleanup on link failure
 * @vport: Pointer to virtual port object.
 *
 * Marks the vport LINKDOWN and flushes received buffers, RSCN state,
 * outstanding ELS commands, and node RPIs; stops the discovery timer.
 **/
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
    lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

    /* Cleanup any outstanding received buffers */
    lpfc_cleanup_rcv_buffers(vport);

    /* Cleanup any outstanding RSCN activity */
    lpfc_els_flush_rscn(vport);

    /* Cleanup any outstanding ELS commands */
    lpfc_els_flush_cmd(vport);

    lpfc_cleanup_rpis(vport, 0);

    /* Turn off discovery timer if its running */
    lpfc_can_disctmo(vport);
}

/**
 * lpfc_linkdown_port - Handle link down on one vport
 * @vport: Pointer to virtual port object.
 *
 * Posts an FCH_EVT_LINKDOWN event to the FC transport, performs the
 * link-failure cleanup, and cancels any delayed NPort discovery.
 **/
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

    fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
        "Link Down: state:x%x rtry:x%x flg:x%x",
        vport->port_state, vport->fc_ns_retry, vport->fc_flag);

    lpfc_port_link_failure(vport);

    /* Stop delayed Nport discovery */
    spin_lock_irq(shost->host_lock);
    vport->fc_flag &= ~FC_DISC_DELAYED;
    spin_unlock_irq(shost->host_lock);
    del_timer_sync(&vport->delayed_disc_tmo);
}

/**
 * lpfc_linkdown - Handle a link down for the whole HBA
 * @phba: Pointer to hba context object.
 *
 * Blocks the SCSI stack, transitions link state, notifies every
 * vport, unregisters firmware default RPIs, and resets pt2pt state.
 * Returns 0 (also when the link was already down).
 **/
int
lpfc_linkdown(struct lpfc_hba *phba)
{
    struct lpfc_vport *vport = phba->pport;
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_vport **vports;
    LPFC_MBOXQ_t *mb;
    int i;

    if (phba->link_state == LPFC_LINK_DOWN)
        return 0;

    /* Block all SCSI stack I/Os */
    lpfc_scsi_dev_block(phba);

    spin_lock_irq(&phba->hbalock);
    phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
    spin_unlock_irq(&phba->hbalock);
    if (phba->link_state > LPFC_LINK_DOWN) {
        phba->link_state = LPFC_LINK_DOWN;
        spin_lock_irq(shost->host_lock);
        phba->pport->fc_flag &= ~FC_LBIT;
        spin_unlock_irq(shost->host_lock);
    }
    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
            /* Issue a LINK DOWN event to all nodes */
            lpfc_linkdown_port(vports[i]);
        }
    lpfc_destroy_vport_work_array(phba, vports);
    /* Clean up any firmware default rpi's */
    mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (mb) {
        lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
        mb->vport = vport;
        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
            == MBX_NOT_FINISHED) {
            mempool_free(mb, phba->mbox_mem_pool);
        }
    }

    /* Setup myDID for link up if we are in pt2pt mode */
    if (phba->pport->fc_flag & FC_PT2PT) {
        phba->pport->fc_myDID = 0;
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
            lpfc_config_link(phba, mb);
            mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
            mb->vport = vport;
            if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                == MBX_NOT_FINISHED) {
                mempool_free(mb, phba->mbox_mem_pool);
            }
        }
        spin_lock_irq(shost->host_lock);
        phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
        spin_unlock_irq(shost->host_lock);
    }

    return 0;
}

/**
 * lpfc_linkup_cleanup_nodes - Per-node cleanup when the link comes up
 * @vport: Pointer to virtual port object.
 *
 * Moves fabric nodes to NPR state (unregistering their RPIs except
 * for the fabric controller) and unregisters RPIs of non-ADISC nodes
 * so outstanding I/O fails before the new PLOGI.
 **/
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
    struct lpfc_nodelist *ndlp;

    list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
        if (!NLP_CHK_NODE_ACT(ndlp))
            continue;
        if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
            continue;
        if (ndlp->nlp_type & NLP_FABRIC) {
            /* On Linkup its safe to clean up the ndlp
             * from Fabric connections.
             */
            if (ndlp->nlp_DID != Fabric_DID)
                lpfc_unreg_rpi(vport, ndlp);
            lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
        } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
            /* Fail outstanding IO now since device is
             * marked for PLOGI.
             */
            lpfc_unreg_rpi(vport, ndlp);
        }
    }
}

/**
 * lpfc_linkup_port - Handle link up on one vport
 * @vport: Pointer to virtual port object.
 *
 * Posts FCH_EVT_LINKUP to the FC transport and resets discovery
 * flags. No-op while unloading or, without NPIV, for non-physical
 * ports.
 **/
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_hba *phba = vport->phba;

    if ((vport->load_flag & FC_UNLOADING) != 0)
        return;

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
        "Link Up: top:x%x speed:x%x flg:x%x",
        phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

    /* If NPIV is not enabled, only bring the physical port up */
    if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
        (vport != phba->pport))
        return;

    fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

    spin_lock_irq(shost->host_lock);
    vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
                FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
    vport->fc_flag |= FC_NDISC_ACTIVE;
    vport->fc_ns_retry = 0;
    spin_unlock_irq(shost->host_lock);

    if (vport->fc_flag & FC_LBIT)
        lpfc_linkup_cleanup_nodes(vport);
}

/**
 * lpfc_linkup - Handle link up for the whole HBA
 * @phba: Pointer to hba context object.
 *
 * Transitions link state to UP, unblocks fabric iocbs, notifies all
 * vports, and (pre-SLI4 with NPIV) issues CLEAR_LA. Returns 0.
 **/
static int
lpfc_linkup(struct lpfc_hba *phba)
{
    struct lpfc_vport **vports;
    int i;

    lpfc_cleanup_wt_rrqs(phba);
    phba->link_state = LPFC_LINK_UP;

    /* Unblock fabric iocbs if they are blocked */
    clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
    del_timer_sync(&phba->fabric_block_timer);

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
            lpfc_linkup_port(vports[i]);
    lpfc_destroy_vport_work_array(phba, vports);
    if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
        (phba->sli_rev < LPFC_SLI_REV4))
        lpfc_issue_clear_la(phba, phba->pport);

    return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
    struct lpfc_vport *vport = pmb->vport;
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_sli *psli = &phba->sli;
    MAILBOX_t *mb = &pmb->u.mb;
    uint32_t control;

    /* Since we don't do discovery right now, turn these off here */
    psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
    psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
    psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

    /* Check for error */
    /* 0x1601 is tolerated; its meaning is firmware-defined —
     * presumably "CLEAR_LA already in progress/done", TODO confirm.
     */
    if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
        /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                 "0320 CLEAR_LA mbxStatus error x%x hba "
                 "state x%x\n",
                 mb->mbxStatus, vport->port_state);
        phba->link_state = LPFC_HBA_ERROR;
        goto out;
    }

    if (vport->port_type == LPFC_PHYSICAL_PORT)
        phba->link_state = LPFC_HBA_READY;

    /* Re-enable Link Attention interrupts in the host control reg */
    spin_lock_irq(&phba->hbalock);
    psli->sli_flag |= LPFC_PROCESS_LA;
    control = readl(phba->HCregaddr);
    control |= HC_LAINT_ENA;
    writel(control, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
    spin_unlock_irq(&phba->hbalock);
    mempool_free(pmb, phba->mbox_mem_pool);
    return;

out:
    /* Device Discovery completes */
    lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
             "0225 Device Discovery completes\n");
    mempool_free(pmb, phba->mbox_mem_pool);

    spin_lock_irq(shost->host_lock);
    vport->fc_flag &= ~FC_ABORT_DISCOVERY;
    spin_unlock_irq(shost->host_lock);

    lpfc_can_disctmo(vport);

    /* turn on Link Attention interrupts */
    spin_lock_irq(&phba->hbalock);
    psli->sli_flag |= LPFC_PROCESS_LA;
    control = readl(phba->HCregaddr);
    control |= HC_LAINT_ENA;
    writel(control, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
    spin_unlock_irq(&phba->hbalock);

    return;
}

/**
 * lpfc_mbx_cmpl_local_config_link - CONFIG_LINK mailbox completion handler
 * @phba: Pointer to hba context object.
 * @pmb: Pointer to the completed mailbox object.
 *
 * On success, starts discovery: waits for FAN on public loop without
 * the LBIT, otherwise issues the initial FLOGI. On mailbox error,
 * logs it, takes the link down, and issues CLEAR_LA.
 **/
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
    struct lpfc_vport *vport = pmb->vport;

    if (pmb->u.mb.mbxStatus)
        goto out;

    mempool_free(pmb, phba->mbox_mem_pool);

    /* don't perform discovery for SLI4 loopback diagnostic test */
    if ((phba->sli_rev == LPFC_SLI_REV4) &&
        !(phba->hba_flag & HBA_FCOE_MODE) &&
        (phba->link_flag & LS_LOOPBACK_MODE))
        return;

    if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
        vport->fc_flag & FC_PUBLIC_LOOP &&
        !(vport->fc_flag & FC_LBIT)) {
            /* Need to wait for FAN - use discovery timer
             * for timeout. port_state is identically
             * LPFC_LOCAL_CFG_LINK while waiting for FAN
             */
            lpfc_set_disctmo(vport);
            return;
    }

    /* Start discovery by sending a FLOGI. port_state is identically
     * LPFC_FLOGI while waiting for FLOGI cmpl
     */
    if (vport->port_state != LPFC_FLOGI)
        lpfc_initial_flogi(vport);
    return;

out:
    lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
             "0306 CONFIG_LINK mbxStatus error x%x "
             "HBA state x%x\n",
             pmb->u.mb.mbxStatus, vport->port_state);
    mempool_free(pmb, phba->mbox_mem_pool);

    lpfc_linkdown(phba);

    lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
             "0200 CONFIG_LINK bad hba state x%x\n",
             vport->port_state);

    lpfc_issue_clear_la(phba, vport);
    return;
}

/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
    struct lpfc_fcf_pri *fcf_pri;
    struct lpfc_fcf_pri *next_fcf_pri;
    memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
    spin_lock_irq(&phba->hbalock);
    /* Empty the priority list; zeroing flag clears ON_LIST and
     * FLOGI_FAILED on each record.
     */
    list_for_each_entry_safe(fcf_pri, next_fcf_pri,
                &phba->fcf.fcf_pri_list, list) {
        list_del_init(&fcf_pri->list);
        fcf_pri->fcf_rec.flag = 0;
    }
    spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_mbx_cmpl_reg_fcfi - REG_FCFI mailbox completion handler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the completed mailbox object.
 *
 * On success, records the registered FCF index, sets FCF_REGISTERED,
 * and — unless a pending FCoE event forces a rescan — marks the FCF
 * table scan done and kicks off FLOGI discovery via INIT_VFI.
 * On failure (or pending event), clears FCF_RR_INPROG.
 **/
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
    struct lpfc_vport *vport = mboxq->vport;

    if (mboxq->u.mb.mbxStatus) {
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
             "2017 REG_FCFI mbxStatus error x%x "
             "HBA state x%x\n",
             mboxq->u.mb.mbxStatus, vport->port_state);
        goto fail_out;
    }

    /* Start FCoE discovery by sending a FLOGI. */
    phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
    /* Set the FCFI registered flag */
    spin_lock_irq(&phba->hbalock);
    phba->fcf.fcf_flag |= FCF_REGISTERED;
    spin_unlock_irq(&phba->hbalock);

    /* If there is a pending FCoE event, restart FCF table scan. */
    if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
        lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
        goto fail_out;

    /* Mark successful completion of FCF table scan */
    spin_lock_irq(&phba->hbalock);
    phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
    phba->hba_flag &= ~FCF_TS_INPROG;
    if (vport->port_state != LPFC_FLOGI) {
        phba->hba_flag |= FCF_RR_INPROG;
        spin_unlock_irq(&phba->hbalock);
        lpfc_issue_init_vfi(vport);
        goto out;
    }
    spin_unlock_irq(&phba->hbalock);
    goto out;

fail_out:
    spin_lock_irq(&phba->hbalock);
    phba->hba_flag &= ~FCF_RR_INPROG;
    spin_unlock_irq(&phba->hbalock);
out:
    mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
**/ static uint32_t lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) { if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) return 0; if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) return 0; if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) return 0; if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) return 0; if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) return 0; if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) return 0; if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) return 0; if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)) return 0; return 1; } /** * lpfc_sw_name_match - Check if the fcf switch name match. * @fab_name: pointer to fabric name. * @new_fcf_record: pointer to fcf record. * * This routine compare the fcf record's switch name with provided * switch name. If the switch name are identical this function * returns 1 else return 0. **/ static uint32_t lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) { if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) return 0; if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) return 0; if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) return 0; if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) return 0; if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) return 0; if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) return 0; if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) return 0; if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)) return 0; return 1; } /** * lpfc_mac_addr_match - Check if the fcf mac address match. * @mac_addr: pointer to mac address. * @new_fcf_record: pointer to fcf record. 
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
    /* All six MAC bytes must match */
    if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
        return 0;
    if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
        return 0;
    if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
        return 0;
    if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
        return 0;
    if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
        return 0;
    if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
        return 0;
    return 1;
}

/* Returns true when the two vlan ids are equal */
static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
    return (curr_vlan_id == new_vlan_id);
}

/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. This routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
                 struct fcf_record *new_fcf_record
                 )
{
    struct lpfc_fcf_pri *fcf_pri;

    fcf_pri = &phba->fcf.fcf_pri[fcf_index];
    fcf_pri->fcf_rec.fcf_index = fcf_index;
    /* FCF record priority */
    fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;

}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
             struct fcf_record *new_fcf_record)
{
    /* Fabric name */
    fcf_rec->fabric_name[0] =
        bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
    fcf_rec->fabric_name[1] =
        bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
    fcf_rec->fabric_name[2] =
        bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
    fcf_rec->fabric_name[3] =
        bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
    fcf_rec->fabric_name[4] =
        bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
    fcf_rec->fabric_name[5] =
        bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
    fcf_rec->fabric_name[6] =
        bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
    fcf_rec->fabric_name[7] =
        bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
    /* Mac address */
    fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
    fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
    fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
    fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
    fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
    fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
    /* FCF record index */
    fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
    /* FCF record priority */
    fcf_rec->priority = new_fcf_record->fip_priority;
    /* Switch name */
    fcf_rec->switch_name[0] =
        bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
    fcf_rec->switch_name[1] =
        bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
    fcf_rec->switch_name[2] =
        bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
    fcf_rec->switch_name[3] =
        bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
    fcf_rec->switch_name[4] =
        bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
    fcf_rec->switch_name[5] =
        bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
    fcf_rec->switch_name[6] =
        bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
    fcf_rec->switch_name[7] =
        bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

/**
 * lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other informations. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
             struct fcf_record *new_fcf_record, uint32_t addr_mode,
             uint16_t vlan_id, uint32_t flag)
{
    /* Copy the fields from the HBA's FCF record */
    lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
    /* Update other fields of driver FCF record */
    fcf_rec->addr_mode = addr_mode;
    fcf_rec->vlan_id = vlan_id;
    fcf_rec->flag |= (flag | RECORD_VALID);
    __lpfc_update_fcf_record_pri(phba,
        bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
                 new_fcf_record);
}

/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
    LPFC_MBOXQ_t *fcf_mbxq;
    int rc;

    spin_lock_irq(&phba->hbalock);
    /* If the FCF is not available do nothing. */
    if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
        phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
        spin_unlock_irq(&phba->hbalock);
        return;
    }

    /* The FCF is already registered, start discovery */
    if (phba->fcf.fcf_flag & FCF_REGISTERED) {
        phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
        phba->hba_flag &= ~FCF_TS_INPROG;
        if (phba->pport->port_state != LPFC_FLOGI) {
            phba->hba_flag |= FCF_RR_INPROG;
            spin_unlock_irq(&phba->hbalock);
            lpfc_initial_flogi(phba->pport);
            return;
        }
        spin_unlock_irq(&phba->hbalock);
        return;
    }
    spin_unlock_irq(&phba->hbalock);

    /* Not yet registered: issue REG_FCFI; on any failure path the
     * in-progress flags are cleared so discovery can be retried.
     */
    fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!fcf_mbxq) {
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
        spin_unlock_irq(&phba->hbalock);
        return;
    }

    lpfc_reg_fcfi(phba, fcf_mbxq);
    fcf_mbxq->vport = phba->pport;
    fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
    rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
    if (rc == MBX_NOT_FINISHED) {
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
        spin_unlock_irq(&phba->hbalock);
        mempool_free(fcf_mbxq, phba->mbox_mem_pool);
    }

    return;
}

/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record used by boot bios.
 * @addr_mode: The address mode to be used by this FCF
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compare the fcf record with connect list obtained from the
 * config region to decide if this FCF can be used for SAN discovery. It returns
 * 1 if this record can be used for SAN discovery else return zero. If this FCF
 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
 * is used by boot bios and addr_mode will indicate the addressing mode to be
 * used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
*/ if (*addr_mode & LPFC_FCF_FPMA) *addr_mode = LPFC_FCF_FPMA; /* If FCF record report a vlan id use that vlan id */ if (fcf_vlan_id) *vlan_id = fcf_vlan_id; else *vlan_id = LPFC_FCOE_NULL_VID; return 1; } list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) continue; if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, new_fcf_record)) continue; if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) && !lpfc_sw_name_match(conn_entry->conn_rec.switch_name, new_fcf_record)) continue; if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { /* * If the vlan bit map does not have the bit set for the * vlan id to be used, then it is not a match. */ if (!(new_fcf_record->vlan_bitmap [conn_entry->conn_rec.vlan_tag / 8] & (1 << (conn_entry->conn_rec.vlan_tag % 8)))) continue; } /* * If connection record does not support any addressing mode, * skip the FCF record. */ if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) & (LPFC_FCF_FPMA | LPFC_FCF_SPMA))) continue; /* * Check if the connection record specifies a required * addressing mode. */ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) { /* * If SPMA required but FCF not support this continue. */ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && !(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) & LPFC_FCF_SPMA)) continue; /* * If FPMA required but FCF not support this continue. */ if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && !(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) & LPFC_FCF_FPMA)) continue; } /* * This fcf record matches filtering criteria. */ if (conn_entry->conn_rec.flags & FCFCNCT_BOOT) *boot_flag = 1; else *boot_flag = 0; /* * If user did not specify any addressing mode, or if the * preferred addressing mode specified by user is not supported * by FCF, allow fabric to pick the addressing mode. 
*/ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record); /* * If the user specified a required address mode, assign that * address mode */ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED))) *addr_mode = (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) ? LPFC_FCF_SPMA : LPFC_FCF_FPMA; /* * If the user specified a preferred address mode, use the * addr mode only if FCF support the addr_mode. */ else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && (*addr_mode & LPFC_FCF_SPMA)) *addr_mode = LPFC_FCF_SPMA; else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && (*addr_mode & LPFC_FCF_FPMA)) *addr_mode = LPFC_FCF_FPMA; /* If matching connect list has a vlan id, use it */ if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) *vlan_id = conn_entry->conn_rec.vlan_tag; /* * If no vlan id is specified in connect list, use the vlan id * in the FCF record */ else if (fcf_vlan_id) *vlan_id = fcf_vlan_id; else *vlan_id = LPFC_FCOE_NULL_VID; return 1; } return 0; } /** * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event. * @phba: pointer to lpfc hba data structure. * @unreg_fcf: Unregister FCF if FCF table need to be re-scaned. * * This function check if there is any fcoe event pending while driver * scan FCF entries. If there is any pending event, it will restart the * FCF saning and return 1 else return 0. */ int lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) { /* * If the Link is up and no FCoE events while in the * FCF discovery, no need to restart FCF discovery. 
*/ if ((phba->link_state >= LPFC_LINK_UP) && (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) return 0; lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2768 Pending link or FCF event during current " "handling of the previous event: link_state:x%x, " "evt_tag_at_scan:x%x, evt_tag_current:x%x\n", phba->link_state, phba->fcoe_eventtag_at_fcf_scan, phba->fcoe_eventtag); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_AVAILABLE; spin_unlock_irq(&phba->hbalock); if (phba->link_state >= LPFC_LINK_UP) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2780 Restart FCF table scan due to " "pending FCF event:evt_tag_at_scan:x%x, " "evt_tag_current:x%x\n", phba->fcoe_eventtag_at_fcf_scan, phba->fcoe_eventtag); lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); } else { /* * Do not continue FCF discovery and clear FCF_TS_INPROG * flag */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2833 Stop FCF discovery process due to link " "state change (x%x)\n", phba->link_state); spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); spin_unlock_irq(&phba->hbalock); } /* Unregister the currently registered FCF if required */ if (unreg_fcf) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_REGISTERED; spin_unlock_irq(&phba->hbalock); lpfc_sli4_unregister_fcf(phba); } return 1; } /** * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record * @phba: pointer to lpfc hba data structure. * @fcf_cnt: number of eligible fcf record seen so far. * * This function makes an running random selection decision on FCF record to * use through a sequence of @fcf_cnt eligible FCF records with equal * probability. To perform integer manunipulation of random numbers with * size unit32_t, the lower 16 bits of the 32-bit random number returned * from random32() are taken as the random random number generated. 
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is to keep the previously
 * chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = (0xFFFF & random32());

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}

/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory on success, otherwise NULL.
 */
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
*/ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); if (unlikely(!mboxq->sge_array)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2524 Failed to get the non-embedded SGE " "virtual address\n"); return NULL; } virt_addr = mboxq->sge_array->addr[0]; shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status) { if (shdr_status == STATUS_FCF_TABLE_EMPTY) lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2726 READ_FCF_RECORD Indicates empty " "FCF table.\n"); else lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2521 READ_FCF_RECORD mailbox failed " "with status x%x add_status x%x, " "mbx\n", shdr_status, shdr_add_status); return NULL; } /* Interpreting the returned information of the FCF record */ read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, sizeof(struct lpfc_mbx_read_fcf_tbl)); *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); new_fcf_record = (struct fcf_record *)(virt_addr + sizeof(struct lpfc_mbx_read_fcf_tbl)); lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, offsetof(struct fcf_record, vlan_bitmap)); new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137); new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138); return new_fcf_record; } /** * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record * @phba: pointer to lpfc hba data structure. * @fcf_record: pointer to the fcf record. * @vlan_id: the lowest vlan identifier associated to this fcf record. * @next_fcf_index: the index to the next fcf record in hba's fcf table. * * This routine logs the detailed FCF record if the LOG_FIP loggin is * enabled. 
**/ static void lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, struct fcf_record *fcf_record, uint16_t vlan_id, uint16_t next_fcf_index) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2764 READ_FCF_RECORD:\n" "\tFCF_Index : x%x\n" "\tFCF_Avail : x%x\n" "\tFCF_Valid : x%x\n" "\tFIP_Priority : x%x\n" "\tMAC_Provider : x%x\n" "\tLowest VLANID : x%x\n" "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" "\tNext_FCF_Index: x%x\n", bf_get(lpfc_fcf_record_fcf_index, fcf_record), bf_get(lpfc_fcf_record_fcf_avail, fcf_record), bf_get(lpfc_fcf_record_fcf_valid, fcf_record), fcf_record->fip_priority, bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), vlan_id, bf_get(lpfc_fcf_record_mac_0, fcf_record), bf_get(lpfc_fcf_record_mac_1, fcf_record), bf_get(lpfc_fcf_record_mac_2, fcf_record), bf_get(lpfc_fcf_record_mac_3, fcf_record), bf_get(lpfc_fcf_record_mac_4, fcf_record), bf_get(lpfc_fcf_record_mac_5, fcf_record), bf_get(lpfc_fcf_record_fab_name_0, fcf_record), bf_get(lpfc_fcf_record_fab_name_1, fcf_record), bf_get(lpfc_fcf_record_fab_name_2, fcf_record), bf_get(lpfc_fcf_record_fab_name_3, fcf_record), bf_get(lpfc_fcf_record_fab_name_4, fcf_record), bf_get(lpfc_fcf_record_fab_name_5, fcf_record), bf_get(lpfc_fcf_record_fab_name_6, fcf_record), bf_get(lpfc_fcf_record_fab_name_7, fcf_record), bf_get(lpfc_fcf_record_switch_name_0, fcf_record), bf_get(lpfc_fcf_record_switch_name_1, fcf_record), bf_get(lpfc_fcf_record_switch_name_2, fcf_record), bf_get(lpfc_fcf_record_switch_name_3, fcf_record), bf_get(lpfc_fcf_record_switch_name_4, fcf_record), bf_get(lpfc_fcf_record_switch_name_5, fcf_record), bf_get(lpfc_fcf_record_switch_name_6, fcf_record), bf_get(lpfc_fcf_record_switch_name_7, fcf_record), next_fcf_index); } /** lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF * @phba: pointer to lpfc hba data structure. * @fcf_rec: pointer to an existing FCF record. 
* @new_fcf_record: pointer to a new FCF record. * @new_vlan_id: vlan id from the new FCF record. * * This function performs matching test of a new FCF record against an existing * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id * will not be used as part of the FCF record matching criteria. * * Returns true if all the fields matching, otherwise returns false. */ static bool lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, struct fcf_record *new_fcf_record, uint16_t new_vlan_id) { if (new_vlan_id != LPFC_FCOE_IGNORE_VID) if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id)) return false; if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record)) return false; if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record)) return false; if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) return false; if (fcf_rec->priority != new_fcf_record->fip_priority) return false; return true; } /** * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf * @vport: Pointer to vport object. * @fcf_index: index to next fcf. * * This function processing the roundrobin fcf failover to next fcf index. * When this function is invoked, there will be a current fcf registered * for flogi. 
* Return: 0 for continue retrying flogi on currently registered fcf; * 1 for stop flogi on currently registered fcf; */ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) { struct lpfc_hba *phba = vport->phba; int rc; if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { spin_lock_irq(&phba->hbalock); if (phba->hba_flag & HBA_DEVLOSS_TMO) { spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2872 Devloss tmo with no eligible " "FCF, unregister in-use FCF (x%x) " "and rescan FCF table\n", phba->fcf.current_rec.fcf_indx); lpfc_unregister_fcf_rescan(phba); goto stop_flogi_current_fcf; } /* Mark the end to FLOGI roundrobin failover */ phba->hba_flag &= ~FCF_RR_INPROG; /* Allow action to new fcf asynchronous event */ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2865 No FCF available, stop roundrobin FCF " "failover and change port state:x%x/x%x\n", phba->pport->port_state, LPFC_VPORT_UNKNOWN); phba->pport->port_state = LPFC_VPORT_UNKNOWN; goto stop_flogi_current_fcf; } else { lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, "2794 Try FLOGI roundrobin FCF failover to " "(x%x)\n", fcf_index); rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); if (rc) lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, "2761 FLOGI roundrobin FCF failover " "failed (rc:x%x) to read FCF (x%x)\n", rc, phba->fcf.current_rec.fcf_indx); else goto stop_flogi_current_fcf; } return 0; stop_flogi_current_fcf: lpfc_can_disctmo(vport); return 1; } /** * lpfc_sli4_fcf_pri_list_del * @phba: pointer to lpfc hba data structure. * @fcf_index the index of the fcf record to delete * This routine checks the on list flag of the fcf_index to be deleted. * If it is one the list then it is removed from the list, and the flag * is cleared. This routine grab the hbalock before removing the fcf * record from the list. 
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
		"3058 deleting idx x%x pri x%x flg x%x\n",
		fcf_index, new_fcf_pri->fcf_rec.priority,
		 new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		/*
		 * If the deleted record shares the in-use priority level,
		 * the pool of equally-eligible FCFs shrinks by one.
		 */
		if (phba->fcf.current_rec.priority ==
					new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so that the round robin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;
	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 *
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
* If the fcf_index to be added is of a higher priority than the current * head of the list then the rr_bmask is cleared, its bit is set in the * rr_bmask and it is added to the head of the list. * returns: * 0=success 1=failure **/ int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index, struct fcf_record *new_fcf_record) { uint16_t current_fcf_pri; uint16_t last_index; struct lpfc_fcf_pri *fcf_pri; struct lpfc_fcf_pri *next_fcf_pri; struct lpfc_fcf_pri *new_fcf_pri; int ret; new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "3059 adding idx x%x pri x%x flg x%x\n", fcf_index, new_fcf_record->fip_priority, new_fcf_pri->fcf_rec.flag); spin_lock_irq(&phba->hbalock); if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) list_del_init(&new_fcf_pri->list); new_fcf_pri->fcf_rec.fcf_index = fcf_index; new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; if (list_empty(&phba->fcf.fcf_pri_list)) { list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); ret = lpfc_sli4_fcf_rr_index_set(phba, new_fcf_pri->fcf_rec.fcf_index); goto out; } last_index = find_first_bit(phba->fcf.fcf_rr_bmask, LPFC_SLI4_FCF_TBL_INDX_MAX); if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { ret = 0; /* Empty rr list */ goto out; } current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) { list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) { memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); /* fcfs_at_this_priority_level = 1; */ phba->fcf.eligible_fcf_cnt = 1; } else /* fcfs_at_this_priority_level++; */ phba->fcf.eligible_fcf_cnt++; ret = lpfc_sli4_fcf_rr_index_set(phba, new_fcf_pri->fcf_rec.fcf_index); goto out; } list_for_each_entry_safe(fcf_pri, next_fcf_pri, &phba->fcf.fcf_pri_list, list) { if (new_fcf_pri->fcf_rec.priority <= fcf_pri->fcf_rec.priority) { if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) 
list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); else list_add(&new_fcf_pri->list, &((struct lpfc_fcf_pri *) fcf_pri->list.prev)->list); ret = 0; goto out; } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list || new_fcf_pri->fcf_rec.priority < next_fcf_pri->fcf_rec.priority) { list_add(&new_fcf_pri->list, &fcf_pri->list); ret = 0; goto out; } if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority) continue; } ret = 1; out: /* we use = instead of |= to clear the FLOGI_FAILED flag. */ new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST; spin_unlock_irq(&phba->hbalock); return ret; } /** * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. * * This function iterates through all the fcf records available in * HBA and chooses the optimal FCF record for discovery. After finding * the FCF for discovery it registers the FCF record and kicks start * discovery. * If FCF_IN_USE flag is set in currently used FCF, the routine tries to * use an FCF record which matches fabric name and mac address of the * currently used FCF record. * If the driver supports only one FCF, it will try to use the FCF record * used by BOOT_BIOS. 
*/ void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct fcf_record *new_fcf_record; uint32_t boot_flag, addr_mode; uint16_t fcf_index, next_fcf_index; struct lpfc_fcf_rec *fcf_rec = NULL; uint16_t vlan_id; uint32_t seed; bool select_new_fcf; int rc; /* If there is pending FCoE event restart FCF table scan */ if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { lpfc_sli4_mbox_cmd_free(phba, mboxq); return; } /* Parse the FCF record from the non-embedded mailbox command */ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, &next_fcf_index); if (!new_fcf_record) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2765 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record.\n"); /* Let next new FCF event trigger fast failover */ spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_sli4_mbox_cmd_free(phba, mboxq); return; } /* Check the FCF record against the connection list */ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, &addr_mode, &vlan_id); /* Log the FCF record information if turned on */ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, next_fcf_index); /* * If the fcf record does not match with connect list entries * read the next entry; otherwise, this is an eligible FCF * record for roundrobin FCF failover. 
*/ if (!rc) { lpfc_sli4_fcf_pri_list_del(phba, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2781 FCF (x%x) failed connection " "list check: (x%x/x%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record), bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)); if ((phba->fcf.fcf_flag & FCF_IN_USE) && lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, new_fcf_record, LPFC_FCOE_IGNORE_VID)) { if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != phba->fcf.current_rec.fcf_indx) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2862 FCF (x%x) matches property " "of in-use FCF (x%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), phba->fcf.current_rec.fcf_indx); goto read_next_fcf; } /* * In case the current in-use FCF record becomes * invalid/unavailable during FCF discovery that * was not triggered by fast FCF failover process, * treat it as fast FCF failover. */ if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) && !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2835 Invalid in-use FCF " "(x%x), enter FCF failover " "table scan.\n", phba->fcf.current_rec.fcf_indx); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); lpfc_sli4_mbox_cmd_free(phba, mboxq); lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); return; } } goto read_next_fcf; } else { fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); if (rc) goto read_next_fcf; } /* * If this is not the first FCF discovery of the HBA, use last * FCF record for the discovery. The condition that a rescan * matches the in-use FCF record: fabric name, switch name, mac * address, and vlan_id. 
*/ spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_IN_USE) { if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, new_fcf_record, vlan_id)) { if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == phba->fcf.current_rec.fcf_indx) { phba->fcf.fcf_flag |= FCF_AVAILABLE; if (phba->fcf.fcf_flag & FCF_REDISC_PEND) /* Stop FCF redisc wait timer */ __lpfc_sli4_stop_fcf_redisc_wait_timer( phba); else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) /* Fast failover, mark completed */ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2836 New FCF matches in-use " "FCF (x%x)\n", phba->fcf.current_rec.fcf_indx); goto out; } else lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2863 New FCF (x%x) matches " "property of in-use FCF (x%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), phba->fcf.current_rec.fcf_indx); } /* * Read next FCF record from HBA searching for the matching * with in-use record only if not during the fast failover * period. In case of fast failover period, it shall try to * determine whether the FCF record just read should be the * next candidate. */ if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } } /* * Update on failover FCF record only if it's in FCF fast-failover * period; otherwise, update on current FCF record. */ if (phba->fcf.fcf_flag & FCF_REDISC_FOV) fcf_rec = &phba->fcf.failover_rec; else fcf_rec = &phba->fcf.current_rec; if (phba->fcf.fcf_flag & FCF_AVAILABLE) { /* * If the driver FCF record does not have boot flag * set and new hba fcf record has boot flag set, use * the new hba fcf record. 
*/ if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { /* Choose this FCF record */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2837 Update current FCF record " "(x%x) with new FCF record (x%x)\n", fcf_rec->fcf_indx, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, BOOT_ENABLE); spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* * If the driver FCF record has boot flag set and the * new hba FCF record does not have boot flag, read * the next FCF record. */ if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* * If the new hba FCF record has lower priority value * than the driver FCF record, use the new record. */ if (new_fcf_record->fip_priority < fcf_rec->priority) { /* Choose the new FCF record with lower priority */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2838 Update current FCF record " "(x%x) with new FCF record (x%x)\n", fcf_rec->fcf_indx, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, 0); /* Reset running random FCF selection count */ phba->fcf.eligible_fcf_cnt = 1; } else if (new_fcf_record->fip_priority == fcf_rec->priority) { /* Update running random FCF selection count */ phba->fcf.eligible_fcf_cnt++; select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, phba->fcf.eligible_fcf_cnt); if (select_new_fcf) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2839 Update current FCF record " "(x%x) with new FCF record (x%x)\n", fcf_rec->fcf_indx, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); /* Choose the new FCF by random selection */ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, 0); } } spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* * This is the first suitable FCF record, choose this record for * initial best-fit FCF. 
*/ if (fcf_rec) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2840 Update initial FCF candidate " "with FCF (x%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, (boot_flag ? BOOT_ENABLE : 0)); phba->fcf.fcf_flag |= FCF_AVAILABLE; /* Setup initial running random FCF selection count */ phba->fcf.eligible_fcf_cnt = 1; /* Seeding the random number generator for random selection */ seed = (uint32_t)(0xFFFFFFFF & jiffies); srandom32(seed); } spin_unlock_irq(&phba->hbalock); goto read_next_fcf; read_next_fcf: lpfc_sli4_mbox_cmd_free(phba, mboxq); if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) { if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { /* * Case of FCF fast failover scan */ /* * It has not found any suitable FCF record, cancel * FCF scan inprogress, and do nothing */ if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2782 No suitable FCF found: " "(x%x/x%x)\n", phba->fcoe_eventtag_at_fcf_scan, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); spin_lock_irq(&phba->hbalock); if (phba->hba_flag & HBA_DEVLOSS_TMO) { phba->hba_flag &= ~FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); /* Unregister in-use FCF and rescan */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2864 On devloss tmo " "unreg in-use FCF and " "rescan FCF table\n"); lpfc_unregister_fcf_rescan(phba); return; } /* * Let next new FCF event trigger fast failover */ phba->hba_flag &= ~FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); return; } /* * It has found a suitable FCF record that is not * the same as in-use FCF record, unregister the * in-use FCF record, replace the in-use FCF record * with the new FCF record, mark FCF fast failover * completed, and then start register the new FCF * record. 
*/ /* Unregister the current in-use FCF record */ lpfc_unregister_fcf(phba); /* Replace in-use record with the new record */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2842 Replace in-use FCF (x%x) " "with failover FCF (x%x)\n", phba->fcf.current_rec.fcf_indx, phba->fcf.failover_rec.fcf_indx); memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, sizeof(struct lpfc_fcf_rec)); /* * Mark the fast FCF failover rediscovery completed * and the start of the first round of the roundrobin * FCF failover. */ spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); /* Register to the new FCF record */ lpfc_register_fcf(phba); } else { /* * In case of transaction period to fast FCF failover, * do nothing when search to the end of the FCF table. */ if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || (phba->fcf.fcf_flag & FCF_REDISC_PEND)) return; if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && phba->fcf.fcf_flag & FCF_IN_USE) { /* * In case the current in-use FCF record no * longer existed during FCF discovery that * was not triggered by fast FCF failover * process, treat it as fast FCF failover. */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2841 In-use FCF record (x%x) " "not reported, entering fast " "FCF failover mode scanning.\n", phba->fcf.current_rec.fcf_indx); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); return; } /* Register to the new FCF record */ lpfc_register_fcf(phba); } } else lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); return; out: lpfc_sli4_mbox_cmd_free(phba, mboxq); lpfc_register_fcf(phba); return; } /** * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. 
* * This is the callback function for FLOGI failure roundrobin FCF failover * read FCF record mailbox command from the eligible FCF record bmask for * performing the failover. If the FCF read back is not valid/available, it * fails through to retrying FLOGI to the currently registered FCF again. * Otherwise, if the FCF read back is valid and available, it will set the * newly read FCF record to the failover FCF record, unregister currently * registered FCF record, copy the failover FCF record to the current * FCF record, and then register the current FCF record before proceeding * to trying FLOGI on the new failover FCF. */ void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct fcf_record *new_fcf_record; uint32_t boot_flag, addr_mode; uint16_t next_fcf_index, fcf_index; uint16_t current_fcf_index; uint16_t vlan_id; int rc; /* If link state is not up, stop the roundrobin failover process */ if (phba->link_state < LPFC_LINK_UP) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; phba->hba_flag &= ~FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); goto out; } /* Parse the FCF record from the non-embedded mailbox command */ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, &next_fcf_index); if (!new_fcf_record) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2766 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record.\n"); goto error_out; } /* Get the needed parameters from FCF record */ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, &addr_mode, &vlan_id); /* Log the FCF record information if turned on */ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, next_fcf_index); fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); if (!rc) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2848 Remove ineligible FCF (x%x) from " "from roundrobin bmask\n", fcf_index); /* Clear roundrobin bmask bit for ineligible FCF */ lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); /* 
Perform next round of roundrobin FCF failover */ fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); if (rc) goto out; goto error_out; } if (fcf_index == phba->fcf.current_rec.fcf_indx) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2760 Perform FLOGI roundrobin FCF failover: " "FCF (x%x) back to FCF (x%x)\n", phba->fcf.current_rec.fcf_indx, fcf_index); /* Wait 500 ms before retrying FLOGI to current FCF */ msleep(500); lpfc_issue_init_vfi(phba->pport); goto out; } /* Upload new FCF record to the failover FCF record */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2834 Update current FCF (x%x) with new FCF (x%x)\n", phba->fcf.failover_rec.fcf_indx, fcf_index); spin_lock_irq(&phba->hbalock); __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, new_fcf_record, addr_mode, vlan_id, (boot_flag ? BOOT_ENABLE : 0)); spin_unlock_irq(&phba->hbalock); current_fcf_index = phba->fcf.current_rec.fcf_indx; /* Unregister the current in-use FCF record */ lpfc_unregister_fcf(phba); /* Replace in-use record with the new record */ memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, sizeof(struct lpfc_fcf_rec)); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2783 Perform FLOGI roundrobin FCF failover: FCF " "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); error_out: lpfc_register_fcf(phba); out: lpfc_sli4_mbox_cmd_free(phba, mboxq); } /** * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. * * This is the callback function of read FCF record mailbox command for * updating the eligible FCF bmask for FLOGI failure roundrobin FCF * failover when a new FCF event happened. If the FCF read back is * valid/available and it passes the connection list check, it updates * the bmask for the eligible FCF record for roundrobin failover. 
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/* Ineligible FCF: do not add it to the roundrobin priority list */
	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	/*
	 * NOTE(review): rc is assigned here but never checked; a pri-list
	 * add failure is silently ignored — confirm this is intentional.
	 */
	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);

out:
	/* Always release the non-embedded mailbox before returning */
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vfi mailbox command.
 */
void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/*
	 * VFI not supported on interface type 0, just do the flogi
	 * Also continue if the VFI is in use - just use the same one.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2891 Init VFI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	/* VFI initialized (or acceptable status): kick off FLOGI */
	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issue a init_vfi mailbox command to initialize the VFI and
 * VPI for the physical port.
 */
void
lpfc_issue_init_vfi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2892 Failed to allocate "
			"init_vfi mailbox\n");
		return;
	}
	lpfc_init_vfi(mboxq, vport);
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		/* Mailbox not queued: free it here, completion won't run */
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}

/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2609 Init VPI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	/* VPI now initialized: clear the pending-init flag */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
			ndlp = lpfc_findnode_did(vport, Fabric_DID);
			if (!ndlp)
				lpfc_printf_vlog(vport, KERN_ERR,
					LOG_DISCOVERY,
					"2731 Cannot find fabric "
					"controller node\n");
			else
				lpfc_register_new_vport(phba, vport, ndlp);
			mempool_free(mboxq, phba->mbox_mem_pool);
			return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issue a init_vpi mailbox command to initialize
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			"2607 Failed to allocate "
			"init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		/* Mailbox not queued: free it here, completion won't run */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			"2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}

/**
 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Physical port handled elsewhere (FLOGI, not FDISC) */
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			/* NPIV/FDISC is not valid in loop topology */
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			/* VPI must be initialized before FDISC */
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/*
 * lpfc_mbx_cmpl_reg_vfi - REG_VFI mailbox completion handler.
 * Registers the VPI/VFI state and kicks off discovery for the vport.
 */
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 *
	 * error (except VFI in use) and continue with the discovery.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "2018 REG_VFI mbxStatus error x%x "
			 "HBA state x%x\n",
			 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto out_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out_free_mem;
	}

	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* In case SLI4 FC loopback test, we are ready */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->link_flag & LS_LOOPBACK_MODE)) {
		phba->link_state = LPFC_HBA_READY;
		goto out_free_mem;
	}

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* For private loop just start discovery and we are done. */
		if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
		    !(vport->fc_flag & FC_PUBLIC_LOOP)) {
			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
		} else {
			/* Fabric: FDISC the vports, then SCR/NS PLOGI */
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

out_free_mem:
	/* Free the mailbox and its attached DMA buffer on every path */
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}

/*
 * lpfc_mbx_cmpl_read_sparam - READ_SPARAM mailbox completion handler.
 * Copies the service parameters into the vport and, for the physical
 * port, caches the WWNN/WWPN on the hba.
 */
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x>\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	lpfc_update_vport_wwn(vport);
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		/*
		 * NOTE(review): size argument is sizeof(phba->wwnn) for the
		 * wwpn copy; harmless only while wwnn and wwpn have the same
		 * size — confirm and consider sizeof(phba->wwpn).
		 */
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * lpfc_mbx_process_link_up - process a link-up attention event.
 * Records link speed/topology, handles loop-map logging, brings the
 * link up, then issues READ_SPARAM and either CONFIG_LINK (FC) or
 * starts FCF discovery (FCoE).
 * Note: phba->hbalock is held from entry until just before lpfc_linkup().
 */
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	struct Scsi_Host *shost;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;

	spin_lock_irq(&phba->hbalock);
	switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
	case LPFC_LINK_SPEED_1GHZ:
	case LPFC_LINK_SPEED_2GHZ:
	case LPFC_LINK_SPEED_4GHZ:
	case LPFC_LINK_SPEED_8GHZ:
	case LPFC_LINK_SPEED_10GHZ:
	case LPFC_LINK_SPEED_16GHZ:
		phba->fc_linkspeed =
			bf_get(lpfc_mbx_read_top_link_spd, la);
		break;
	default:
		phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	}

	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	shost = lpfc_shost_from_vport(vport);
	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1309 Link Up Event npiv not supported in loop "
				"topology\n");
		/* Get Loop Map information */
		if (bf_get(lpfc_mbx_read_top_il, la)) {
			spin_lock(shost->host_lock);
			vport->fc_flag |= FC_LBIT;
			spin_unlock(shost->host_lock);
		}

		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
		i = la->lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				/* Dump the ALPA map 16 bytes at a time */
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		spin_lock(shost->host_lock);
		vport->fc_flag |= FC_LBIT;
		spin_unlock(shost->host_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		/* Not queued: free the DMA buffer lpfc_read_sparam attached */
		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memory for "
					"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			/* Table scan already in progress: nothing to do */
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}

/*
 * lpfc_enable_la - re-enable link attention processing.
 * Sets LPFC_PROCESS_LA and, on SLI-3 and older, turns the LAINT enable
 * bit back on in the Host Control register.
 */
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}

/*
 * lpfc_mbx_issue_link_down - handle a link-down attention event.
 */
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}


/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_mbx_read_top *la;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Unblock ELS traffic */
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
	}

	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;

	/* ALPA map is returned in the attached DMA buffer */
	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (bf_get(lpfc_mbx_read_top_pb, la))
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	/* Repeated event tag means a multi-event burst; force a linkdown
	 * first if this looks like an up-after-up sequence.
	 */
	if ((phba->fc_eventTag  < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	spin_lock_irq(&phba->hbalock);
	if (bf_get(lpfc_mbx_read_top_mm, la))
		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
	else
		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
	spin_unlock_irq(&phba->hbalock);

	phba->link_events++;
	if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
	    (!bf_get(lpfc_mbx_read_top_mm, la))) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0],
					bf_get(lpfc_mbx_read_top_mm, la),
					bf_get(lpfc_mbx_read_top_fa, la),
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
		   LPFC_ATT_LINK_DOWN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				bf_get(lpfc_mbx_read_top_mm, la),
				bf_get(lpfc_mbx_read_top_fa, la));
		lpfc_mbx_issue_link_down(phba);
	}
	/* Menlo maintenance-mode link up is treated as a link down */
	if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
	    (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1312 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command.
 */
	lpfc_nlp_put(ndlp);

	return;
}

/*
 * lpfc_mbx_cmpl_unreg_vpi - UNREG_VPI mailbox completion handler.
 * Clears the registered-VPI state; a busy VPI (0x9700) triggers an
 * HBA reset via the worker thread.
 */
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
				LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}

/*
 * lpfc_mbx_unreg_vpi - issue an UNREG_VPI mailbox for the vport.
 * Returns 0 on successful issue, non-zero otherwise.
 */
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}

/*
 * lpfc_mbx_cmpl_reg_vpi - REG_VPI mailbox completion handler.
 * On success marks the VPI registered and resumes NPR-node discovery.
 */
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issue a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function create vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}

	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	/* Region 22 may span several DUMP responses; loop until it is read */
	do {
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);

		if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *) pmb->context2;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
				sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				vport_buff + offset,
				byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0545 lpfc_create_static_vport bad"
			" information header 0x%x 0x%x\n",
			le32_to_cpu(vport_info->signature),
			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);

		/* Empty slot in the static vport table */
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	/* On MBX_TIMEOUT the mailbox is still owned by the firmware */
	if (rc != MBX_TIMEOUT) {
		if (pmb->context2) {
			mp = (struct lpfc_dmabuf *) pmb->context2;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
*/ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; ndlp = (struct lpfc_nodelist *) pmb->context2; pmb->context1 = NULL; pmb->context2 = NULL; if (mb->mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "0258 Register Fabric login error: 0x%x\n", mb->mbxStatus); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { /* FLOGI failed, use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ lpfc_disc_start(vport); /* Decrement the reference count to ndlp after the * reference to the ndlp are done. */ lpfc_nlp_put(ndlp); return; } lpfc_vport_set_state(vport, FC_VPORT_FAILED); /* Decrement the reference count to ndlp after the reference * to the ndlp are done. */ lpfc_nlp_put(ndlp); return; } if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_REGISTERED; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); if (vport->port_state == LPFC_FABRIC_CFG_LINK) { /* when physical port receive logo donot start * vport discovery */ if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) lpfc_start_fdiscs(phba); else { shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ; spin_unlock_irq(shost->host_lock); } lpfc_do_scr_ns_plogi(phba, vport); } lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); /* Drop the reference count from the mbox at the end after * all the current reference to the ndlp have been done. */ lpfc_nlp_put(ndlp); return; } /* * This routine handles processing a NameServer REG_LOGIN mailbox * command upon completion. 
 It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		/* NOTE: "out" sits inside the error branch on purpose; the
		 * GID_FT failure path below jumps back here to reuse this
		 * error cleanup (put ref, free bufs, maybe restart loop
		 * discovery).
		 */
out:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	/* Pre-SLI4 returns the RPI in the mailbox response. */
	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	/* decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/*
 * Register (or re-register) an ndlp with the FC transport as a remote
 * port and propagate its FCP target/initiator roles.  Takes a node
 * reference for rport->dd_data->pnode via lpfc_nlp_get().
 */
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	/* Roles are announced separately after the add (transport scans
	 * targets on the role change).
	 */
	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}

/*
 * Remove the ndlp's remote port from the FC transport.  Note this does
 * NOT clear ndlp->rport; callers rely on the stale linkage (see
 * lpfc_nlp_remove which breaks it explicitly).
 */
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}

/*
 * Adjust the vport's per-state node counter for @state by @count
 * (+1/-1), under the host lock.
 */
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}

/*
 * Perform the side effects of an ndlp state transition: adjust node
 * flags, register/unregister the FC transport remote port, allocate the
 * per-target statistics buffer, and demote MAPPED nodes that ended up
 * without a usable SCSI target id.
 */
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}

/*
 * Render an ndlp state number as a human-readable name into @buffer
 * (returned for convenience); unknown states print as "unknown (%d)".
 */
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}

/*
 * Transition @ndlp to @state: cancel any NPR retry timer, update flags,
 * (re)link the node onto the vport list, keep per-state counters in
 * sync, and run the transition side effects in lpfc_nlp_state_cleanup.
 */
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	/* A node not yet on the vport list is added; one already listed
	 * has its old state counter decremented before the new state's
	 * counter is incremented below.
	 */
	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}

/*
 * Add @ndlp to the vport's fc_nodes list if it is not already linked.
 */
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}

/*
 * Remove @ndlp from the vport's fc_nodes list, fixing up the state
 * counters and running the UNUSED-state cleanup side effects.
 */
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

/*
 * Like lpfc_dequeue_node but leaves the node on the list; only the
 * counters and state-cleanup side effects are applied.
 */
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when node object need to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through it's reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is due
 * to the life-span of the @ndlp might go beyond the existence of @vport as
 * the final release of ndlp is determined by its reference count. And, the
 * operation on @ndlp needs the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
	/* SLI4 allocates the RPI up front rather than via mailbox reply */
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
}

/*
 * Re-activate a previously used (inactive) ndlp: under the ndlp lock,
 * verify it is neither pending free nor already active, wipe everything
 * except the list linkage, re-initialize it with its original DID, and
 * optionally move it to @state.  Returns the node or NULL on failure.
 */
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer
	 * (relies on nlp_listp being the first member of the struct —
	 * NOTE(review): confirm against the struct layout)
	 */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable: did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}

void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer: tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if its running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
			/* NOTE(review): no break — falls through to the
			 * remoteID check below; verify this is intended.
			 */
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
			/* NOTE(review): falls through — see above. */
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {
		/* extra ring: no per-node IOCBs to match */
	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {
		/* next ring: no per-node IOCBs to match */
	}
	return 0;
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * and unsolicated ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;
	uint16_t rpi;

	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			/* SLI4 ports require the physical rpi value. */
			rpi = ndlp->nlp_rpi;
			if (phba->sli_rev == LPFC_SLI_REV4)
				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
			mbox->vport = vport;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		/* Note: mbox allocation failure is not treated as fatal;
		 * the local cleanup below still runs.
		 */
		lpfc_no_rpi(phba, ndlp);
		if (phba->sli_rev != LPFC_SLI_REV4)
			ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 1;
	}
	return 0;
}

/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"2884 Vport array allocation failed \n");
		return;
	}
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/*
 * Unregister every RPI on @vport in one mailbox (SLI4 has a dedicated
 * helper; otherwise an UNREG_LOGIN with the all-RPIs wildcard is issued
 * synchronously).
 */
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
				 mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		/* On timeout the SLI layer still owns the mailbox; do not
		 * free it here.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}

/*
 * Unregister all default RPIs on @vport via a synchronous UNREG_DID
 * mailbox command.
 */
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
			       mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1815 Could not issue "
				"unreg_did (default rpis) status %d\n", rc);
	}
}

/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
			(ndlp != (struct lpfc_nodelist *) mb->context2))
			continue;

		mb->context2 = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);
	lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			}
			else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = NULL;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox,
						     phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage.  So if we
	 * do, make sure we don't leaving any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}

/*
 * Match @did against @ndlp, allowing area/domain wildcard forms
 * (a DID with zero area and domain matches a fully-qualified DID in
 * the local domain/area, and vice versa).  Returns 1 on match.
 */
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}

/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}

/* Locked wrapper around __lpfc_findnode_did (takes the host lock). */
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irqrestore(shost->host_lock, iflags);
	return ndlp;
}

/*
 * Find — or allocate/re-enable — the node for @did and mark it for
 * discovery (NLP_NPR_2B_DISC).  Returns NULL when the DID is filtered
 * out by RSCN payload checks or discovery of it is already in progress.
 */
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			/* Skip our own ALPA and the reserved value 0 */
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}

/*
 * Issue a CLEAR_LA mailbox for the physical port (pre-SLI4 only) to
 * begin link-up discovery.  On submit failure the vport discovery
 * lists are flushed, ring IOCB holds cleared, and the HBA marked
 * errored.
 */
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int  rc;

	/*
	 * if it's not a physical port or if we already send
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
		(phba->sli_rev == LPFC_SLI_REV4))
		return;

			/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}

/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}

/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/* Register the VPI for SLI3, NON-NPIV only. */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}

/*
 *  Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 *  ring the match the sppecified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t     *icmd;
	struct lpfc_iocbq    *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			/* In-flight IOCBs are aborted rather than moved;
			 * the abort completion reclaims them.
			 */
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/*
 * Flush pending ELS traffic for every node still in PLOGI/ADISC issue
 * state on @vport (only when such nodes exist per the counters).
 */
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}

void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	/* Post WORKER_DISC_TMO at most once; the worker thread does the
	 * real handling in lpfc_disc_timeout_handler().
	 */
	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/*
 * Worker-thread handler for a discovery timeout.  Dispatches on the
 * vport's discovery state to retry, restart or abandon discovery, then
 * checks the HBA link state for a stuck CLEAR_LA.
 */
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
		/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			if (phba->sli_rev <= LPFC_SLI_REV3)
				lpfc_initial_flogi(vport);
			else
				lpfc_issue_init_vfi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		/* CLEAR_LA went wrong: flush discovery, release the IOCB
		 * rings and force the vport ready so traffic can resume.
		 */
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox command for the
 * FDMI well-known port upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	/* SLI4 assigns RPIs elsewhere; only pre-SLI4 reads it from the
	 * mailbox response.
	 */
	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */
	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/* Node-list filter: match an active node by RPI (param is uint16_t *). */
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	/* check for active node */
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;

	return ndlp->nlp_rpi == *rpi;
}

/* Node-list filter: match by WWPN (param points to a struct lpfc_name). */
static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

/*
 * Walk the vport's node list and return the first ndlp the filter
 * accepts, or NULL.  Caller is responsible for any required locking.
 */
static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If rpi found it
 * returns the node list element pointer else return NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
 * returns the node element list pointer else return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	/* Locked wrapper around __lpfc_find_node() */
	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi
 * is found, the routine returns the node element list pointer else
 * return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	/* Locked wrapper around __lpfc_findnode_rpi() */
	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on a HBA (referred by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    Otherwise - Address to the vport with the matching @vpi.
 **/
struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;
	int i = 0;

	/* The physical ports are always vpi 0 - translate is unnecessary. */
	if (vpi > 0) {
		/*
		 * Translate the physical vpi to the logical vpi. The
		 * vport stores the logical vpi.
		 */
		for (i = 0; i < phba->max_vpi; i++) {
			if (vpi == phba->vpi_ids[i])
				break;
		}

		if (i >= phba->max_vpi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"2936 Could not find Vport mapped "
					"to vpi %d\n", vpi);
			return NULL;
		}
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == i) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return NULL;
}

/* Zero and initialize a caller-allocated ndlp for the given DID. */
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init: did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}

/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release: did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action.
	 */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}

/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get: did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));

		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}

/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
	"node put: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, the kref_put returns 1 when decrementing a reference
	 * count that was 1, it invokes the release callback function,
	 * but it still left the reference count as 1 (not actually
	 * performs the last decrementation). Otherwise, it actually
	 * decrements the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}

/* This routine free's the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp is
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));

	/* Only drop the reference when we hold the last one */
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}

/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterate through all FC nodes associated
 * will all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		/*
		 * IF the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
			spin_unlock_irq(shost->host_lock);
			ret = 1;
			goto out;
		}
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				/* Target still mapped: FCF is in use */
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* Node still logged in: FCF is in use,
				 * but keep scanning to log them all.
				 */
				ret = 1;
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
					"2624 RPI %x DID %x flag %x "
					"still logged in\n",
					ndlp->nlp_rpi, ndlp->nlp_DID,
					ndlp->nlp_flag);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}

/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	/* Clear the registered flag even on mailbox error */
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2550 UNREG_FCFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepare the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * VFIs.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister the physical port VFI */
	rc = lpfc_issue_unreg_vfi(phba->pport);
	return rc;
}

/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues synchronous unregister FCF mailbox command to HBA to
 * unregister the currently registered FCF record. The driver does not reset
 * the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}

/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. This function
 * also tries to find another FCF for discovery by rescan the HBA FCF table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		/* Scan could not be started: clear the init-disc flag */
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}

/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function check if there are any connected remote port for the FCF and
 * if all the devices are disconnected, this function unregister FCFI.
 * This function also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}

/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	/* Header length is in words; derive how many fixed-size records fit */
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		/* Records are little-endian in flash; convert in place */
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}

/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function update driver data structure with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	/* Only accept the expected version and length */
	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If record found, this
 * function return pointer to the record else return NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}

/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populate driver data structure with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}
gpl-2.0
DarkPoe/AK-OnePone
drivers/input/touchscreen/ucb1400_ts.c
5040
12578
/* * Philips UCB1400 touchscreen driver * * Author: Nicolas Pitre * Created: September 25, 2006 * Copyright: MontaVista Software, Inc. * * Spliting done by: Marek Vasut <marek.vasut@gmail.com> * If something doesn't work and it worked before spliting, e-mail me, * dont bother Nicolas please ;-) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This code is heavily based on ucb1x00-*.c copyrighted by Russell King * covering the UCB1100, UCB1200 and UCB1300.. Support for the UCB1400 has * been made separate from ucb1x00-core/ucb1x00-ts on Russell's request. */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/input.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/ucb1400.h> #define UCB1400_TS_POLL_PERIOD 10 /* ms */ static bool adcsync; static int ts_delay = 55; /* us */ static int ts_delay_pressure; /* us */ /* Switch to interrupt mode. */ static void ucb1400_ts_mode_int(struct ucb1400_ts *ucb) { ucb1400_reg_write(ucb->ac97, UCB_TS_CR, UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW | UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND | UCB_TS_CR_MODE_INT); } /* * Switch to pressure mode, and read pressure. We don't need to wait * here, since both plates are being driven. */ static unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb) { ucb1400_reg_write(ucb->ac97, UCB_TS_CR, UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW | UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); udelay(ts_delay_pressure); return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPY, adcsync); } /* * Switch to X position mode and measure Y plate. We switch the plate * configuration in pressure mode, then switch to position mode. This * gives a faster response time. Even so, we need to wait about 55us * for things to stabilise. 
*/ static unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb) { ucb1400_reg_write(ucb->ac97, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); ucb1400_reg_write(ucb->ac97, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); ucb1400_reg_write(ucb->ac97, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA); udelay(ts_delay); return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPY, adcsync); } /* * Switch to Y position mode and measure X plate. We switch the plate * configuration in pressure mode, then switch to position mode. This * gives a faster response time. Even so, we need to wait about 55us * for things to stabilise. */ static int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb) { ucb1400_reg_write(ucb->ac97, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); ucb1400_reg_write(ucb->ac97, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); ucb1400_reg_write(ucb->ac97, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA); udelay(ts_delay); return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPX, adcsync); } /* * Switch to X plate resistance mode. Set MX to ground, PX to * supply. Measure current. */ static unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb) { ucb1400_reg_write(ucb->ac97, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); return ucb1400_adc_read(ucb->ac97, 0, adcsync); } /* * Switch to Y plate resistance mode. Set MY to ground, PY to * supply. Measure current. 
*/ static unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb) { ucb1400_reg_write(ucb->ac97, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); return ucb1400_adc_read(ucb->ac97, 0, adcsync); } static int ucb1400_ts_pen_up(struct ucb1400_ts *ucb) { unsigned short val = ucb1400_reg_read(ucb->ac97, UCB_TS_CR); return val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW); } static void ucb1400_ts_irq_enable(struct ucb1400_ts *ucb) { ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, UCB_IE_TSPX); ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0); ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_TSPX); } static void ucb1400_ts_irq_disable(struct ucb1400_ts *ucb) { ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0); } static void ucb1400_ts_report_event(struct input_dev *idev, u16 pressure, u16 x, u16 y) { input_report_abs(idev, ABS_X, x); input_report_abs(idev, ABS_Y, y); input_report_abs(idev, ABS_PRESSURE, pressure); input_report_key(idev, BTN_TOUCH, 1); input_sync(idev); } static void ucb1400_ts_event_release(struct input_dev *idev) { input_report_abs(idev, ABS_PRESSURE, 0); input_report_key(idev, BTN_TOUCH, 0); input_sync(idev); } static void ucb1400_clear_pending_irq(struct ucb1400_ts *ucb) { unsigned int isr; isr = ucb1400_reg_read(ucb->ac97, UCB_IE_STATUS); ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, isr); ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0); if (isr & UCB_IE_TSPX) ucb1400_ts_irq_disable(ucb); else dev_dbg(&ucb->ts_idev->dev, "ucb1400: unexpected IE_STATUS = %#x\n", isr); } /* * A restriction with interrupts exists when using the ucb1400, as * the codec read/write routines may sleep while waiting for codec * access completion and uses semaphores for access control to the * AC97 bus. Therefore the driver is forced to use threaded interrupt * handler. 
*/ static irqreturn_t ucb1400_irq(int irqnr, void *devid) { struct ucb1400_ts *ucb = devid; unsigned int x, y, p; bool penup; if (unlikely(irqnr != ucb->irq)) return IRQ_NONE; ucb1400_clear_pending_irq(ucb); /* Start with a small delay before checking pendown state */ msleep(UCB1400_TS_POLL_PERIOD); while (!ucb->stopped && !(penup = ucb1400_ts_pen_up(ucb))) { ucb1400_adc_enable(ucb->ac97); x = ucb1400_ts_read_xpos(ucb); y = ucb1400_ts_read_ypos(ucb); p = ucb1400_ts_read_pressure(ucb); ucb1400_adc_disable(ucb->ac97); ucb1400_ts_report_event(ucb->ts_idev, p, x, y); wait_event_timeout(ucb->ts_wait, ucb->stopped, msecs_to_jiffies(UCB1400_TS_POLL_PERIOD)); } ucb1400_ts_event_release(ucb->ts_idev); if (!ucb->stopped) { /* Switch back to interrupt mode. */ ucb1400_ts_mode_int(ucb); ucb1400_ts_irq_enable(ucb); } return IRQ_HANDLED; } static void ucb1400_ts_stop(struct ucb1400_ts *ucb) { /* Signal IRQ thread to stop polling and disable the handler. */ ucb->stopped = true; mb(); wake_up(&ucb->ts_wait); disable_irq(ucb->irq); ucb1400_ts_irq_disable(ucb); ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0); } /* Must be called with ts->lock held */ static void ucb1400_ts_start(struct ucb1400_ts *ucb) { /* Tell IRQ thread that it may poll the device. */ ucb->stopped = false; mb(); ucb1400_ts_mode_int(ucb); ucb1400_ts_irq_enable(ucb); enable_irq(ucb->irq); } static int ucb1400_ts_open(struct input_dev *idev) { struct ucb1400_ts *ucb = input_get_drvdata(idev); ucb1400_ts_start(ucb); return 0; } static void ucb1400_ts_close(struct input_dev *idev) { struct ucb1400_ts *ucb = input_get_drvdata(idev); ucb1400_ts_stop(ucb); } #ifndef NO_IRQ #define NO_IRQ 0 #endif /* * Try to probe our interrupt, rather than relying on lots of * hard-coded machine dependencies. */ static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb, struct platform_device *pdev) { unsigned long mask, timeout; mask = probe_irq_on(); /* Enable the ADC interrupt. 
*/ ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, UCB_IE_ADC); ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_ADC); ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff); ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0); /* Cause an ADC interrupt. */ ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA); ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START); /* Wait for the conversion to complete. */ timeout = jiffies + HZ/2; while (!(ucb1400_reg_read(ucb->ac97, UCB_ADC_DATA) & UCB_ADC_DAT_VALID)) { cpu_relax(); if (time_after(jiffies, timeout)) { dev_err(&pdev->dev, "timed out in IRQ probe\n"); probe_irq_off(mask); return -ENODEV; } } ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, 0); /* Disable and clear interrupt. */ ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, 0); ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0); ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff); ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0); /* Read triggered interrupt. */ ucb->irq = probe_irq_off(mask); if (ucb->irq < 0 || ucb->irq == NO_IRQ) return -ENODEV; return 0; } static int __devinit ucb1400_ts_probe(struct platform_device *pdev) { struct ucb1400_ts *ucb = pdev->dev.platform_data; int error, x_res, y_res; u16 fcsr; ucb->ts_idev = input_allocate_device(); if (!ucb->ts_idev) { error = -ENOMEM; goto err; } /* Only in case the IRQ line wasn't supplied, try detecting it */ if (ucb->irq < 0) { error = ucb1400_ts_detect_irq(ucb, pdev); if (error) { dev_err(&pdev->dev, "IRQ probe failed\n"); goto err_free_devs; } } dev_dbg(&pdev->dev, "found IRQ %d\n", ucb->irq); init_waitqueue_head(&ucb->ts_wait); input_set_drvdata(ucb->ts_idev, ucb); ucb->ts_idev->dev.parent = &pdev->dev; ucb->ts_idev->name = "UCB1400 touchscreen interface"; ucb->ts_idev->id.vendor = ucb1400_reg_read(ucb->ac97, AC97_VENDOR_ID1); ucb->ts_idev->id.product = ucb->id; ucb->ts_idev->open = ucb1400_ts_open; ucb->ts_idev->close = ucb1400_ts_close; ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); 
ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); /* * Enable ADC filter to prevent horrible jitter on Colibri. * This also further reduces jitter on boards where ADCSYNC * pin is connected. */ fcsr = ucb1400_reg_read(ucb->ac97, UCB_FCSR); ucb1400_reg_write(ucb->ac97, UCB_FCSR, fcsr | UCB_FCSR_AVE); ucb1400_adc_enable(ucb->ac97); x_res = ucb1400_ts_read_xres(ucb); y_res = ucb1400_ts_read_yres(ucb); ucb1400_adc_disable(ucb->ac97); dev_dbg(&pdev->dev, "x/y = %d/%d\n", x_res, y_res); input_set_abs_params(ucb->ts_idev, ABS_X, 0, x_res, 0, 0); input_set_abs_params(ucb->ts_idev, ABS_Y, 0, y_res, 0, 0); input_set_abs_params(ucb->ts_idev, ABS_PRESSURE, 0, 0, 0, 0); ucb1400_ts_stop(ucb); error = request_threaded_irq(ucb->irq, NULL, ucb1400_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "UCB1400", ucb); if (error) { dev_err(&pdev->dev, "unable to grab irq%d: %d\n", ucb->irq, error); goto err_free_devs; } error = input_register_device(ucb->ts_idev); if (error) goto err_free_irq; return 0; err_free_irq: free_irq(ucb->irq, ucb); err_free_devs: input_free_device(ucb->ts_idev); err: return error; } static int __devexit ucb1400_ts_remove(struct platform_device *pdev) { struct ucb1400_ts *ucb = pdev->dev.platform_data; free_irq(ucb->irq, ucb); input_unregister_device(ucb->ts_idev); return 0; } #ifdef CONFIG_PM_SLEEP static int ucb1400_ts_suspend(struct device *dev) { struct ucb1400_ts *ucb = dev->platform_data; struct input_dev *idev = ucb->ts_idev; mutex_lock(&idev->mutex); if (idev->users) ucb1400_ts_start(ucb); mutex_unlock(&idev->mutex); return 0; } static int ucb1400_ts_resume(struct device *dev) { struct ucb1400_ts *ucb = dev->platform_data; struct input_dev *idev = ucb->ts_idev; mutex_lock(&idev->mutex); if (idev->users) ucb1400_ts_stop(ucb); mutex_unlock(&idev->mutex); return 0; } #endif static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops, ucb1400_ts_suspend, ucb1400_ts_resume); static struct platform_driver ucb1400_ts_driver = { .probe = ucb1400_ts_probe, .remove = 
__devexit_p(ucb1400_ts_remove), .driver = { .name = "ucb1400_ts", .owner = THIS_MODULE, .pm = &ucb1400_ts_pm_ops, }, }; module_platform_driver(ucb1400_ts_driver); module_param(adcsync, bool, 0444); MODULE_PARM_DESC(adcsync, "Synchronize touch readings with ADCSYNC pin."); module_param(ts_delay, int, 0444); MODULE_PARM_DESC(ts_delay, "Delay between panel setup and" " position read. Default = 55us."); module_param(ts_delay_pressure, int, 0444); MODULE_PARM_DESC(ts_delay_pressure, "delay between panel setup and pressure read." " Default = 0us."); MODULE_DESCRIPTION("Philips UCB1400 touchscreen driver"); MODULE_LICENSE("GPL");
gpl-2.0
garwynn/SMN900P_MI5_Kernel
ipc/ipc_sysctl.c
5552
6188
/* * Copyright (C) 2007 * * Author: Eric Biederman <ebiederm@xmision.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/module.h> #include <linux/ipc.h> #include <linux/nsproxy.h> #include <linux/sysctl.h> #include <linux/uaccess.h> #include <linux/ipc_namespace.h> #include <linux/msg.h> #include "util.h" static void *get_ipc(ctl_table *table) { char *which = table->data; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns; return which; } #ifdef CONFIG_PROC_SYSCTL static int proc_ipc_dointvec(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); return proc_dointvec(&ipc_table, write, buffer, lenp, ppos); } static int proc_ipc_dointvec_minmax(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); } static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ipc_namespace *ns = current->nsproxy->ipc_ns; int err = proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos); if (err < 0) return err; if (ns->shm_rmid_forced) shm_destroy_orphaned(ns); return err; } static int proc_ipc_callback_dointvec(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; size_t lenp_bef = *lenp; int rc; memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); rc = proc_dointvec(&ipc_table, write, buffer, lenp, ppos); if (write && !rc && lenp_bef == *lenp) /* * Tunable has 
successfully been changed by hand. Disable its * automatic adjustment. This simply requires unregistering * the notifiers that trigger recalculation. */ unregister_ipcns_notifier(current->nsproxy->ipc_ns); return rc; } static int proc_ipc_doulongvec_minmax(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); return proc_doulongvec_minmax(&ipc_table, write, buffer, lenp, ppos); } /* * Routine that is called when the file "auto_msgmni" has successfully been * written. * Two values are allowed: * 0: unregister msgmni's callback routine from the ipc namespace notifier * chain. This means that msgmni won't be recomputed anymore upon memory * add/remove or ipc namespace creation/removal. * 1: register back the callback routine. */ static void ipc_auto_callback(int val) { if (!val) unregister_ipcns_notifier(current->nsproxy->ipc_ns); else { /* * Re-enable automatic recomputing only if not already * enabled. */ recompute_msgmni(current->nsproxy->ipc_ns); cond_register_ipcns_notifier(current->nsproxy->ipc_ns); } } static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; size_t lenp_bef = *lenp; int oldval; int rc; memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); oldval = *((int *)(ipc_table.data)); rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); if (write && !rc && lenp_bef == *lenp) { int newval = *((int *)(ipc_table.data)); /* * The file "auto_msgmni" has correctly been set. * React by (un)registering the corresponding tunable, if the * value has changed. 
*/ if (newval != oldval) ipc_auto_callback(newval); } return rc; } #else #define proc_ipc_doulongvec_minmax NULL #define proc_ipc_dointvec NULL #define proc_ipc_dointvec_minmax NULL #define proc_ipc_dointvec_minmax_orphans NULL #define proc_ipc_callback_dointvec NULL #define proc_ipcauto_dointvec_minmax NULL #endif static int zero; static int one = 1; static struct ctl_table ipc_kern_table[] = { { .procname = "shmmax", .data = &init_ipc_ns.shm_ctlmax, .maxlen = sizeof (init_ipc_ns.shm_ctlmax), .mode = 0644, .proc_handler = proc_ipc_doulongvec_minmax, }, { .procname = "shmall", .data = &init_ipc_ns.shm_ctlall, .maxlen = sizeof (init_ipc_ns.shm_ctlall), .mode = 0644, .proc_handler = proc_ipc_doulongvec_minmax, }, { .procname = "shmmni", .data = &init_ipc_ns.shm_ctlmni, .maxlen = sizeof (init_ipc_ns.shm_ctlmni), .mode = 0644, .proc_handler = proc_ipc_dointvec, }, { .procname = "shm_rmid_forced", .data = &init_ipc_ns.shm_rmid_forced, .maxlen = sizeof(init_ipc_ns.shm_rmid_forced), .mode = 0644, .proc_handler = proc_ipc_dointvec_minmax_orphans, .extra1 = &zero, .extra2 = &one, }, { .procname = "msgmax", .data = &init_ipc_ns.msg_ctlmax, .maxlen = sizeof (init_ipc_ns.msg_ctlmax), .mode = 0644, .proc_handler = proc_ipc_dointvec, }, { .procname = "msgmni", .data = &init_ipc_ns.msg_ctlmni, .maxlen = sizeof (init_ipc_ns.msg_ctlmni), .mode = 0644, .proc_handler = proc_ipc_callback_dointvec, }, { .procname = "msgmnb", .data = &init_ipc_ns.msg_ctlmnb, .maxlen = sizeof (init_ipc_ns.msg_ctlmnb), .mode = 0644, .proc_handler = proc_ipc_dointvec, }, { .procname = "sem", .data = &init_ipc_ns.sem_ctls, .maxlen = 4*sizeof (int), .mode = 0644, .proc_handler = proc_ipc_dointvec, }, { .procname = "auto_msgmni", .data = &init_ipc_ns.auto_msgmni, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_ipcauto_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, {} }; static struct ctl_table ipc_root_table[] = { { .procname = "kernel", .mode = 0555, .child = ipc_kern_table, }, {} }; static 
int __init ipc_sysctl_init(void) { register_sysctl_table(ipc_root_table); return 0; } __initcall(ipc_sysctl_init);
gpl-2.0
uberlaggydarwin/htc-bfam-caf
sound/drivers/vx/vx_pcm.c
8112
34887
/* * Driver for Digigram VX soundcards * * PCM part * * Copyright (c) 2002,2003 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * STRATEGY * for playback, we send series of "chunks", which size is equal with the * IBL size, typically 126 samples. at each end of chunk, the end-of-buffer * interrupt is notified, and the interrupt handler will feed the next chunk. * * the current position is calculated from the sample count RMH. * pipe->transferred is the counter of data which has been already transferred. * if this counter reaches to the period size, snd_pcm_period_elapsed() will * be issued. * * for capture, the situation is much easier. * to get a low latency response, we'll check the capture streams at each * interrupt (capture stream has no EOB notification). if the pending * data is accumulated to the period size, snd_pcm_period_elapsed() is * called and the pointer is updated. * * the current point of read buffer is kept in pipe->hw_ptr. note that * this is in bytes. * * * TODO * - linked trigger for full-duplex mode. * - scheduled action on the stream. 
*/ #include <linux/slab.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/asoundef.h> #include <sound/pcm.h> #include <sound/vx_core.h> #include "vx_cmd.h" /* * read three pending pcm bytes via inb() */ static void vx_pcm_read_per_bytes(struct vx_core *chip, struct snd_pcm_runtime *runtime, struct vx_pipe *pipe) { int offset = pipe->hw_ptr; unsigned char *buf = (unsigned char *)(runtime->dma_area + offset); *buf++ = vx_inb(chip, RXH); if (++offset >= pipe->buffer_bytes) { offset = 0; buf = (unsigned char *)runtime->dma_area; } *buf++ = vx_inb(chip, RXM); if (++offset >= pipe->buffer_bytes) { offset = 0; buf = (unsigned char *)runtime->dma_area; } *buf++ = vx_inb(chip, RXL); if (++offset >= pipe->buffer_bytes) { offset = 0; buf = (unsigned char *)runtime->dma_area; } pipe->hw_ptr = offset; } /* * vx_set_pcx_time - convert from the PC time to the RMH status time. * @pc_time: the pointer for the PC-time to set * @dsp_time: the pointer for RMH status time array */ static void vx_set_pcx_time(struct vx_core *chip, pcx_time_t *pc_time, unsigned int *dsp_time) { dsp_time[0] = (unsigned int)((*pc_time) >> 24) & PCX_TIME_HI_MASK; dsp_time[1] = (unsigned int)(*pc_time) & MASK_DSP_WORD; } /* * vx_set_differed_time - set the differed time if specified * @rmh: the rmh record to modify * @pipe: the pipe to be checked * * if the pipe is programmed with the differed time, set the DSP time * on the rmh and changes its command length. * * returns the increase of the command length. */ static int vx_set_differed_time(struct vx_core *chip, struct vx_rmh *rmh, struct vx_pipe *pipe) { /* Update The length added to the RMH command by the timestamp */ if (! 
(pipe->differed_type & DC_DIFFERED_DELAY)) return 0; /* Set the T bit */ rmh->Cmd[0] |= DSP_DIFFERED_COMMAND_MASK; /* Time stamp is the 1st following parameter */ vx_set_pcx_time(chip, &pipe->pcx_time, &rmh->Cmd[1]); /* Add the flags to a notified differed command */ if (pipe->differed_type & DC_NOTIFY_DELAY) rmh->Cmd[1] |= NOTIFY_MASK_TIME_HIGH ; /* Add the flags to a multiple differed command */ if (pipe->differed_type & DC_MULTIPLE_DELAY) rmh->Cmd[1] |= MULTIPLE_MASK_TIME_HIGH; /* Add the flags to a stream-time differed command */ if (pipe->differed_type & DC_STREAM_TIME_DELAY) rmh->Cmd[1] |= STREAM_MASK_TIME_HIGH; rmh->LgCmd += 2; return 2; } /* * vx_set_stream_format - send the stream format command * @pipe: the affected pipe * @data: format bitmask */ static int vx_set_stream_format(struct vx_core *chip, struct vx_pipe *pipe, unsigned int data) { struct vx_rmh rmh; vx_init_rmh(&rmh, pipe->is_capture ? CMD_FORMAT_STREAM_IN : CMD_FORMAT_STREAM_OUT); rmh.Cmd[0] |= pipe->number << FIELD_SIZE; /* Command might be longer since we may have to add a timestamp */ vx_set_differed_time(chip, &rmh, pipe); rmh.Cmd[rmh.LgCmd] = (data & 0xFFFFFF00) >> 8; rmh.Cmd[rmh.LgCmd + 1] = (data & 0xFF) << 16 /*| (datal & 0xFFFF00) >> 8*/; rmh.LgCmd += 2; return vx_send_msg(chip, &rmh); } /* * vx_set_format - set the format of a pipe * @pipe: the affected pipe * @runtime: pcm runtime instance to be referred * * returns 0 if successful, or a negative error code. 
*/ static int vx_set_format(struct vx_core *chip, struct vx_pipe *pipe, struct snd_pcm_runtime *runtime) { unsigned int header = HEADER_FMT_BASE; if (runtime->channels == 1) header |= HEADER_FMT_MONO; if (snd_pcm_format_little_endian(runtime->format)) header |= HEADER_FMT_INTEL; if (runtime->rate < 32000 && runtime->rate > 11025) header |= HEADER_FMT_UPTO32; else if (runtime->rate <= 11025) header |= HEADER_FMT_UPTO11; switch (snd_pcm_format_physical_width(runtime->format)) { // case 8: break; case 16: header |= HEADER_FMT_16BITS; break; case 24: header |= HEADER_FMT_24BITS; break; default : snd_BUG(); return -EINVAL; }; return vx_set_stream_format(chip, pipe, header); } /* * set / query the IBL size */ static int vx_set_ibl(struct vx_core *chip, struct vx_ibl_info *info) { int err; struct vx_rmh rmh; vx_init_rmh(&rmh, CMD_IBL); rmh.Cmd[0] |= info->size & 0x03ffff; err = vx_send_msg(chip, &rmh); if (err < 0) return err; info->size = rmh.Stat[0]; info->max_size = rmh.Stat[1]; info->min_size = rmh.Stat[2]; info->granularity = rmh.Stat[3]; snd_printdd(KERN_DEBUG "vx_set_ibl: size = %d, max = %d, min = %d, gran = %d\n", info->size, info->max_size, info->min_size, info->granularity); return 0; } /* * vx_get_pipe_state - get the state of a pipe * @pipe: the pipe to be checked * @state: the pointer for the returned state * * checks the state of a given pipe, and stores the state (1 = running, * 0 = paused) on the given pointer. * * called from trigger callback only */ static int vx_get_pipe_state(struct vx_core *chip, struct vx_pipe *pipe, int *state) { int err; struct vx_rmh rmh; vx_init_rmh(&rmh, CMD_PIPE_STATE); vx_set_pipe_cmd_params(&rmh, pipe->is_capture, pipe->number, 0); err = vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */ if (! err) *state = (rmh.Stat[0] & (1 << pipe->number)) ? 
1 : 0; return err; } /* * vx_query_hbuffer_size - query available h-buffer size in bytes * @pipe: the pipe to be checked * * return the available size on h-buffer in bytes, * or a negative error code. * * NOTE: calling this function always switches to the stream mode. * you'll need to disconnect the host to get back to the * normal mode. */ static int vx_query_hbuffer_size(struct vx_core *chip, struct vx_pipe *pipe) { int result; struct vx_rmh rmh; vx_init_rmh(&rmh, CMD_SIZE_HBUFFER); vx_set_pipe_cmd_params(&rmh, pipe->is_capture, pipe->number, 0); if (pipe->is_capture) rmh.Cmd[0] |= 0x00000001; result = vx_send_msg(chip, &rmh); if (! result) result = rmh.Stat[0] & 0xffff; return result; } /* * vx_pipe_can_start - query whether a pipe is ready for start * @pipe: the pipe to be checked * * return 1 if ready, 0 if not ready, and negative value on error. * * called from trigger callback only */ static int vx_pipe_can_start(struct vx_core *chip, struct vx_pipe *pipe) { int err; struct vx_rmh rmh; vx_init_rmh(&rmh, CMD_CAN_START_PIPE); vx_set_pipe_cmd_params(&rmh, pipe->is_capture, pipe->number, 0); rmh.Cmd[0] |= 1; err = vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */ if (! err) { if (rmh.Stat[0]) err = 1; } return err; } /* * vx_conf_pipe - tell the pipe to stand by and wait for IRQA. 
* @pipe: the pipe to be configured */ static int vx_conf_pipe(struct vx_core *chip, struct vx_pipe *pipe) { struct vx_rmh rmh; vx_init_rmh(&rmh, CMD_CONF_PIPE); if (pipe->is_capture) rmh.Cmd[0] |= COMMAND_RECORD_MASK; rmh.Cmd[1] = 1 << pipe->number; return vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */ } /* * vx_send_irqa - trigger IRQA */ static int vx_send_irqa(struct vx_core *chip) { struct vx_rmh rmh; vx_init_rmh(&rmh, CMD_SEND_IRQA); return vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */ } #define MAX_WAIT_FOR_DSP 250 /* * vx boards do not support inter-card sync, besides * only 126 samples require to be prepared before a pipe can start */ #define CAN_START_DELAY 2 /* wait 2ms only before asking if the pipe is ready*/ #define WAIT_STATE_DELAY 2 /* wait 2ms after irqA was requested and check if the pipe state toggled*/ /* * vx_toggle_pipe - start / pause a pipe * @pipe: the pipe to be triggered * @state: start = 1, pause = 0 * * called from trigger callback only * */ static int vx_toggle_pipe(struct vx_core *chip, struct vx_pipe *pipe, int state) { int err, i, cur_state; /* Check the pipe is not already in the requested state */ if (vx_get_pipe_state(chip, pipe, &cur_state) < 0) return -EBADFD; if (state == cur_state) return 0; /* If a start is requested, ask the DSP to get prepared * and wait for a positive acknowledge (when there are * enough sound buffer for this pipe) */ if (state) { for (i = 0 ; i < MAX_WAIT_FOR_DSP; i++) { err = vx_pipe_can_start(chip, pipe); if (err > 0) break; /* Wait for a few, before asking again * to avoid flooding the DSP with our requests */ mdelay(1); } } if ((err = vx_conf_pipe(chip, pipe)) < 0) return err; if ((err = vx_send_irqa(chip)) < 0) return err; /* If it completes successfully, wait for the pipes * reaching the expected state before returning * Check one pipe only (since they are synchronous) */ for (i = 0; i < MAX_WAIT_FOR_DSP; i++) { err = vx_get_pipe_state(chip, pipe, &cur_state); if 
(err < 0 || cur_state == state) break; err = -EIO; mdelay(1); } return err < 0 ? -EIO : 0; } /* * vx_stop_pipe - stop a pipe * @pipe: the pipe to be stopped * * called from trigger callback only */ static int vx_stop_pipe(struct vx_core *chip, struct vx_pipe *pipe) { struct vx_rmh rmh; vx_init_rmh(&rmh, CMD_STOP_PIPE); vx_set_pipe_cmd_params(&rmh, pipe->is_capture, pipe->number, 0); return vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */ } /* * vx_alloc_pipe - allocate a pipe and initialize the pipe instance * @capture: 0 = playback, 1 = capture operation * @audioid: the audio id to be assigned * @num_audio: number of audio channels * @pipep: the returned pipe instance * * return 0 on success, or a negative error code. */ static int vx_alloc_pipe(struct vx_core *chip, int capture, int audioid, int num_audio, struct vx_pipe **pipep) { int err; struct vx_pipe *pipe; struct vx_rmh rmh; int data_mode; *pipep = NULL; vx_init_rmh(&rmh, CMD_RES_PIPE); vx_set_pipe_cmd_params(&rmh, capture, audioid, num_audio); #if 0 // NYI if (underrun_skip_sound) rmh.Cmd[0] |= BIT_SKIP_SOUND; #endif // NYI data_mode = (chip->uer_bits & IEC958_AES0_NONAUDIO) != 0; if (! capture && data_mode) rmh.Cmd[0] |= BIT_DATA_MODE; err = vx_send_msg(chip, &rmh); if (err < 0) return err; /* initialize the pipe record */ pipe = kzalloc(sizeof(*pipe), GFP_KERNEL); if (! 
pipe) { /* release the pipe */ vx_init_rmh(&rmh, CMD_FREE_PIPE); vx_set_pipe_cmd_params(&rmh, capture, audioid, 0); vx_send_msg(chip, &rmh); return -ENOMEM; } /* the pipe index should be identical with the audio index */ pipe->number = audioid; pipe->is_capture = capture; pipe->channels = num_audio; pipe->differed_type = 0; pipe->pcx_time = 0; pipe->data_mode = data_mode; *pipep = pipe; return 0; } /* * vx_free_pipe - release a pipe * @pipe: pipe to be released */ static int vx_free_pipe(struct vx_core *chip, struct vx_pipe *pipe) { struct vx_rmh rmh; vx_init_rmh(&rmh, CMD_FREE_PIPE); vx_set_pipe_cmd_params(&rmh, pipe->is_capture, pipe->number, 0); vx_send_msg(chip, &rmh); kfree(pipe); return 0; } /* * vx_start_stream - start the stream * * called from trigger callback only */ static int vx_start_stream(struct vx_core *chip, struct vx_pipe *pipe) { struct vx_rmh rmh; vx_init_rmh(&rmh, CMD_START_ONE_STREAM); vx_set_stream_cmd_params(&rmh, pipe->is_capture, pipe->number); vx_set_differed_time(chip, &rmh, pipe); return vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */ } /* * vx_stop_stream - stop the stream * * called from trigger callback only */ static int vx_stop_stream(struct vx_core *chip, struct vx_pipe *pipe) { struct vx_rmh rmh; vx_init_rmh(&rmh, CMD_STOP_STREAM); vx_set_stream_cmd_params(&rmh, pipe->is_capture, pipe->number); return vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */ } /* * playback hw information */ static struct snd_pcm_hardware vx_pcm_playback_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID /*|*/ /*SNDRV_PCM_INFO_RESUME*/), .formats = (/*SNDRV_PCM_FMTBIT_U8 |*/ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE), .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 5000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 126, .period_bytes_max = (128*1024), 
.periods_min = 2, .periods_max = VX_MAX_PERIODS, .fifo_size = 126, }; static void vx_pcm_delayed_start(unsigned long arg); /* * vx_pcm_playback_open - open callback for playback */ static int vx_pcm_playback_open(struct snd_pcm_substream *subs) { struct snd_pcm_runtime *runtime = subs->runtime; struct vx_core *chip = snd_pcm_substream_chip(subs); struct vx_pipe *pipe = NULL; unsigned int audio; int err; if (chip->chip_status & VX_STAT_IS_STALE) return -EBUSY; audio = subs->pcm->device * 2; if (snd_BUG_ON(audio >= chip->audio_outs)) return -EINVAL; /* playback pipe may have been already allocated for monitoring */ pipe = chip->playback_pipes[audio]; if (! pipe) { /* not allocated yet */ err = vx_alloc_pipe(chip, 0, audio, 2, &pipe); /* stereo playback */ if (err < 0) return err; chip->playback_pipes[audio] = pipe; } /* open for playback */ pipe->references++; pipe->substream = subs; tasklet_init(&pipe->start_tq, vx_pcm_delayed_start, (unsigned long)subs); chip->playback_pipes[audio] = pipe; runtime->hw = vx_pcm_playback_hw; runtime->hw.period_bytes_min = chip->ibl.size; runtime->private_data = pipe; /* align to 4 bytes (otherwise will be problematic when 24bit is used) */ snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 4); snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 4); return 0; } /* * vx_pcm_playback_close - close callback for playback */ static int vx_pcm_playback_close(struct snd_pcm_substream *subs) { struct vx_core *chip = snd_pcm_substream_chip(subs); struct vx_pipe *pipe; if (! subs->runtime->private_data) return -EINVAL; pipe = subs->runtime->private_data; if (--pipe->references == 0) { chip->playback_pipes[pipe->number] = NULL; vx_free_pipe(chip, pipe); } return 0; } /* * vx_notify_end_of_buffer - send "end-of-buffer" notifier at the given pipe * @pipe: the pipe to notify * * NB: call with a certain lock. 
*/ static int vx_notify_end_of_buffer(struct vx_core *chip, struct vx_pipe *pipe) { int err; struct vx_rmh rmh; /* use a temporary rmh here */ /* Toggle Dsp Host Interface into Message mode */ vx_send_rih_nolock(chip, IRQ_PAUSE_START_CONNECT); vx_init_rmh(&rmh, CMD_NOTIFY_END_OF_BUFFER); vx_set_stream_cmd_params(&rmh, 0, pipe->number); err = vx_send_msg_nolock(chip, &rmh); if (err < 0) return err; /* Toggle Dsp Host Interface back to sound transfer mode */ vx_send_rih_nolock(chip, IRQ_PAUSE_START_CONNECT); return 0; } /* * vx_pcm_playback_transfer_chunk - transfer a single chunk * @subs: substream * @pipe: the pipe to transfer * @size: chunk size in bytes * * transfer a single buffer chunk. EOB notificaton is added after that. * called from the interrupt handler, too. * * return 0 if ok. */ static int vx_pcm_playback_transfer_chunk(struct vx_core *chip, struct snd_pcm_runtime *runtime, struct vx_pipe *pipe, int size) { int space, err = 0; space = vx_query_hbuffer_size(chip, pipe); if (space < 0) { /* disconnect the host, SIZE_HBUF command always switches to the stream mode */ vx_send_rih(chip, IRQ_CONNECT_STREAM_NEXT); snd_printd("error hbuffer\n"); return space; } if (space < size) { vx_send_rih(chip, IRQ_CONNECT_STREAM_NEXT); snd_printd("no enough hbuffer space %d\n", space); return -EIO; /* XRUN */ } /* we don't need irqsave here, because this function * is called from either trigger callback or irq handler */ spin_lock(&chip->lock); vx_pseudo_dma_write(chip, runtime, pipe, size); err = vx_notify_end_of_buffer(chip, pipe); /* disconnect the host, SIZE_HBUF command always switches to the stream mode */ vx_send_rih_nolock(chip, IRQ_CONNECT_STREAM_NEXT); spin_unlock(&chip->lock); return err; } /* * update the position of the given pipe. * pipe->position is updated and wrapped within the buffer size. * pipe->transferred is updated, too, but the size is not wrapped, * so that the caller can check the total transferred size later * (to call snd_pcm_period_elapsed). 
*/ static int vx_update_pipe_position(struct vx_core *chip, struct snd_pcm_runtime *runtime, struct vx_pipe *pipe) { struct vx_rmh rmh; int err, update; u64 count; vx_init_rmh(&rmh, CMD_STREAM_SAMPLE_COUNT); vx_set_pipe_cmd_params(&rmh, pipe->is_capture, pipe->number, 0); err = vx_send_msg(chip, &rmh); if (err < 0) return err; count = ((u64)(rmh.Stat[0] & 0xfffff) << 24) | (u64)rmh.Stat[1]; update = (int)(count - pipe->cur_count); pipe->cur_count = count; pipe->position += update; if (pipe->position >= (int)runtime->buffer_size) pipe->position %= runtime->buffer_size; pipe->transferred += update; return 0; } /* * transfer the pending playback buffer data to DSP * called from interrupt handler */ static void vx_pcm_playback_transfer(struct vx_core *chip, struct snd_pcm_substream *subs, struct vx_pipe *pipe, int nchunks) { int i, err; struct snd_pcm_runtime *runtime = subs->runtime; if (! pipe->prepared || (chip->chip_status & VX_STAT_IS_STALE)) return; for (i = 0; i < nchunks; i++) { if ((err = vx_pcm_playback_transfer_chunk(chip, runtime, pipe, chip->ibl.size)) < 0) return; } } /* * update the playback position and call snd_pcm_period_elapsed() if necessary * called from interrupt handler */ static void vx_pcm_playback_update(struct vx_core *chip, struct snd_pcm_substream *subs, struct vx_pipe *pipe) { int err; struct snd_pcm_runtime *runtime = subs->runtime; if (pipe->running && ! (chip->chip_status & VX_STAT_IS_STALE)) { if ((err = vx_update_pipe_position(chip, runtime, pipe)) < 0) return; if (pipe->transferred >= (int)runtime->period_size) { pipe->transferred %= runtime->period_size; snd_pcm_period_elapsed(subs); } } } /* * start the stream and pipe. * this function is called from tasklet, which is invoked by the trigger * START callback. 
*/ static void vx_pcm_delayed_start(unsigned long arg) { struct snd_pcm_substream *subs = (struct snd_pcm_substream *)arg; struct vx_core *chip = subs->pcm->private_data; struct vx_pipe *pipe = subs->runtime->private_data; int err; /* printk( KERN_DEBUG "DDDD tasklet delayed start jiffies = %ld\n", jiffies);*/ if ((err = vx_start_stream(chip, pipe)) < 0) { snd_printk(KERN_ERR "vx: cannot start stream\n"); return; } if ((err = vx_toggle_pipe(chip, pipe, 1)) < 0) { snd_printk(KERN_ERR "vx: cannot start pipe\n"); return; } /* printk( KERN_DEBUG "dddd tasklet delayed start jiffies = %ld \n", jiffies);*/ } /* * vx_pcm_playback_trigger - trigger callback for playback */ static int vx_pcm_trigger(struct snd_pcm_substream *subs, int cmd) { struct vx_core *chip = snd_pcm_substream_chip(subs); struct vx_pipe *pipe = subs->runtime->private_data; int err; if (chip->chip_status & VX_STAT_IS_STALE) return -EBUSY; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: if (! pipe->is_capture) vx_pcm_playback_transfer(chip, subs, pipe, 2); /* FIXME: * we trigger the pipe using tasklet, so that the interrupts are * issued surely after the trigger is completed. 
*/ tasklet_schedule(&pipe->start_tq); chip->pcm_running++; pipe->running = 1; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: vx_toggle_pipe(chip, pipe, 0); vx_stop_pipe(chip, pipe); vx_stop_stream(chip, pipe); chip->pcm_running--; pipe->running = 0; break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if ((err = vx_toggle_pipe(chip, pipe, 0)) < 0) return err; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if ((err = vx_toggle_pipe(chip, pipe, 1)) < 0) return err; break; default: return -EINVAL; } return 0; } /* * vx_pcm_playback_pointer - pointer callback for playback */ static snd_pcm_uframes_t vx_pcm_playback_pointer(struct snd_pcm_substream *subs) { struct snd_pcm_runtime *runtime = subs->runtime; struct vx_pipe *pipe = runtime->private_data; return pipe->position; } /* * vx_pcm_hw_params - hw_params callback for playback and capture */ static int vx_pcm_hw_params(struct snd_pcm_substream *subs, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_alloc_vmalloc_32_buffer (subs, params_buffer_bytes(hw_params)); } /* * vx_pcm_hw_free - hw_free callback for playback and capture */ static int vx_pcm_hw_free(struct snd_pcm_substream *subs) { return snd_pcm_lib_free_vmalloc_buffer(subs); } /* * vx_pcm_prepare - prepare callback for playback and capture */ static int vx_pcm_prepare(struct snd_pcm_substream *subs) { struct vx_core *chip = snd_pcm_substream_chip(subs); struct snd_pcm_runtime *runtime = subs->runtime; struct vx_pipe *pipe = runtime->private_data; int err, data_mode; // int max_size, nchunks; if (chip->chip_status & VX_STAT_IS_STALE) return -EBUSY; data_mode = (chip->uer_bits & IEC958_AES0_NONAUDIO) != 0; if (data_mode != pipe->data_mode && ! 
pipe->is_capture) { /* IEC958 status (raw-mode) was changed */ /* we reopen the pipe */ struct vx_rmh rmh; snd_printdd(KERN_DEBUG "reopen the pipe with data_mode = %d\n", data_mode); vx_init_rmh(&rmh, CMD_FREE_PIPE); vx_set_pipe_cmd_params(&rmh, 0, pipe->number, 0); if ((err = vx_send_msg(chip, &rmh)) < 0) return err; vx_init_rmh(&rmh, CMD_RES_PIPE); vx_set_pipe_cmd_params(&rmh, 0, pipe->number, pipe->channels); if (data_mode) rmh.Cmd[0] |= BIT_DATA_MODE; if ((err = vx_send_msg(chip, &rmh)) < 0) return err; pipe->data_mode = data_mode; } if (chip->pcm_running && chip->freq != runtime->rate) { snd_printk(KERN_ERR "vx: cannot set different clock %d " "from the current %d\n", runtime->rate, chip->freq); return -EINVAL; } vx_set_clock(chip, runtime->rate); if ((err = vx_set_format(chip, pipe, runtime)) < 0) return err; if (vx_is_pcmcia(chip)) { pipe->align = 2; /* 16bit word */ } else { pipe->align = 4; /* 32bit word */ } pipe->buffer_bytes = frames_to_bytes(runtime, runtime->buffer_size); pipe->period_bytes = frames_to_bytes(runtime, runtime->period_size); pipe->hw_ptr = 0; /* set the timestamp */ vx_update_pipe_position(chip, runtime, pipe); /* clear again */ pipe->transferred = 0; pipe->position = 0; pipe->prepared = 1; return 0; } /* * operators for PCM playback */ static struct snd_pcm_ops vx_pcm_playback_ops = { .open = vx_pcm_playback_open, .close = vx_pcm_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = vx_pcm_hw_params, .hw_free = vx_pcm_hw_free, .prepare = vx_pcm_prepare, .trigger = vx_pcm_trigger, .pointer = vx_pcm_playback_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; /* * playback hw information */ static struct snd_pcm_hardware vx_pcm_capture_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID /*|*/ /*SNDRV_PCM_INFO_RESUME*/), .formats = (/*SNDRV_PCM_FMTBIT_U8 |*/ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE), .rates = 
SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 5000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 126, .period_bytes_max = (128*1024), .periods_min = 2, .periods_max = VX_MAX_PERIODS, .fifo_size = 126, }; /* * vx_pcm_capture_open - open callback for capture */ static int vx_pcm_capture_open(struct snd_pcm_substream *subs) { struct snd_pcm_runtime *runtime = subs->runtime; struct vx_core *chip = snd_pcm_substream_chip(subs); struct vx_pipe *pipe; struct vx_pipe *pipe_out_monitoring = NULL; unsigned int audio; int err; if (chip->chip_status & VX_STAT_IS_STALE) return -EBUSY; audio = subs->pcm->device * 2; if (snd_BUG_ON(audio >= chip->audio_ins)) return -EINVAL; err = vx_alloc_pipe(chip, 1, audio, 2, &pipe); if (err < 0) return err; pipe->substream = subs; tasklet_init(&pipe->start_tq, vx_pcm_delayed_start, (unsigned long)subs); chip->capture_pipes[audio] = pipe; /* check if monitoring is needed */ if (chip->audio_monitor_active[audio]) { pipe_out_monitoring = chip->playback_pipes[audio]; if (! pipe_out_monitoring) { /* allocate a pipe */ err = vx_alloc_pipe(chip, 0, audio, 2, &pipe_out_monitoring); if (err < 0) return err; chip->playback_pipes[audio] = pipe_out_monitoring; } pipe_out_monitoring->references++; /* if an output pipe is available, it's audios still may need to be unmuted. hence we'll have to call a mixer entry point. 
*/ vx_set_monitor_level(chip, audio, chip->audio_monitor[audio], chip->audio_monitor_active[audio]); /* assuming stereo */ vx_set_monitor_level(chip, audio+1, chip->audio_monitor[audio+1], chip->audio_monitor_active[audio+1]); } pipe->monitoring_pipe = pipe_out_monitoring; /* default value NULL */ runtime->hw = vx_pcm_capture_hw; runtime->hw.period_bytes_min = chip->ibl.size; runtime->private_data = pipe; /* align to 4 bytes (otherwise will be problematic when 24bit is used) */ snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 4); snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 4); return 0; } /* * vx_pcm_capture_close - close callback for capture */ static int vx_pcm_capture_close(struct snd_pcm_substream *subs) { struct vx_core *chip = snd_pcm_substream_chip(subs); struct vx_pipe *pipe; struct vx_pipe *pipe_out_monitoring; if (! subs->runtime->private_data) return -EINVAL; pipe = subs->runtime->private_data; chip->capture_pipes[pipe->number] = NULL; pipe_out_monitoring = pipe->monitoring_pipe; /* if an output pipe is attached to this input, check if it needs to be released. */ if (pipe_out_monitoring) { if (--pipe_out_monitoring->references == 0) { vx_free_pipe(chip, pipe_out_monitoring); chip->playback_pipes[pipe->number] = NULL; pipe->monitoring_pipe = NULL; } } vx_free_pipe(chip, pipe); return 0; } #define DMA_READ_ALIGN 6 /* hardware alignment for read */ /* * vx_pcm_capture_update - update the capture buffer */ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream *subs, struct vx_pipe *pipe) { int size, space, count; struct snd_pcm_runtime *runtime = subs->runtime; if (! pipe->prepared || (chip->chip_status & VX_STAT_IS_STALE)) return; size = runtime->buffer_size - snd_pcm_capture_avail(runtime); if (! 
size) return; size = frames_to_bytes(runtime, size); space = vx_query_hbuffer_size(chip, pipe); if (space < 0) goto _error; if (size > space) size = space; size = (size / 3) * 3; /* align to 3 bytes */ if (size < DMA_READ_ALIGN) goto _error; /* keep the last 6 bytes, they will be read after disconnection */ count = size - DMA_READ_ALIGN; /* read bytes until the current pointer reaches to the aligned position * for word-transfer */ while (count > 0) { if ((pipe->hw_ptr % pipe->align) == 0) break; if (vx_wait_for_rx_full(chip) < 0) goto _error; vx_pcm_read_per_bytes(chip, runtime, pipe); count -= 3; } if (count > 0) { /* ok, let's accelerate! */ int align = pipe->align * 3; space = (count / align) * align; vx_pseudo_dma_read(chip, runtime, pipe, space); count -= space; } /* read the rest of bytes */ while (count > 0) { if (vx_wait_for_rx_full(chip) < 0) goto _error; vx_pcm_read_per_bytes(chip, runtime, pipe); count -= 3; } /* disconnect the host, SIZE_HBUF command always switches to the stream mode */ vx_send_rih_nolock(chip, IRQ_CONNECT_STREAM_NEXT); /* read the last pending 6 bytes */ count = DMA_READ_ALIGN; while (count > 0) { vx_pcm_read_per_bytes(chip, runtime, pipe); count -= 3; } /* update the position */ pipe->transferred += size; if (pipe->transferred >= pipe->period_bytes) { pipe->transferred %= pipe->period_bytes; snd_pcm_period_elapsed(subs); } return; _error: /* disconnect the host, SIZE_HBUF command always switches to the stream mode */ vx_send_rih_nolock(chip, IRQ_CONNECT_STREAM_NEXT); return; } /* * vx_pcm_capture_pointer - pointer callback for capture */ static snd_pcm_uframes_t vx_pcm_capture_pointer(struct snd_pcm_substream *subs) { struct snd_pcm_runtime *runtime = subs->runtime; struct vx_pipe *pipe = runtime->private_data; return bytes_to_frames(runtime, pipe->hw_ptr); } /* * operators for PCM capture */ static struct snd_pcm_ops vx_pcm_capture_ops = { .open = vx_pcm_capture_open, .close = vx_pcm_capture_close, .ioctl = snd_pcm_lib_ioctl, 
.hw_params = vx_pcm_hw_params, .hw_free = vx_pcm_hw_free, .prepare = vx_pcm_prepare, .trigger = vx_pcm_trigger, .pointer = vx_pcm_capture_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; /* * interrupt handler for pcm streams */ void vx_pcm_update_intr(struct vx_core *chip, unsigned int events) { unsigned int i; struct vx_pipe *pipe; #define EVENT_MASK (END_OF_BUFFER_EVENTS_PENDING|ASYNC_EVENTS_PENDING) if (events & EVENT_MASK) { vx_init_rmh(&chip->irq_rmh, CMD_ASYNC); if (events & ASYNC_EVENTS_PENDING) chip->irq_rmh.Cmd[0] |= 0x00000001; /* SEL_ASYNC_EVENTS */ if (events & END_OF_BUFFER_EVENTS_PENDING) chip->irq_rmh.Cmd[0] |= 0x00000002; /* SEL_END_OF_BUF_EVENTS */ if (vx_send_msg(chip, &chip->irq_rmh) < 0) { snd_printdd(KERN_ERR "msg send error!!\n"); return; } i = 1; while (i < chip->irq_rmh.LgStat) { int p, buf, capture, eob; p = chip->irq_rmh.Stat[i] & MASK_FIRST_FIELD; capture = (chip->irq_rmh.Stat[i] & 0x400000) ? 1 : 0; eob = (chip->irq_rmh.Stat[i] & 0x800000) ? 
1 : 0; i++; if (events & ASYNC_EVENTS_PENDING) i++; buf = 1; /* force to transfer */ if (events & END_OF_BUFFER_EVENTS_PENDING) { if (eob) buf = chip->irq_rmh.Stat[i]; i++; } if (capture) continue; if (snd_BUG_ON(p < 0 || p >= chip->audio_outs)) continue; pipe = chip->playback_pipes[p]; if (pipe && pipe->substream) { vx_pcm_playback_update(chip, pipe->substream, pipe); vx_pcm_playback_transfer(chip, pipe->substream, pipe, buf); } } } /* update the capture pcm pointers as frequently as possible */ for (i = 0; i < chip->audio_ins; i++) { pipe = chip->capture_pipes[i]; if (pipe && pipe->substream) vx_pcm_capture_update(chip, pipe->substream, pipe); } } /* * vx_init_audio_io - check the available audio i/o and allocate pipe arrays */ static int vx_init_audio_io(struct vx_core *chip) { struct vx_rmh rmh; int preferred; vx_init_rmh(&rmh, CMD_SUPPORTED); if (vx_send_msg(chip, &rmh) < 0) { snd_printk(KERN_ERR "vx: cannot get the supported audio data\n"); return -ENXIO; } chip->audio_outs = rmh.Stat[0] & MASK_FIRST_FIELD; chip->audio_ins = (rmh.Stat[0] >> (FIELD_SIZE*2)) & MASK_FIRST_FIELD; chip->audio_info = rmh.Stat[1]; /* allocate pipes */ chip->playback_pipes = kcalloc(chip->audio_outs, sizeof(struct vx_pipe *), GFP_KERNEL); if (!chip->playback_pipes) return -ENOMEM; chip->capture_pipes = kcalloc(chip->audio_ins, sizeof(struct vx_pipe *), GFP_KERNEL); if (!chip->capture_pipes) { kfree(chip->playback_pipes); return -ENOMEM; } preferred = chip->ibl.size; chip->ibl.size = 0; vx_set_ibl(chip, &chip->ibl); /* query the info */ if (preferred > 0) { chip->ibl.size = ((preferred + chip->ibl.granularity - 1) / chip->ibl.granularity) * chip->ibl.granularity; if (chip->ibl.size > chip->ibl.max_size) chip->ibl.size = chip->ibl.max_size; } else chip->ibl.size = chip->ibl.min_size; /* set to the minimum */ vx_set_ibl(chip, &chip->ibl); return 0; } /* * free callback for pcm */ static void snd_vx_pcm_free(struct snd_pcm *pcm) { struct vx_core *chip = pcm->private_data; 
chip->pcm[pcm->device] = NULL; kfree(chip->playback_pipes); chip->playback_pipes = NULL; kfree(chip->capture_pipes); chip->capture_pipes = NULL; } /* * snd_vx_pcm_new - create and initialize a pcm */ int snd_vx_pcm_new(struct vx_core *chip) { struct snd_pcm *pcm; unsigned int i; int err; if ((err = vx_init_audio_io(chip)) < 0) return err; for (i = 0; i < chip->hw->num_codecs; i++) { unsigned int outs, ins; outs = chip->audio_outs > i * 2 ? 1 : 0; ins = chip->audio_ins > i * 2 ? 1 : 0; if (! outs && ! ins) break; err = snd_pcm_new(chip->card, "VX PCM", i, outs, ins, &pcm); if (err < 0) return err; if (outs) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &vx_pcm_playback_ops); if (ins) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &vx_pcm_capture_ops); pcm->private_data = chip; pcm->private_free = snd_vx_pcm_free; pcm->info_flags = 0; strcpy(pcm->name, chip->card->shortname); chip->pcm[i] = pcm; } return 0; }
gpl-2.0
sirmordred/android_kernel_samsung_ray
arch/arm/mach-omap2/clkt2xxx_dpllcore.c
8112
4550
/* * DPLL + CORE_CLK composite clock functions * * Copyright (C) 2005-2008 Texas Instruments, Inc. * Copyright (C) 2004-2010 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, * Gordon McNutt and RidgeRun, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * XXX The DPLL and CORE clocks should be split into two separate clock * types. */ #undef DEBUG #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/clock.h> #include <plat/sram.h> #include <plat/sdrc.h> #include "clock.h" #include "clock2xxx.h" #include "opp2xxx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-24xx.h" /* #define DOWN_VARIABLE_DPLL 1 */ /* Experimental */ /** * omap2xxx_clk_get_core_rate - return the CORE_CLK rate * @clk: pointer to the combined dpll_ck + core_ck (currently "dpll_ck") * * Returns the CORE_CLK rate. CORE_CLK can have one of three rate * sources on OMAP2xxx: the DPLL CLKOUT rate, DPLL CLKOUTX2, or 32KHz * (the latter is unusual). This currently should be called with * struct clk *dpll_ck, which is a composite clock of dpll_ck and * core_ck. */ unsigned long omap2xxx_clk_get_core_rate(struct clk *clk) { long long core_clk; u32 v; core_clk = omap2_get_dpll_rate(clk); v = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2); v &= OMAP24XX_CORE_CLK_SRC_MASK; if (v == CORE_CLK_SRC_32K) core_clk = 32768; else core_clk *= v; return core_clk; } /* * Uses the current prcm set to tell if a rate is valid. * You can go slower, but not faster within a given rate set. 
*/ static long omap2_dpllcore_round_rate(unsigned long target_rate) { u32 high, low, core_clk_src; core_clk_src = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2); core_clk_src &= OMAP24XX_CORE_CLK_SRC_MASK; if (core_clk_src == CORE_CLK_SRC_DPLL) { /* DPLL clockout */ high = curr_prcm_set->dpll_speed * 2; low = curr_prcm_set->dpll_speed; } else { /* DPLL clockout x 2 */ high = curr_prcm_set->dpll_speed; low = curr_prcm_set->dpll_speed / 2; } #ifdef DOWN_VARIABLE_DPLL if (target_rate > high) return high; else return target_rate; #else if (target_rate > low) return high; else return low; #endif } unsigned long omap2_dpllcore_recalc(struct clk *clk) { return omap2xxx_clk_get_core_rate(clk); } int omap2_reprogram_dpllcore(struct clk *clk, unsigned long rate) { u32 cur_rate, low, mult, div, valid_rate, done_rate; u32 bypass = 0; struct prcm_config tmpset; const struct dpll_data *dd; cur_rate = omap2xxx_clk_get_core_rate(dclk); mult = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2); mult &= OMAP24XX_CORE_CLK_SRC_MASK; if ((rate == (cur_rate / 2)) && (mult == 2)) { omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL, 1); } else if ((rate == (cur_rate * 2)) && (mult == 1)) { omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1); } else if (rate != cur_rate) { valid_rate = omap2_dpllcore_round_rate(rate); if (valid_rate != rate) return -EINVAL; if (mult == 1) low = curr_prcm_set->dpll_speed; else low = curr_prcm_set->dpll_speed / 2; dd = clk->dpll_data; if (!dd) return -EINVAL; tmpset.cm_clksel1_pll = __raw_readl(dd->mult_div1_reg); tmpset.cm_clksel1_pll &= ~(dd->mult_mask | dd->div1_mask); div = ((curr_prcm_set->xtal_speed / 1000000) - 1); tmpset.cm_clksel2_pll = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2); tmpset.cm_clksel2_pll &= ~OMAP24XX_CORE_CLK_SRC_MASK; if (rate > low) { tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL_X2; mult = ((rate / 2) / 1000000); done_rate = CORE_CLK_SRC_DPLL_X2; } else { tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL; mult = (rate / 1000000); done_rate = CORE_CLK_SRC_DPLL; } 
tmpset.cm_clksel1_pll |= (div << __ffs(dd->mult_mask)); tmpset.cm_clksel1_pll |= (mult << __ffs(dd->div1_mask)); /* Worst case */ tmpset.base_sdrc_rfr = SDRC_RFR_CTRL_BYPASS; if (rate == curr_prcm_set->xtal_speed) /* If asking for 1-1 */ bypass = 1; /* For omap2xxx_sdrc_init_params() */ omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1); /* Force dll lock mode */ omap2_set_prcm(tmpset.cm_clksel1_pll, tmpset.base_sdrc_rfr, bypass); /* Errata: ret dll entry state */ omap2xxx_sdrc_init_params(omap2xxx_sdrc_dll_is_unlocked()); omap2xxx_sdrc_reprogram(done_rate, 0); } return 0; }
gpl-2.0
TeamEOS/kernel_google_msm
arch/parisc/lib/iomap.c
8880
10309
/* * iomap.c - Implement iomap interface for PA-RISC * Copyright (c) 2004 Matthew Wilcox */ #include <linux/ioport.h> #include <linux/pci.h> #include <linux/export.h> #include <asm/io.h> /* * The iomap space on 32-bit PA-RISC is intended to look like this: * 00000000-7fffffff virtual mapped IO * 80000000-8fffffff ISA/EISA port space that can't be virtually mapped * 90000000-9fffffff Dino port space * a0000000-afffffff Astro port space * b0000000-bfffffff PAT port space * c0000000-cfffffff non-swapped memory IO * f0000000-ffffffff legacy IO memory pointers * * For the moment, here's what it looks like: * 80000000-8fffffff All ISA/EISA port space * f0000000-ffffffff legacy IO memory pointers * * On 64-bit, everything is extended, so: * 8000000000000000-8fffffffffffffff All ISA/EISA port space * f000000000000000-ffffffffffffffff legacy IO memory pointers */ /* * Technically, this should be 'if (VMALLOC_START < addr < VMALLOC_END), * but that's slow and we know it'll be within the first 2GB. 
*/ #ifdef CONFIG_64BIT #define INDIRECT_ADDR(addr) (((unsigned long)(addr) & 1UL<<63) != 0) #define ADDR_TO_REGION(addr) (((unsigned long)addr >> 60) & 7) #define IOPORT_MAP_BASE (8UL << 60) #else #define INDIRECT_ADDR(addr) (((unsigned long)(addr) & 1UL<<31) != 0) #define ADDR_TO_REGION(addr) (((unsigned long)addr >> 28) & 7) #define IOPORT_MAP_BASE (8UL << 28) #endif struct iomap_ops { unsigned int (*read8)(void __iomem *); unsigned int (*read16)(void __iomem *); unsigned int (*read16be)(void __iomem *); unsigned int (*read32)(void __iomem *); unsigned int (*read32be)(void __iomem *); void (*write8)(u8, void __iomem *); void (*write16)(u16, void __iomem *); void (*write16be)(u16, void __iomem *); void (*write32)(u32, void __iomem *); void (*write32be)(u32, void __iomem *); void (*read8r)(void __iomem *, void *, unsigned long); void (*read16r)(void __iomem *, void *, unsigned long); void (*read32r)(void __iomem *, void *, unsigned long); void (*write8r)(void __iomem *, const void *, unsigned long); void (*write16r)(void __iomem *, const void *, unsigned long); void (*write32r)(void __iomem *, const void *, unsigned long); }; /* Generic ioport ops. 
To be replaced later by specific dino/elroy/wax code */

/*
 * Strip the region bits from a fake I/O-port cookie, leaving the raw
 * port number to feed to the inX()/outX() instructions.
 */
#define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff)

/* Legacy I/O-port accessors: thin forwarders to the port instructions. */
static unsigned int ioport_read8(void __iomem *addr)
{
	return inb(ADDR2PORT(addr));
}

static unsigned int ioport_read16(void __iomem *addr)
{
	return inw(ADDR2PORT(addr));
}

static unsigned int ioport_read32(void __iomem *addr)
{
	return inl(ADDR2PORT(addr));
}

static void ioport_write8(u8 datum, void __iomem *addr)
{
	outb(datum, ADDR2PORT(addr));
}

static void ioport_write16(u16 datum, void __iomem *addr)
{
	outw(datum, ADDR2PORT(addr));
}

static void ioport_write32(u32 datum, void __iomem *addr)
{
	outl(datum, ADDR2PORT(addr));
}

/* Repeating port transfers: forward to the string-I/O instructions. */
static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count)
{
	insb(ADDR2PORT(addr), dst, count);
}

static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count)
{
	insw(ADDR2PORT(addr), dst, count);
}

static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count)
{
	insl(ADDR2PORT(addr), dst, count);
}

static void ioport_write8r(void __iomem *addr, const void *s, unsigned long n)
{
	outsb(ADDR2PORT(addr), s, n);
}

static void ioport_write16r(void __iomem *addr, const void *s, unsigned long n)
{
	outsw(ADDR2PORT(addr), s, n);
}

static void ioport_write32r(void __iomem *addr, const void *s, unsigned long n)
{
	outsl(ADDR2PORT(addr), s, n);
}

/*
 * Dispatch table for port space.  NOTE(review): the native read16/read32
 * (and write16/write32) handlers also fill the big-endian slots --
 * presumably port space has no distinct byte-order variant here; confirm
 * against the struct iomap_ops declaration before changing.
 */
static const struct iomap_ops ioport_ops = {
	ioport_read8,
	ioport_read16,
	ioport_read16,
	ioport_read32,
	ioport_read32,
	ioport_write8,
	ioport_write16,
	ioport_write16,
	ioport_write32,
	ioport_write32,
	ioport_read8r,
	ioport_read16r,
	ioport_read32r,
	ioport_write8r,
	ioport_write16r,
	ioport_write32r,
};

/* Legacy I/O memory ops */
static unsigned int iomem_read8(void __iomem *addr)
{
	return readb(addr);
}

static unsigned int iomem_read16(void __iomem *addr)
{
	return readw(addr);
}

/* Big-endian variant: raw access, no byte swap. */
static unsigned int iomem_read16be(void __iomem *addr)
{
	return __raw_readw(addr);
}

static unsigned int iomem_read32(void __iomem *addr)
{
	return readl(addr);
}

static unsigned int iomem_read32be(void __iomem *addr)
{
	return __raw_readl(addr);
}

static void iomem_write8(u8 datum, void __iomem *addr)
{
	writeb(datum, addr);
}

static void iomem_write16(u16 datum, void __iomem *addr)
{
	writew(datum, addr);
}

static void iomem_write16be(u16 datum, void __iomem *addr)
{
	__raw_writew(datum, addr);
}

static void iomem_write32(u32 datum, void __iomem *addr)
{
	writel(datum, addr);
}

static void iomem_write32be(u32 datum, void __iomem *addr)
{
	__raw_writel(datum, addr);
}

/*
 * Repeating MMIO transfers: the device register address stays fixed
 * while the CPU-side buffer advances (FIFO-style access).
 */
static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
{
	while (count--) {
		*(u8 *)dst = __raw_readb(addr);
		dst++;
	}
}

static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count)
{
	while (count--) {
		*(u16 *)dst = __raw_readw(addr);
		dst += 2;
	}
}

static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count)
{
	while (count--) {
		*(u32 *)dst = __raw_readl(addr);
		dst += 4;
	}
}

static void iomem_write8r(void __iomem *addr, const void *s, unsigned long n)
{
	while (n--) {
		__raw_writeb(*(u8 *)s, addr);
		s++;
	}
}

static void iomem_write16r(void __iomem *addr, const void *s, unsigned long n)
{
	while (n--) {
		__raw_writew(*(u16 *)s, addr);
		s += 2;
	}
}

static void iomem_write32r(void __iomem *addr, const void *s, unsigned long n)
{
	while (n--) {
		__raw_writel(*(u32 *)s, addr);
		s += 4;
	}
}

/* Dispatch table for memory-mapped I/O space. */
static const struct iomap_ops iomem_ops = {
	iomem_read8,
	iomem_read16,
	iomem_read16be,
	iomem_read32,
	iomem_read32be,
	iomem_write8,
	iomem_write16,
	iomem_write16be,
	iomem_write32,
	iomem_write32be,
	iomem_read8r,
	iomem_read16r,
	iomem_read32r,
	iomem_write8r,
	iomem_write16r,
	iomem_write32r,
};

/*
 * Region -> ops table, indexed by ADDR_TO_REGION().  Only region 0
 * (ports) and region 7 (legacy MMIO) are populated here; the comment at
 * the top of this chunk says other regions are to be filled in by
 * dino/elroy/wax bus code.
 */
static const struct iomap_ops *iomap_ops[8] = {
	[0] = &ioport_ops,
	[7] = &iomem_ops
};

/*
 * Public ioreadN/iowriteN entry points.  Indirect (cookie) addresses are
 * dispatched through iomap_ops[]; plain kernel-virtual MMIO addresses
 * are accessed directly, with the non-"be" variants treating the device
 * data as little-endian.
 */
unsigned int ioread8(void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr)))
		return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr);
	return *((u8 *)addr);
}

unsigned int ioread16(void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr)))
		return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr);
	return le16_to_cpup((u16 *)addr);
}

unsigned int ioread16be(void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr)))
		return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr);
	return *((u16 *)addr);
}

unsigned int ioread32(void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr)))
		return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr);
	return le32_to_cpup((u32 *)addr);
}

unsigned int ioread32be(void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr)))
		return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr);
	return *((u32 *)addr);
}

void iowrite8(u8 datum, void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write8(datum, addr);
	} else {
		*((u8 *)addr) = datum;
	}
}

void iowrite16(u16 datum, void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write16(datum, addr);
	} else {
		*((u16 *)addr) = cpu_to_le16(datum);
	}
}

void iowrite16be(u16 datum, void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write16be(datum, addr);
	} else {
		*((u16 *)addr) = datum;
	}
}

void iowrite32(u32 datum, void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write32(datum, addr);
	} else {
		*((u32 *)addr) = cpu_to_le32(datum);
	}
}

void iowrite32be(u32 datum, void __iomem *addr)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write32be(datum, addr);
	} else {
		*((u32 *)addr) = datum;
	}
}

/* Repeating interfaces */

/* Direct path reads the SAME address repeatedly (FIFO semantics). */
void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count);
	} else {
		while (count--) {
			*(u8 *)dst = *(u8 *)addr;
			dst++;
		}
	}
}

void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count);
	} else {
		while (count--) {
			*(u16 *)dst = *(u16 *)addr;
			dst += 2;
		}
	}
}

void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count);
	} else {
		while (count--) {
			*(u32 *)dst = *(u32 *)addr;
			dst += 4;
		}
	}
}

void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write8r(addr, src, count);
	} else {
		while (count--) {
			*(u8 *)addr = *(u8 *)src;
			src++;
		}
	}
}

void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write16r(addr, src, count);
	} else {
		while (count--) {
			*(u16 *)addr = *(u16 *)src;
			src += 2;
		}
	}
}

void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	if (unlikely(INDIRECT_ADDR(addr))) {
		iomap_ops[ADDR_TO_REGION(addr)]->write32r(addr, src, count);
	} else {
		while (count--) {
			*(u32 *)addr = *(u32 *)src;
			src += 4;
		}
	}
}

/* Mapping interfaces */

/* Encode a port number as an indirect cookie; nothing is mapped. */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return (void __iomem *)(IOPORT_MAP_BASE | port);
}

/* Only real (non-cookie) mappings need an iounmap(). */
void ioport_unmap(void __iomem *addr)
{
	if (!INDIRECT_ADDR(addr)) {
		iounmap(addr);
	}
}

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
	if (!INDIRECT_ADDR(addr)) {
		iounmap(addr);
	}
}

EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
EXPORT_SYMBOL(pci_iounmap);
gpl-2.0
CyanogenMod/android_kernel_sony_flamingo
arch/mips/mti-malta/malta-cmdline.c
9648
1571
/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Kernel command line creation using the prom monitor (YAMON) argc/argv.
 */
#include <linux/init.h>
#include <linux/string.h>

#include <asm/bootinfo.h>

extern int prom_argc;
extern int *_prom_argv;

/*
 * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
 * This macro take care of sign extension.
 */
#define prom_argv(index) ((char *)(long)_prom_argv[(index)])

/* Return the kernel command line buffer (arcs_cmdline). */
char * __init prom_getcmdline(void)
{
	return &(arcs_cmdline[0]);
}

/*
 * Build arcs_cmdline[] by concatenating the YAMON argv[] strings,
 * separated by single spaces.  argv[0] (the program name) is skipped.
 *
 * Fix: the original appended with an unbounded strcpy(), so a long PROM
 * command line could overflow arcs_cmdline[].  Stop appending once the
 * next argument (plus the space separator and the terminating NUL)
 * would no longer fit in the buffer.
 */
void __init prom_init_cmdline(void)
{
	char *cp;
	int actr;

	actr = 1; /* Always ignore argv[0] */

	cp = &(arcs_cmdline[0]);
	while (actr < prom_argc) {
		size_t len = strlen(prom_argv(actr));

		/* +2: one ' ' separator and the final '\0' */
		if ((size_t)(cp - arcs_cmdline) + len + 2 >
		    sizeof(arcs_cmdline))
			break;
		strcpy(cp, prom_argv(actr));
		cp += len;
		*cp++ = ' ';
		actr++;
	}
	if (cp != &(arcs_cmdline[0])) {
		/* get rid of trailing space */
		--cp;
		*cp = '\0';
	}
}
gpl-2.0
omnirom/android_kernel_lge_x3
drivers/power/pmu_battery.c
12720
5424
/*
 * Battery class driver for Apple PMU
 *
 * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/power_supply.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/slab.h>

/*
 * One entry per physical battery bay.  Slots that were never
 * registered (allocation failure, fewer batteries than the maximum)
 * stay NULL, which the teardown paths rely on.
 */
static struct pmu_battery_dev {
	struct power_supply bat;	/* embedded power_supply; registered with the core */
	struct pmu_battery_info *pbi;	/* PMU-side state for this battery */
	char name[16];			/* "PMU_battery_%d", backing store for bat.name */
	int propval;
} *pbats[PMU_MAX_BATTERIES];

/* Recover our wrapper from the embedded power_supply. */
#define to_pmu_battery_dev(x) container_of(x, struct pmu_battery_dev, bat)

/*********************************************************************
 *		Power
 *********************************************************************/

/*
 * AC supply property callback.  Reports "online" when the PMU says AC
 * is present, or unconditionally when the machine has no batteries at
 * all (it must then be running from mains).
 */
static int pmu_get_ac_prop(struct power_supply *psy,
			   enum power_supply_property psp,
			   union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = (!!(pmu_power_flags & PMU_PWR_AC_PRESENT)) ||
			      (pmu_battery_count == 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static enum power_supply_property pmu_ac_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
};

static struct power_supply pmu_ac = {
	.name = "pmu-ac",
	.type = POWER_SUPPLY_TYPE_MAINS,
	.properties = pmu_ac_props,
	.num_properties = ARRAY_SIZE(pmu_ac_props),
	.get_property = pmu_get_ac_prop,
};

/*********************************************************************
 *		Battery properties
 *********************************************************************/

/* Model names indexed by decoded PMU_BATT_TYPE_*; [3] is the fallback. */
static char *pmu_batt_types[] = {
	"Smart",
	"Comet",
	"Hooper",
	"Unknown"
};

/* Map the PMU battery-type flag bits to a printable model name. */
static char *pmu_bat_get_model_name(struct pmu_battery_info *pbi)
{
	switch (pbi->flags & PMU_BATT_TYPE_MASK) {
	case PMU_BATT_TYPE_SMART:
		return pmu_batt_types[0];
	case PMU_BATT_TYPE_COMET:
		return pmu_batt_types[1];
	case PMU_BATT_TYPE_HOOPER:
		return pmu_batt_types[2];
	default:
		break;
	}

	return pmu_batt_types[3];
}

/*
 * Battery property callback.  Translates the cached pmu_battery_info
 * fields into power-supply-class units (mWh/mA/mV -> µWh/µA/µV).
 */
static int pmu_bat_get_property(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	struct pmu_battery_dev *pbat = to_pmu_battery_dev(psy);
	struct pmu_battery_info *pbi = pbat->pbi;

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		/* On AC but not charging is reported as "full". */
		if (pbi->flags & PMU_BATT_CHARGING)
			val->intval = POWER_SUPPLY_STATUS_CHARGING;
		else if (pmu_power_flags & PMU_PWR_AC_PRESENT)
			val->intval = POWER_SUPPLY_STATUS_FULL;
		else
			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = !!(pbi->flags & PMU_BATT_PRESENT);
		break;
	case POWER_SUPPLY_PROP_MODEL_NAME:
		val->strval = pmu_bat_get_model_name(pbi);
		break;
	case POWER_SUPPLY_PROP_ENERGY_AVG:
		val->intval = pbi->charge     * 1000; /* mWh -> µWh */
		break;
	case POWER_SUPPLY_PROP_ENERGY_FULL:
		val->intval = pbi->max_charge * 1000; /* mWh -> µWh */
		break;
	case POWER_SUPPLY_PROP_CURRENT_AVG:
		val->intval = pbi->amperage   * 1000; /* mA -> µA */
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
		val->intval = pbi->voltage    * 1000; /* mV -> µV */
		break;
	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
		val->intval = pbi->time_remaining;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static enum power_supply_property pmu_bat_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_MODEL_NAME,
	POWER_SUPPLY_PROP_ENERGY_AVG,
	POWER_SUPPLY_PROP_ENERGY_FULL,
	POWER_SUPPLY_PROP_CURRENT_AVG,
	POWER_SUPPLY_PROP_VOLTAGE_AVG,
	POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
};

/*********************************************************************
 *		Initialisation
 *********************************************************************/

static struct platform_device *bat_pdev;

/*
 * Register the platform device, the AC supply, and one power_supply
 * per detected battery.  An allocation failure mid-loop simply stops
 * registering further batteries (partial success); a registration
 * failure unwinds everything already registered via the goto chain.
 */
static int __init pmu_bat_init(void)
{
	int ret;
	int i;

	bat_pdev = platform_device_register_simple("pmu-battery",
						   0, NULL, 0);
	if (IS_ERR(bat_pdev)) {
		ret = PTR_ERR(bat_pdev);
		goto pdev_register_failed;
	}

	ret = power_supply_register(&bat_pdev->dev, &pmu_ac);
	if (ret)
		goto ac_register_failed;

	for (i = 0; i < pmu_battery_count; i++) {
		struct pmu_battery_dev *pbat = kzalloc(sizeof(*pbat),
						       GFP_KERNEL);
		if (!pbat)
			break;

		sprintf(pbat->name, "PMU_battery_%d", i);
		pbat->bat.name = pbat->name;
		pbat->bat.properties = pmu_bat_props;
		pbat->bat.num_properties = ARRAY_SIZE(pmu_bat_props);
		pbat->bat.get_property = pmu_bat_get_property;
		pbat->pbi = &pmu_batteries[i];

		ret = power_supply_register(&bat_pdev->dev, &pbat->bat);
		if (ret) {
			kfree(pbat);
			goto battery_register_failed;
		}
		pbats[i] = pbat;
	}

	goto success;

battery_register_failed:
	/* Unwind only the batteries registered so far. */
	while (i--) {
		if (!pbats[i])
			continue;
		power_supply_unregister(&pbats[i]->bat);
		kfree(pbats[i]);
	}
	power_supply_unregister(&pmu_ac);
ac_register_failed:
	platform_device_unregister(bat_pdev);
pdev_register_failed:
success:
	return ret;
}

/* Tear down in reverse order of registration. */
static void __exit pmu_bat_exit(void)
{
	int i;

	for (i = 0; i < PMU_MAX_BATTERIES; i++) {
		if (!pbats[i])
			continue;
		power_supply_unregister(&pbats[i]->bat);
		kfree(pbats[i]);
	}
	power_supply_unregister(&pmu_ac);
	platform_device_unregister(bat_pdev);
}

module_init(pmu_bat_init);
module_exit(pmu_bat_exit);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PMU battery driver");
gpl-2.0
OneEducation/kernel-rk310-lollipop-firefly
lib/xz/xz_crc32.c
13232
1261
/*
 * CRC32 using the polynomial from IEEE-802.3
 *
 * Authors: Lasse Collin <lasse.collin@tukaani.org>
 *          Igor Pavlov <http://7-zip.org/>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

/*
 * A compact table-driven CRC32 (one 256-entry lookup table, one byte
 * per step).  Larger sliced-by-N tables would be faster on modern CPUs
 * but cost considerably more memory.
 */

#include "xz_private.h"

/*
 * STATIC_RW_DATA is used in the pre-boot environment on some architectures.
 * See <linux/decompress/mm.h> for details.
 */
#ifndef STATIC_RW_DATA
#	define STATIC_RW_DATA static
#endif

STATIC_RW_DATA uint32_t xz_crc32_table[256];

/*
 * Fill xz_crc32_table[] for the reflected IEEE-802.3 polynomial.
 * Must run once before the first call to xz_crc32().
 */
XZ_EXTERN void xz_crc32_init(void)
{
	const uint32_t poly = 0xEDB88320;

	uint32_t b;
	uint32_t bit;
	uint32_t r;

	for (b = 0; b < 256; ++b) {
		r = b;
		for (bit = 0; bit < 8; ++bit) {
			/* Shift right; fold in the polynomial when a
			   one-bit falls off the low end. */
			if (r & 1)
				r = (r >> 1) ^ poly;
			else
				r >>= 1;
		}
		xz_crc32_table[b] = r;
	}

	return;
}

/*
 * Update a running CRC32 with `size` bytes from `buf`.  Pass crc = 0
 * for a fresh checksum; feed the previous return value to continue.
 */
XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
{
	const uint8_t *end = buf + size;

	crc ^= (uint32_t)0xFFFFFFFF;

	while (buf != end)
		crc = xz_crc32_table[(crc ^ *buf++) & 0xFF] ^ (crc >> 8);

	return crc ^ (uint32_t)0xFFFFFFFF;
}
gpl-2.0
fhasovic/LG-G2-D802-Kernel
drivers/parport/probe.c
13744
7589
/*
 * Parallel port device probing code
 *
 * Authors:    Carsten Gross, carsten@sol.wohnheim.uni-ulm.de
 *             Philip Blundell <philb@gnu.org>
 */

#include <linux/module.h>
#include <linux/parport.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

/*
 * IEEE 1284 Device ID class tokens and their human-readable names.
 * The array index doubles as the PARPORT_CLASS_* value stored in
 * info->class; the list is NULL-terminated for the lookup loop below.
 */
static const struct {
	const char *token;
	const char *descr;
} classes[] = {
	{ "",            "Legacy device" },
	{ "PRINTER",     "Printer" },
	{ "MODEM",       "Modem" },
	{ "NET",         "Network device" },
	{ "HDC",         "Hard disk" },
	{ "PCMCIA",      "PCMCIA" },
	{ "MEDIA",       "Multimedia device" },
	{ "FDC",         "Floppy disk" },
	{ "PORTS",       "Ports" },
	{ "SCANNER",     "Scanner" },
	{ "DIGICAM",     "Digital camera" },
	{ "",            "Unknown device" },
	{ "",            "Unspecified" },
	{ "SCSIADAPTER", "SCSI adapter" },
	{ NULL,          NULL }
};

/* Log a one-line summary of the parsed probe info for a device. */
static void pretty_print(struct parport *port, int device)
{
	struct parport_device_info *info = &port->probe_info[device + 1];

	printk(KERN_INFO "%s", port->name);

	if (device >= 0)
		printk (" (addr %d)", device);

	printk (": %s", classes[info->class].descr);
	if (info->class)
		printk(", %s %s", info->mfr, info->model);

	printk("\n");
}

/*
 * Parse an IEEE 1284 Device ID string ("KEY:value;KEY:value;...") into
 * port->probe_info[device + 1].  Works on a private copy of `str`,
 * tokenizing in place; recognised keys are MFG/MDL/CLS/CMD/DES (and
 * their long forms).  If no class is declared, a printer is guessed
 * from a PJL/PCL command set.
 */
static void parse_data(struct parport *port, int device, char *str)
{
	char *txt = kmalloc(strlen(str)+1, GFP_KERNEL);
	char *p = txt, *q;
	int guessed_class = PARPORT_CLASS_UNSPEC;
	struct parport_device_info *info = &port->probe_info[device + 1];

	if (!txt) {
		printk(KERN_WARNING "%s probe: memory squeeze\n", port->name);
		return;
	}
	strcpy(txt, str);
	while (p) {
		char *sep;
		q = strchr(p, ';');
		if (q) *q = 0;
		sep = strchr(p, ':');
		if (sep) {
			char *u;
			*(sep++) = 0;
			/* Get rid of trailing blanks */
			u = sep + strlen (sep) - 1;
			while (u >= p && *u == ' ')
				*u-- = '\0';
			/* Keys are matched case-insensitively: upcase in place. */
			u = p;
			while (*u) {
				*u = toupper(*u);
				u++;
			}
			if (!strcmp(p, "MFG") || !strcmp(p, "MANUFACTURER")) {
				kfree(info->mfr);
				info->mfr = kstrdup(sep, GFP_KERNEL);
			} else if (!strcmp(p, "MDL") || !strcmp(p, "MODEL")) {
				kfree(info->model);
				info->model = kstrdup(sep, GFP_KERNEL);
			} else if (!strcmp(p, "CLS") || !strcmp(p, "CLASS")) {
				int i;

				kfree(info->class_name);
				info->class_name = kstrdup(sep, GFP_KERNEL);
				/* Class values are compared upcased too. */
				for (u = sep; *u; u++)
					*u = toupper(*u);
				for (i = 0; classes[i].token; i++) {
					if (!strcmp(classes[i].token, sep)) {
						info->class = i;
						goto rock_on;
					}
				}
				printk(KERN_WARNING "%s probe: warning, class '%s' not understood.\n", port->name, sep);
				info->class = PARPORT_CLASS_OTHER;
			} else if (!strcmp(p, "CMD") ||
				   !strcmp(p, "COMMAND SET")) {
				kfree(info->cmdset);
				info->cmdset = kstrdup(sep, GFP_KERNEL);
				/* if it speaks printer language, it's
				   probably a printer */
				if (strstr(sep, "PJL") || strstr(sep, "PCL"))
					guessed_class = PARPORT_CLASS_PRINTER;
			} else if (!strcmp(p, "DES") || !strcmp(p, "DESCRIPTION")) {
				kfree(info->description);
				info->description = kstrdup(sep, GFP_KERNEL);
			}
		}
	rock_on:
		if (q)
			p = q + 1;
		else
			p = NULL;
	}

	/* If the device didn't tell us its class, maybe we have managed to
	   guess one from the things it did say. */
	if (info->class == PARPORT_CLASS_UNSPEC)
		info->class = guessed_class;

	pretty_print (port, device);

	kfree(txt);
}

/* Read up to count-1 bytes of device id. Terminate buffer with
 * '\0'. Buffer begins with two Device ID length bytes as given by
 * device. */
static ssize_t parport_read_device_id (struct parport *port, char *buffer,
				       size_t count)
{
	unsigned char length[2];
	unsigned lelen, belen;
	size_t idlens[4];		/* candidate ID lengths, shortest first */
	unsigned numidlens;
	unsigned current_idlen;
	ssize_t retval;
	size_t len;

	/* First two bytes are MSB,LSB of inclusive length. */
	retval = parport_read (port, length, 2);

	if (retval < 0)
		return retval;
	if (retval != 2)
		return -EIO;

	if (count < 2)
		return 0;
	memcpy(buffer, length, 2);
	len = 2;

	/* Some devices wrongly send LE length, and some send it two
	 * bytes short. Construct a sorted array of lengths to try. */
	belen = (length[0] << 8) + length[1];
	lelen = (length[1] << 8) + length[0];
	idlens[0] = min(belen, lelen);
	idlens[1] = idlens[0]+2;
	if (belen != lelen) {
		int off = 2;
		/* Don't try lengths of 0x100 and 0x200 as 1 and 2 */
		if (idlens[0] <= 2)
			off = 0;
		idlens[off] = max(belen, lelen);
		idlens[off+1] = idlens[off]+2;
		numidlens = off+2;
	}
	else {
		/* Some devices don't truly implement Device ID, but
		 * just return constant nibble forever. This catches
		 * also those cases. */
		if (idlens[0] == 0 || idlens[0] > 0xFFF) {
			printk (KERN_DEBUG "%s: reported broken Device ID"
				" length of %#zX bytes\n",
				port->name, idlens[0]);
			return -EIO;
		}
		numidlens = 2;
	}

	/* Try to respect the given ID length despite all the bugs in
	 * the ID length. Read according to shortest possible ID
	 * first. */
	for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) {
		size_t idlen = idlens[current_idlen];
		if (idlen+1 >= count)
			break;

		retval = parport_read (port, buffer+len, idlen-len);

		if (retval < 0)
			return retval;
		len += retval;

		/* Device dropping out of "data available" means it
		 * considers the ID transfer complete. */
		if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
			if (belen != len) {
				printk (KERN_DEBUG "%s: Device ID was %zd bytes"
					" while device told it would be %d"
					" bytes\n",
					port->name, len, belen);
			}
			goto done;
		}

		/* This might end reading the Device ID too
		 * soon. Hopefully the needed fields were already in
		 * the first 256 bytes or so that we must have read so
		 * far. */
		if (buffer[len-1] == ';') {
			printk (KERN_DEBUG "%s: Device ID reading stopped"
				" before device told data not available. "
				"Current idlen %u of %u, len bytes %02X %02X\n",
				port->name, current_idlen, numidlens,
				length[0], length[1]);
			goto done;
		}
	}
	if (current_idlen < numidlens) {
		/* Buffer not large enough, read to end of buffer. */
		size_t idlen, len2;
		if (len+1 < count) {
			retval = parport_read (port, buffer+len, count-len-1);
			if (retval < 0)
				return retval;
			len += retval;
		}
		/* Read the whole ID since some devices would not
		 * otherwise give back the Device ID from beginning
		 * next time when asked. */
		idlen = idlens[current_idlen];
		len2 = len;
		while(len2 < idlen && retval > 0) {
			char tmp[4];
			retval = parport_read (port, tmp,
					       min(sizeof tmp, idlen-len2));
			if (retval < 0)
				return retval;
			len2 += retval;
		}
	}
	/* In addition, there are broken devices out there that don't
	   even finish off with a semi-colon. We do not need to care
	   about those at this time. */
 done:
	buffer[len] = '\0';
	return len;
}

/* Get Std 1284 Device ID. */
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
	ssize_t retval = -ENXIO;
	struct pardevice *dev = parport_open (devnum, "Device ID probe");
	if (!dev)
		return -ENXIO;

	parport_claim_or_block (dev);

	/* Negotiate to compatibility mode, and then to device ID
	 * mode. (This so that we start form beginning of device ID if
	 * already in device ID mode.) */
	parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
	retval = parport_negotiate (dev->port,
				    IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID);

	if (!retval) {
		retval = parport_read_device_id (dev->port, buffer, count);
		parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
		if (retval > 2)
			/* skip the two length bytes before parsing */
			parse_data (dev->port, dev->daisy, buffer+2);
	}

	parport_release (dev);
	parport_close (dev);
	return retval;
}
gpl-2.0
aqua-project/Linux-Minimal-x86-Reimplementation
drivers/coresight/coresight.c
177
16311
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/coresight.h>
#include <linux/of_platform.h>
#include <linux/delay.h>

#include "coresight-priv.h"

/* Serialises all path building and enable/disable operations. */
static DEFINE_MUTEX(coresight_mutex);

/*
 * bus_for_each_dev() callback: returns 1 when some other enabled
 * source on the bus already uses the same trace ID as `data`.
 */
static int coresight_id_match(struct device *dev, void *data)
{
	int trace_id, i_trace_id;
	struct coresight_device *csdev, *i_csdev;

	csdev = data;
	i_csdev = to_coresight_device(dev);

	/*
	 * No need to care about oneself and components that are not
	 * sources or not enabled
	 */
	if (i_csdev == csdev || !i_csdev->enable ||
	    i_csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
		return 0;

	/* Get the source ID for both components */
	trace_id = source_ops(csdev)->trace_id(csdev);
	i_trace_id = source_ops(i_csdev)->trace_id(i_csdev);

	/* All you need is one */
	if (trace_id == i_trace_id)
		return 1;

	return 0;
}

/* Non-zero when no other enabled source shares this source's trace ID. */
static int coresight_source_is_unique(struct coresight_device *csdev)
{
	int trace_id = source_ops(csdev)->trace_id(csdev);

	/* this shouldn't happen */
	if (trace_id < 0)
		return 0;

	return !bus_for_each_dev(&coresight_bustype, NULL,
				 csdev, coresight_id_match);
}

/*
 * Find the input port this device is fed through on the current path.
 * The parent is the previous entry on csdev->path_link.  Returns port
 * 0 (with an error message) when no matching connection is found.
 */
static int coresight_find_link_inport(struct coresight_device *csdev)
{
	int i;
	struct coresight_device *parent;
	struct coresight_connection *conn;

	parent = container_of(csdev->path_link.next,
			      struct coresight_device, path_link);
	for (i = 0; i < parent->nr_outport; i++) {
		conn = &parent->conns[i];
		if (conn->child_dev == csdev)
			return conn->child_port;
	}

	dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
		dev_name(&parent->dev), dev_name(&csdev->dev));

	return 0;
}

/*
 * Find the output port this device drives on the current path.  The
 * child is the next entry on csdev->path_link.  Returns port 0 (with
 * an error message) when no matching connection is found.
 */
static int coresight_find_link_outport(struct coresight_device *csdev)
{
	int i;
	struct coresight_device *child;
	struct coresight_connection *conn;

	child = container_of(csdev->path_link.prev,
			     struct coresight_device, path_link);
	for (i = 0; i < csdev->nr_outport; i++) {
		conn = &csdev->conns[i];
		if (conn->child_dev == child)
			return conn->outport;
	}

	dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
		dev_name(&csdev->dev), dev_name(&child->dev));

	return 0;
}

/* Enable a sink once; further callers only bump the refcount. */
static int coresight_enable_sink(struct coresight_device *csdev)
{
	int ret;

	if (!csdev->enable) {
		if (sink_ops(csdev)->enable) {
			ret = sink_ops(csdev)->enable(csdev);
			if (ret)
				return ret;
		}
		csdev->enable = true;
	}

	atomic_inc(csdev->refcnt);

	return 0;
}

/* Drop one reference; hardware is disabled when the last user goes. */
static void coresight_disable_sink(struct coresight_device *csdev)
{
	if (atomic_dec_return(csdev->refcnt) == 0) {
		if (sink_ops(csdev)->disable) {
			sink_ops(csdev)->disable(csdev);
			csdev->enable = false;
		}
	}
}

/*
 * Enable a link for one inport/outport pair.  Refcounting is per
 * port: merging links count per input, splitting links per output,
 * plain links use a single counter (port 0).
 */
static int coresight_enable_link(struct coresight_device *csdev)
{
	int ret;
	int link_subtype;
	int refport, inport, outport;

	inport = coresight_find_link_inport(csdev);
	outport = coresight_find_link_outport(csdev);

	link_subtype = csdev->subtype.link_subtype;
	if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
		refport = inport;
	else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
		refport = outport;
	else
		refport = 0;

	if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
		if (link_ops(csdev)->enable) {
			ret = link_ops(csdev)->enable(csdev, inport, outport);
			if (ret)
				return ret;
		}
	}

	csdev->enable = true;

	return 0;
}

/*
 * Release one inport/outport pair of a link; the device is flagged
 * disabled only when every per-port refcount has reached zero.
 */
static void coresight_disable_link(struct coresight_device *csdev)
{
	int i, nr_conns;
	int link_subtype;
	int refport, inport, outport;

	inport = coresight_find_link_inport(csdev);
	outport = coresight_find_link_outport(csdev);

	link_subtype = csdev->subtype.link_subtype;
	if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
		refport = inport;
		nr_conns = csdev->nr_inport;
	} else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
		refport = outport;
		nr_conns = csdev->nr_outport;
	} else {
		refport = 0;
		nr_conns = 1;
	}

	if (atomic_dec_return(&csdev->refcnt[refport]) == 0) {
		if (link_ops(csdev)->disable)
			link_ops(csdev)->disable(csdev, inport, outport);
	}

	for (i = 0; i < nr_conns; i++)
		if (atomic_read(&csdev->refcnt[i]) != 0)
			return;

	csdev->enable = false;
}

/*
 * Enable a trace source after checking its trace ID is unique among
 * enabled sources; refcounted like sinks.
 */
static int coresight_enable_source(struct coresight_device *csdev)
{
	int ret;

	if (!coresight_source_is_unique(csdev)) {
		dev_warn(&csdev->dev, "traceID %d not unique\n",
			 source_ops(csdev)->trace_id(csdev));
		return -EINVAL;
	}

	if (!csdev->enable) {
		if (source_ops(csdev)->enable) {
			ret = source_ops(csdev)->enable(csdev);
			if (ret)
				return ret;
		}
		csdev->enable = true;
	}

	atomic_inc(csdev->refcnt);

	return 0;
}

/* Drop one source reference; disable hardware on the last one. */
static void coresight_disable_source(struct coresight_device *csdev)
{
	if (atomic_dec_return(csdev->refcnt) == 0) {
		if (source_ops(csdev)->disable) {
			source_ops(csdev)->disable(csdev);
			csdev->enable = false;
		}
	}
}

/*
 * Walk a built path sink-first (the sink is the list head) enabling
 * each component.  On failure, everything already enabled on the path
 * is rolled back in reverse.
 */
static int coresight_enable_path(struct list_head *path)
{
	int ret = 0;
	struct coresight_device *cd;

	list_for_each_entry(cd, path, path_link) {
		if (cd == list_first_entry(path, struct coresight_device,
					   path_link)) {
			ret = coresight_enable_sink(cd);
		} else if (list_is_last(&cd->path_link, path)) {
			/*
			 * Don't enable the source just yet - this needs to
			 * happen at the very end when all links and sink
			 * along the path have been configured properly.
			 */
			;
		} else {
			ret = coresight_enable_link(cd);
		}
		if (ret)
			goto err;
	}

	return 0;
err:
	list_for_each_entry_continue_reverse(cd, path, path_link) {
		if (cd == list_first_entry(path, struct coresight_device,
					   path_link)) {
			coresight_disable_sink(cd);
		} else if (list_is_last(&cd->path_link, path)) {
			;
		} else {
			coresight_disable_link(cd);
		}
	}

	return ret;
}

/* Walk a path in reverse, disabling sink and links (not the source). */
static int coresight_disable_path(struct list_head *path)
{
	struct coresight_device *cd;

	list_for_each_entry_reverse(cd, path, path_link) {
		if (cd == list_first_entry(path, struct coresight_device,
					   path_link)) {
			coresight_disable_sink(cd);
		} else if (list_is_last(&cd->path_link, path)) {
			/*
			 * The source has already been stopped, no need
			 * to do it again here.
			 */
			;
		} else {
			coresight_disable_link(cd);
		}
	}

	return 0;
}

/*
 * Depth-first walk from a source towards every activated sink,
 * building a temporary path on the stack-allocated list head and
 * enabling (or disabling) each complete path found.  Returns 0 when
 * at least one path succeeded, -EINVAL otherwise.
 */
static int coresight_build_paths(struct coresight_device *csdev,
				 struct list_head *path, bool enable)
{
	int i, ret = -EINVAL;
	struct coresight_connection *conn;

	list_add(&csdev->path_link, path);

	if (csdev->type == CORESIGHT_DEV_TYPE_SINK && csdev->activated) {
		if (enable)
			ret = coresight_enable_path(path);
		else
			ret = coresight_disable_path(path);
	} else {
		/* Recurse into every downstream connection. */
		for (i = 0; i < csdev->nr_outport; i++) {
			conn = &csdev->conns[i];
			if (coresight_build_paths(conn->child_dev,
						  path, enable) == 0)
				ret = 0;
		}
	}

	/* Sanity check: we must be unwinding our own list entry. */
	if (list_first_entry(path, struct coresight_device, path_link) != csdev)
		dev_err(&csdev->dev, "wrong device in %s\n", __func__);

	list_del(&csdev->path_link);

	return ret;
}

/*
 * Public entry point: enable tracing from a source to every activated
 * sink reachable from it.  Only valid on SOURCE-type devices; a no-op
 * if the source is already enabled.
 */
int coresight_enable(struct coresight_device *csdev)
{
	int ret = 0;
	LIST_HEAD(path);

	mutex_lock(&coresight_mutex);
	if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
		ret = -EINVAL;
		dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
		goto out;
	}
	if (csdev->enable)
		goto out;

	if (coresight_build_paths(csdev, &path, true)) {
		dev_err(&csdev->dev, "building path(s) failed\n");
		goto out;
	}

	if (coresight_enable_source(csdev))
		dev_err(&csdev->dev, "source enable failed\n");
out:
	mutex_unlock(&coresight_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(coresight_enable); void coresight_disable(struct coresight_device *csdev) { LIST_HEAD(path); mutex_lock(&coresight_mutex); if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { dev_err(&csdev->dev, "wrong device type in %s\n", __func__); goto out; } if (!csdev->enable) goto out; coresight_disable_source(csdev); if (coresight_build_paths(csdev, &path, false)) dev_err(&csdev->dev, "releasing path(s) failed\n"); out: mutex_unlock(&coresight_mutex); } EXPORT_SYMBOL_GPL(coresight_disable); static ssize_t enable_sink_show(struct device *dev, struct device_attribute *attr, char *buf) { struct coresight_device *csdev = to_coresight_device(dev); return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated); } static ssize_t enable_sink_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int ret; unsigned long val; struct coresight_device *csdev = to_coresight_device(dev); ret = kstrtoul(buf, 10, &val); if (ret) return ret; if (val) csdev->activated = true; else csdev->activated = false; return size; } static DEVICE_ATTR_RW(enable_sink); static ssize_t enable_source_show(struct device *dev, struct device_attribute *attr, char *buf) { struct coresight_device *csdev = to_coresight_device(dev); return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable); } static ssize_t enable_source_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int ret = 0; unsigned long val; struct coresight_device *csdev = to_coresight_device(dev); ret = kstrtoul(buf, 10, &val); if (ret) return ret; if (val) { ret = coresight_enable(csdev); if (ret) return ret; } else { coresight_disable(csdev); } return size; } static DEVICE_ATTR_RW(enable_source); static struct attribute *coresight_sink_attrs[] = { &dev_attr_enable_sink.attr, NULL, }; ATTRIBUTE_GROUPS(coresight_sink); static struct attribute *coresight_source_attrs[] = { &dev_attr_enable_source.attr, NULL, }; ATTRIBUTE_GROUPS(coresight_source); static 
struct device_type coresight_dev_type[] = { { .name = "none", }, { .name = "sink", .groups = coresight_sink_groups, }, { .name = "link", }, { .name = "linksink", .groups = coresight_sink_groups, }, { .name = "source", .groups = coresight_source_groups, }, }; static void coresight_device_release(struct device *dev) { struct coresight_device *csdev = to_coresight_device(dev); kfree(csdev); } static int coresight_orphan_match(struct device *dev, void *data) { int i; bool still_orphan = false; struct coresight_device *csdev, *i_csdev; struct coresight_connection *conn; csdev = data; i_csdev = to_coresight_device(dev); /* No need to check oneself */ if (csdev == i_csdev) return 0; /* Move on to another component if no connection is orphan */ if (!i_csdev->orphan) return 0; /* * Circle throuch all the connection of that component. If we find * an orphan connection whose name matches @csdev, link it. */ for (i = 0; i < i_csdev->nr_outport; i++) { conn = &i_csdev->conns[i]; /* We have found at least one orphan connection */ if (conn->child_dev == NULL) { /* Does it match this newly added device? */ if (!strcmp(dev_name(&csdev->dev), conn->child_name)) { conn->child_dev = csdev; } else { /* This component still has an orphan */ still_orphan = true; } } } i_csdev->orphan = still_orphan; /* * Returning '0' ensures that all known component on the * bus will be checked. */ return 0; } static void coresight_fixup_orphan_conns(struct coresight_device *csdev) { /* * No need to check for a return value as orphan connection(s) * are hooked-up with each newly added component. 
*/ bus_for_each_dev(&coresight_bustype, NULL, csdev, coresight_orphan_match); } static int coresight_name_match(struct device *dev, void *data) { char *to_match; struct coresight_device *i_csdev; to_match = data; i_csdev = to_coresight_device(dev); if (!strcmp(to_match, dev_name(&i_csdev->dev))) return 1; return 0; } static void coresight_fixup_device_conns(struct coresight_device *csdev) { int i; struct device *dev = NULL; struct coresight_connection *conn; for (i = 0; i < csdev->nr_outport; i++) { conn = &csdev->conns[i]; dev = bus_find_device(&coresight_bustype, NULL, (void *)conn->child_name, coresight_name_match); if (dev) { conn->child_dev = to_coresight_device(dev); } else { csdev->orphan = true; conn->child_dev = NULL; } } } /** * coresight_timeout - loop until a bit has changed to a specific state. * @addr: base address of the area of interest. * @offset: address of a register, starting from @addr. * @position: the position of the bit of interest. * @value: the value the bit should have. * * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if * TIMEOUT_US has elapsed, which ever happens first. */ int coresight_timeout(void __iomem *addr, u32 offset, int position, int value) { int i; u32 val; for (i = TIMEOUT_US; i > 0; i--) { val = __raw_readl(addr + offset); /* waiting on the bit to go from 0 to 1 */ if (value) { if (val & BIT(position)) return 0; /* waiting on the bit to go from 1 to 0 */ } else { if (!(val & BIT(position))) return 0; } /* * Delay is arbitrary - the specification doesn't say how long * we are expected to wait. Extra check required to make sure * we don't wait needlessly on the last iteration. 
*/ if (i - 1) udelay(1); } return -EAGAIN; } struct bus_type coresight_bustype = { .name = "coresight", }; static int __init coresight_init(void) { return bus_register(&coresight_bustype); } postcore_initcall(coresight_init); struct coresight_device *coresight_register(struct coresight_desc *desc) { int i; int ret; int link_subtype; int nr_refcnts = 1; atomic_t *refcnts = NULL; struct coresight_device *csdev; struct coresight_connection *conns; csdev = kzalloc(sizeof(*csdev), GFP_KERNEL); if (!csdev) { ret = -ENOMEM; goto err_kzalloc_csdev; } if (desc->type == CORESIGHT_DEV_TYPE_LINK || desc->type == CORESIGHT_DEV_TYPE_LINKSINK) { link_subtype = desc->subtype.link_subtype; if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) nr_refcnts = desc->pdata->nr_inport; else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) nr_refcnts = desc->pdata->nr_outport; } refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL); if (!refcnts) { ret = -ENOMEM; goto err_kzalloc_refcnts; } csdev->refcnt = refcnts; csdev->nr_inport = desc->pdata->nr_inport; csdev->nr_outport = desc->pdata->nr_outport; conns = kcalloc(csdev->nr_outport, sizeof(*conns), GFP_KERNEL); if (!conns) { ret = -ENOMEM; goto err_kzalloc_conns; } for (i = 0; i < csdev->nr_outport; i++) { conns[i].outport = desc->pdata->outports[i]; conns[i].child_name = desc->pdata->child_names[i]; conns[i].child_port = desc->pdata->child_ports[i]; } csdev->conns = conns; csdev->type = desc->type; csdev->subtype = desc->subtype; csdev->ops = desc->ops; csdev->orphan = false; csdev->dev.type = &coresight_dev_type[desc->type]; csdev->dev.groups = desc->groups; csdev->dev.parent = desc->dev; csdev->dev.release = coresight_device_release; csdev->dev.bus = &coresight_bustype; dev_set_name(&csdev->dev, "%s", desc->pdata->name); ret = device_register(&csdev->dev); if (ret) goto err_device_register; mutex_lock(&coresight_mutex); coresight_fixup_device_conns(csdev); coresight_fixup_orphan_conns(csdev); mutex_unlock(&coresight_mutex); 
return csdev; err_device_register: kfree(conns); err_kzalloc_conns: kfree(refcnts); err_kzalloc_refcnts: kfree(csdev); err_kzalloc_csdev: return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(coresight_register); void coresight_unregister(struct coresight_device *csdev) { mutex_lock(&coresight_mutex); kfree(csdev->conns); device_unregister(&csdev->dev); mutex_unlock(&coresight_mutex); } EXPORT_SYMBOL_GPL(coresight_unregister); MODULE_LICENSE("GPL v2");
gpl-2.0
Brainiarc7/linux-3.18-parrot
kernel/posix-cpu-timers.c
433
39089
/* * Implement CPU time clocks for the POSIX clock interface. */ #include <linux/sched.h> #include <linux/posix-timers.h> #include <linux/errno.h> #include <linux/math64.h> #include <asm/uaccess.h> #include <linux/kernel_stat.h> #include <trace/events/timer.h> #include <linux/random.h> #include <linux/tick.h> #include <linux/workqueue.h> /* * Called after updating RLIMIT_CPU to run cpu timer and update * tsk->signal->cputime_expires expiration cache if necessary. Needs * siglock protection since other code may update expiration cache as * well. */ void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new) { cputime_t cputime = secs_to_cputime(rlim_new); spin_lock_irq(&task->sighand->siglock); set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL); spin_unlock_irq(&task->sighand->siglock); } static int check_clock(const clockid_t which_clock) { int error = 0; struct task_struct *p; const pid_t pid = CPUCLOCK_PID(which_clock); if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX) return -EINVAL; if (pid == 0) return 0; rcu_read_lock(); p = find_task_by_vpid(pid); if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ? same_thread_group(p, current) : has_group_leader_pid(p))) { error = -EINVAL; } rcu_read_unlock(); return error; } static inline unsigned long long timespec_to_sample(const clockid_t which_clock, const struct timespec *tp) { unsigned long long ret; ret = 0; /* high half always zero when .cpu used */ if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec; } else { ret = cputime_to_expires(timespec_to_cputime(tp)); } return ret; } static void sample_to_timespec(const clockid_t which_clock, unsigned long long expires, struct timespec *tp) { if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) *tp = ns_to_timespec(expires); else cputime_to_timespec((__force cputime_t)expires, tp); } /* * Update expiry time from increment, and increase overrun count, * given the current clock sample. 
*/ static void bump_cpu_timer(struct k_itimer *timer, unsigned long long now) { int i; unsigned long long delta, incr; if (timer->it.cpu.incr == 0) return; if (now < timer->it.cpu.expires) return; incr = timer->it.cpu.incr; delta = now + incr - timer->it.cpu.expires; /* Don't use (incr*2 < delta), incr*2 might overflow. */ for (i = 0; incr < delta - incr; i++) incr = incr << 1; for (; i >= 0; incr >>= 1, i--) { if (delta < incr) continue; timer->it.cpu.expires += incr; timer->it_overrun += 1 << i; delta -= incr; } } /** * task_cputime_zero - Check a task_cputime struct for all zero fields. * * @cputime: The struct to compare. * * Checks @cputime to see if all fields are zero. Returns true if all fields * are zero, false if any field is nonzero. */ static inline int task_cputime_zero(const struct task_cputime *cputime) { if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime) return 1; return 0; } static inline unsigned long long prof_ticks(struct task_struct *p) { cputime_t utime, stime; task_cputime(p, &utime, &stime); return cputime_to_expires(utime + stime); } static inline unsigned long long virt_ticks(struct task_struct *p) { cputime_t utime; task_cputime(p, &utime, NULL); return cputime_to_expires(utime); } static int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp) { int error = check_clock(which_clock); if (!error) { tp->tv_sec = 0; tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ); if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { /* * If sched_clock is using a cycle counter, we * don't have any idea of its true resolution * exported, but it is much more than 1s/HZ. */ tp->tv_nsec = 1; } } return error; } static int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp) { /* * You can never reset a CPU clock, but we check for other errors * in the call before failing with EPERM. 
*/ int error = check_clock(which_clock); if (error == 0) { error = -EPERM; } return error; } /* * Sample a per-thread clock for the given task. */ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, unsigned long long *sample) { switch (CPUCLOCK_WHICH(which_clock)) { default: return -EINVAL; case CPUCLOCK_PROF: *sample = prof_ticks(p); break; case CPUCLOCK_VIRT: *sample = virt_ticks(p); break; case CPUCLOCK_SCHED: *sample = task_sched_runtime(p); break; } return 0; } static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b) { if (b->utime > a->utime) a->utime = b->utime; if (b->stime > a->stime) a->stime = b->stime; if (b->sum_exec_runtime > a->sum_exec_runtime) a->sum_exec_runtime = b->sum_exec_runtime; } void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) { struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; struct task_cputime sum; unsigned long flags; if (!cputimer->running) { /* * The POSIX timer interface allows for absolute time expiry * values through the TIMER_ABSTIME flag, therefore we have * to synchronize the timer to the clock every time we start * it. */ thread_group_cputime(tsk, &sum); raw_spin_lock_irqsave(&cputimer->lock, flags); cputimer->running = 1; update_gt_cputime(&cputimer->cputime, &sum); } else raw_spin_lock_irqsave(&cputimer->lock, flags); *times = cputimer->cputime; raw_spin_unlock_irqrestore(&cputimer->lock, flags); } /* * Sample a process (thread group) clock for the given group_leader task. * Must be called with task sighand lock held for safe while_each_thread() * traversal. 
*/ static int cpu_clock_sample_group(const clockid_t which_clock, struct task_struct *p, unsigned long long *sample) { struct task_cputime cputime; switch (CPUCLOCK_WHICH(which_clock)) { default: return -EINVAL; case CPUCLOCK_PROF: thread_group_cputime(p, &cputime); *sample = cputime_to_expires(cputime.utime + cputime.stime); break; case CPUCLOCK_VIRT: thread_group_cputime(p, &cputime); *sample = cputime_to_expires(cputime.utime); break; case CPUCLOCK_SCHED: thread_group_cputime(p, &cputime); *sample = cputime.sum_exec_runtime; break; } return 0; } static int posix_cpu_clock_get_task(struct task_struct *tsk, const clockid_t which_clock, struct timespec *tp) { int err = -EINVAL; unsigned long long rtn; if (CPUCLOCK_PERTHREAD(which_clock)) { if (same_thread_group(tsk, current)) err = cpu_clock_sample(which_clock, tsk, &rtn); } else { unsigned long flags; struct sighand_struct *sighand; /* * while_each_thread() is not yet entirely RCU safe, * keep locking the group while sampling process * clock for now. */ sighand = lock_task_sighand(tsk, &flags); if (!sighand) return err; if (tsk == current || thread_group_leader(tsk)) err = cpu_clock_sample_group(which_clock, tsk, &rtn); unlock_task_sighand(tsk, &flags); } if (!err) sample_to_timespec(which_clock, rtn, tp); return err; } static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp) { const pid_t pid = CPUCLOCK_PID(which_clock); int err = -EINVAL; if (pid == 0) { /* * Special case constant value for our own clocks. * We don't have to do any lookup to find ourselves. */ err = posix_cpu_clock_get_task(current, which_clock, tp); } else { /* * Find the given PID, and validate that the caller * should be able to see it. */ struct task_struct *p; rcu_read_lock(); p = find_task_by_vpid(pid); if (p) err = posix_cpu_clock_get_task(p, which_clock, tp); rcu_read_unlock(); } return err; } /* * Validate the clockid_t for a new CPU-clock timer, and initialize the timer. 
* This is called from sys_timer_create() and do_cpu_nanosleep() with the * new timer already all-zeros initialized. */ static int posix_cpu_timer_create(struct k_itimer *new_timer) { int ret = 0; const pid_t pid = CPUCLOCK_PID(new_timer->it_clock); struct task_struct *p; if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX) return -EINVAL; INIT_LIST_HEAD(&new_timer->it.cpu.entry); rcu_read_lock(); if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) { if (pid == 0) { p = current; } else { p = find_task_by_vpid(pid); if (p && !same_thread_group(p, current)) p = NULL; } } else { if (pid == 0) { p = current->group_leader; } else { p = find_task_by_vpid(pid); if (p && !has_group_leader_pid(p)) p = NULL; } } new_timer->it.cpu.task = p; if (p) { get_task_struct(p); } else { ret = -EINVAL; } rcu_read_unlock(); return ret; } /* * Clean up a CPU-clock timer that is about to be destroyed. * This is called from timer deletion with the timer already locked. * If we return TIMER_RETRY, it's necessary to release the timer's lock * and try again. (This happens when the timer is in the middle of firing.) */ static int posix_cpu_timer_del(struct k_itimer *timer) { int ret = 0; unsigned long flags; struct sighand_struct *sighand; struct task_struct *p = timer->it.cpu.task; WARN_ON_ONCE(p == NULL); /* * Protect against sighand release/switch in exit/exec and process/ * thread timer list entry concurrent read/writes. */ sighand = lock_task_sighand(p, &flags); if (unlikely(sighand == NULL)) { /* * We raced with the reaping of the task. * The deletion should have cleared us off the list. 
*/ WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry)); } else { if (timer->it.cpu.firing) ret = TIMER_RETRY; else list_del(&timer->it.cpu.entry); unlock_task_sighand(p, &flags); } if (!ret) put_task_struct(p); return ret; } static void cleanup_timers_list(struct list_head *head) { struct cpu_timer_list *timer, *next; list_for_each_entry_safe(timer, next, head, entry) list_del_init(&timer->entry); } /* * Clean out CPU timers still ticking when a thread exited. The task * pointer is cleared, and the expiry time is replaced with the residual * time for later timer_gettime calls to return. * This must be called with the siglock held. */ static void cleanup_timers(struct list_head *head) { cleanup_timers_list(head); cleanup_timers_list(++head); cleanup_timers_list(++head); } /* * These are both called with the siglock held, when the current thread * is being reaped. When the final (leader) thread in the group is reaped, * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit. */ void posix_cpu_timers_exit(struct task_struct *tsk) { add_device_randomness((const void*) &tsk->se.sum_exec_runtime, sizeof(unsigned long long)); cleanup_timers(tsk->cpu_timers); } void posix_cpu_timers_exit_group(struct task_struct *tsk) { cleanup_timers(tsk->signal->cpu_timers); } static inline int expires_gt(cputime_t expires, cputime_t new_exp) { return expires == 0 || expires > new_exp; } /* * Insert the timer on the appropriate list before any timers that * expire later. This must be called with the sighand lock held. 
*/ static void arm_timer(struct k_itimer *timer) { struct task_struct *p = timer->it.cpu.task; struct list_head *head, *listpos; struct task_cputime *cputime_expires; struct cpu_timer_list *const nt = &timer->it.cpu; struct cpu_timer_list *next; if (CPUCLOCK_PERTHREAD(timer->it_clock)) { head = p->cpu_timers; cputime_expires = &p->cputime_expires; } else { head = p->signal->cpu_timers; cputime_expires = &p->signal->cputime_expires; } head += CPUCLOCK_WHICH(timer->it_clock); listpos = head; list_for_each_entry(next, head, entry) { if (nt->expires < next->expires) break; listpos = &next->entry; } list_add(&nt->entry, listpos); if (listpos == head) { unsigned long long exp = nt->expires; /* * We are the new earliest-expiring POSIX 1.b timer, hence * need to update expiration cache. Take into account that * for process timers we share expiration cache with itimers * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME. */ switch (CPUCLOCK_WHICH(timer->it_clock)) { case CPUCLOCK_PROF: if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp))) cputime_expires->prof_exp = expires_to_cputime(exp); break; case CPUCLOCK_VIRT: if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp))) cputime_expires->virt_exp = expires_to_cputime(exp); break; case CPUCLOCK_SCHED: if (cputime_expires->sched_exp == 0 || cputime_expires->sched_exp > exp) cputime_expires->sched_exp = exp; break; } } } /* * The timer is locked, fire it and arrange for its reload. */ static void cpu_timer_fire(struct k_itimer *timer) { if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) { /* * User don't want any signal. */ timer->it.cpu.expires = 0; } else if (unlikely(timer->sigq == NULL)) { /* * This a special case for clock_nanosleep, * not a normal timer from sys_timer_create. */ wake_up_process(timer->it_process); timer->it.cpu.expires = 0; } else if (timer->it.cpu.incr == 0) { /* * One-shot timer. Clear it as soon as it's fired. 
*/ posix_timer_event(timer, 0); timer->it.cpu.expires = 0; } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) { /* * The signal did not get queued because the signal * was ignored, so we won't get any callback to * reload the timer. But we need to keep it * ticking in case the signal is deliverable next time. */ posix_cpu_timer_schedule(timer); } } /* * Sample a process (thread group) timer for the given group_leader task. * Must be called with task sighand lock held for safe while_each_thread() * traversal. */ static int cpu_timer_sample_group(const clockid_t which_clock, struct task_struct *p, unsigned long long *sample) { struct task_cputime cputime; thread_group_cputimer(p, &cputime); switch (CPUCLOCK_WHICH(which_clock)) { default: return -EINVAL; case CPUCLOCK_PROF: *sample = cputime_to_expires(cputime.utime + cputime.stime); break; case CPUCLOCK_VIRT: *sample = cputime_to_expires(cputime.utime); break; case CPUCLOCK_SCHED: *sample = cputime.sum_exec_runtime + task_delta_exec(p); break; } return 0; } #ifdef CONFIG_NO_HZ_FULL static void nohz_kick_work_fn(struct work_struct *work) { tick_nohz_full_kick_all(); } static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn); /* * We need the IPIs to be sent from sane process context. * The posix cpu timers are always set with irqs disabled. */ static void posix_cpu_timer_kick_nohz(void) { if (context_tracking_is_enabled()) schedule_work(&nohz_kick_work); } bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk) { if (!task_cputime_zero(&tsk->cputime_expires)) return false; if (tsk->signal->cputimer.running) return false; return true; } #else static inline void posix_cpu_timer_kick_nohz(void) { } #endif /* * Guts of sys_timer_settime for CPU timers. * This is called with the timer locked and interrupts disabled. * If we return TIMER_RETRY, it's necessary to release the timer's lock * and try again. (This happens when the timer is in the middle of firing.) 
*/ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, struct itimerspec *new, struct itimerspec *old) { unsigned long flags; struct sighand_struct *sighand; struct task_struct *p = timer->it.cpu.task; unsigned long long old_expires, new_expires, old_incr, val; int ret; WARN_ON_ONCE(p == NULL); new_expires = timespec_to_sample(timer->it_clock, &new->it_value); /* * Protect against sighand release/switch in exit/exec and p->cpu_timers * and p->signal->cpu_timers read/write in arm_timer() */ sighand = lock_task_sighand(p, &flags); /* * If p has just been reaped, we can no * longer get any information about it at all. */ if (unlikely(sighand == NULL)) { return -ESRCH; } /* * Disarm any old timer after extracting its expiry time. */ WARN_ON_ONCE(!irqs_disabled()); ret = 0; old_incr = timer->it.cpu.incr; old_expires = timer->it.cpu.expires; if (unlikely(timer->it.cpu.firing)) { timer->it.cpu.firing = -1; ret = TIMER_RETRY; } else list_del_init(&timer->it.cpu.entry); /* * We need to sample the current value to convert the new * value from to relative and absolute, and to convert the * old value from absolute to relative. To set a process * timer, we need a sample to balance the thread expiry * times (in arm_timer). With an absolute time, we must * check if it's already passed. In short, we need a sample. */ if (CPUCLOCK_PERTHREAD(timer->it_clock)) { cpu_clock_sample(timer->it_clock, p, &val); } else { cpu_timer_sample_group(timer->it_clock, p, &val); } if (old) { if (old_expires == 0) { old->it_value.tv_sec = 0; old->it_value.tv_nsec = 0; } else { /* * Update the timer in case it has * overrun already. If it has, * we'll report it as having overrun * and with the next reloaded timer * already ticking, though we are * swallowing that pending * notification here to install the * new setting. 
*/ bump_cpu_timer(timer, val); if (val < timer->it.cpu.expires) { old_expires = timer->it.cpu.expires - val; sample_to_timespec(timer->it_clock, old_expires, &old->it_value); } else { old->it_value.tv_nsec = 1; old->it_value.tv_sec = 0; } } } if (unlikely(ret)) { /* * We are colliding with the timer actually firing. * Punt after filling in the timer's old value, and * disable this firing since we are already reporting * it as an overrun (thanks to bump_cpu_timer above). */ unlock_task_sighand(p, &flags); goto out; } if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) { new_expires += val; } /* * Install the new expiry time (or zero). * For a timer with no notification action, we don't actually * arm the timer (we'll just fake it for timer_gettime). */ timer->it.cpu.expires = new_expires; if (new_expires != 0 && val < new_expires) { arm_timer(timer); } unlock_task_sighand(p, &flags); /* * Install the new reload setting, and * set up the signal and overrun bookkeeping. */ timer->it.cpu.incr = timespec_to_sample(timer->it_clock, &new->it_interval); /* * This acts as a modification timestamp for the timer, * so any automatic reload attempt will punt on seeing * that we have reset the timer manually. */ timer->it_requeue_pending = (timer->it_requeue_pending + 2) & ~REQUEUE_PENDING; timer->it_overrun_last = 0; timer->it_overrun = -1; if (new_expires != 0 && !(val < new_expires)) { /* * The designated time already passed, so we notify * immediately, even if the thread never runs to * accumulate more time on this clock. */ cpu_timer_fire(timer); } ret = 0; out: if (old) { sample_to_timespec(timer->it_clock, old_incr, &old->it_interval); } if (!ret) posix_cpu_timer_kick_nohz(); return ret; } static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) { unsigned long long now; struct task_struct *p = timer->it.cpu.task; WARN_ON_ONCE(p == NULL); /* * Easy part: convert the reload time. 
*/ sample_to_timespec(timer->it_clock, timer->it.cpu.incr, &itp->it_interval); if (timer->it.cpu.expires == 0) { /* Timer not armed at all. */ itp->it_value.tv_sec = itp->it_value.tv_nsec = 0; return; } /* * Sample the clock to take the difference with the expiry time. */ if (CPUCLOCK_PERTHREAD(timer->it_clock)) { cpu_clock_sample(timer->it_clock, p, &now); } else { struct sighand_struct *sighand; unsigned long flags; /* * Protect against sighand release/switch in exit/exec and * also make timer sampling safe if it ends up calling * thread_group_cputime(). */ sighand = lock_task_sighand(p, &flags); if (unlikely(sighand == NULL)) { /* * The process has been reaped. * We can't even collect a sample any more. * Call the timer disarmed, nothing else to do. */ timer->it.cpu.expires = 0; sample_to_timespec(timer->it_clock, timer->it.cpu.expires, &itp->it_value); } else { cpu_timer_sample_group(timer->it_clock, p, &now); unlock_task_sighand(p, &flags); } } if (now < timer->it.cpu.expires) { sample_to_timespec(timer->it_clock, timer->it.cpu.expires - now, &itp->it_value); } else { /* * The timer should have expired already, but the firing * hasn't taken place yet. Say it's just about to expire. */ itp->it_value.tv_nsec = 1; itp->it_value.tv_sec = 0; } } static unsigned long long check_timers_list(struct list_head *timers, struct list_head *firing, unsigned long long curr) { int maxfire = 20; while (!list_empty(timers)) { struct cpu_timer_list *t; t = list_first_entry(timers, struct cpu_timer_list, entry); if (!--maxfire || curr < t->expires) return t->expires; t->firing = 1; list_move_tail(&t->entry, firing); } return 0; } /* * Check for any per-thread CPU timers that have fired and move them off * the tsk->cpu_timers[N] list onto the firing list. Here we update the * tsk->it_*_expires values to reflect the remaining thread CPU timers. 
*/ static void check_thread_timers(struct task_struct *tsk, struct list_head *firing) { struct list_head *timers = tsk->cpu_timers; struct signal_struct *const sig = tsk->signal; struct task_cputime *tsk_expires = &tsk->cputime_expires; unsigned long long expires; unsigned long soft; expires = check_timers_list(timers, firing, prof_ticks(tsk)); tsk_expires->prof_exp = expires_to_cputime(expires); expires = check_timers_list(++timers, firing, virt_ticks(tsk)); tsk_expires->virt_exp = expires_to_cputime(expires); tsk_expires->sched_exp = check_timers_list(++timers, firing, tsk->se.sum_exec_runtime); /* * Check for the special case thread timers. */ soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur); if (soft != RLIM_INFINITY) { unsigned long hard = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max); if (hard != RLIM_INFINITY && tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { /* * At the hard limit, we just die. * No need to calculate anything else now. */ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); return; } if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { /* * At the soft limit, send a SIGXCPU every second. 
*/ if (soft < hard) { soft += USEC_PER_SEC; sig->rlim[RLIMIT_RTTIME].rlim_cur = soft; } printk(KERN_INFO "RT Watchdog Timeout: %s[%d]\n", tsk->comm, task_pid_nr(tsk)); __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); } } } static void stop_process_timers(struct signal_struct *sig) { struct thread_group_cputimer *cputimer = &sig->cputimer; unsigned long flags; raw_spin_lock_irqsave(&cputimer->lock, flags); cputimer->running = 0; raw_spin_unlock_irqrestore(&cputimer->lock, flags); } static u32 onecputick; static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, unsigned long long *expires, unsigned long long cur_time, int signo) { if (!it->expires) return; if (cur_time >= it->expires) { if (it->incr) { it->expires += it->incr; it->error += it->incr_error; if (it->error >= onecputick) { it->expires -= cputime_one_jiffy; it->error -= onecputick; } } else { it->expires = 0; } trace_itimer_expire(signo == SIGPROF ? ITIMER_PROF : ITIMER_VIRTUAL, tsk->signal->leader_pid, cur_time); __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); } if (it->expires && (!*expires || it->expires < *expires)) { *expires = it->expires; } } /* * Check for any per-thread CPU timers that have fired and move them * off the tsk->*_timers list onto the firing list. Per-thread timers * have already been taken off. */ static void check_process_timers(struct task_struct *tsk, struct list_head *firing) { struct signal_struct *const sig = tsk->signal; unsigned long long utime, ptime, virt_expires, prof_expires; unsigned long long sum_sched_runtime, sched_expires; struct list_head *timers = sig->cpu_timers; struct task_cputime cputime; unsigned long soft; /* * Collect the current process totals. 
*/ thread_group_cputimer(tsk, &cputime); utime = cputime_to_expires(cputime.utime); ptime = utime + cputime_to_expires(cputime.stime); sum_sched_runtime = cputime.sum_exec_runtime; prof_expires = check_timers_list(timers, firing, ptime); virt_expires = check_timers_list(++timers, firing, utime); sched_expires = check_timers_list(++timers, firing, sum_sched_runtime); /* * Check for the special case process timers. */ check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime, SIGPROF); check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime, SIGVTALRM); soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); if (soft != RLIM_INFINITY) { unsigned long psecs = cputime_to_secs(ptime); unsigned long hard = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max); cputime_t x; if (psecs >= hard) { /* * At the hard limit, we just die. * No need to calculate anything else now. */ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); return; } if (psecs >= soft) { /* * At the soft limit, send a SIGXCPU every second. */ __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); if (soft < hard) { soft++; sig->rlim[RLIMIT_CPU].rlim_cur = soft; } } x = secs_to_cputime(soft); if (!prof_expires || x < prof_expires) { prof_expires = x; } } sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires); sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires); sig->cputime_expires.sched_exp = sched_expires; if (task_cputime_zero(&sig->cputime_expires)) stop_process_timers(sig); } /* * This is called from the signal code (via do_schedule_next_timer) * when the last timer signal was delivered and we have to reload the timer. */ void posix_cpu_timer_schedule(struct k_itimer *timer) { struct sighand_struct *sighand; unsigned long flags; struct task_struct *p = timer->it.cpu.task; unsigned long long now; WARN_ON_ONCE(p == NULL); /* * Fetch the current sample and update the timer's expiry time. 
*/ if (CPUCLOCK_PERTHREAD(timer->it_clock)) { cpu_clock_sample(timer->it_clock, p, &now); bump_cpu_timer(timer, now); if (unlikely(p->exit_state)) goto out; /* Protect timer list r/w in arm_timer() */ sighand = lock_task_sighand(p, &flags); if (!sighand) goto out; } else { /* * Protect arm_timer() and timer sampling in case of call to * thread_group_cputime(). */ sighand = lock_task_sighand(p, &flags); if (unlikely(sighand == NULL)) { /* * The process has been reaped. * We can't even collect a sample any more. */ timer->it.cpu.expires = 0; goto out; } else if (unlikely(p->exit_state) && thread_group_empty(p)) { unlock_task_sighand(p, &flags); /* Optimizations: if the process is dying, no need to rearm */ goto out; } cpu_timer_sample_group(timer->it_clock, p, &now); bump_cpu_timer(timer, now); /* Leave the sighand locked for the call below. */ } /* * Now re-arm for the new expiry time. */ WARN_ON_ONCE(!irqs_disabled()); arm_timer(timer); unlock_task_sighand(p, &flags); /* Kick full dynticks CPUs in case they need to tick on the new timer */ posix_cpu_timer_kick_nohz(); out: timer->it_overrun_last = timer->it_overrun; timer->it_overrun = -1; ++timer->it_requeue_pending; } /** * task_cputime_expired - Compare two task_cputime entities. * * @sample: The task_cputime structure to be checked for expiration. * @expires: Expiration times, against which @sample will be checked. * * Checks @sample against @expires to see if any field of @sample has expired. * Returns true if any field of the former is greater than the corresponding * field of the latter if the latter field is set. Otherwise returns false. 
*/
static inline int task_cputime_expired(const struct task_cputime *sample,
					const struct task_cputime *expires)
{
	/* A zero field in *expires means "no limit armed" for that clock. */
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	/* Profiling time is user + system, so compare against their sum. */
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;
	cputime_t utime, stime;

	task_cputime(tsk, &utime, &stime);

	/* Per-thread timers first: compare a sample of this thread's
	 * accounting against its private expiration cache.
	 */
	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample = {
			.utime = utime,
			.stime = stime,
			.sum_exec_runtime = tsk->se.sum_exec_runtime
		};

		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/* Process-wide timers are only checked while the group cputimer
	 * is running; snapshot it under its lock so the three fields are
	 * mutually consistent.
	 */
	if (sig->cputimer.running) {
		struct task_cputime group_sample;

		raw_spin_lock(&sig->cputimer.lock);
		group_sample = sig->cputimer.cputime;
		raw_spin_unlock(&sig->cputimer.lock);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
*/ if (!fastpath_timer_check(tsk)) return; if (!lock_task_sighand(tsk, &flags)) return; /* * Here we take off tsk->signal->cpu_timers[N] and * tsk->cpu_timers[N] all the timers that are firing, and * put them on the firing list. */ check_thread_timers(tsk, &firing); /* * If there are any active process wide timers (POSIX 1.b, itimers, * RLIMIT_CPU) cputimer must be running. */ if (tsk->signal->cputimer.running) check_process_timers(tsk, &firing); /* * We must release these locks before taking any timer's lock. * There is a potential race with timer deletion here, as the * siglock now protects our private firing list. We have set * the firing flag in each timer, so that a deletion attempt * that gets the timer lock before we do will give it up and * spin until we've taken care of that timer below. */ unlock_task_sighand(tsk, &flags); /* * Now that all the timers on our list have the firing flag, * no one will touch their list entries but us. We'll take * each timer's lock before clearing its firing flag, so no * timer call will interfere. */ list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) { int cpu_firing; spin_lock(&timer->it_lock); list_del_init(&timer->it.cpu.entry); cpu_firing = timer->it.cpu.firing; timer->it.cpu.firing = 0; /* * The firing flag is -1 if we collided with a reset * of the timer, which already reported this * almost-firing as an overrun. So don't generate an event. */ if (likely(cpu_firing >= 0)) cpu_timer_fire(timer); spin_unlock(&timer->it_lock); } } /* * Set one of the process-wide special case CPU timers or RLIMIT_CPU. * The tsk->sighand->siglock must be held by the caller. */ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, cputime_t *newval, cputime_t *oldval) { unsigned long long now; WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); cpu_timer_sample_group(clock_idx, tsk, &now); if (oldval) { /* * We are setting itimer. 
The *oldval is absolute and we update * it to be relative, *newval argument is relative and we update * it to be absolute. */ if (*oldval) { if (*oldval <= now) { /* Just about to fire. */ *oldval = cputime_one_jiffy; } else { *oldval -= now; } } if (!*newval) goto out; *newval += now; } /* * Update expiration cache if we are the earliest timer, or eventually * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire. */ switch (clock_idx) { case CPUCLOCK_PROF: if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval)) tsk->signal->cputime_expires.prof_exp = *newval; break; case CPUCLOCK_VIRT: if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval)) tsk->signal->cputime_expires.virt_exp = *newval; break; } out: posix_cpu_timer_kick_nohz(); } static int do_cpu_nanosleep(const clockid_t which_clock, int flags, struct timespec *rqtp, struct itimerspec *it) { struct k_itimer timer; int error; /* * Set up a temporary timer and then wait for it to go off. */ memset(&timer, 0, sizeof timer); spin_lock_init(&timer.it_lock); timer.it_clock = which_clock; timer.it_overrun = -1; error = posix_cpu_timer_create(&timer); timer.it_process = current; if (!error) { static struct itimerspec zero_it; memset(it, 0, sizeof *it); it->it_value = *rqtp; spin_lock_irq(&timer.it_lock); error = posix_cpu_timer_set(&timer, flags, it, NULL); if (error) { spin_unlock_irq(&timer.it_lock); return error; } while (!signal_pending(current)) { if (timer.it.cpu.expires == 0) { /* * Our timer fired and was reset, below * deletion can not fail. */ posix_cpu_timer_del(&timer); spin_unlock_irq(&timer.it_lock); return 0; } /* * Block until cpu_timer_fire (or a signal) wakes us. */ __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(&timer.it_lock); schedule(); spin_lock_irq(&timer.it_lock); } /* * We were interrupted by a signal. 
*/ sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp); error = posix_cpu_timer_set(&timer, 0, &zero_it, it); if (!error) { /* * Timer is now unarmed, deletion can not fail. */ posix_cpu_timer_del(&timer); } spin_unlock_irq(&timer.it_lock); while (error == TIMER_RETRY) { /* * We need to handle case when timer was or is in the * middle of firing. In other cases we already freed * resources. */ spin_lock_irq(&timer.it_lock); error = posix_cpu_timer_del(&timer); spin_unlock_irq(&timer.it_lock); } if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) { /* * It actually did fire already. */ return 0; } error = -ERESTART_RESTARTBLOCK; } return error; } static long posix_cpu_nsleep_restart(struct restart_block *restart_block); static int posix_cpu_nsleep(const clockid_t which_clock, int flags, struct timespec *rqtp, struct timespec __user *rmtp) { struct restart_block *restart_block = &current_thread_info()->restart_block; struct itimerspec it; int error; /* * Diagnose required errors first. */ if (CPUCLOCK_PERTHREAD(which_clock) && (CPUCLOCK_PID(which_clock) == 0 || CPUCLOCK_PID(which_clock) == current->pid)) return -EINVAL; error = do_cpu_nanosleep(which_clock, flags, rqtp, &it); if (error == -ERESTART_RESTARTBLOCK) { if (flags & TIMER_ABSTIME) return -ERESTARTNOHAND; /* * Report back to the user the time still remaining. 
*/ if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp)) return -EFAULT; restart_block->fn = posix_cpu_nsleep_restart; restart_block->nanosleep.clockid = which_clock; restart_block->nanosleep.rmtp = rmtp; restart_block->nanosleep.expires = timespec_to_ns(rqtp); } return error; } static long posix_cpu_nsleep_restart(struct restart_block *restart_block) { clockid_t which_clock = restart_block->nanosleep.clockid; struct timespec t; struct itimerspec it; int error; t = ns_to_timespec(restart_block->nanosleep.expires); error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it); if (error == -ERESTART_RESTARTBLOCK) { struct timespec __user *rmtp = restart_block->nanosleep.rmtp; /* * Report back to the user the time still remaining. */ if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp)) return -EFAULT; restart_block->nanosleep.expires = timespec_to_ns(&t); } return error; } #define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED) #define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED) static int process_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp) { return posix_cpu_clock_getres(PROCESS_CLOCK, tp); } static int process_cpu_clock_get(const clockid_t which_clock, struct timespec *tp) { return posix_cpu_clock_get(PROCESS_CLOCK, tp); } static int process_cpu_timer_create(struct k_itimer *timer) { timer->it_clock = PROCESS_CLOCK; return posix_cpu_timer_create(timer); } static int process_cpu_nsleep(const clockid_t which_clock, int flags, struct timespec *rqtp, struct timespec __user *rmtp) { return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp); } static long process_cpu_nsleep_restart(struct restart_block *restart_block) { return -EINVAL; } static int thread_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp) { return posix_cpu_clock_getres(THREAD_CLOCK, tp); } static int thread_cpu_clock_get(const clockid_t which_clock, struct timespec *tp) { return posix_cpu_clock_get(THREAD_CLOCK, tp); } static int 
thread_cpu_timer_create(struct k_itimer *timer)
{
	/* Force the timer onto the calling thread's CPU-time clock. */
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

/*
 * Clock operations for clockids that encode a specific task/process
 * (the CPUCLOCK_* encodings), dispatched here by the posix-timers core.
 */
struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.nsleep_restart	= posix_cpu_nsleep_restart,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
};

/*
 * Register the CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID
 * clocks and cache the length of one cputime jiffy in nanoseconds
 * (onecputick), used elsewhere for itimer rounding.
 */
static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres	= process_cpu_clock_getres,
		.clock_get	= process_cpu_clock_get,
		.timer_create	= process_cpu_timer_create,
		.nsleep		= process_cpu_nsleep,
		.nsleep_restart	= process_cpu_nsleep_restart,
	};
	/* Note: no nsleep for the thread clock — sleeping on your own
	 * thread clock is rejected at the posix_cpu_nsleep level.
	 */
	struct k_clock thread = {
		.clock_getres	= thread_cpu_clock_getres,
		.clock_get	= thread_cpu_clock_get,
		.timer_create	= thread_cpu_timer_create,
	};
	struct timespec ts;

	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	cputime_to_timespec(cputime_one_jiffy, &ts);
	onecputick = ts.tv_nsec;
	/* One cputime jiffy must be well under a second for this cache. */
	WARN_ON(ts.tv_sec != 0);

	return 0;
}
__initcall(init_posix_cpu_timers);
gpl-2.0
lostpuppy/m180s-kernel-stock
arch/sparc/kernel/iommu.c
945
21430
/* iommu.c: Generic sparc64 IOMMU support. * * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net) * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/iommu-helper.h> #include <linux/bitmap.h> #ifdef CONFIG_PCI #include <linux/pci.h> #endif #include <asm/iommu.h> #include "iommu_common.h" #define STC_CTXMATCH_ADDR(STC, CTX) \ ((STC)->strbuf_ctxmatch_base + ((CTX) << 3)) #define STC_FLUSHFLAG_INIT(STC) \ (*((STC)->strbuf_flushflag) = 0UL) #define STC_FLUSHFLAG_SET(STC) \ (*((STC)->strbuf_flushflag) != 0UL) #define iommu_read(__reg) \ ({ u64 __ret; \ __asm__ __volatile__("ldxa [%1] %2, %0" \ : "=r" (__ret) \ : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \ : "memory"); \ __ret; \ }) #define iommu_write(__reg, __val) \ __asm__ __volatile__("stxa %0, [%1] %2" \ : /* no outputs */ \ : "r" (__val), "r" (__reg), \ "i" (ASI_PHYS_BYPASS_EC_E)) /* Must be invoked under the IOMMU lock. */ static void iommu_flushall(struct iommu *iommu) { if (iommu->iommu_flushinv) { iommu_write(iommu->iommu_flushinv, ~(u64)0); } else { unsigned long tag; int entry; tag = iommu->iommu_tags; for (entry = 0; entry < 16; entry++) { iommu_write(tag, 0); tag += 8; } /* Ensure completion of previous PIO writes. */ (void) iommu_read(iommu->write_complete_reg); } } #define IOPTE_CONSISTENT(CTX) \ (IOPTE_VALID | IOPTE_CACHE | \ (((CTX) << 47) & IOPTE_CONTEXT)) #define IOPTE_STREAMING(CTX) \ (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF) /* Existing mappings are never marked invalid, instead they * are pointed to a dummy page. 
*/
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

/* Point an IOPTE at the IOMMU's dummy page, keeping its flag bits.
 * This is how entries are "invalidated" (see IOPTE_IS_DUMMY above).
 */
static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	/* 'handle' lets scatterlist allocation continue where the previous
	 * segment left off; otherwise resume from the arena hint.
	 */
	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space.  If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:
	/* Allocations must not cross the device's DMA segment boundary. */
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.
*/
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Hint only ever advances; see the header comment above. */
	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

/* Release npages previously handed out by iommu_range_alloc().
 * Caller holds the iommu lock; the IOPTEs themselves are pointed at
 * the dummy page by the caller, this only clears the arena bitmap.
 */
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	bitmap_clear(arena->map, entry, npages);
}

/* One-time setup of the software IOMMU state: lock, arena bitmap,
 * dummy page, and the page table itself, all allocated on @numa_node.
 * Returns 0 on success or -ENOMEM, undoing partial allocations.
 */
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.
	 * One bit per TSB entry, rounded up to an 8-byte multiple.
	 */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	/* Hypervisor (sun4v) platforms never need the flush-all hook. */
	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.
*/ order = get_order(tsbsize); page = alloc_pages_node(numa_node, GFP_KERNEL, order); if (!page) { printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n"); goto out_free_dummy_page; } iommu->page_table = (iopte_t *)page_address(page); for (i = 0; i < num_tsb_entries; i++) iopte_make_dummy(iommu, &iommu->page_table[i]); return 0; out_free_dummy_page: free_page(iommu->dummy_page); iommu->dummy_page = 0UL; out_free_map: kfree(iommu->arena.map); iommu->arena.map = NULL; return -ENOMEM; } static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu, unsigned long npages) { unsigned long entry; entry = iommu_range_alloc(dev, iommu, npages, NULL); if (unlikely(entry == DMA_ERROR_CODE)) return NULL; return iommu->page_table + entry; } static int iommu_alloc_ctx(struct iommu *iommu) { int lowest = iommu->ctx_lowest_free; int sz = IOMMU_NUM_CTXS - lowest; int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest); if (unlikely(n == sz)) { n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); if (unlikely(n == lowest)) { printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); n = 0; } } if (n) __set_bit(n, iommu->ctx_bitmap); return n; } static inline void iommu_free_ctx(struct iommu *iommu, int ctx) { if (likely(ctx)) { __clear_bit(ctx, iommu->ctx_bitmap); if (ctx < iommu->ctx_lowest_free) iommu->ctx_lowest_free = ctx; } } static void *dma_4u_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) { unsigned long flags, order, first_page; struct iommu *iommu; struct page *page; int npages, nid; iopte_t *iopte; void *ret; size = IO_PAGE_ALIGN(size); order = get_order(size); if (order >= 10) return NULL; nid = dev->archdata.numa_node; page = alloc_pages_node(nid, gfp, order); if (unlikely(!page)) return NULL; first_page = (unsigned long) page_address(page); memset((char *)first_page, 0, PAGE_SIZE << order); iommu = dev->archdata.iommu; spin_lock_irqsave(&iommu->lock, flags); iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT); 
spin_unlock_irqrestore(&iommu->lock, flags); if (unlikely(iopte == NULL)) { free_pages(first_page, order); return NULL; } *dma_addrp = (iommu->page_table_map_base + ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); ret = (void *) first_page; npages = size >> IO_PAGE_SHIFT; first_page = __pa(first_page); while (npages--) { iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) | IOPTE_WRITE | (first_page & IOPTE_PAGE)); iopte++; first_page += IO_PAGE_SIZE; } return ret; } static void dma_4u_free_coherent(struct device *dev, size_t size, void *cpu, dma_addr_t dvma) { struct iommu *iommu; iopte_t *iopte; unsigned long flags, order, npages; npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; iommu = dev->archdata.iommu; iopte = iommu->page_table + ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); spin_lock_irqsave(&iommu->lock, flags); iommu_range_free(iommu, dvma, npages); spin_unlock_irqrestore(&iommu->lock, flags); order = get_order(size); if (order < 10) free_pages((unsigned long)cpu, order); } static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, unsigned long offset, size_t sz, enum dma_data_direction direction, struct dma_attrs *attrs) { struct iommu *iommu; struct strbuf *strbuf; iopte_t *base; unsigned long flags, npages, oaddr; unsigned long i, base_paddr, ctx; u32 bus_addr, ret; unsigned long iopte_protection; iommu = dev->archdata.iommu; strbuf = dev->archdata.stc; if (unlikely(direction == DMA_NONE)) goto bad_no_ctx; oaddr = (unsigned long)(page_address(page) + offset); npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; spin_lock_irqsave(&iommu->lock, flags); base = alloc_npages(dev, iommu, npages); ctx = 0; if (iommu->iommu_ctxflush) ctx = iommu_alloc_ctx(iommu); spin_unlock_irqrestore(&iommu->lock, flags); if (unlikely(!base)) goto bad; bus_addr = (iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT)); ret = bus_addr | (oaddr & ~IO_PAGE_MASK); base_paddr = __pa(oaddr & IO_PAGE_MASK); if 
(strbuf->strbuf_enabled) iopte_protection = IOPTE_STREAMING(ctx); else iopte_protection = IOPTE_CONSISTENT(ctx); if (direction != DMA_TO_DEVICE) iopte_protection |= IOPTE_WRITE; for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE) iopte_val(*base) = iopte_protection | base_paddr; return ret; bad: iommu_free_ctx(iommu, ctx); bad_no_ctx: if (printk_ratelimit()) WARN_ON(1); return DMA_ERROR_CODE; } static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, enum dma_data_direction direction) { int limit; if (strbuf->strbuf_ctxflush && iommu->iommu_ctxflush) { unsigned long matchreg, flushreg; u64 val; flushreg = strbuf->strbuf_ctxflush; matchreg = STC_CTXMATCH_ADDR(strbuf, ctx); iommu_write(flushreg, ctx); val = iommu_read(matchreg); val &= 0xffff; if (!val) goto do_flush_sync; while (val) { if (val & 0x1) iommu_write(flushreg, ctx); val >>= 1; } val = iommu_read(matchreg); if (unlikely(val)) { printk(KERN_WARNING "strbuf_flush: ctx flush " "timeout matchreg[%llx] ctx[%lx]\n", val, ctx); goto do_page_flush; } } else { unsigned long i; do_page_flush: for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE) iommu_write(strbuf->strbuf_pflush, vaddr); } do_flush_sync: /* If the device could not have possibly put dirty data into * the streaming cache, no flush-flag synchronization needs * to be performed. 
*/ if (direction == DMA_TO_DEVICE) return; STC_FLUSHFLAG_INIT(strbuf); iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa); (void) iommu_read(iommu->write_complete_reg); limit = 100000; while (!STC_FLUSHFLAG_SET(strbuf)) { limit--; if (!limit) break; udelay(1); rmb(); } if (!limit) printk(KERN_WARNING "strbuf_flush: flushflag timeout " "vaddr[%08x] ctx[%lx] npages[%ld]\n", vaddr, ctx, npages); } static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, size_t sz, enum dma_data_direction direction, struct dma_attrs *attrs) { struct iommu *iommu; struct strbuf *strbuf; iopte_t *base; unsigned long flags, npages, ctx, i; if (unlikely(direction == DMA_NONE)) { if (printk_ratelimit()) WARN_ON(1); return; } iommu = dev->archdata.iommu; strbuf = dev->archdata.stc; npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; base = iommu->page_table + ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); bus_addr &= IO_PAGE_MASK; spin_lock_irqsave(&iommu->lock, flags); /* Record the context, if any. */ ctx = 0; if (iommu->iommu_ctxflush) ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; /* Step 1: Kick data out of streaming buffers if necessary. */ if (strbuf->strbuf_enabled) strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); /* Step 2: Clear out TSB entries. 
*/ for (i = 0; i < npages; i++) iopte_make_dummy(iommu, base + i); iommu_range_free(iommu, bus_addr, npages); iommu_free_ctx(iommu, ctx); spin_unlock_irqrestore(&iommu->lock, flags); } static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, struct dma_attrs *attrs) { struct scatterlist *s, *outs, *segstart; unsigned long flags, handle, prot, ctx; dma_addr_t dma_next = 0, dma_addr; unsigned int max_seg_size; unsigned long seg_boundary_size; int outcount, incount, i; struct strbuf *strbuf; struct iommu *iommu; unsigned long base_shift; BUG_ON(direction == DMA_NONE); iommu = dev->archdata.iommu; strbuf = dev->archdata.stc; if (nelems == 0 || !iommu) return 0; spin_lock_irqsave(&iommu->lock, flags); ctx = 0; if (iommu->iommu_ctxflush) ctx = iommu_alloc_ctx(iommu); if (strbuf->strbuf_enabled) prot = IOPTE_STREAMING(ctx); else prot = IOPTE_CONSISTENT(ctx); if (direction != DMA_TO_DEVICE) prot |= IOPTE_WRITE; outs = s = segstart = &sglist[0]; outcount = 1; incount = nelems; handle = 0; /* Init first segment length for backout at failure */ outs->dma_length = 0; max_seg_size = dma_get_max_seg_size(dev); seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, IO_PAGE_SIZE) >> IO_PAGE_SHIFT; base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT; for_each_sg(sglist, s, nelems, i) { unsigned long paddr, npages, entry, out_entry = 0, slen; iopte_t *base; slen = s->length; /* Sanity check */ if (slen == 0) { dma_next = 0; continue; } /* Allocate iommu entries for that segment */ paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); entry = iommu_range_alloc(dev, iommu, npages, &handle); /* Handle failure */ if (unlikely(entry == DMA_ERROR_CODE)) { if (printk_ratelimit()) printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" " npages %lx\n", iommu, paddr, npages); goto iommu_map_failed; } base = iommu->page_table + entry; /* Convert entry to a dma_addr_t */ 
dma_addr = iommu->page_table_map_base + (entry << IO_PAGE_SHIFT); dma_addr |= (s->offset & ~IO_PAGE_MASK); /* Insert into HW table */ paddr &= IO_PAGE_MASK; while (npages--) { iopte_val(*base) = prot | paddr; base++; paddr += IO_PAGE_SIZE; } /* If we are in an open segment, try merging */ if (segstart != s) { /* We cannot merge if: * - allocated dma_addr isn't contiguous to previous allocation */ if ((dma_addr != dma_next) || (outs->dma_length + s->length > max_seg_size) || (is_span_boundary(out_entry, base_shift, seg_boundary_size, outs, s))) { /* Can't merge: create a new segment */ segstart = s; outcount++; outs = sg_next(outs); } else { outs->dma_length += s->length; } } if (segstart == s) { /* This is a new segment, fill entries */ outs->dma_address = dma_addr; outs->dma_length = slen; out_entry = entry; } /* Calculate next page pointer for contiguous check */ dma_next = dma_addr + slen; } spin_unlock_irqrestore(&iommu->lock, flags); if (outcount < incount) { outs = sg_next(outs); outs->dma_address = DMA_ERROR_CODE; outs->dma_length = 0; } return outcount; iommu_map_failed: for_each_sg(sglist, s, nelems, i) { if (s->dma_length != 0) { unsigned long vaddr, npages, entry, j; iopte_t *base; vaddr = s->dma_address & IO_PAGE_MASK; npages = iommu_num_pages(s->dma_address, s->dma_length, IO_PAGE_SIZE); iommu_range_free(iommu, vaddr, npages); entry = (vaddr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; base = iommu->page_table + entry; for (j = 0; j < npages; j++) iopte_make_dummy(iommu, base + j); s->dma_address = DMA_ERROR_CODE; s->dma_length = 0; } if (s == outs) break; } spin_unlock_irqrestore(&iommu->lock, flags); return 0; } /* If contexts are being used, they are the same in all of the mappings * we make for a particular SG. 
*/
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	/* Recover the context from the first mapping's IOPTE; bits
	 * above 47 hold the context (see IOPTE_CONSISTENT's encoding).
	 */
	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

/* Tear down a scatterlist mapping: flush streaming buffers if enabled,
 * point every IOPTE back at the dummy page, free the arena ranges and
 * release the shared context.
 */
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		/* dma_length == 0 marks the end of the mapped segments. */
		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make a single streaming mapping visible to the CPU by flushing the
 * streaming buffer covering it.  No-op when streaming is disabled.
 */
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any.
*/ ctx = 0; if (iommu->iommu_ctxflush && strbuf->strbuf_ctxflush) { iopte_t *iopte; iopte = iommu->page_table + ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT); ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL; } /* Step 2: Kick data out of streaming buffers. */ strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); spin_unlock_irqrestore(&iommu->lock, flags); } static void dma_4u_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction) { struct iommu *iommu; struct strbuf *strbuf; unsigned long flags, ctx, npages, i; struct scatterlist *sg, *sgprv; u32 bus_addr; iommu = dev->archdata.iommu; strbuf = dev->archdata.stc; if (!strbuf->strbuf_enabled) return; spin_lock_irqsave(&iommu->lock, flags); /* Step 1: Record the context, if any. */ ctx = 0; if (iommu->iommu_ctxflush && strbuf->strbuf_ctxflush) { iopte_t *iopte; iopte = iommu->page_table + ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT); ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL; } /* Step 2: Kick data out of streaming buffers. 
*/ bus_addr = sglist[0].dma_address & IO_PAGE_MASK; sgprv = NULL; for_each_sg(sglist, sg, nelems, i) { if (sg->dma_length == 0) break; sgprv = sg; } npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) - bus_addr) >> IO_PAGE_SHIFT; strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); spin_unlock_irqrestore(&iommu->lock, flags); } static struct dma_map_ops sun4u_dma_ops = { .alloc_coherent = dma_4u_alloc_coherent, .free_coherent = dma_4u_free_coherent, .map_page = dma_4u_map_page, .unmap_page = dma_4u_unmap_page, .map_sg = dma_4u_map_sg, .unmap_sg = dma_4u_unmap_sg, .sync_single_for_cpu = dma_4u_sync_single_for_cpu, .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, }; struct dma_map_ops *dma_ops = &sun4u_dma_ops; EXPORT_SYMBOL(dma_ops); extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); int dma_supported(struct device *dev, u64 device_mask) { struct iommu *iommu = dev->archdata.iommu; u64 dma_addr_mask = iommu->dma_addr_mask; if (device_mask >= (1UL << 32UL)) return 0; if ((device_mask & dma_addr_mask) == dma_addr_mask) return 1; #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) return pci64_dma_supported(to_pci_dev(dev), device_mask); #endif return 0; } EXPORT_SYMBOL(dma_supported);
gpl-2.0
friendlyarm/linux-4.x.y
arch/x86/pci/amd_bus.c
1713
10128
#include <linux/init.h> #include <linux/pci.h> #include <linux/topology.h> #include <linux/cpu.h> #include <linux/range.h> #include <asm/amd_nb.h> #include <asm/pci_x86.h> #include <asm/pci-direct.h> #include "bus_numa.h" #define AMD_NB_F0_NODE_ID 0x60 #define AMD_NB_F0_UNIT_ID 0x64 #define AMD_NB_F1_CONFIG_MAP_REG 0xe0 #define RANGE_NUM 16 #define AMD_NB_F1_CONFIG_MAP_RANGES 4 struct amd_hostbridge { u32 bus; u32 slot; u32 device; }; /* * IMPORTANT NOTE: * hb_probes[] and early_root_info_init() is in maintenance mode. * It only supports K8, Fam10h, Fam11h, and Fam15h_00h-0fh . * Future processor will rely on information in ACPI. */ static struct amd_hostbridge hb_probes[] __initdata = { { 0, 0x18, 0x1100 }, /* K8 */ { 0, 0x18, 0x1200 }, /* Family10h */ { 0xff, 0, 0x1200 }, /* Family10h */ { 0, 0x18, 0x1300 }, /* Family11h */ { 0, 0x18, 0x1600 }, /* Family15h */ }; static struct pci_root_info __init *find_pci_root_info(int node, int link) { struct pci_root_info *info; /* find the position */ list_for_each_entry(info, &pci_root_infos, list) if (info->node == node && info->link == link) return info; return NULL; } /** * early_root_info_init() * called before pcibios_scan_root and pci_scan_bus * fills the mp_bus_to_cpumask array based according * to the LDT Bus Number Registers found in the northbridge. 
*/ static int __init early_root_info_init(void) { int i; unsigned bus; unsigned slot; int node; int link; int def_node; int def_link; struct pci_root_info *info; u32 reg; u64 start; u64 end; struct range range[RANGE_NUM]; u64 val; u32 address; bool found; struct resource fam10h_mmconf_res, *fam10h_mmconf; u64 fam10h_mmconf_start; u64 fam10h_mmconf_end; if (!early_pci_allowed()) return -1; found = false; for (i = 0; i < ARRAY_SIZE(hb_probes); i++) { u32 id; u16 device; u16 vendor; bus = hb_probes[i].bus; slot = hb_probes[i].slot; id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID); vendor = id & 0xffff; device = (id>>16) & 0xffff; if (vendor != PCI_VENDOR_ID_AMD) continue; if (hb_probes[i].device == device) { found = true; break; } } if (!found) return 0; /* * We should learn topology and routing information from _PXM and * _CRS methods in the ACPI namespace. We extract node numbers * here to work around BIOSes that don't supply _PXM. */ for (i = 0; i < AMD_NB_F1_CONFIG_MAP_RANGES; i++) { int min_bus; int max_bus; reg = read_pci_config(bus, slot, 1, AMD_NB_F1_CONFIG_MAP_REG + (i << 2)); /* Check if that register is enabled for bus range */ if ((reg & 7) != 3) continue; min_bus = (reg >> 16) & 0xff; max_bus = (reg >> 24) & 0xff; node = (reg >> 4) & 0x07; link = (reg >> 8) & 0x03; info = alloc_pci_root_info(min_bus, max_bus, node, link); } /* * The following code extracts routing information for use on old * systems where Linux doesn't automatically use host bridge _CRS * methods (or when the user specifies "pci=nocrs"). * * We only do this through Fam11h, because _CRS should be enough on * newer systems. 
*/ if (boot_cpu_data.x86 > 0x11) return 0; /* get the default node and link for left over res */ reg = read_pci_config(bus, slot, 0, AMD_NB_F0_NODE_ID); def_node = (reg >> 8) & 0x07; reg = read_pci_config(bus, slot, 0, AMD_NB_F0_UNIT_ID); def_link = (reg >> 8) & 0x03; memset(range, 0, sizeof(range)); add_range(range, RANGE_NUM, 0, 0, 0xffff + 1); /* io port resource */ for (i = 0; i < 4; i++) { reg = read_pci_config(bus, slot, 1, 0xc0 + (i << 3)); if (!(reg & 3)) continue; start = reg & 0xfff000; reg = read_pci_config(bus, slot, 1, 0xc4 + (i << 3)); node = reg & 0x07; link = (reg >> 4) & 0x03; end = (reg & 0xfff000) | 0xfff; info = find_pci_root_info(node, link); if (!info) continue; /* not found */ printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n", node, link, start, end); /* kernel only handle 16 bit only */ if (end > 0xffff) end = 0xffff; update_res(info, start, end, IORESOURCE_IO, 1); subtract_range(range, RANGE_NUM, start, end + 1); } /* add left over io port range to def node/link, [0, 0xffff] */ /* find the position */ info = find_pci_root_info(def_node, def_link); if (info) { for (i = 0; i < RANGE_NUM; i++) { if (!range[i].end) continue; update_res(info, range[i].start, range[i].end - 1, IORESOURCE_IO, 1); } } memset(range, 0, sizeof(range)); /* 0xfd00000000-0xffffffffff for HT */ end = cap_resource((0xfdULL<<32) - 1); end++; add_range(range, RANGE_NUM, 0, 0, end); /* need to take out [0, TOM) for RAM*/ address = MSR_K8_TOP_MEM1; rdmsrl(address, val); end = (val & 0xffffff800000ULL); printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end>>20); if (end < (1ULL<<32)) subtract_range(range, RANGE_NUM, 0, end); /* get mmconfig */ fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res); /* need to take out mmconf range */ if (fam10h_mmconf) { printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf); fam10h_mmconf_start = fam10h_mmconf->start; fam10h_mmconf_end = fam10h_mmconf->end; subtract_range(range, RANGE_NUM, fam10h_mmconf_start, 
fam10h_mmconf_end + 1); } else { fam10h_mmconf_start = 0; fam10h_mmconf_end = 0; } /* mmio resource */ for (i = 0; i < 8; i++) { reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3)); if (!(reg & 3)) continue; start = reg & 0xffffff00; /* 39:16 on 31:8*/ start <<= 8; reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3)); node = reg & 0x07; link = (reg >> 4) & 0x03; end = (reg & 0xffffff00); end <<= 8; end |= 0xffff; info = find_pci_root_info(node, link); if (!info) continue; printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]", node, link, start, end); /* * some sick allocation would have range overlap with fam10h * mmconf range, so need to update start and end. */ if (fam10h_mmconf_end) { int changed = 0; u64 endx = 0; if (start >= fam10h_mmconf_start && start <= fam10h_mmconf_end) { start = fam10h_mmconf_end + 1; changed = 1; } if (end >= fam10h_mmconf_start && end <= fam10h_mmconf_end) { end = fam10h_mmconf_start - 1; changed = 1; } if (start < fam10h_mmconf_start && end > fam10h_mmconf_end) { /* we got a hole */ endx = fam10h_mmconf_start - 1; update_res(info, start, endx, IORESOURCE_MEM, 0); subtract_range(range, RANGE_NUM, start, endx + 1); printk(KERN_CONT " ==> [%llx, %llx]", start, endx); start = fam10h_mmconf_end + 1; changed = 1; } if (changed) { if (start <= end) { printk(KERN_CONT " %s [%llx, %llx]", endx ? "and" : "==>", start, end); } else { printk(KERN_CONT "%s\n", endx?"":" ==> none"); continue; } } } update_res(info, cap_resource(start), cap_resource(end), IORESOURCE_MEM, 1); subtract_range(range, RANGE_NUM, start, end + 1); printk(KERN_CONT "\n"); } /* need to take out [4G, TOM2) for RAM*/ /* SYS_CFG */ address = MSR_K8_SYSCFG; rdmsrl(address, val); /* TOP_MEM2 is enabled? 
*/ if (val & (1<<21)) { /* TOP_MEM2 */ address = MSR_K8_TOP_MEM2; rdmsrl(address, val); end = (val & 0xffffff800000ULL); printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end>>20); subtract_range(range, RANGE_NUM, 1ULL<<32, end); } /* * add left over mmio range to def node/link ? * that is tricky, just record range in from start_min to 4G */ info = find_pci_root_info(def_node, def_link); if (info) { for (i = 0; i < RANGE_NUM; i++) { if (!range[i].end) continue; update_res(info, cap_resource(range[i].start), cap_resource(range[i].end - 1), IORESOURCE_MEM, 1); } } list_for_each_entry(info, &pci_root_infos, list) { int busnum; struct pci_root_res *root_res; busnum = info->busn.start; printk(KERN_DEBUG "bus: %pR on node %x link %x\n", &info->busn, info->node, info->link); list_for_each_entry(root_res, &info->resources, list) printk(KERN_DEBUG "bus: %02x %pR\n", busnum, &root_res->res); } return 0; } #define ENABLE_CF8_EXT_CFG (1ULL << 46) static void enable_pci_io_ecs(void *unused) { u64 reg; rdmsrl(MSR_AMD64_NB_CFG, reg); if (!(reg & ENABLE_CF8_EXT_CFG)) { reg |= ENABLE_CF8_EXT_CFG; wrmsrl(MSR_AMD64_NB_CFG, reg); } } static int amd_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { int cpu = (long)hcpu; switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0); break; default: break; } return NOTIFY_OK; } static struct notifier_block amd_cpu_notifier = { .notifier_call = amd_cpu_notify, }; static void __init pci_enable_pci_io_ecs(void) { #ifdef CONFIG_AMD_NB unsigned int i, n; for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) { u8 bus = amd_nb_bus_dev_ranges[i].bus; u8 slot = amd_nb_bus_dev_ranges[i].dev_base; u8 limit = amd_nb_bus_dev_ranges[i].dev_limit; for (; slot < limit; ++slot) { u32 val = read_pci_config(bus, slot, 3, 0); if (!early_is_amd_nb(val)) continue; val = read_pci_config(bus, slot, 3, 0x8c); if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) { val |= 
ENABLE_CF8_EXT_CFG >> 32; write_pci_config(bus, slot, 3, 0x8c, val); } ++n; } } #endif } static int __init pci_io_ecs_init(void) { int cpu; /* assume all cpus from fam10h have IO ECS */ if (boot_cpu_data.x86 < 0x10) return 0; /* Try the PCI method first. */ if (early_pci_allowed()) pci_enable_pci_io_ecs(); cpu_notifier_register_begin(); for_each_online_cpu(cpu) amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE, (void *)(long)cpu); __register_cpu_notifier(&amd_cpu_notifier); cpu_notifier_register_done(); pci_probe |= PCI_HAS_IO_ECS; return 0; } static int __init amd_postcore_init(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return 0; early_root_info_init(); pci_io_ecs_init(); return 0; } postcore_initcall(amd_postcore_init);
gpl-2.0
neighborhoodhacker/kernel-prime
drivers/staging/rts5139/ms.c
2481
106837
/* Driver for Realtek RTS51xx USB card reader * * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * * Author: * wwang (wei_wang@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China * Maintainer: * Edwin Rong (edwin_rong@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China */ #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "debug.h" #include "trace.h" #include "rts51x.h" #include "rts51x_transport.h" #include "rts51x_scsi.h" #include "rts51x_card.h" #include "ms.h" static inline void ms_set_err_code(struct rts51x_chip *chip, u8 err_code) { struct ms_info *ms_card = &(chip->ms_card); ms_card->err_code = err_code; } static inline int ms_check_err_code(struct rts51x_chip *chip, u8 err_code) { struct ms_info *ms_card = &(chip->ms_card); return (ms_card->err_code == err_code); } static int ms_parse_err_code(struct rts51x_chip *chip) { TRACE_RET(chip, STATUS_FAIL); } static int ms_transfer_tpc(struct rts51x_chip *chip, u8 trans_mode, u8 tpc, u8 cnt, u8 cfg) { struct ms_info *ms_card = &(chip->ms_card); int retval; RTS51X_DEBUGP("ms_transfer_tpc: tpc = 0x%x\n", tpc); rts51x_init_cmd(chip); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt); 
rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | trans_mode); rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); rts51x_add_cmd(chip, READ_REG_CMD, MS_TRANS_CFG, 0, 0); retval = rts51x_send_cmd(chip, MODE_CR, 100); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); retval = rts51x_get_rsp(chip, 2, 5000); if (CHECK_MS_TRANS_FAIL(chip, retval)) { rts51x_clear_ms_error(chip); ms_set_err_code(chip, MS_TO_ERROR); TRACE_RET(chip, ms_parse_err_code(chip)); } if (!(tpc & 0x08)) { /* Read Packet */ /* Check CRC16 & Ready Timeout */ if (chip->rsp_buf[1] & MS_CRC16_ERR) { ms_set_err_code(chip, MS_CRC16_ERROR); TRACE_RET(chip, ms_parse_err_code(chip)); } } else { /* Write Packet */ if (CHK_MSPRO(ms_card) && !(chip->rsp_buf[1] & 0x80)) { if (chip->rsp_buf[1] & (MS_INT_ERR | MS_INT_CMDNK)) { ms_set_err_code(chip, MS_CMD_NK); TRACE_RET(chip, ms_parse_err_code(chip)); } } } /* Check Timeout of Ready Signal */ if (chip->rsp_buf[1] & MS_RDY_TIMEOUT) { rts51x_clear_ms_error(chip); ms_set_err_code(chip, MS_TO_ERROR); TRACE_RET(chip, ms_parse_err_code(chip)); } return STATUS_SUCCESS; } int ms_transfer_data(struct rts51x_chip *chip, u8 trans_mode, u8 tpc, u16 sec_cnt, u8 cfg, int mode_2k, int use_sg, void *buf, int buf_len) { struct ms_info *ms_card = &(chip->ms_card); int retval; u8 val, err_code = 0, flag = 0; enum dma_data_direction dir; unsigned int pipe; if (!buf || !buf_len) TRACE_RET(chip, STATUS_FAIL); if (trans_mode == MS_TM_AUTO_READ) { pipe = RCV_BULK_PIPE(chip); dir = DMA_FROM_DEVICE; flag = MODE_CDIR; err_code = MS_FLASH_READ_ERROR; } else if (trans_mode == MS_TM_AUTO_WRITE) { pipe = SND_BULK_PIPE(chip); dir = DMA_TO_DEVICE; flag = MODE_CDOR; err_code = MS_FLASH_WRITE_ERROR; } else { TRACE_RET(chip, STATUS_FAIL); } rts51x_init_cmd(chip); rts51x_add_cmd(chip, WRITE_REG_CMD, 
MS_TPC, 0xFF, tpc); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_H, 0xFF, (u8) (sec_cnt >> 8)); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF, (u8) sec_cnt); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); if (mode_2k) rts51x_add_cmd(chip, WRITE_REG_CMD, MS_CFG, MS_2K_SECTOR_MODE, MS_2K_SECTOR_MODE); else rts51x_add_cmd(chip, WRITE_REG_CMD, MS_CFG, MS_2K_SECTOR_MODE, 0); rts51x_trans_dma_enable(dir, chip, sec_cnt * 512, DMA_512); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | trans_mode); rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); retval = rts51x_send_cmd(chip, flag | STAGE_MS_STATUS, 100); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); retval = rts51x_transfer_data_rcc(chip, pipe, buf, buf_len, use_sg, NULL, 15000, flag); if (retval != STATUS_SUCCESS) { ms_set_err_code(chip, err_code); rts51x_clear_ms_error(chip); TRACE_RET(chip, retval); } retval = rts51x_get_rsp(chip, 3, 15000); if (CHECK_MS_TRANS_FAIL(chip, retval)) { ms_set_err_code(chip, err_code); rts51x_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } ms_card->last_rw_int = val = chip->rsp_buf[1]; if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT)) TRACE_RET(chip, STATUS_FAIL); return STATUS_SUCCESS; } int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len) { struct ms_info *ms_card = &(chip->ms_card); int retval, i; if (!data || (data_len < cnt)) TRACE_RET(chip, STATUS_ERROR); rts51x_init_cmd(chip); for (i = 0; i < cnt; i++) { rts51x_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, data[i]); } if (cnt % 2) rts51x_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, 0xFF); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); 
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES); rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); retval = rts51x_send_cmd(chip, MODE_CR, 100); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); retval = rts51x_get_rsp(chip, 1, 5000); if (CHECK_MS_TRANS_FAIL(chip, retval)) { u8 val = 0; rts51x_ep0_read_register(chip, MS_TRANS_CFG, &val); RTS51X_DEBUGP("MS_TRANS_CFG: 0x%02x\n", val); rts51x_clear_ms_error(chip); if (!(tpc & 0x08)) { /* Read Packet */ /* Check CRC16 & Ready Timeout */ if (val & MS_CRC16_ERR) { ms_set_err_code(chip, MS_CRC16_ERROR); TRACE_RET(chip, ms_parse_err_code(chip)); } } else { /* Write Packet */ if (CHK_MSPRO(ms_card) && !(val & 0x80)) { if (val & (MS_INT_ERR | MS_INT_CMDNK)) { ms_set_err_code(chip, MS_CMD_NK); TRACE_RET(chip, ms_parse_err_code(chip)); } } } /* Check Timeout of Ready Signal */ if (val & MS_RDY_TIMEOUT) { ms_set_err_code(chip, MS_TO_ERROR); TRACE_RET(chip, ms_parse_err_code(chip)); } ms_set_err_code(chip, MS_TO_ERROR); TRACE_RET(chip, ms_parse_err_code(chip)); } return STATUS_SUCCESS; } int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len) { struct ms_info *ms_card = &(chip->ms_card); int retval, i; if (!data) TRACE_RET(chip, STATUS_ERROR); rts51x_init_cmd(chip); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_READ_BYTES); rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); for (i = 0; i < data_len - 1; i++) rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0); if (data_len % 2) rts51x_add_cmd(chip, 
READ_REG_CMD, PPBUF_BASE2 + data_len, 0, 0); else rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len - 1, 0, 0); retval = rts51x_send_cmd(chip, MODE_CR, 100); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); retval = rts51x_get_rsp(chip, data_len + 1, 5000); if (CHECK_MS_TRANS_FAIL(chip, retval)) { u8 val = 0; rts51x_ep0_read_register(chip, MS_TRANS_CFG, &val); RTS51X_DEBUGP("MS_TRANS_CFG: 0x%02x\n", val); rts51x_clear_ms_error(chip); if (!(tpc & 0x08)) { /* Read Packet */ /* Check CRC16 & Ready Timeout */ if (val & MS_CRC16_ERR) { ms_set_err_code(chip, MS_CRC16_ERROR); TRACE_RET(chip, ms_parse_err_code(chip)); } } else { /* Write Packet */ if (CHK_MSPRO(ms_card) && !(val & 0x80)) { if (val & (MS_INT_ERR | MS_INT_CMDNK)) { ms_set_err_code(chip, MS_CMD_NK); TRACE_RET(chip, ms_parse_err_code(chip)); } } } /* Check Timeout of Ready Signal */ if (val & MS_RDY_TIMEOUT) { ms_set_err_code(chip, MS_TO_ERROR); TRACE_RET(chip, ms_parse_err_code(chip)); } ms_set_err_code(chip, MS_TO_ERROR); TRACE_RET(chip, ms_parse_err_code(chip)); } rts51x_read_rsp_buf(chip, 1, data, data_len); return STATUS_SUCCESS; } int ms_set_rw_reg_addr(struct rts51x_chip *chip, u8 read_start, u8 read_cnt, u8 write_start, u8 write_cnt) { int retval, i; u8 data[4]; data[0] = read_start; data[1] = read_cnt; data[2] = write_start; data[3] = write_cnt; for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_write_bytes(chip, SET_RW_REG_ADRS, 4, NO_WAIT_INT, data, 4); if (retval == STATUS_SUCCESS) return STATUS_SUCCESS; rts51x_clear_ms_error(chip); } TRACE_RET(chip, STATUS_FAIL); } static int ms_send_cmd(struct rts51x_chip *chip, u8 cmd, u8 cfg) { u8 data[2]; data[0] = cmd; data[1] = 0; return ms_write_bytes(chip, PRO_SET_CMD, 1, cfg, data, 1); } static int ms_set_cmd(struct rts51x_chip *chip, u8 read_start, u8 read_count, u8 write_start, u8 write_count, u8 cmd, u8 cfg, u8 *data, int data_len, u8 *int_stat) { int retval, i; u8 val; if (!data || (data_len <= 0) || (data_len > 128)) { 
RTS51X_DEBUGP("ms_set_cmd (data_len = %d)\n", data_len); TRACE_RET(chip, STATUS_FAIL); } retval = ms_set_rw_reg_addr(chip, read_start, read_count, write_start, write_count); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_write_bytes(chip, WRITE_REG, write_count, NO_WAIT_INT, data, data_len); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) TRACE_RET(chip, STATUS_FAIL); ms_set_err_code(chip, MS_NO_ERROR); for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_send_cmd(chip, cmd, WAIT_INT); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) TRACE_RET(chip, STATUS_FAIL); /* GET_INT Register */ ms_set_err_code(chip, MS_NO_ERROR); retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); if (int_stat) *int_stat = val; return STATUS_SUCCESS; } #ifdef MS_SPEEDUP static int ms_auto_set_cmd(struct rts51x_chip *chip, u8 read_start, u8 read_count, u8 write_start, u8 write_count, u8 cmd, u8 cfg, u8 *data, int data_len, u8 *int_stat) { int retval; int i; if (!data || (data_len <= 0) || (data_len > 128)) { RTS51X_DEBUGP("ms_auto_set_cmd (data_len = %d)\n", data_len); TRACE_RET(chip, STATUS_FAIL); } rts51x_init_cmd(chip); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_READ_START, 0xFF, read_start); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_READ_COUNT, 0xFF, read_count); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_WRITE_START, 0xFF, write_start); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_WRITE_COUNT, 0xFF, write_count); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_COMMAND, 0xFF, cmd); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); for (i = 0; i < data_len; i++) { rts51x_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, data[i]); } rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_SET_CMD); rts51x_add_cmd(chip, CHECK_REG_CMD, 
MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); retval = rts51x_send_cmd(chip, MODE_CR | STAGE_MS_STATUS, 100); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); retval = rts51x_get_rsp(chip, 3, 5000); if (CHECK_MS_TRANS_FAIL(chip, retval)) { rts51x_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } if (int_stat) *int_stat = chip->rsp_buf[2]; return STATUS_SUCCESS; } #endif static int ms_set_init_para(struct rts51x_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; if (CHK_HG8BIT(ms_card)) { if (chip->asic_code) ms_card->ms_clock = chip->option.asic_ms_hg_clk; else ms_card->ms_clock = chip->option.fpga_ms_hg_clk; } else if (CHK_MSPRO(ms_card) || CHK_MS4BIT(ms_card)) { if (chip->asic_code) ms_card->ms_clock = chip->option.asic_ms_4bit_clk; else ms_card->ms_clock = chip->option.fpga_ms_4bit_clk; } else { if (chip->asic_code) ms_card->ms_clock = 38; else ms_card->ms_clock = CLK_40; } retval = switch_clock(chip, ms_card->ms_clock); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); retval = rts51x_select_card(chip, MS_CARD); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); return STATUS_SUCCESS; } int ms_switch_clock(struct rts51x_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; retval = rts51x_select_card(chip, MS_CARD); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); retval = switch_clock(chip, ms_card->ms_clock); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); return STATUS_SUCCESS; } static void ms_pull_ctl_disable(struct rts51x_chip *chip) { if (CHECK_PKG(chip, LQFP48)) { rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5); } else { rts51x_add_cmd(chip, 
WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59); } } static void ms_pull_ctl_enable(struct rts51x_chip *chip) { if (CHECK_PKG(chip, LQFP48)) { rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5); } else { rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59); } } static int ms_prepare_reset(struct rts51x_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; ms_card->ms_type = 0; ms_card->check_ms_flow = 0; ms_card->switch_8bit_fail = 0; ms_card->delay_write.delay_write_flag = 0; ms_card->pro_under_formatting = 0; rts51x_init_cmd(chip); if (chip->asic_code) { ms_pull_ctl_enable(chip); } else { rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, FPGA_MS_PULL_CTL_BIT | 0x20, 0); } /* Tri-state MS output */ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0); if (!chip->option.FT2_fast_mode) { rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK, POWER_OFF); } retval = rts51x_send_cmd(chip, MODE_C, 100); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); if 
(!chip->option.FT2_fast_mode) { wait_timeout(250); rts51x_card_power_on(chip, MS_CARD); wait_timeout(150); #ifdef SUPPORT_OCP rts51x_get_card_status(chip, &(chip->card_status)); /* get OCP status */ chip->ocp_stat = (chip->card_status >> 4) & 0x03; if (chip->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) { RTS51X_DEBUGP("Over current, OCPSTAT is 0x%x\n", chip->ocp_stat); TRACE_RET(chip, STATUS_FAIL); } #endif } rts51x_init_cmd(chip); /* Enable MS Output */ rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, MS_OUTPUT_EN); /* Reset Registers */ if (chip->asic_code) rts51x_add_cmd(chip, WRITE_REG_CMD, MS_CFG, 0xFF, SAMPLE_TIME_RISING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1); else rts51x_add_cmd(chip, WRITE_REG_CMD, MS_CFG, 0xFF, SAMPLE_TIME_FALLING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1); rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, NO_WAIT_INT | NO_AUTO_READ_INT_REG); retval = rts51x_send_cmd(chip, MODE_C, 100); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); return ms_set_init_para(chip); } static int ms_identify_media_type(struct rts51x_chip *chip, int switch_8bit_bus) { struct ms_info *ms_card = &(chip->ms_card); int retval, i; u8 val; retval = ms_set_rw_reg_addr(chip, Pro_StatusReg, 6, SystemParm, 1); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); /* Get Register form MS-PRO card */ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG, 6, NO_WAIT_INT); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) TRACE_RET(chip, STATUS_FAIL); RTS51X_READ_REG(chip, PPBUF_BASE2 + 2, &val); RTS51X_DEBUGP("Type register: 0x%x\n", val); if (val != 0x01) { if (val != 0x02) ms_card->check_ms_flow = 1; TRACE_RET(chip, STATUS_FAIL); } /* Category Register */ RTS51X_READ_REG(chip, PPBUF_BASE2 + 4, &val); RTS51X_DEBUGP("Category register: 0x%x\n", val); if (val != 0) { ms_card->check_ms_flow = 1; TRACE_RET(chip, STATUS_FAIL); } /* Class Register */ 
RTS51X_READ_REG(chip, PPBUF_BASE2 + 5, &val); RTS51X_DEBUGP("Class register: 0x%x\n", val); if (val == 0) { RTS51X_READ_REG(chip, PPBUF_BASE2, &val); if (val & WRT_PRTCT) chip->card_wp |= MS_CARD; else chip->card_wp &= ~MS_CARD; } else if ((val == 0x01) || (val == 0x02) || (val == 0x03)) { chip->card_wp |= MS_CARD; } else { ms_card->check_ms_flow = 1; TRACE_RET(chip, STATUS_FAIL); } ms_card->ms_type |= TYPE_MSPRO; /* Check MSPro-HG Card, use IF Mode Register to distinguish */ RTS51X_READ_REG(chip, PPBUF_BASE2 + 3, &val); RTS51X_DEBUGP("IF Mode register: 0x%x\n", val); if (val == 0) { ms_card->ms_type &= 0x0F; } else if (val == 7) { if (switch_8bit_bus) ms_card->ms_type |= MS_HG; else ms_card->ms_type &= 0x0F; } else { TRACE_RET(chip, STATUS_FAIL); } /* end Procedure to identify Media Type */ return STATUS_SUCCESS; } static int ms_confirm_cpu_startup(struct rts51x_chip *chip) { int retval, i, k; u8 val; /* Confirm CPU StartUp */ k = 0; do { if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST) TRACE_RET(chip, STATUS_FAIL); for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) TRACE_RET(chip, STATUS_FAIL); if (k > 100) TRACE_RET(chip, STATUS_FAIL); k++; wait_timeout(100); } while (!(val & INT_REG_CED)); for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) TRACE_RET(chip, STATUS_FAIL); if (val & INT_REG_ERR) { if (val & INT_REG_CMDNK) { /* CMDNK = 1 */ chip->card_wp |= (MS_CARD); } else { /* CMDNK = 0 */ TRACE_RET(chip, STATUS_FAIL); } } /*-- end confirm CPU startup */ return STATUS_SUCCESS; } static int ms_switch_parallel_bus(struct rts51x_chip *chip) { int retval, i; u8 data[2]; data[0] = PARALLEL_4BIT_IF; data[1] = 0; for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_write_bytes(chip, WRITE_REG, 1, NO_WAIT_INT, data, 2); if 
	    (retval == STATUS_SUCCESS)
			break;
	}
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	return STATUS_SUCCESS;
}

/*
 * Switch an MS Pro-HG card to the 8-bit parallel interface:
 * write PARALLEL_8BIT_IF to the card's interface register (with retries),
 * reprogram the host MS_CFG register for 8-bit bus / falling-edge sampling,
 * then re-apply the transfer parameters for the new bus width.
 * Returns STATUS_SUCCESS or a TRACE_RET error code.
 */
static int ms_switch_8bit_bus(struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u8 data[2];

	data[0] = PARALLEL_8BIT_IF;
	data[1] = 0;
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, WRITE_REG, 1, NO_WAIT_INT,
					data, 2);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, STATUS_FAIL);

	RTS51X_WRITE_REG(chip, MS_CFG, 0x98,
			 MS_BUS_WIDTH_8 | SAMPLE_TIME_FALLING);
	ms_card->ms_type |= MS_8BIT;

	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	/* Read GET_INT several times over the new bus; any failure aborts
	 * (this loop is not a retry — every iteration must succeed). */
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT,
					 1, NO_WAIT_INT);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);
	}

	return STATUS_SUCCESS;
}

/*
 * Full reset sequence for an MS Pro card: up to 3 attempts of
 * prepare-reset / media identification / CPU startup confirmation /
 * switch to 4-bit parallel bus, then (for HG media, when requested)
 * an optional upgrade to the 8-bit bus.  On 8-bit switch failure,
 * switch_8bit_fail is latched so the caller can retry in 4-bit mode.
 */
static int ms_pro_reset_flow(struct rts51x_chip *chip, int switch_8bit_bus)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;

	for (i = 0; i < 3; i++) {
		retval = ms_prepare_reset(chip);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		retval = ms_identify_media_type(chip, switch_8bit_bus);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		retval = ms_confirm_cpu_startup(chip);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		retval = ms_switch_parallel_bus(chip);
		if (retval != STATUS_SUCCESS) {
			/* Retry unless the card is gone */
			if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST)
				TRACE_RET(chip, STATUS_FAIL);
			continue;
		} else {
			break;
		}
	}

	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	/* 4-bit bus, push time on odd clock edge */
	RTS51X_WRITE_REG(chip, MS_CFG, 0x18, MS_BUS_WIDTH_4);
	RTS51X_WRITE_REG(chip, MS_CFG, PUSH_TIME_ODD, PUSH_TIME_ODD);

	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (CHK_MSHG(ms_card) && switch_8bit_bus) {
		retval = ms_switch_8bit_bus(chip);
		if (retval != STATUS_SUCCESS) {
			ms_card->switch_8bit_fail = 1;
			TRACE_RET(chip, retval);
		}
	}

	return STATUS_SUCCESS;
}

#ifdef XC_POWERCLASS
/*
 * Request a new power class from an MS XC card: write the mode byte to
 * the parameter registers, issue XC_CHG_POWER, and fail if the card
 * reports CMDNK or ERR in MS_TRANS_CFG.
 */
static int msxc_change_power(struct rts51x_chip *chip, u8 mode)
{
	int retval;
	u8 buf[6];

	rts51x_ms_cleanup_work(chip);

	/* Set Parameter Register */
	retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	buf[0] = 0;
	buf[1] = mode;
	buf[2] = 0;
	buf[3] = 0;
	buf[4] = 0;
	buf[5] = 0;

	retval = ms_write_bytes(chip, PRO_WRITE_REG, 6, NO_WAIT_INT, buf, 6);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	retval = ms_send_cmd(chip, XC_CHG_POWER, WAIT_INT);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	RTS51X_READ_REG(chip, MS_TRANS_CFG, buf);
	if (buf[0] & (MS_INT_CMDNK | MS_INT_ERR))
		TRACE_RET(chip, STATUS_FAIL);

	return STATUS_SUCCESS;
}
#endif

/*
 * Read and validate the MS Pro attribute information area (up to 32 KB):
 * locate the System Information entry (and, with SUPPORT_PCGL_1P18, the
 * Model Name entry) in the entry table, sanity-check addresses/sizes,
 * then extract class code, device type, sub class and the card capacity.
 * Side effects: sets ms_card->raw_sys_info (and raw_model_name), may set
 * MS_XC in ms_type, may set the write-protect flag, and fills in
 * chip->capacity for the MS LUN.
 */
static int ms_read_attribute_info(struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u8 val, *buf, class_code, device_type, sub_class, data[16];
	u16 total_blk = 0, blk_size = 0;
#ifdef SUPPORT_MSXC
	u32 xc_total_blk = 0, xc_blk_size = 0;
#endif
	u32 sys_info_addr = 0, sys_info_size;
#ifdef SUPPORT_PCGL_1P18
	u32 model_name_addr = 0, model_name_size;
	int found_sys_info = 0, found_model_name = 0;
#endif

	retval = ms_set_rw_reg_addr(chip, Pro_IntReg, 2, Pro_SystemParm, 7);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (CHK_MS8BIT(ms_card))
		data[0] = PARALLEL_8BIT_IF;
	else
		data[0] = PARALLEL_4BIT_IF;
	data[1] = 0;
	data[2] = 0x40;
	data[3] = 0;
	data[4] = 0;
	data[5] = 0;
	/* Start address 0 */
	data[6] = 0;
	data[7] = 0;

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, PRO_WRITE_REG, 7, NO_WAIT_INT,
					data, 8);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	buf = kmalloc(64 * 512, GFP_KERNEL);
	if (buf == NULL)
		TRACE_RET(chip, STATUS_NOMEM);

	/* Issue READ_ATRB and pull in the first 64 sectors of attribute data */
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_send_cmd(chip, PRO_READ_ATRB, WAIT_INT);
		if (retval != STATUS_SUCCESS)
			continue;

		retval = rts51x_read_register(chip, MS_TRANS_CFG, &val);
		if (retval != STATUS_SUCCESS) {
			kfree(buf);
			TRACE_RET(chip, STATUS_FAIL);
		}
		if (!(val & MS_INT_BREQ)) {
			kfree(buf);
			TRACE_RET(chip, STATUS_FAIL);
		}
		retval = ms_transfer_data(chip, MS_TM_AUTO_READ,
					  PRO_READ_LONG_DATA, 0x40, WAIT_INT,
					  0, 0, buf, 64 * 512);
		if (retval == STATUS_SUCCESS)
			break;
		else
			rts51x_clear_ms_error(chip);
	}
	if (retval != STATUS_SUCCESS) {
		kfree(buf);
		TRACE_RET(chip, retval);
	}

	/* Drain any remaining sectors the card still wants to hand over
	 * (up to 1024), discarding them, until CED or BREQ deasserts. */
	i = 0;
	do {
		retval = rts51x_read_register(chip, MS_TRANS_CFG, &val);
		if (retval != STATUS_SUCCESS) {
			kfree(buf);
			TRACE_RET(chip, retval);
		}

		if ((val & MS_INT_CED) || !(val & MS_INT_BREQ))
			break;

		retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
					 PRO_READ_LONG_DATA, 0, WAIT_INT);
		if (retval != STATUS_SUCCESS) {
			kfree(buf);
			TRACE_RET(chip, retval);
		}

		i++;
	} while (i < 1024);
	if (retval != STATUS_SUCCESS) {
		kfree(buf);
		TRACE_RET(chip, retval);
	}

	/* NOTE(review): with &&, the signature check only fails when BOTH
	 * bytes are wrong; an || looks intended — confirm against spec
	 * before changing, the shipped driver behaves this way too. */
	if ((buf[0] != 0xa5) && (buf[1] != 0xc3)) {
		/* Signature code is wrong */
		kfree(buf);
		TRACE_RET(chip, STATUS_FAIL);
	}
	if ((buf[4] < 1) || (buf[4] > 12)) {
		/* buf[4] = number of entry-table entries */
		kfree(buf);
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Walk the entry table: 12 bytes per entry starting at offset 16;
	 * entry layout is addr(4) size(4) id(1). */
	for (i = 0; i < buf[4]; i++) {
		int cur_addr_off = 16 + i * 12;
#ifdef SUPPORT_MSXC
		if ((buf[cur_addr_off + 8] == 0x10) ||
		    (buf[cur_addr_off + 8] == 0x13)) {
#else
		if (buf[cur_addr_off + 8] == 0x10) {
#endif
			sys_info_addr = ((u32) buf[cur_addr_off + 0] << 24) |
				((u32) buf[cur_addr_off + 1] << 16) |
				((u32) buf[cur_addr_off + 2] << 8) |
				buf[cur_addr_off + 3];
			sys_info_size = ((u32) buf[cur_addr_off + 4] << 24) |
				((u32) buf[cur_addr_off + 5] << 16) |
				((u32) buf[cur_addr_off + 6] << 8) |
				buf[cur_addr_off + 7];
			RTS51X_DEBUGP("sys_info_addr = 0x%x,"
				      "sys_info_size = 0x%x\n",
				      sys_info_addr, sys_info_size);
			if (sys_info_size != 96) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}
			if (sys_info_addr < 0x1A0) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}
			/* Must lie inside the 32 KB we read */
			if ((sys_info_size + sys_info_addr) > 0x8000) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}
#ifdef SUPPORT_MSXC
			if (buf[cur_addr_off + 8] == 0x13)
				ms_card->ms_type |= MS_XC;
#endif
#ifdef SUPPORT_PCGL_1P18
			found_sys_info = 1;
#else
			break;
#endif
		}
#ifdef SUPPORT_PCGL_1P18
		if (buf[cur_addr_off + 8] == 0x15) {
			model_name_addr = ((u32) buf[cur_addr_off + 0] << 24) |
				((u32) buf[cur_addr_off + 1] << 16) |
				((u32) buf[cur_addr_off + 2] << 8) |
				buf[cur_addr_off + 3];
			model_name_size = ((u32) buf[cur_addr_off + 4] << 24) |
				((u32) buf[cur_addr_off + 5] << 16) |
				((u32) buf[cur_addr_off + 6] << 8) |
				buf[cur_addr_off + 7];
			RTS51X_DEBUGP("model_name_addr = 0x%x,"
				      "model_name_size = 0x%x\n",
				      model_name_addr, model_name_size);
			if (model_name_size != 48) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}
			if (model_name_addr < 0x1A0) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}
			if ((model_name_size + model_name_addr) > 0x8000) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}
			found_model_name = 1;
		}

		if (found_sys_info && found_model_name)
			break;
#endif
	}
	if (i == buf[4]) {
		/* No System Information entry found */
		kfree(buf);
		TRACE_RET(chip, STATUS_FAIL);
	}

	class_code = buf[sys_info_addr + 0];
	device_type = buf[sys_info_addr + 56];
	sub_class = buf[sys_info_addr + 46];
#ifdef SUPPORT_MSXC
	if (CHK_MSXC(ms_card)) {
		xc_total_blk = ((u32) buf[sys_info_addr + 6] << 24) |
			((u32) buf[sys_info_addr + 7] << 16) |
			((u32) buf[sys_info_addr + 8] << 8) |
			buf[sys_info_addr + 9];
		xc_blk_size = ((u32) buf[sys_info_addr + 32] << 24) |
			((u32) buf[sys_info_addr + 33] << 16) |
			((u32) buf[sys_info_addr + 34] << 8) |
			buf[sys_info_addr + 35];
		RTS51X_DEBUGP("xc_total_blk = 0x%x, xc_blk_size = 0x%x\n",
			      xc_total_blk, xc_blk_size);
	} else {
		total_blk = ((u16) buf[sys_info_addr + 6] << 8) |
			buf[sys_info_addr + 7];
		blk_size = ((u16) buf[sys_info_addr + 2] << 8) |
			buf[sys_info_addr + 3];
		RTS51X_DEBUGP("total_blk = 0x%x, blk_size = 0x%x\n",
			      total_blk, blk_size);
	}
#else
	total_blk = ((u16) buf[sys_info_addr + 6] << 8) |
		buf[sys_info_addr + 7];
	blk_size = ((u16) buf[sys_info_addr + 2] << 8) |
		buf[sys_info_addr + 3];
	RTS51X_DEBUGP("total_blk = 0x%x, blk_size = 0x%x\n",
		      total_blk, blk_size);
#endif
	RTS51X_DEBUGP("class_code = 0x%x, device_type = 0x%x,"
		      "sub_class = 0x%x\n",
		      class_code, device_type, sub_class);

	memcpy(ms_card->raw_sys_info, buf + sys_info_addr, 96);
#ifdef SUPPORT_PCGL_1P18
	memcpy(ms_card->raw_model_name, buf + model_name_addr, 48);
#endif

	kfree(buf);

	/* Confirm System Information */
#ifdef SUPPORT_MSXC
	if (CHK_MSXC(ms_card)) {
		if (class_code != 0x03)
			TRACE_RET(chip, STATUS_FAIL);
	} else {
		if (class_code != 0x02)
			TRACE_RET(chip, STATUS_FAIL);
	}
#else
	if (class_code != 0x02)
		TRACE_RET(chip, STATUS_FAIL);
#endif
	/* device_type 1/2/3 => read-only media; anything else is invalid */
	if (device_type != 0x00) {
		if ((device_type == 0x01) || (device_type == 0x02) ||
		    (device_type == 0x03))
			chip->card_wp |= MS_CARD;
		else
			TRACE_RET(chip, STATUS_FAIL);
	}
	if (sub_class & 0xC0)
		TRACE_RET(chip, STATUS_FAIL);

	RTS51X_DEBUGP("class_code: 0x%x, device_type: 0x%x, sub_class: 0x%x\n",
		      class_code, device_type, sub_class);

#ifdef SUPPORT_MSXC
	if (CHK_MSXC(ms_card)) {
		chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity =
			xc_total_blk * xc_blk_size;
	} else {
		chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity =
			total_blk * blk_size;
	}
#else
	chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity =
		total_blk * blk_size;
#endif

	return STATUS_SUCCESS;
}

#ifdef SUPPORT_MAGIC_GATE
int mg_set_tpc_para_sub(struct rts51x_chip *chip, int type, u8 mg_entry_num);
#endif

/*
 * Top-level MS Pro reset: run the reset flow (falling back to 4-bit if the
 * 8-bit switch failed), read the attribute area, optionally lower the XC
 * power class (retrying the whole flow on failure), set up MagicGate TPC
 * parameters when supported, and record the resulting bus width.
 */
static int reset_ms_pro(struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
#ifdef XC_POWERCLASS
	u8 change_power_class = 2;
#endif

#ifdef XC_POWERCLASS
Retry:
#endif
	retval = ms_pro_reset_flow(chip, 1);
	if (retval != STATUS_SUCCESS) {
		if (ms_card->switch_8bit_fail) {
			/* Retry once without the 8-bit bus upgrade */
			retval = ms_pro_reset_flow(chip, 0);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, retval);
		} else {
			TRACE_RET(chip, retval);
		}
	}

	retval = ms_read_attribute_info(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);
#ifdef XC_POWERCLASS
	if (CHK_HG8BIT(ms_card))
		change_power_class = 0;

	if (change_power_class && CHK_MSXC(ms_card)) {
		/* Power class mode advertised in sys info byte 46, bits 4:3 */
		u8 power_class_mode = (ms_card->raw_sys_info[46] & 0x18) >> 3;
		RTS51X_DEBUGP("power_class_mode = 0x%x", power_class_mode);
		if (change_power_class > power_class_mode)
			change_power_class = power_class_mode;
		if (change_power_class) {
			retval = msxc_change_power(chip, change_power_class);
			if (retval != STATUS_SUCCESS) {
				/* Step the class down and redo the reset */
				change_power_class--;
				goto Retry;
			}
		}
	}
#endif

#ifdef SUPPORT_MAGIC_GATE
	retval = mg_set_tpc_para_sub(chip, 0, 0);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);
#endif

	if (CHK_HG8BIT(ms_card))
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 8;
	else
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;

	return STATUS_SUCCESS;
}

/*
 * Read StatusReg0/1 from the card and fail (flagging MS_FLASH_READ_ERROR)
 * if any uncorrectable-error bit (UCDT/UCEX/UCFG) is set in StatusReg1.
 */
static int ms_read_status_reg(struct rts51x_chip *chip)
{
	int retval;
	u8 val[2];

	retval = ms_set_rw_reg_addr(chip, StatusReg0, 2, 0, 0);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	retval = ms_read_bytes(chip, READ_REG, 2, NO_WAIT_INT, val, 2);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (val[1] & (STS_UCDT | STS_UCEX | STS_UCFG)) {
		ms_set_err_code(chip, MS_FLASH_READ_ERROR);
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * Check whether the given physical block is a boot block by reading its
 * extra (redundant) data for page 0 and testing the BLOCK_OK and
 * NOT_BOOT_BLOCK flags.
 */
static int ms_check_boot_block(struct rts51x_chip *chip, u16 block_addr)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	u8 extra[MS_EXTRA_SIZE], data[10], val = 0;

	if (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		data[0] = 0x88;
	} else {
		/* Serial interface */
		data[0] = 0x80;
	}
	/* Block Address */
	data[1] = 0;
	data[2] = (u8) (block_addr >> 8);
	data[3] = (u8) block_addr;
	/* Page Number / Extra data access mode */
	data[4] = 0x40;
	data[5] = 0;

	retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
			    6, BLOCK_READ, WAIT_INT, data, 6, &val);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (val & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			/* Correctable error: clear status, re-point regs */
			retval = ms_read_status_reg(chip);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, retval);
			retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
						    MS_EXTRA_SIZE,
						    SystemParm, 6);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, retval);
		}
	}

	retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT,
			       extra, MS_EXTRA_SIZE);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (!(extra[0] & BLOCK_OK) || (extra[1] & NOT_BOOT_BLOCK))
		TRACE_RET(chip, STATUS_FAIL);

	return STATUS_SUCCESS;
}

/*
 * Read the extra (redundant) data of one page into buf (at most
 * MS_EXTRA_SIZE bytes; buf may be NULL to discard).
 */
static int ms_read_extra_data(struct rts51x_chip *chip,
			      u16 block_addr, u8 page_num,
			      u8 *buf, int buf_len)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	u8 val = 0, data[10];

	if (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		data[0] = 0x88;
	} else {
		/* Serial interface */
		data[0] = 0x80;
	}
	/* Block Address */
	data[1] = 0;
	data[2] = (u8) (block_addr >> 8);
	data[3] = (u8) block_addr;
	/* Page Number / Extra data access mode */
	data[4] = 0x40;
	data[5] = page_num;

#ifdef MS_SPEEDUP
	retval = ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE,
				 SystemParm, 6, BLOCK_READ, WAIT_INT,
				 data, 6, &val);
#else
	retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
			    6, BLOCK_READ, WAIT_INT, data, 6, &val);
#endif
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (val & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			retval = ms_read_status_reg(chip);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, retval);
			retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
						    MS_EXTRA_SIZE,
						    SystemParm, 6);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, retval);
		}
	}

	retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT,
			       data, MS_EXTRA_SIZE);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (buf && buf_len) {
		if (buf_len > MS_EXTRA_SIZE)
			buf_len = MS_EXTRA_SIZE;
		memcpy(buf, data, buf_len);
	}

	return STATUS_SUCCESS;
}

/*
 * Write the extra (redundant) data of one page from buf
 * (buf must hold at least MS_EXTRA_SIZE bytes).
 */
static int ms_write_extra_data(struct rts51x_chip *chip,
			       u16 block_addr, u8 page_num,
			       u8 *buf, int buf_len)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u8 val = 0, data[16];

	if (!buf || (buf_len < MS_EXTRA_SIZE))
		TRACE_RET(chip, STATUS_FAIL);

	/* Write REG */
	if
	    (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		data[0] = 0x88;
	} else {
		/* Serial interface */
		data[0] = 0x80;
	}
	/* Block Address */
	data[1] = 0;
	data[2] = (u8) (block_addr >> 8);
	data[3] = (u8) block_addr;
	/* Page Number / Extra data access mode */
	data[4] = 0x40;
	data[5] = page_num;
	/* Extra data payload follows the 6 parameter bytes */
	for (i = 6; i < MS_EXTRA_SIZE + 6; i++)
		data[i] = buf[i - 6];

#ifdef MS_SPEEDUP
	retval = ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE,
				 SystemParm, 6 + MS_EXTRA_SIZE,
				 BLOCK_WRITE, WAIT_INT, data, 16, &val);
#else
	retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
			    6 + MS_EXTRA_SIZE, BLOCK_WRITE, WAIT_INT,
			    data, 16, &val);
#endif
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (val & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}

/*
 * Issue a single-page BLOCK_READ for (block_addr, page_num) and leave the
 * page data in the chip's page buffer; the data itself is fetched by the
 * trailing READ_PAGE_DATA transfer.  Errors are reported via the INT
 * register bits (CMDNK / CED / ERR / BREQ) and the chip error code.
 */
static int ms_read_page(struct rts51x_chip *chip, u16 block_addr, u8 page_num)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	u8 val = 0, data[6];

	if (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		data[0] = 0x88;
	} else {
		/* Serial interface */
		data[0] = 0x80;
	}
	/* Block Address */
	data[1] = 0;
	data[2] = (u8) (block_addr >> 8);
	data[3] = (u8) block_addr;
	/* Page Number / Single page access mode */
	data[4] = 0x20;
	data[5] = page_num;

	retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm,
			    6, BLOCK_READ, WAIT_INT, data, 6, &val);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (val & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			if (!(val & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_FLASH_READ_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
			retval = ms_read_status_reg(chip);
			if (retval != STATUS_SUCCESS)
				/* Uncorrectable: latch the error, but still
				 * perform the data transfer below */
				ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
		} else {
			if (!(val & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_BREQ_ERROR);
TRACE_RET(chip, STATUS_FAIL); } } } retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA, 0, NO_WAIT_INT); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR)) TRACE_RET(chip, STATUS_FAIL); return STATUS_SUCCESS; } static int ms_set_bad_block(struct rts51x_chip *chip, u16 phy_blk) { struct ms_info *ms_card = &(chip->ms_card); int retval; u8 val = 0, data[8], extra[MS_EXTRA_SIZE]; retval = ms_read_extra_data(chip, phy_blk, 0, extra, MS_EXTRA_SIZE); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); ms_set_err_code(chip, MS_NO_ERROR); if (CHK_MS4BIT(ms_card)) { /* Parallel interface */ data[0] = 0x88; } else { /* Serial interface */ data[0] = 0x80; } /* Block Address */ data[1] = 0; data[2] = (u8) (phy_blk >> 8); data[3] = (u8) phy_blk; data[4] = 0x80; data[5] = 0; data[6] = extra[0] & 0x7F; data[7] = 0xFF; #ifdef MS_SPEEDUP retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 7, BLOCK_WRITE, WAIT_INT, data, 7, &val); #else retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 7, BLOCK_WRITE, WAIT_INT, data, 7, &val); #endif if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); if (val & INT_REG_CMDNK) { ms_set_err_code(chip, MS_CMD_NK); TRACE_RET(chip, STATUS_FAIL); } if (val & INT_REG_CED) { if (val & INT_REG_ERR) { ms_set_err_code(chip, MS_FLASH_WRITE_ERROR); TRACE_RET(chip, STATUS_FAIL); } } return STATUS_SUCCESS; } static int ms_erase_block(struct rts51x_chip *chip, u16 phy_blk) { struct ms_info *ms_card = &(chip->ms_card); int retval, i = 0; u8 val = 0, data[6]; retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); ms_set_err_code(chip, MS_NO_ERROR); if (CHK_MS4BIT(ms_card)) { /* Parallel interface */ data[0] = 0x88; } else { /* Serial interface */ data[0] = 0x80; } /* Block Address */ data[1] = 0; data[2] = (u8) (phy_blk >> 8); data[3] = (u8) phy_blk; data[4] = 0; data[5] = 0; 
ERASE_RTY: #ifdef MS_SPEEDUP retval = ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6, BLOCK_ERASE, WAIT_INT, data, 6, &val); #else retval = ms_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6, BLOCK_ERASE, WAIT_INT, data, 6, &val); #endif if (val & INT_REG_CMDNK) { if (i < 3) { i++; goto ERASE_RTY; } ms_set_err_code(chip, MS_CMD_NK); ms_set_bad_block(chip, phy_blk); TRACE_RET(chip, STATUS_FAIL); } if (val & INT_REG_CED) { if (val & INT_REG_ERR) { ms_set_err_code(chip, MS_FLASH_WRITE_ERROR); TRACE_RET(chip, STATUS_FAIL); } } return STATUS_SUCCESS; } static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len) { if (!extra || (extra_len < MS_EXTRA_SIZE)) return; memset(extra, 0xFF, MS_EXTRA_SIZE); if (type == setPS_NG) extra[0] = 0xB8; else extra[0] = 0x98; extra[2] = (u8) (log_blk >> 8); extra[3] = (u8) log_blk; } static int ms_init_page(struct rts51x_chip *chip, u16 phy_blk, u16 log_blk, u8 start_page, u8 end_page) { int retval; u8 extra[MS_EXTRA_SIZE], i; memset(extra, 0xff, MS_EXTRA_SIZE); extra[0] = 0xf8; /* Block, page OK, data erased */ extra[1] = 0xff; extra[2] = (u8) (log_blk >> 8); extra[3] = (u8) log_blk; for (i = start_page; i < end_page; i++) { if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST) TRACE_RET(chip, STATUS_FAIL); retval = ms_write_extra_data(chip, phy_blk, i, extra, MS_EXTRA_SIZE); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); } return STATUS_SUCCESS; } static int ms_copy_page(struct rts51x_chip *chip, u16 old_blk, u16 new_blk, u16 log_blk, u8 start_page, u8 end_page) { struct ms_info *ms_card = &(chip->ms_card); int retval, rty_cnt, uncorrect_flag = 0; u8 extra[MS_EXTRA_SIZE], val, i, j, data[16]; RTS51X_DEBUGP("Copy page from 0x%x to 0x%x, logical block is 0x%x\n", old_blk, new_blk, log_blk); RTS51X_DEBUGP("start_page = %d, end_page = %d\n", start_page, end_page); retval = ms_read_extra_data(chip, new_blk, 0, extra, MS_EXTRA_SIZE); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); 
	retval = ms_read_status_reg(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	RTS51X_READ_REG(chip, PPBUF_BASE2, &val);
	if (val & BUF_FULL) {
		/* Clear Buffer */
		retval = ms_send_cmd(chip, CLEAR_BUF, WAIT_INT);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);
		/* GET_INT Register */
		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
				       &val, 1);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);
		if (!(val & INT_REG_CED)) {
			ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	for (i = start_page; i < end_page; i++) {
		if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST)
			TRACE_RET(chip, STATUS_FAIL);

		/* Best-effort read of the source page's extra data;
		 * failure is ignored and extra keeps its previous contents */
		ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE);

		retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
					    MS_EXTRA_SIZE, SystemParm, 6);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		/* Write REG */
		ms_set_err_code(chip, MS_NO_ERROR);

		if (CHK_MS4BIT(ms_card)) {
			/* Parallel interface */
			data[0] = 0x88;
		} else {
			/* Serial interface */
			data[0] = 0x80;
		}
		/* Block Address */
		data[1] = 0;
		data[2] = (u8) (old_blk >> 8);
		data[3] = (u8) old_blk;
		data[4] = 0x20;		/* single page access */
		data[5] = i;

		retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
					data, 6);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		ms_set_err_code(chip, MS_NO_ERROR);
		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
				       &val, 1);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		if (val & INT_REG_CMDNK) {
			ms_set_err_code(chip, MS_CMD_NK);
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (val & INT_REG_CED) {
			if (val & INT_REG_ERR) {
				retval = ms_read_status_reg(chip);
				if (retval != STATUS_SUCCESS) {
					uncorrect_flag = 1;
					RTS51X_DEBUGP("Uncorrectable"
						      "error\n");
				} else {
					uncorrect_flag = 0;
				}
				retval = ms_transfer_tpc(chip,
							 MS_TM_NORMAL_READ,
							 READ_PAGE_DATA,
							 0, NO_WAIT_INT);
				if (retval != STATUS_SUCCESS)
					TRACE_RET(chip, retval);
				if (uncorrect_flag) {
					/* Source page unreadable: mark it NG
					 * on the old block (clearing bit 4 on
					 * page 0), flag the bad block, and
					 * record Error status on the new
					 * block, then skip the write. */
					ms_set_page_status(log_blk, setPS_NG,
							   extra,
							   MS_EXTRA_SIZE);
					if (i == 0)
						extra[0] &= 0xEF;
					ms_write_extra_data(chip, old_blk, i,
							    extra,
							    MS_EXTRA_SIZE);
					RTS51X_DEBUGP("page %d :"
						      "extra[0] = 0x%x\n",
						      i, extra[0]);
					MS_SET_BAD_BLOCK_FLG(ms_card);

					ms_set_page_status(log_blk,
							   setPS_Error,
							   extra,
							   MS_EXTRA_SIZE);
					ms_write_extra_data(chip, new_blk, i,
							    extra,
							    MS_EXTRA_SIZE);
					continue;
				}

				for (rty_cnt = 0;
				     rty_cnt < MS_MAX_RETRY_COUNT;
				     rty_cnt++) {
					retval = ms_transfer_tpc(chip,
							MS_TM_NORMAL_WRITE,
							WRITE_PAGE_DATA,
							0, NO_WAIT_INT);
					if (retval == STATUS_SUCCESS)
						break;
				}
				if (rty_cnt == MS_MAX_RETRY_COUNT)
					TRACE_RET(chip, STATUS_FAIL);
			}

			if (!(val & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_BREQ_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}

		/* NOTE(review): retval of this call is not checked —
		 * looks like an oversight; confirm before changing. */
		retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
					    MS_EXTRA_SIZE, SystemParm,
					    (6 + MS_EXTRA_SIZE));

		/* Write REG */
		ms_set_err_code(chip, MS_NO_ERROR);

		if (CHK_MS4BIT(ms_card)) {
			/* Parallel interface */
			data[0] = 0x88;
		} else {
			/* Serial interface */
			data[0] = 0x80;
		}
		/* Block Address */
		data[1] = 0;
		data[2] = (u8) (new_blk >> 8);
		data[3] = (u8) new_blk;
		data[4] = 0x20;
		data[5] = i;

		/* for MS check procedure */
		if ((extra[0] & 0x60) != 0x60)
			data[6] = extra[0];
		else
			data[6] = 0xF8;

		data[6 + 1] = 0xFF;
		data[6 + 2] = (u8) (log_blk >> 8);
		data[6 + 3] = (u8) log_blk;

		for (j = 4; j <= MS_EXTRA_SIZE; j++)
			data[6 + j] = 0xFF;

		retval = ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE),
					NO_WAIT_INT, data, 16);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		/* GET_INT Register */
		ms_set_err_code(chip, MS_NO_ERROR);
		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
				       &val, 1);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		if (val & INT_REG_CMDNK) {
			ms_set_err_code(chip, MS_CMD_NK);
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (val & INT_REG_CED) {
			if (val & INT_REG_ERR) {
				ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}

		if (i == 0) {
			/* After the first page is copied, clear bit 4 of the
			 * old block's overwrite flag (update in progress). */
			retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
						    MS_EXTRA_SIZE,
						    SystemParm, 7);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, retval);

			ms_set_err_code(chip, MS_NO_ERROR);

			if (CHK_MS4BIT(ms_card)) {
				/* Parallel interface */
				data[0] = 0x88;
			} else {
				/* Serial interface */
				data[0] = 0x80;
			}
			/* Block Address */
			data[1] = 0;
			data[2] = (u8) (old_blk >> 8);
			data[3] = (u8) old_blk;
			data[4] = 0x80;
			data[5] = 0;
			data[6] = 0xEF;
			data[7] = 0xFF;

			retval = ms_write_bytes(chip, WRITE_REG, 7,
						NO_WAIT_INT, data, 8);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, retval);

			retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, retval);

			ms_set_err_code(chip, MS_NO_ERROR);
			retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
					       &val, 1);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, retval);

			if (val & INT_REG_CMDNK) {
				ms_set_err_code(chip, MS_CMD_NK);
				TRACE_RET(chip, STATUS_FAIL);
			}

			if (val & INT_REG_CED) {
				if (val & INT_REG_ERR) {
					ms_set_err_code(chip,
							MS_FLASH_WRITE_ERROR);
					TRACE_RET(chip, STATUS_FAIL);
				}
			}
		}
	}

	return STATUS_SUCCESS;
}

#ifdef MS_SPEEDUP
/*
 * Hardware-assisted block copy: program old/new/logical block numbers,
 * page range and bus width into the chip's copy-page registers and let
 * the MS_TM_COPY_PAGE state machine do the work.  On (non-timeout)
 * failure, erase the target block and fall back to the software
 * ms_copy_page() path.
 */
static int ms_auto_copy_page(struct rts51x_chip *chip, u16 old_blk,
			     u16 new_blk, u16 log_blk,
			     u8 start_page, u8 end_page)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	u8 page_len, bus_width, val = 0;
	u8 extra[MS_EXTRA_SIZE];

	RTS51X_DEBUGP("Auto copy page from 0x%x to 0x%x,"
		      "logical block is 0x%x\n", old_blk, new_blk, log_blk);
	RTS51X_DEBUGP("start_page = %d, end_page = %d\n",
		      start_page, end_page);

	page_len = end_page - start_page;

	retval = ms_read_extra_data(chip, new_blk, 0, extra, MS_EXTRA_SIZE);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	retval = ms_read_status_reg(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	RTS51X_READ_REG(chip, PPBUF_BASE2, &val);

	if (val & BUF_FULL) {
		/* Page buffer still holds stale data: clear it first */
		retval = ms_send_cmd(chip, CLEAR_BUF, WAIT_INT);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
				       &val, 1);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		if (!(val & INT_REG_CED)) {
			ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	if (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		bus_width = 0x88;
	} else {
		/* Serial interface */
		bus_width = 0x80;
	}

	rts51x_init_cmd(chip);

	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_OLD_BLOCK_0, 0xFF,
		       (u8) old_blk);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_OLD_BLOCK_1, 0xFF,
		       (u8) (old_blk >> 8));
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_NEW_BLOCK_0, 0xFF,
		       (u8) new_blk);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_NEW_BLOCK_1, 0xFF,
		       (u8) (new_blk >> 8));
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_LOG_BLOCK_0, 0xFF,
		       (u8) log_blk);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_LOG_BLOCK_1, 0xFF,
		       (u8) (log_blk >> 8));
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_PAGE_START, 0xFF, start_page);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_PAGE_LENGTH, 0xFF, page_len);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BUS_WIDTH, 0xFF, bus_width);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
		       MS_TRANSFER_START | MS_TM_COPY_PAGE);
	rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
		       MS_TRANSFER_END);

	retval = rts51x_send_cmd(chip, MODE_CR, 100);
	if (retval != STATUS_SUCCESS) {
		rts51x_clear_ms_error(chip);
		TRACE_RET(chip, retval);
	}

	retval = rts51x_get_rsp(chip, 1, 5000);
	if (CHECK_MS_TRANS_FAIL(chip, retval)) {
		rts51x_clear_ms_error(chip);
		if (retval == STATUS_TIMEDOUT)
			TRACE_RET(chip, retval);
		TRACE_GOTO(chip, Fail);
	}

	return STATUS_SUCCESS;

Fail:
	/* Hardware copy failed: erase the target and retry in software */
	retval = ms_erase_block(chip, new_blk);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
			      start_page, end_page);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	return STATUS_SUCCESS;
}
#endif

/*
 * Reset a legacy (non-Pro) Memory Stick: prepare the interface, issue
 * MS_RESET, read the write-protect bit, locate and parse the boot block,
 * derive geometry/capacity, and switch to the 4-bit bus if the card
 * supports it.  (Body continues on the following lines.)
 */
static int reset_ms(struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	u16 i, reg_addr, block_size;
	u8 val, j, *ptr;
#ifndef SUPPORT_MAGIC_GATE
	u16 eblock_cnt;
#endif

	retval =
	    ms_prepare_reset(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	ms_card->ms_type |= TYPE_MS;

	retval = ms_send_cmd(chip, MS_RESET, NO_WAIT_INT);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	retval = ms_read_status_reg(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	/* Write-protect switch state is mirrored into PPBUF_BASE2 */
	RTS51X_READ_REG(chip, PPBUF_BASE2, &val);
	if (val & WRT_PRTCT)
		chip->card_wp |= MS_CARD;
	else
		chip->card_wp &= ~MS_CARD;

	i = 0;

RE_SEARCH:
	/* Search For Boot Block */
	while (i < (MAX_DEFECTIVE_BLOCK + 2)) {
		if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST)
			TRACE_RET(chip, STATUS_FAIL);

		retval = ms_check_boot_block(chip, i);
		if (retval != STATUS_SUCCESS) {
			i++;
			continue;
		}

		ms_card->boot_block = i;
		break;
	}

	if (i == (MAX_DEFECTIVE_BLOCK + 2)) {
		RTS51X_DEBUGP("No boot block found!");
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Verify the first three pages are readable; on a flash error,
	 * resume the search from the next block. */
	for (j = 0; j < 3; j++) {
		retval = ms_read_page(chip, ms_card->boot_block, j);
		if (retval != STATUS_SUCCESS) {
			if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR)) {
				i = ms_card->boot_block + 1;
				ms_set_err_code(chip, MS_NO_ERROR);
				goto RE_SEARCH;
			}
		}
	}

	/* Read boot block contents */
	retval = ms_read_page(chip, ms_card->boot_block, 0);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	/* Read MS system information as sys_info */
	retval = rts51x_seq_read_register(chip, PPBUF_BASE2 + 0x1A0, 96,
					  ms_card->raw_sys_info);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	/* Read useful block contents */
	rts51x_init_cmd(chip);

	rts51x_add_cmd(chip, READ_REG_CMD, HEADER_ID0, 0, 0);
	rts51x_add_cmd(chip, READ_REG_CMD, HEADER_ID1, 0, 0);

	for (reg_addr = DISABLED_BLOCK0; reg_addr <= DISABLED_BLOCK3;
	     reg_addr++) {
		rts51x_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
	}

	for (reg_addr = BLOCK_SIZE_0; reg_addr <= PAGE_SIZE_1; reg_addr++)
		rts51x_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);

	rts51x_add_cmd(chip, READ_REG_CMD, MS_Device_Type, 0, 0);
	rts51x_add_cmd(chip, READ_REG_CMD, MS_4bit_Support, 0, 0);

	retval = rts51x_send_cmd(chip, MODE_CR, 100);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);
	retval = rts51x_get_rsp(chip, 16, 100);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	ptr = rts51x_get_rsp_data(chip);

	RTS51X_DEBUGP("Boot block data:\n");
	RTS51X_DUMP(ptr, 16);

	/* Validate header ID and device-type fields; on mismatch keep
	 * searching for the real boot block. */
	if (ptr[0] != 0x00 || ptr[1] != 0x01) {
		i = ms_card->boot_block + 1;
		goto RE_SEARCH;
	}
	if (ptr[12] != 0x02 || ptr[13] != 0x00) {
		i = ms_card->boot_block + 1;
		goto RE_SEARCH;
	}
	if ((ptr[14] == 1) || (ptr[14] == 3))
		chip->card_wp |= MS_CARD;

	/* Geometry: 16 KB blocks => 32 pages, 8 KB blocks => 16 pages */
	block_size = ((u16) ptr[6] << 8) | ptr[7];
	if (block_size == 0x0010) {
		ms_card->block_shift = 5;
		ms_card->page_off = 0x1F;
	} else if (block_size == 0x0008) {
		ms_card->block_shift = 4;
		ms_card->page_off = 0x0F;
	}

	ms_card->total_block = ((u16) ptr[8] << 8) | ptr[9];

#ifdef SUPPORT_MAGIC_GATE
	/* Capacity from a fixed table keyed on effective block count */
	j = ptr[10];

	if (ms_card->block_shift == 4) {
		if (j < 2)
			ms_card->capacity = 0x1EE0;
		else
			ms_card->capacity = 0x3DE0;
	} else {
		if (j < 5)
			ms_card->capacity = 0x7BC0;
		else if (j < 0xA)
			ms_card->capacity = 0xF7C0;
		else if (j < 0x11)
			ms_card->capacity = 0x1EF80;
		else
			ms_card->capacity = 0x3DF00;
	}
#else
	eblock_cnt = ((u16) ptr[10] << 8) | ptr[11];

	ms_card->capacity = ((u32) eblock_cnt - 2) << ms_card->block_shift;
#endif

	chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity;

	if (ptr[15]) {
		/* Card advertises 4-bit support: tell it, then switch the
		 * host controller to the 4-bit parallel bus. */
		retval = ms_set_rw_reg_addr(chip, 0, 0, SystemParm, 1);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, STATUS_FAIL);
		RTS51X_WRITE_REG(chip, PPBUF_BASE2, 0xFF, 0x88);
		RTS51X_WRITE_REG(chip, PPBUF_BASE2 + 1, 0xFF, 0);

		retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG,
					 1, NO_WAIT_INT);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, STATUS_FAIL);

		RTS51X_WRITE_REG(chip, MS_CFG, 0x58 | MS_NO_CHECK_INT,
				 MS_BUS_WIDTH_4 | PUSH_TIME_ODD |
				 MS_NO_CHECK_INT);

		ms_card->ms_type |= MS_4BIT;
	}

	if (CHK_MS4BIT(ms_card))
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
	else
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 1;

	return STATUS_SUCCESS;
}

/*
 * Allocate the per-segment logical-to-physical bookkeeping and seed each
 * segment's defect list from the defect table stored in boot block page 1
 * (pairs of bytes, terminated by 0xFFFF).  Each segment covers 512
 * physical blocks.  (Body continues on the following lines.)
 */
static int ms_init_l2p_tbl(struct rts51x_chip *chip)
{
	struct ms_info *ms_card =
	    &(chip->ms_card);
	int size, i, seg_no, retval;
	u16 defect_block, reg_addr;
	u8 val1, val2;

	/* One segment per 512 physical blocks */
	ms_card->segment_cnt = ms_card->total_block >> 9;
	RTS51X_DEBUGP("ms_card->segment_cnt = %d\n", ms_card->segment_cnt);

	size = ms_card->segment_cnt * sizeof(struct zone_entry);
	ms_card->segment = vmalloc(size);
	if (ms_card->segment == NULL)
		TRACE_RET(chip, STATUS_FAIL);
	memset(ms_card->segment, 0, size);

	/* Defect table lives in page 1 of the boot block */
	retval = ms_read_page(chip, ms_card->boot_block, 1);
	if (retval != STATUS_SUCCESS)
		TRACE_GOTO(chip, INIT_FAIL);

	reg_addr = PPBUF_BASE2;
	for (i = 0; i < (((ms_card->total_block >> 9) * 10) + 1); i++) {
		retval = rts51x_read_register(chip, reg_addr++, &val1);
		if (retval != STATUS_SUCCESS)
			TRACE_GOTO(chip, INIT_FAIL);
		retval = rts51x_read_register(chip, reg_addr++, &val2);
		if (retval != STATUS_SUCCESS)
			TRACE_GOTO(chip, INIT_FAIL);

		defect_block = ((u16) val1 << 8) | val2;
		if (defect_block == 0xFFFF)
			break;
		seg_no = defect_block / 512;
		ms_card->segment[seg_no].defect_list[ms_card->segment[seg_no].
						     disable_count++] =
		    defect_block;
	}

	for (i = 0; i < ms_card->segment_cnt; i++) {
		ms_card->segment[i].build_flag = 0;
		ms_card->segment[i].l2p_table = NULL;
		ms_card->segment[i].free_table = NULL;
		ms_card->segment[i].get_index = 0;
		ms_card->segment[i].set_index = 0;
		ms_card->segment[i].unused_blk_cnt = 0;

		RTS51X_DEBUGP("defective block count of segment %d is %d\n",
			      i, ms_card->segment[i].disable_count);
	}

	return STATUS_SUCCESS;

INIT_FAIL:
	if (ms_card->segment) {
		vfree(ms_card->segment);
		ms_card->segment = NULL;
	}

	return STATUS_FAIL;
}

/*
 * Look up the physical block mapped to (seg_no, log_off);
 * returns 0xFFFF when no table exists or the slot is unmapped.
 */
static u16 ms_get_l2p_tbl(struct rts51x_chip *chip, int seg_no, u16 log_off)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;

	if (ms_card->segment == NULL)
		return 0xFFFF;

	segment = &(ms_card->segment[seg_no]);

	if (segment->l2p_table)
		return segment->l2p_table[log_off];

	return 0xFFFF;
}

/*
 * Record phy_blk as the mapping for (seg_no, log_off), if the
 * segment's table has been allocated.
 */
static void ms_set_l2p_tbl(struct rts51x_chip *chip, int seg_no, u16 log_off,
			   u16 phy_blk)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;

	if (ms_card->segment == NULL)
		return;

	segment = &(ms_card->segment[seg_no]);

	if (segment->l2p_table)
		segment->l2p_table[log_off] = phy_blk;
}

/*
 * Push phy_blk onto its segment's circular free-block list
 * (the segment is derived from the block number itself).
 */
static void ms_set_unused_block(struct rts51x_chip *chip, u16 phy_blk)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;
	int seg_no;

	seg_no = (int)phy_blk >> 9;
	segment = &(ms_card->segment[seg_no]);

	segment->free_table[segment->set_index++] = phy_blk;
	if (segment->set_index >= MS_FREE_TABLE_CNT)
		segment->set_index = 0;
	segment->unused_blk_cnt++;
}

/*
 * Pop a free physical block from the segment's circular free list;
 * returns 0xFFFF when the segment has no free blocks left.
 */
static u16 ms_get_unused_block(struct rts51x_chip *chip, int seg_no)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;
	u16 phy_blk;

	segment = &(ms_card->segment[seg_no]);

	if (segment->unused_blk_cnt <= 0)
		return 0xFFFF;

	phy_blk = segment->free_table[segment->get_index];
	segment->free_table[segment->get_index++] = 0xFFFF;
	if (segment->get_index >= MS_FREE_TABLE_CNT)
		segment->get_index = 0;
	segment->unused_blk_cnt--;

	return phy_blk;
}

/* First logical block number of each segment (segment 0 holds 494
 * logical blocks, the rest 496 each). */
static const unsigned short ms_start_idx[] = {
	0, 494, 990, 1486, 1982, 2478, 2974, 3470, 3966, 4462, 4958, 5454,
	5950, 6446, 6942, 7438, 7934
};

/*
 * Resolve a duplicate mapping: two physical blocks (phy_blk and the
 * currently-mapped tmp_blk) claim the same logical offset.  The "update
 * status" bits us1/us2 decide the winner; on a tie the higher-numbered
 * block wins.  The loser is erased (unless write-protected) and returned
 * to the free list.
 */
static int ms_arbitrate_l2p(struct rts51x_chip *chip, u16 phy_blk,
			    u16 log_off, u8 us1, u8 us2)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;
	int seg_no;
	u16 tmp_blk;

	seg_no = (int)phy_blk >> 9;
	segment = &(ms_card->segment[seg_no]);
	tmp_blk = segment->l2p_table[log_off];

	if (us1 != us2) {
		if (us1 == 0) {
			if (!(chip->card_wp & MS_CARD))
				ms_erase_block(chip, tmp_blk);
			ms_set_unused_block(chip, tmp_blk);
			segment->l2p_table[log_off] = phy_blk;
		} else {
			if (!(chip->card_wp & MS_CARD))
				ms_erase_block(chip, phy_blk);
			ms_set_unused_block(chip, phy_blk);
		}
	} else {
		if (phy_blk < tmp_blk) {
			if (!(chip->card_wp & MS_CARD))
				ms_erase_block(chip, phy_blk);
			ms_set_unused_block(chip, phy_blk);
		} else {
			if (!(chip->card_wp & MS_CARD))
				ms_erase_block(chip, tmp_blk);
			ms_set_unused_block(chip, tmp_blk);
			segment->l2p_table[log_off] = phy_blk;
		}
	}

	return STATUS_SUCCESS;
}

/*
 * Build the logical-to-physical table for one segment: scan every
 * physical block in the segment, classify it via its page-0 extra data
 * (defective / free / mapped / duplicate), fill the l2p and free tables,
 * then back-fill any still-unmapped logical blocks from the free list.
 * For segment 0, also relocate any logical block whose physical block
 * precedes the boot block.  May set the card write-protect flag when the
 * segment runs out of spare blocks.
 */
static int ms_build_l2p_tbl(struct rts51x_chip *chip, int seg_no)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;
	int retval, table_size, disable_cnt, defect_flag, i;
	u16 start, end, phy_blk, log_blk, tmp_blk;
	u8 extra[MS_EXTRA_SIZE], us1, us2;

	RTS51X_DEBUGP("ms_build_l2p_tbl: %d\n", seg_no);

	if (ms_card->segment == NULL) {
		retval = ms_init_l2p_tbl(chip);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);
	}

	if (ms_card->segment[seg_no].build_flag) {
		RTS51X_DEBUGP("l2p table of segment %d has been built\n",
			      seg_no);
		return STATUS_SUCCESS;
	}

	/* Segment 0 maps 494 logical blocks, every other segment 496 */
	if (seg_no == 0)
		table_size = 494;
	else
		table_size = 496;

	segment = &(ms_card->segment[seg_no]);

	if (segment->l2p_table == NULL) {
		segment->l2p_table = vmalloc(table_size * 2);
		if (segment->l2p_table == NULL)
			TRACE_GOTO(chip, BUILD_FAIL);
	}
	memset((u8 *) (segment->l2p_table), 0xff, table_size * 2);

	if (segment->free_table == NULL) {
		segment->free_table = vmalloc(MS_FREE_TABLE_CNT * 2);
		if (segment->free_table == NULL)
			TRACE_GOTO(chip, BUILD_FAIL);
	}
	memset((u8 *) (segment->free_table), 0xff, MS_FREE_TABLE_CNT * 2);

	start = (u16) seg_no << 9;
	end = (u16) (seg_no + 1) << 9;

	disable_cnt = segment->disable_count;

	segment->get_index = segment->set_index = 0;
	segment->unused_blk_cnt = 0;

	for (phy_blk = start; phy_blk < end; phy_blk++) {
		/* Skip blocks on the segment's defect list */
		if (disable_cnt) {
			defect_flag = 0;
			for (i = 0; i < segment->disable_count; i++) {
				if (phy_blk == segment->defect_list[i]) {
					defect_flag = 1;
					break;
				}
			}
			if (defect_flag) {
				disable_cnt--;
				continue;
			}
		}

		retval = ms_read_extra_data(chip, phy_blk, 0,
					    extra, MS_EXTRA_SIZE);
		if (retval != STATUS_SUCCESS) {
			RTS51X_DEBUGP("read extra data fail\n");
			ms_set_bad_block(chip, phy_blk);
			continue;
		}

		/* In the last segment, wipe stale translation-table blocks */
		if (seg_no == ms_card->segment_cnt - 1) {
			if (!(extra[1] & NOT_TRANSLATION_TABLE)) {
				if (!(chip->card_wp & MS_CARD)) {
					retval = ms_erase_block(chip, phy_blk);
					if (retval != STATUS_SUCCESS)
						continue;
					extra[2] = 0xff;
					extra[3] = 0xff;
				}
			}
		}

		if (!(extra[0] & BLOCK_OK))
			continue;
		if (!(extra[1] & NOT_BOOT_BLOCK))
			continue;
		if ((extra[0] & PAGE_OK) != PAGE_OK)
			continue;

		log_blk = ((u16) extra[2] << 8) | extra[3];

		/* Unmapped block: erase (if writable) and add to free list */
		if (log_blk == 0xFFFF) {
			if (!(chip->card_wp & MS_CARD)) {
				retval = ms_erase_block(chip, phy_blk);
				if (retval != STATUS_SUCCESS)
					continue;
			}
			ms_set_unused_block(chip, phy_blk);
			continue;
		}

		/* Logical number outside this segment's range: reclaim */
		if ((log_blk < ms_start_idx[seg_no]) ||
		    (log_blk >= ms_start_idx[seg_no + 1])) {
			if (!(chip->card_wp & MS_CARD)) {
				retval = ms_erase_block(chip, phy_blk);
				if (retval != STATUS_SUCCESS)
					continue;
			}
			ms_set_unused_block(chip, phy_blk);
			continue;
		}

		if (segment->l2p_table[log_blk - ms_start_idx[seg_no]] ==
		    0xFFFF) {
			segment->l2p_table[log_blk - ms_start_idx[seg_no]] =
			    phy_blk;
			continue;
		}

		/* Duplicate mapping: arbitrate via the update-status bits */
		us1 = extra[0] & 0x10;
		tmp_blk = segment->l2p_table[log_blk - ms_start_idx[seg_no]];
		retval = ms_read_extra_data(chip, tmp_blk, 0,
					    extra, MS_EXTRA_SIZE);
		if (retval != STATUS_SUCCESS)
			continue;
		us2 = extra[0] & 0x10;

		(void)ms_arbitrate_l2p(chip, phy_blk,
				       log_blk - ms_start_idx[seg_no],
				       us1, us2);
		continue;
	}

	segment->build_flag = 1;

	RTS51X_DEBUGP("unused block count: %d\n", segment->unused_blk_cnt);

	/* Too few spares: force the card read-only */
	if (seg_no == ms_card->segment_cnt - 1) {
		if (segment->unused_blk_cnt < 2)
			chip->card_wp |= MS_CARD;
	} else {
		if (segment->unused_blk_cnt < 1)
			chip->card_wp |= MS_CARD;
	}

	if (chip->card_wp & MS_CARD)
		return STATUS_SUCCESS;

	/* Assign free blocks to any logical blocks still unmapped */
	for (log_blk = ms_start_idx[seg_no];
	     log_blk < ms_start_idx[seg_no + 1]; log_blk++) {
		if (segment->l2p_table[log_blk - ms_start_idx[seg_no]] ==
		    0xFFFF) {
			phy_blk = ms_get_unused_block(chip, seg_no);
			if (phy_blk == 0xFFFF) {
				chip->card_wp |= MS_CARD;
				return STATUS_SUCCESS;
			}
			retval = ms_init_page(chip, phy_blk, log_blk, 0, 1);
			if (retval != STATUS_SUCCESS)
				TRACE_GOTO(chip, BUILD_FAIL);

			segment->l2p_table[log_blk - ms_start_idx[seg_no]] =
			    phy_blk;
			if (seg_no == ms_card->segment_cnt - 1) {
				if (segment->unused_blk_cnt < 2) {
					chip->card_wp |= MS_CARD;
					return STATUS_SUCCESS;
				}
			} else {
				if (segment->unused_blk_cnt < 1) {
					chip->card_wp |= MS_CARD;
					return STATUS_SUCCESS;
				}
			}
		}
	}

	/* Segment 0: move any logical data out of blocks that precede the
	 * boot block, then mark the old blocks bad. */
	if (seg_no == 0) {
		for (log_blk = 0; log_blk < 494; log_blk++) {
			tmp_blk = segment->l2p_table[log_blk];
			if (tmp_blk < ms_card->boot_block) {
				RTS51X_DEBUGP("Boot block is not the first"
					      "normal block.\n");

				if (chip->card_wp & MS_CARD)
					break;

				phy_blk = ms_get_unused_block(chip, 0);
#ifdef MS_SPEEDUP
				retval = ms_auto_copy_page(chip, tmp_blk,
						phy_blk, log_blk, 0,
						ms_card->page_off + 1);
#else
				retval = ms_copy_page(chip, tmp_blk, phy_blk,
						log_blk, 0,
						ms_card->page_off + 1);
#endif
				if (retval != STATUS_SUCCESS)
					TRACE_RET(chip, retval);

				segment->l2p_table[log_blk] = phy_blk;

				retval = ms_set_bad_block(chip, tmp_blk);
				if (retval != STATUS_SUCCESS)
					TRACE_RET(chip, retval);
			}
		}
	}

	return STATUS_SUCCESS;

BUILD_FAIL:
	segment->build_flag = 0;
	if (segment->l2p_table) {
		vfree(segment->l2p_table);
		segment->l2p_table = NULL;
	}
	if (segment->free_table) {
		vfree(segment->free_table);
		segment->free_table = NULL;
	}

	return STATUS_FAIL;
}

/*
 * Public entry point: full Memory Stick reset.  Clears the per-card
 * state, powers the clock, selects the card, tries the MS Pro reset
 * first and falls back to the legacy reset when check_ms_flow is set
 * (optionally raising pad drive strength on failure), then applies
 * transfer parameters and, for legacy cards, builds the l2p table of
 * the last segment.  (Definition is cut off at the end of this chunk.)
 */
int rts51x_reset_ms_card(struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;

	memset(ms_card, 0, sizeof(struct ms_info));

	rts51x_enable_card_clock(chip, MS_CARD);

	retval = rts51x_select_card(chip, MS_CARD);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	ms_card->ms_type = 0;
	ms_card->last_rw_int = 0;

	retval = reset_ms_pro(chip);
	if (retval != STATUS_SUCCESS) {
		if (ms_card->check_ms_flow) {
			retval = reset_ms(chip);
			if (retval != STATUS_SUCCESS) {
				if (chip->option.
				    reset_or_rw_fail_set_pad_drive) {
					rts51x_write_register(chip,
							CARD_DRIVE_SEL,
							SD20_DRIVE_MASK,
							DRIVE_8mA);
				}
				TRACE_RET(chip, retval);
			}
		} else {
			if (chip->option.reset_or_rw_fail_set_pad_drive) {
				rts51x_write_register(chip, CARD_DRIVE_SEL,
						      SD20_DRIVE_MASK,
						      DRIVE_8mA);
			}
			TRACE_RET(chip, retval);
		}
	}

	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (!CHK_MSPRO(ms_card)) {
		retval = ms_build_l2p_tbl(chip,
					  ms_card->total_block / 512 - 1);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);
} RTS51X_DEBUGP("ms_card->ms_type = 0x%x\n", ms_card->ms_type); return STATUS_SUCCESS; } static int mspro_set_rw_cmd(struct rts51x_chip *chip, u32 start_sec, u16 sec_cnt, u8 cmd) { int retval, i; u8 data[8]; data[0] = cmd; data[1] = (u8) (sec_cnt >> 8); data[2] = (u8) sec_cnt; data[3] = (u8) (start_sec >> 24); data[4] = (u8) (start_sec >> 16); data[5] = (u8) (start_sec >> 8); data[6] = (u8) start_sec; data[7] = 0; for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_write_bytes(chip, PRO_EX_SET_CMD, 7, WAIT_INT, data, 8); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) TRACE_RET(chip, STATUS_FAIL); return STATUS_SUCCESS; } static void mspro_stop_seq_mode(struct rts51x_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; if (ms_card->seq_mode) { retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) return; ms_card->seq_mode = 0; ms_card->total_sec_cnt = 0; ms_card->last_rw_int = 0; ms_send_cmd(chip, PRO_STOP, WAIT_INT); rts51x_ep0_write_register(chip, MC_FIFO_CTL, FIFO_FLUSH, FIFO_FLUSH); } } static inline int ms_auto_tune_clock(struct rts51x_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; if (chip->asic_code) { if (ms_card->ms_clock > 30) ms_card->ms_clock -= 20; } else { if (ms_card->ms_clock == CLK_80) ms_card->ms_clock = CLK_60; else if (ms_card->ms_clock == CLK_60) ms_card->ms_clock = CLK_40; } retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); return STATUS_SUCCESS; } static int mspro_rw_multi_sector(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sector, u16 sector_cnt) { struct ms_info *ms_card = &(chip->ms_card); int retval, mode_2k = 0; u16 count; u8 val, trans_mode, rw_tpc, rw_cmd; ms_set_err_code(chip, MS_NO_ERROR); ms_card->counter = 0; if (CHK_MSHG(ms_card)) { if ((start_sector % 4) || (sector_cnt % 4)) { if (srb->sc_data_direction == DMA_FROM_DEVICE) { rw_tpc = PRO_READ_LONG_DATA; rw_cmd = PRO_READ_DATA; } else { rw_tpc = 
PRO_WRITE_LONG_DATA;
				rw_cmd = PRO_WRITE_DATA;
			}
		} else {
			/* Aligned HG request: use 2K quad TPCs. */
			if (srb->sc_data_direction == DMA_FROM_DEVICE) {
				rw_tpc = PRO_READ_QUAD_DATA;
				rw_cmd = PRO_READ_2K_DATA;
			} else {
				rw_tpc = PRO_WRITE_QUAD_DATA;
				rw_cmd = PRO_WRITE_2K_DATA;
			}
			mode_2k = 1;
		}
	} else {
		/* Plain MS Pro: 512-byte long-data TPCs. */
		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
			rw_tpc = PRO_READ_LONG_DATA;
			rw_cmd = PRO_READ_DATA;
		} else {
			rw_tpc = PRO_WRITE_LONG_DATA;
			rw_cmd = PRO_WRITE_DATA;
		}
	}

	retval = ms_switch_clock(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	if (srb->sc_data_direction == DMA_FROM_DEVICE)
		trans_mode = MS_TM_AUTO_READ;
	else
		trans_mode = MS_TM_AUTO_WRITE;

	val = ms_card->last_rw_int;

	if (ms_card->seq_mode) {
		/* Break sequential mode if this request is not a direct
		 * continuation (direction, address, 2K/512 mode, pending
		 * BREQ) or the running total would exceed 0xFE00 sectors. */
		if ((ms_card->pre_dir != srb->sc_data_direction)
		    || ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) !=
			start_sector)
		    || (mode_2k && (ms_card->seq_mode & MODE_512_SEQ))
		    || (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ))
		    || !(val & MS_INT_BREQ)
		    || ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
			ms_card->seq_mode = 0;
			ms_card->total_sec_cnt = 0;
			ms_card->last_rw_int = 0;
			if (val & MS_INT_BREQ) {
				retval = ms_send_cmd(chip, PRO_STOP, WAIT_INT);
				if (retval != STATUS_SUCCESS)
					TRACE_RET(chip, retval);

				rts51x_ep0_write_register(chip, MC_FIFO_CTL,
							  FIFO_FLUSH,
							  FIFO_FLUSH);
			}
		}
	}

	if (!ms_card->seq_mode) {
		/* Open a new card command.  Large requests (>= 0x80 sectors)
		 * open a window up to 0xFE00 sectors so subsequent
		 * contiguous requests can ride the same command. */
		ms_card->total_sec_cnt = 0;
		if (sector_cnt >= 0x80) {
			if ((ms_card->capacity - start_sector) > 0xFE00)
				count = 0xFE00;
			else
				count = (u16) (ms_card->capacity -
					       start_sector);
			if (count > sector_cnt) {
				if (mode_2k)
					ms_card->seq_mode |= MODE_2K_SEQ;
				else
					ms_card->seq_mode |= MODE_512_SEQ;
			}
		} else {
			count = sector_cnt;
		}

		retval = mspro_set_rw_cmd(chip, start_sector, count, rw_cmd);
		if (retval != STATUS_SUCCESS) {
			ms_card->seq_mode = 0;
			TRACE_RET(chip, retval);
		}
	}

	retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt,
				  WAIT_INT, mode_2k, scsi_sg_count(srb),
				  scsi_sglist(srb), scsi_bufflen(srb));
	if (retval != STATUS_SUCCESS) {
		/* Transfer failed: read the INT status, stop the command if
		 * the card still asserts BREQ, and on CRC/timeout errors
		 * drop the interface clock a step. */
		ms_card->seq_mode = 0;
		rts51x_ep0_read_register(chip, MS_TRANS_CFG, &val);
		rts51x_clear_ms_error(chip);

		if (val & MS_INT_BREQ)
			ms_send_cmd(chip, PRO_STOP, WAIT_INT);

		if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
			RTS51X_DEBUGP("MSPro CRC error, tune clock!\n");
			ms_auto_tune_clock(chip);
		}

		TRACE_RET(chip, retval);
	}

	/* Remember this request so the next one can extend sequential mode. */
	ms_card->pre_sec_addr = start_sector;
	ms_card->pre_sec_cnt = sector_cnt;
	ms_card->pre_dir = srb->sc_data_direction;
	ms_card->total_sec_cnt += sector_cnt;

	return STATUS_SUCCESS;
}

/*
 * mspro_read_format_progress - poll the card while an MS Pro format is in
 * flight: read the 8-byte progress record (total / current, big-endian),
 * scale it to 0..65535 in ms_card->progress, then wait up to ~2.5 s for
 * the next INT state and classify it into FORMAT_SUCCESS /
 * FORMAT_IN_PROGRESS / FORMAT_FAIL.
 */
static int mspro_read_format_progress(struct rts51x_chip *chip,
				      const int short_data_len)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u32 total_progress, cur_progress;
	u8 cnt, tmp;
	u8 data[8];

	ms_card->format_status = FORMAT_FAIL;

	retval = ms_switch_clock(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	RTS51X_READ_REG(chip, MS_TRANS_CFG, &tmp);

	if ((tmp & (MS_INT_CED | MS_INT_CMDNK | MS_INT_ERR)) == MS_INT_CED) {
		/* Command-end with no error: format already finished. */
		ms_card->format_status = FORMAT_SUCCESS;
		ms_card->pro_under_formatting = 0;
		return STATUS_SUCCESS;
	}
	if (!
	    ((tmp & (MS_INT_BREQ | MS_INT_CED | MS_INT_CMDNK | MS_INT_ERR)) ==
	     MS_INT_BREQ)) {
		/* Anything other than a clean BREQ means the format died. */
		ms_card->pro_under_formatting = 0;
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* cnt == 0 appears to request the full 256-byte short read. */
	if (short_data_len >= 256)
		cnt = 0;
	else
		cnt = (u8) short_data_len;

	retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT,
			       data, 8);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	total_progress = (data[0] << 24) | (data[1] << 16) |
	    (data[2] << 8) | data[3];
	cur_progress = (data[4] << 24) | (data[5] << 16) |
	    (data[6] << 8) | data[7];

	RTS51X_DEBUGP("total_progress = %d, cur_progress = %d\n",
		      total_progress, cur_progress);

	if (total_progress == 0) {
		ms_card->progress = 0;
	} else {
		/* Scale to 0..65535 with 64-bit math to avoid overflow. */
		u64 ulltmp = (u64) cur_progress * (u64) 65535;
		do_div(ulltmp, total_progress);
		ms_card->progress = (u16) ulltmp;
	}
	RTS51X_DEBUGP("progress = %d\n", ms_card->progress);

	/* Wait (up to 2500 ms) for the card to raise a meaningful INT. */
	for (i = 0; i < 2500; i++) {
		RTS51X_READ_REG(chip, MS_TRANS_CFG, &tmp);
		if (tmp & (MS_INT_CED | MS_INT_CMDNK |
			   MS_INT_BREQ | MS_INT_ERR))
			break;

		wait_timeout(1);
	}

	if (i == 2500)
		TRACE_RET(chip,
STATUS_FAIL); RTS51X_DEBUGP("MSPro format tmp:%d\n", tmp); if (tmp & (MS_INT_CMDNK | MS_INT_ERR)) TRACE_RET(chip, STATUS_FAIL); if (tmp & MS_INT_CED) { ms_card->format_status = FORMAT_SUCCESS; ms_card->pro_under_formatting = 0; } else if (tmp & MS_INT_BREQ) { ms_card->format_status = FORMAT_IN_PROGRESS; } else { ms_card->format_status = FORMAT_FAIL; ms_card->pro_under_formatting = 0; TRACE_RET(chip, STATUS_FAIL); } RTS51X_DEBUGP("MSPro format format_status:%d\n", ms_card->format_status); return STATUS_SUCCESS; } void rts51x_mspro_polling_format_status(struct rts51x_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int i; if (ms_card->pro_under_formatting) { for (i = 0; i < 65535; i++) { mspro_read_format_progress(chip, MS_SHORT_DATA_LEN); if (ms_card->format_status != FORMAT_IN_PROGRESS) break; } } return; } void rts51x_mspro_format_sense(struct rts51x_chip *chip, unsigned int lun) { struct ms_info *ms_card = &(chip->ms_card); if (CHK_FORMAT_STATUS(ms_card, FORMAT_SUCCESS)) { rts51x_set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); ms_card->pro_under_formatting = 0; ms_card->progress = 0; } else if (CHK_FORMAT_STATUS(ms_card, FORMAT_IN_PROGRESS)) { rts51x_set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 0, (u16) (ms_card->progress)); } else { rts51x_set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED); ms_card->pro_under_formatting = 0; ms_card->progress = 0; } } int rts51x_mspro_format(struct scsi_cmnd *srb, struct rts51x_chip *chip, int short_data_len, int quick_format) { struct ms_info *ms_card = &(chip->ms_card); int retval, i; u8 buf[8], tmp; u16 para; retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, Pro_TPCParm, 0x01); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); memset(buf, 0, 2); switch (short_data_len) { case 32: buf[0] = 0; break; case 64: buf[0] = 1; break; case 128: buf[0] = 2; break; case 256: default: buf[0] = 3; break; } for (i = 0; i < 
MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, PRO_WRITE_REG, 1, NO_WAIT_INT,
					buf, 2);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT)
		TRACE_RET(chip, STATUS_FAIL);

	/* Format command */
	if (quick_format)
		para = 0x0000;
	else
		para = 0x0001;
	retval = mspro_set_rw_cmd(chip, 0, para, PRO_FORMAT);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	/* Check INT */
	RTS51X_READ_REG(chip, MS_TRANS_CFG, &tmp);
	if (tmp & (MS_INT_CMDNK | MS_INT_ERR))
		TRACE_RET(chip, STATUS_FAIL);

	if ((tmp & (MS_INT_BREQ | MS_INT_CED)) == MS_INT_BREQ) {
		/* Format runs asynchronously; progress is polled later. */
		ms_card->pro_under_formatting = 1;
		ms_card->progress = 0;
		ms_card->format_status = FORMAT_IN_PROGRESS;
		return STATUS_SUCCESS;
	}

	if (tmp & MS_INT_CED) {
		/* Card finished immediately. */
		ms_card->pro_under_formatting = 0;
		ms_card->progress = 0;
		ms_card->format_status = FORMAT_SUCCESS;
		rts51x_set_sense_type(chip, SCSI_LUN(srb),
				      SENSE_TYPE_NO_SENSE);
		return STATUS_SUCCESS;
	}

	TRACE_RET(chip, STATUS_FAIL);
}

#ifdef MS_SPEEDUP
/*
 * ms_read_multiple_pages (MS_SPEEDUP variant) - read [start_page,
 * end_page) of a physical block in one hardware-sequenced multi-page
 * transfer.  On a mid-transfer failure the Fail path reads back the
 * remaining sector count and INT state to decide how to mark the page.
 *
 * buf/ptr/offset track the partial position inside the caller's
 * scatter-gather buffer across successive calls.
 */
static int ms_read_multiple_pages(struct rts51x_chip *chip, u16 phy_blk,
				  u16 log_blk, u8 start_page, u8 end_page,
				  u8 *buf, void **ptr, unsigned int *offset)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	int send_blkend;
	u8 extra[MS_EXTRA_SIZE], val1, val2, data[6];
	u8 page_cnt = end_page - start_page, page_addr, sec_cnt;

	/* BLKEND is signalled unless we read through the block's last page. */
	if (end_page != (ms_card->page_off + 1))
		send_blkend = 1;
	else
		send_blkend = 0;

	retval = ms_read_extra_data(chip, phy_blk, start_page, extra,
				    MS_EXTRA_SIZE);
	if (retval == STATUS_SUCCESS) {
		if ((extra[1] & 0x30) != 0x30) {
			ms_set_err_code(chip, MS_FLASH_READ_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	if (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		data[0] = 0x88;
	} else {
		/* Serial interface */
		data[0] = 0x80;
	}
	/* Block Address */
	data[1] = 0;
	data[2] = (u8) (phy_blk >> 8);
	data[3] = (u8) phy_blk;
	/* Page Number
	 * Extra data access mode */
	data[4] = 0;
	data[5] = start_page;

	retval = ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE,
				 SystemParm, 6, BLOCK_READ, WAIT_INT, data, 6,
				 &val1);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	/* Queue the multi-page DMA read command sequence. */
	rts51x_init_cmd(chip);

	if (send_blkend)
		rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BLKEND, SET_BLKEND,
			       SET_BLKEND);
	else
		rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BLKEND, SET_BLKEND, 0);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, WAIT_INT,
		       NO_WAIT_INT);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF,
		       (u8) page_cnt);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_H, 0xFF, 0);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA);

	rts51x_trans_dma_enable(DMA_FROM_DEVICE, chip, 512 * page_cnt,
				DMA_512);

	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
		       MS_TRANSFER_START | MS_TM_MULTI_READ);
	rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
		       MS_TRANSFER_END);

	retval = rts51x_send_cmd(chip, MODE_CDIR | STAGE_MS_STATUS, 100);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	retval = rts51x_transfer_data_partial(chip, RCV_BULK_PIPE(chip),
					      (void *)buf, ptr, offset,
					      512 * page_cnt,
					      scsi_sg_count(chip->srb), NULL,
					      2000);
	if (retval != STATUS_SUCCESS) {
		rts51x_clear_ms_error(chip);
		if (retval == STATUS_TIMEDOUT)
			TRACE_RET(chip, retval);
		TRACE_GOTO(chip, Fail);
	}

	retval = rts51x_get_rsp(chip, 3, 200);
	if (CHECK_MS_TRANS_FAIL(chip, retval)) {
		rts51x_clear_ms_error(chip);
		if (retval == STATUS_TIMEDOUT)
			TRACE_RET(chip, retval);
		TRACE_GOTO(chip, Fail);
	}

	return STATUS_SUCCESS;

Fail:
	/* Find out how many pages were left and which page failed. */
	rts51x_init_cmd(chip);
	rts51x_add_cmd(chip, READ_REG_CMD, MS_SECTOR_CNT_L, 0, 0);

	retval = rts51x_send_cmd(chip, MODE_CR | STAGE_MS_STATUS, 100);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	retval = rts51x_get_rsp(chip, 3, 200);
	if (CHECK_MS_TRANS_FAIL(chip, retval))
		TRACE_RET(chip, STATUS_FAIL);

	sec_cnt = chip->rsp_buf[0];
	RTS51X_DEBUGP("%d pages need be transferred, %d pages remained\n",
		      (int)page_cnt, (int)sec_cnt);
	page_addr = start_page + (page_cnt - sec_cnt);

	if (CHK_MS4BIT(ms_card)) {
		val1 = chip->rsp_buf[1];
		RTS51X_DEBUGP("MS_TRANS_CFG: 0x%x\n", val1);
	} else {
		val1 = 0;
	}

	val2 =
	    chip->rsp_buf[2];
	RTS51X_DEBUGP("GET_INT: 0x%x\n", val2);

	/* Classify the failure from the controller (val1) and card (val2)
	 * INT flags; for a real read error on a writable card the page is
	 * marked NG in its extra data. */
	if ((val1 & INT_CMDNK) || (val2 & INT_REG_CMDNK)) {
		ms_set_err_code(chip, MS_CMD_NK);
		TRACE_RET(chip, STATUS_FAIL);
	}

	if ((val1 & INT_ERR) || (val2 & INT_REG_ERR)) {
		if ((val1 & INT_BREQ) || (val2 & INT_REG_BREQ)) {
			retval = ms_read_status_reg(chip);
			if (retval != STATUS_SUCCESS) {
				if (!(chip->card_wp & MS_CARD)) {
					reset_ms(chip);
					ms_set_page_status(log_blk, setPS_NG,
							   extra,
							   MS_EXTRA_SIZE);
					ms_write_extra_data(chip, phy_blk,
							    page_addr, extra,
							    MS_EXTRA_SIZE);
				}
				ms_set_err_code(chip, MS_FLASH_READ_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
		} else {
			ms_set_err_code(chip, MS_FLASH_READ_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else {
		if (CHK_MS4BIT(ms_card)) {
			if (!(val1 & INT_BREQ) && !(val2 & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_BREQ_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
		} else {
			if (!(val2 & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_BREQ_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}
	}

	/* The Fail path always reports failure to the caller. */
	TRACE_RET(chip, STATUS_FAIL);
}

/*
 * ms_write_multiple_pages (MS_SPEEDUP variant) - write [start_page,
 * end_page) of new_blk in one hardware-sequenced multi-page transfer.
 * When starting at page 0 the old block is first opened for overwrite
 * (update-status 0xEF); the new block's extra data records log_blk.
 */
static int ms_write_multiple_pages(struct rts51x_chip *chip, u16 old_blk,
				   u16 new_blk, u16 log_blk, u8 start_page,
				   u8 end_page, u8 *buf, void **ptr,
				   unsigned int *offset)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	int send_blkend;
	u8 val, data[16];
	u8 page_cnt = end_page - start_page;

	/* No BLKEND when writing through the last page or a single page. */
	if ((end_page == (ms_card->page_off + 1)) || (page_cnt == 1))
		send_blkend = 0;
	else
		send_blkend = 1;

	if (!start_page) {
		if (CHK_MS4BIT(ms_card)) {
			/* Parallel interface */
			data[0] = 0x88;
		} else {
			/* Serial interface */
			data[0] = 0x80;
		}
		/* Block Address */
		data[1] = 0;
		data[2] = (u8) (old_blk >> 8);
		data[3] = (u8) old_blk;
		data[4] = 0x80;
		data[5] = 0;
		data[6] = 0xEF;
		data[7] = 0xFF;

		retval = ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE,
					 SystemParm, 7, BLOCK_WRITE, WAIT_INT,
					 data, 7, &val);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);
	}

	retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
				    SystemParm, (6 + MS_EXTRA_SIZE));
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	ms_set_err_code(chip, MS_NO_ERROR);

	if (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		data[0] = 0x88;
	} else {
		/* Serial interface */
		data[0] = 0x80;
	}
	/* Block Address */
	data[1] = 0;
	data[2] = (u8) (new_blk >> 8);
	data[3] = (u8) new_blk;
	/* Page Number
	 * Extra data access mode */
	if (page_cnt == 1) {
		/* Single page access mode */
		data[4] = 0x20;
	} else {
		/* Block access mode */
		data[4] = 0;
	}
	data[5] = start_page;
	data[6] = 0xF8;
	data[7] = 0xFF;
	/* Logical block number recorded in the extra data. */
	data[8] = (u8) (log_blk >> 8);
	data[9] = (u8) log_blk;

	for (i = 0x0A; i < 0x10; i++) {
		/* ECC */
		data[i] = 0xFF;
	}

	retval = ms_auto_set_cmd(chip, OverwriteFlag, MS_EXTRA_SIZE,
				 SystemParm, (6 + MS_EXTRA_SIZE), BLOCK_WRITE,
				 WAIT_INT, data, 16, &val);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	/* Queue the multi-page DMA write command sequence. */
	rts51x_init_cmd(chip);

	if (send_blkend)
		rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BLKEND, SET_BLKEND,
			       SET_BLKEND);
	else
		rts51x_add_cmd(chip, WRITE_REG_CMD, MS_BLKEND, SET_BLKEND, 0);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, WAIT_INT,
		       NO_WAIT_INT);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF,
		       (u8) page_cnt);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_H, 0xFF, 0);
	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, WRITE_PAGE_DATA);
	rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
		       RING_BUFFER);

	rts51x_trans_dma_enable(DMA_TO_DEVICE, chip, 512 * page_cnt, DMA_512);

	rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
		       MS_TRANSFER_START | MS_TM_MULTI_WRITE);
	rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END,
		       MS_TRANSFER_END);

	retval = rts51x_send_cmd(chip, MODE_CDOR | STAGE_MS_STATUS, 100);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	retval = rts51x_transfer_data_partial(chip, SND_BULK_PIPE(chip),
					      (void *)buf, ptr, offset,
					      512 * page_cnt,
					      scsi_sg_count(chip->srb), NULL,
					      2000);
	if (retval != STATUS_SUCCESS) {
		rts51x_clear_ms_error(chip);
		TRACE_RET(chip, retval);
	}

	retval = rts51x_get_rsp(chip, 3, 2000);
	if (CHECK_MS_TRANS_FAIL(chip, retval)) {
		rts51x_clear_ms_error(chip);
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}
#else
/*
 * ms_read_multiple_pages (page-by-page variant) - same contract as the
 * MS_SPEEDUP version, but drives BLOCK_READ manually: each page is a
 * separate INT poll + 512-byte DMA transfer, with BLOCK_END sent before
 * the final page if the card has not yet signalled command end (CED).
 */
static int ms_read_multiple_pages(struct rts51x_chip *chip, u16 phy_blk,
				  u16 log_blk, u8 start_page, u8 end_page,
				  u8 *buf, void **ptr, unsigned int *offset)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u8 extra[MS_EXTRA_SIZE], page_addr, val, trans_cfg, data[6];

	retval = ms_read_extra_data(chip, phy_blk, start_page,
				    extra, MS_EXTRA_SIZE);
	if (retval == STATUS_SUCCESS) {
		if ((extra[1] & 0x30) != 0x30) {
			ms_set_err_code(chip, MS_FLASH_READ_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
				    SystemParm, 6);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	/* Write REG */
	if (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		data[0] = 0x88;
	} else {
		/* Serial interface */
		data[0] = 0x80;
	}
	/* Block Address */
	data[1] = 0;
	data[2] = (u8) (phy_blk >> 8);
	data[3] = (u8) phy_blk;
	/* Page Number
	 * Extra data access mode */
	data[4] = 0;
	data[5] = start_page;

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
					data, 6);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT)
		TRACE_RET(chip, STATUS_FAIL);

	ms_set_err_code(chip, MS_NO_ERROR);
	retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	for (page_addr = start_page; page_addr < end_page; page_addr++) {
		ms_set_err_code(chip, MS_NO_ERROR);

		/* Bail out early if the card was pulled mid-transfer. */
		if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST) {
			ms_set_err_code(chip, MS_NO_CARD);
			chip->card_exist &= ~MS_CARD;
			chip->card_ready &= ~MS_CARD;
			TRACE_RET(chip, STATUS_FAIL);
		}

		/* GET_INT Register */
		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		if (val & INT_REG_CMDNK) {
			ms_set_err_code(chip, MS_CMD_NK);
			TRACE_RET(chip, STATUS_FAIL);
		}
		if (val & INT_REG_ERR) {
			if (val & INT_REG_BREQ) {
				/* Recoverable error? Check the status reg;
				 * if unreadable, mark the page NG (when not
				 * write-protected) and fail. */
				retval = ms_read_status_reg(chip);
				if (retval != STATUS_SUCCESS) {
					if (!(chip->card_wp & MS_CARD)) {
						reset_ms(chip);
						ms_set_page_status(log_blk,
								   setPS_NG,
								   extra,
								   MS_EXTRA_SIZE);
						ms_write_extra_data(chip,
								    phy_blk,
								    page_addr,
								    extra,
								    MS_EXTRA_SIZE);
					}
					ms_set_err_code(chip,
							MS_FLASH_READ_ERROR);
					TRACE_RET(chip, STATUS_FAIL);
				}
			} else {
				ms_set_err_code(chip, MS_FLASH_READ_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
		} else {
			if (!(val & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_BREQ_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}

		if (page_addr == (end_page - 1)) {
			/* Last page: close the command (BLOCK_END) if the
			 * card has not signalled CED yet, then confirm CED. */
			if (!(val & INT_REG_CED)) {
				retval = ms_send_cmd(chip, BLOCK_END,
						     WAIT_INT);
				if (retval != STATUS_SUCCESS)
					TRACE_RET(chip, retval);
			}
			retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
					       &val, 1);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, retval);
			if (!(val & INT_REG_CED)) {
				ms_set_err_code(chip, MS_FLASH_READ_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}

			trans_cfg = NO_WAIT_INT;
		} else {
			trans_cfg = WAIT_INT;
		}

		/* One 512-byte page DMA transfer. */
		rts51x_init_cmd(chip);

		rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF,
			       READ_PAGE_DATA);
		rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF,
			       trans_cfg);

		rts51x_trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512);

		rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
			       MS_TRANSFER_START | MS_TM_NORMAL_READ);
		rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
			       MS_TRANSFER_END, MS_TRANSFER_END);

		retval = rts51x_send_cmd(chip, MODE_CDIR, 100);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		retval = rts51x_transfer_data_partial(chip,
						      RCV_BULK_PIPE(chip),
						      (void *)buf, ptr,
						      offset, 512,
						      scsi_sg_count(chip->
								    srb),
						      NULL, 2000);
		if (retval != STATUS_SUCCESS) {
			if (retval == STATUS_TIMEDOUT) {
				ms_set_err_code(chip, MS_TO_ERROR);
				rts51x_clear_ms_error(chip);
				TRACE_RET(chip, retval);
			}

			retval = rts51x_ep0_read_register(chip, MS_TRANS_CFG,
							  &val);
			if (retval != STATUS_SUCCESS) {
				ms_set_err_code(chip, MS_TO_ERROR);
				rts51x_clear_ms_error(chip);
				TRACE_RET(chip, STATUS_FAIL);
			}

			if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
				ms_set_err_code(chip, MS_CRC16_ERROR);
				rts51x_clear_ms_error(chip);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}

		retval = rts51x_get_rsp(chip, 1, 2000);
		if (CHECK_MS_TRANS_FAIL(chip, retval)) {
			if (retval == STATUS_TIMEDOUT) {
				ms_set_err_code(chip, MS_TO_ERROR);
				rts51x_clear_ms_error(chip);
				TRACE_RET(chip, retval);
			}

			retval = rts51x_ep0_read_register(chip, MS_TRANS_CFG,
							  &val);
			if (retval != STATUS_SUCCESS) {
				ms_set_err_code(chip, MS_TO_ERROR);
				rts51x_clear_ms_error(chip);
				TRACE_RET(chip, retval);
			}

			if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
				ms_set_err_code(chip, MS_CRC16_ERROR);
				rts51x_clear_ms_error(chip);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}
	}

	return STATUS_SUCCESS;
}

/*
 * ms_write_multiple_pages (page-by-page variant) - same contract as the
 * MS_SPEEDUP version: copy-on-write pages [start_page, end_page) of
 * log_blk into new_blk, opening old_blk for overwrite when starting at
 * page 0.  Each page is an INT poll + 512-byte DMA write.
 */
static int ms_write_multiple_pages(struct rts51x_chip *chip, u16 old_blk,
				   u16 new_blk, u16 log_blk, u8 start_page,
				   u8 end_page, u8 *buf, void **ptr,
				   unsigned int *offset)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u8 page_addr, val, data[16];

	if (!start_page) {
		/* Open the old block for overwrite (update status 0xEF). */
		retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
					    MS_EXTRA_SIZE, SystemParm, 7);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		if (CHK_MS4BIT(ms_card)) {
			/* Parallel interface */
			data[0] = 0x88;
		} else {
			/* Serial interface */
			data[0] = 0x80;
		}
		/* Block Address */
		data[1] = 0;
		data[2] = (u8) (old_blk >> 8);
		data[3] = (u8) old_blk;
		data[4] = 0x80;
		data[5] = 0;
		data[6] = 0xEF;
		data[7] = 0xFF;

		retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT,
					data, 8);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		/* GET_INT Register */
		ms_set_err_code(chip, MS_NO_ERROR);
		retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
					 NO_WAIT_INT);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);
	}

	retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
				    SystemParm, (6 + MS_EXTRA_SIZE));
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	ms_set_err_code(chip, MS_NO_ERROR);

	if (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		data[0] = 0x88;
	} else {
		/* Serial interface */
		data[0] = 0x80;
	}
	/* Block Address */
	data[1] = 0;
	data[2] = (u8) (new_blk >> 8);
	data[3] = (u8) new_blk;
	/* Page Number
	 * Extra data access mode */
	if ((end_page - start_page) == 1) {
		/* Single page access mode */
		data[4] = 0x20;
	} else {
		/* Block access mode */
		data[4] = 0;
	}
	data[5] = start_page;
	data[6] = 0xF8;
	data[7] = 0xFF;
	/* Logical block number recorded in the extra data. */
	data[8] = (u8) (log_blk >> 8);
	data[9] = (u8) log_blk;

	for (i = 0x0A; i < 0x10; i++) {
		/* ECC */
		data[i] = 0xFF;
	}

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, WRITE_REG, 6 + MS_EXTRA_SIZE,
					NO_WAIT_INT, data, 16);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT)
		TRACE_RET(chip, STATUS_FAIL);

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT)
		TRACE_RET(chip, STATUS_FAIL);

	/* GET_INT Register */
	retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	for (page_addr = start_page; page_addr < end_page; page_addr++) {
		ms_set_err_code(chip, MS_NO_ERROR);

		if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST) {
			ms_set_err_code(chip, MS_NO_CARD);
			TRACE_RET(chip, STATUS_FAIL);
		}

		/* 'val' carries the INT state read after the previous page
		 * (or after BLOCK_WRITE for the first iteration). */
		if (val & INT_REG_CMDNK) {
			ms_set_err_code(chip, MS_CMD_NK);
			TRACE_RET(chip, STATUS_FAIL);
		}
		if (val & INT_REG_ERR) {
			ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
		if (!(val & INT_REG_BREQ)) {
			ms_set_err_code(chip, MS_BREQ_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}

		udelay(30);

		/* One 512-byte page DMA write. */
		rts51x_init_cmd(chip);

		rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF,
			       WRITE_PAGE_DATA);
		rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF,
			       WAIT_INT);

		rts51x_trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);

		rts51x_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
			       MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
		rts51x_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
			       MS_TRANSFER_END, MS_TRANSFER_END);

		retval = rts51x_send_cmd(chip, MODE_CDOR,
					 100);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		retval = rts51x_transfer_data_partial(chip,
						      SND_BULK_PIPE(chip),
						      (void *)buf, ptr,
						      offset, 512,
						      scsi_sg_count(chip->
								    srb),
						      NULL, 2000);
		if (retval != STATUS_SUCCESS) {
			ms_set_err_code(chip, MS_TO_ERROR);
			rts51x_clear_ms_error(chip);

			if (retval == STATUS_TIMEDOUT)
				TRACE_RET(chip, STATUS_TIMEDOUT);
			else
				TRACE_RET(chip, STATUS_FAIL);
		}

		retval = rts51x_get_rsp(chip, 1, 2000);
		if (CHECK_MS_TRANS_FAIL(chip, retval)) {
			ms_set_err_code(chip, MS_TO_ERROR);
			rts51x_clear_ms_error(chip);

			if (retval == STATUS_TIMEDOUT)
				TRACE_RET(chip, STATUS_TIMEDOUT);
			else
				TRACE_RET(chip, STATUS_FAIL);
		}

		/* GET_INT Register */
		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		if ((end_page - start_page) == 1) {
			if (!(val & INT_REG_CED)) {
				/* Command can not be executed */
				ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
		} else {
			if (page_addr == (end_page - 1)) {
				/* Close the block write if the card has not
				 * signalled command end yet. */
				if (!(val & INT_REG_CED)) {
					retval = ms_send_cmd(chip, BLOCK_END,
							     WAIT_INT);
					if (retval != STATUS_SUCCESS)
						TRACE_RET(chip, retval);
				}

				/* GET_INT Register */
				retval = ms_read_bytes(chip, GET_INT, 1,
						       NO_WAIT_INT, &val, 1);
				if (retval != STATUS_SUCCESS)
					TRACE_RET(chip, retval);
			}

			if ((page_addr == (end_page - 1)) ||
			    (page_addr == ms_card->page_off)) {
				if (!(val & INT_REG_CED)) {
					ms_set_err_code(chip,
							MS_FLASH_WRITE_ERROR);
					TRACE_RET(chip, STATUS_FAIL);
				}
			}
		}
	}

	return STATUS_SUCCESS;
}
#endif

/*
 * ms_finish_write - complete a copy-on-write update: copy the remaining
 * pages (page_off..end) from old_blk into new_blk, then retire old_blk
 * (mark bad if the bad-block flag is set, otherwise erase and return it
 * to the unused pool) and point the L2P entry at new_blk.
 */
static int ms_finish_write(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
			   u16 log_blk, u8 page_off)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, seg_no;

#ifdef MS_SPEEDUP
	retval = ms_auto_copy_page(chip, old_blk, new_blk, log_blk,
				   page_off, ms_card->page_off + 1);
#else
	retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
			      page_off, ms_card->page_off + 1);
#endif
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, retval);

	/* 512 physical blocks per segment. */
	seg_no = old_blk >> 9;

	if (MS_TST_BAD_BLOCK_FLG(ms_card)) {
		MS_CLR_BAD_BLOCK_FLG(ms_card);
		ms_set_bad_block(chip, old_blk);
	} else {
		retval = ms_erase_block(chip, old_blk);
		if (retval == STATUS_SUCCESS)
			ms_set_unused_block(chip, old_blk);
	}

	ms_set_l2p_tbl(chip, seg_no, log_blk - ms_start_idx[seg_no], new_blk);

	return STATUS_SUCCESS;
}

/*
 * ms_prepare_write - when a write starts mid-block, first copy pages
 * [0, start_page) from the old block into the new one.
 */
static int ms_prepare_write(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
			    u16 log_blk, u8 start_page)
{
	int retval;

	if (start_page) {
#ifdef MS_SPEEDUP
		retval = ms_auto_copy_page(chip, old_blk, new_blk, log_blk,
					   0, start_page);
#else
		retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
				      0, start_page);
#endif
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);
	}

	return STATUS_SUCCESS;
}

/*
 * rts51x_ms_delay_write - flush a deferred (delayed) write left over from
 * a previous request: finish copying the tail pages and update the L2P
 * table.  No-op when no delayed write is pending.
 */
int rts51x_ms_delay_write(struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct rts51x_ms_delay_write_tag *delay_write =
	    &(ms_card->delay_write);
	int retval;

	if (delay_write->delay_write_flag) {
		retval = ms_set_init_para(chip);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);

		delay_write->delay_write_flag = 0;
		retval = ms_finish_write(chip,
					 delay_write->old_phyblock,
					 delay_write->new_phyblock,
					 delay_write->logblock,
					 delay_write->pageoff);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, retval);
	}

	return STATUS_SUCCESS;
}

/*
 * rts51x_ms_rw_fail - set the SCSI sense appropriate for the failed
 * direction (unrecovered read vs. write error).
 */
static inline void rts51x_ms_rw_fail(struct scsi_cmnd *srb,
				     struct rts51x_chip *chip)
{
	if (srb->sc_data_direction == DMA_FROM_DEVICE)
		rts51x_set_sense_type(chip, SCSI_LUN(srb),
				      SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
	else
		rts51x_set_sense_type(chip, SCSI_LUN(srb),
				      SENSE_TYPE_MEDIA_WRITE_ERR);
}

/*
 * rts51x_ms_rw_multi_sector - multi-sector read/write for legacy (non-Pro)
 * Memory Stick: translate sectors to logical blocks/pages, build L2P
 * tables on demand, and for writes perform copy-on-write into a fresh
 * physical block, deferring the tail copy via the delay-write mechanism
 * when a write ends mid-block.
 */
static int rts51x_ms_rw_multi_sector(struct scsi_cmnd *srb,
				     struct rts51x_chip *chip,
				     u32 start_sector, u16 sector_cnt)
{
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	int retval, seg_no;
	unsigned int offset = 0;
	u16 old_blk = 0, new_blk = 0, log_blk, total_sec_cnt = sector_cnt;
	u8 start_page, end_page = 0, page_cnt;
	u8 *buf;
	void *ptr = NULL;
	struct rts51x_ms_delay_write_tag *delay_write =
	    &(ms_card->delay_write);
	ms_set_err_code(chip, MS_NO_ERROR);

	ms_card->counter = 0;

	buf = (u8 *) scsi_sglist(srb);

	retval = ms_switch_clock(chip);
	if (retval != STATUS_SUCCESS) {
		rts51x_ms_rw_fail(srb, chip);
		TRACE_RET(chip, retval);
	}

	/* Split the sector address into logical block + page within block. */
	log_blk = (u16) (start_sector >> ms_card->block_shift);
	start_page = (u8) (start_sector & ms_card->page_off);

	for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1; seg_no++) {
		if (log_blk < ms_start_idx[seg_no + 1])
			break;
	}

	if (ms_card->segment[seg_no].build_flag == 0) {
		retval = ms_build_l2p_tbl(chip, seg_no);
		if (retval != STATUS_SUCCESS) {
			chip->card_fail |= MS_CARD;
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_NOT_PRESENT);
			TRACE_RET(chip, retval);
		}
	}

	if (srb->sc_data_direction == DMA_TO_DEVICE) {
		if (delay_write->delay_write_flag &&
		    (delay_write->logblock == log_blk) &&
		    (start_page > delay_write->pageoff)) {
			/* Pending delayed write on the same block, and this
			 * request starts beyond it: bridge the gap pages,
			 * then continue in the already-allocated new block. */
			delay_write->delay_write_flag = 0;
#ifdef MS_SPEEDUP
			retval = ms_auto_copy_page(chip,
						   delay_write->old_phyblock,
						   delay_write->new_phyblock,
						   log_blk,
						   delay_write->pageoff,
						   start_page);
#else
			retval = ms_copy_page(chip, delay_write->old_phyblock,
					      delay_write->new_phyblock,
					      log_blk, delay_write->pageoff,
					      start_page);
#endif
			if (retval != STATUS_SUCCESS) {
				rts51x_set_sense_type(chip, lun,
						      SENSE_TYPE_MEDIA_WRITE_ERR);
				TRACE_RET(chip, retval);
			}
			old_blk = delay_write->old_phyblock;
			new_blk = delay_write->new_phyblock;
		} else if (delay_write->delay_write_flag &&
			   (delay_write->logblock == log_blk) &&
			   (start_page == delay_write->pageoff)) {
			/* Exact continuation of the delayed write. */
			delay_write->delay_write_flag = 0;
			old_blk = delay_write->old_phyblock;
			new_blk = delay_write->new_phyblock;
		} else {
			/* Unrelated write: flush any pending delayed write,
			 * then allocate a fresh block for copy-on-write. */
			retval = rts51x_ms_delay_write(chip);
			if (retval != STATUS_SUCCESS) {
				rts51x_set_sense_type(chip, lun,
						      SENSE_TYPE_MEDIA_WRITE_ERR);
				TRACE_RET(chip, retval);
			}
			old_blk = ms_get_l2p_tbl(chip, seg_no,
						 log_blk -
						 ms_start_idx[seg_no]);
			new_blk = ms_get_unused_block(chip, seg_no);
			if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) {
				rts51x_set_sense_type(chip, lun,
						      SENSE_TYPE_MEDIA_WRITE_ERR);
				TRACE_RET(chip, STATUS_FAIL);
			}

			retval = ms_prepare_write(chip, old_blk, new_blk,
						  log_blk, start_page);
			if (retval != STATUS_SUCCESS) {
				if (monitor_card_cd(chip, MS_CARD) ==
				    CD_NOT_EXIST) {
					rts51x_set_sense_type(chip, lun,
							      SENSE_TYPE_MEDIA_NOT_PRESENT);
					TRACE_RET(chip, STATUS_FAIL);
				}
				rts51x_set_sense_type(chip, lun,
						      SENSE_TYPE_MEDIA_WRITE_ERR);
				TRACE_RET(chip, retval);
			}
		}
	} else {
		/* Read path: flush a pending delayed write first so the L2P
		 * table is consistent. */
		retval = rts51x_ms_delay_write(chip);
		if (retval != STATUS_SUCCESS) {
			if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST) {
				rts51x_set_sense_type(chip, lun,
						      SENSE_TYPE_MEDIA_NOT_PRESENT);
				TRACE_RET(chip, STATUS_FAIL);
			}
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			TRACE_RET(chip, retval);
		}
		old_blk = ms_get_l2p_tbl(chip, seg_no,
					 log_blk - ms_start_idx[seg_no]);
		if (old_blk == 0xFFFF) {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	RTS51X_DEBUGP("seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n",
		      seg_no, old_blk, new_blk);

	/* Transfer block by block, clamping each chunk to the block end. */
	while (total_sec_cnt) {
		if ((start_page + total_sec_cnt) > (ms_card->page_off + 1))
			end_page = ms_card->page_off + 1;
		else
			end_page = start_page + (u8) total_sec_cnt;
		page_cnt = end_page - start_page;

		RTS51X_DEBUGP("start_page = %d, end_page = %d,"
			      "page_cnt = %d\n", start_page, end_page,
			      page_cnt);

		if (srb->sc_data_direction == DMA_FROM_DEVICE)
			retval = ms_read_multiple_pages(chip, old_blk,
							log_blk, start_page,
							end_page, buf, &ptr,
							&offset);
		else
			retval = ms_write_multiple_pages(chip, old_blk,
							 new_blk, log_blk,
							 start_page, end_page,
							 buf, &ptr, &offset);

		if (retval != STATUS_SUCCESS) {
			if (monitor_card_cd(chip, MS_CARD) == CD_NOT_EXIST) {
				rts51x_set_sense_type(chip, lun,
						      SENSE_TYPE_MEDIA_NOT_PRESENT);
				TRACE_RET(chip, STATUS_FAIL);
			}
			rts51x_ms_rw_fail(srb, chip);
			TRACE_RET(chip, retval);
		}

		/* Update L2P table if need */
		if (srb->sc_data_direction == DMA_TO_DEVICE) {
			if (end_page == (ms_card->page_off + 1)) {
				/* Block fully rewritten: retire the old
				 * block and remap the logical block. */
				retval = ms_erase_block(chip, old_blk);
				if (retval == STATUS_SUCCESS)
					ms_set_unused_block(chip, old_blk);
				ms_set_l2p_tbl(chip, seg_no,
					       log_blk -
					       ms_start_idx[seg_no],
					       new_blk);
			}
		}

		total_sec_cnt -= page_cnt;
		if (total_sec_cnt == 0)
			break;

		/* Advance to the next logical block (possibly crossing a
		 * segment boundary that needs its table built). */
		log_blk++;

		for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1;
		     seg_no++) {
			if (log_blk < ms_start_idx[seg_no + 1])
				break;
		}

		if (ms_card->segment[seg_no].build_flag == 0) {
			retval = ms_build_l2p_tbl(chip, seg_no);
			if (retval != STATUS_SUCCESS) {
				chip->card_fail |= MS_CARD;
				rts51x_set_sense_type(chip, lun,
						      SENSE_TYPE_MEDIA_NOT_PRESENT);
				TRACE_RET(chip, retval);
			}
		}

		old_blk = ms_get_l2p_tbl(chip, seg_no,
					 log_blk - ms_start_idx[seg_no]);
		if (old_blk == 0xFFFF) {
			rts51x_ms_rw_fail(srb, chip);
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (srb->sc_data_direction == DMA_TO_DEVICE) {
			new_blk = ms_get_unused_block(chip, seg_no);
			if (new_blk == 0xFFFF) {
				rts51x_ms_rw_fail(srb, chip);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}

		RTS51X_DEBUGP("seg_no = %d, old_blk = 0x%x, "
			      "new_blk = 0x%x\n", seg_no, old_blk, new_blk);

		start_page = 0;
	}

	if (srb->sc_data_direction == DMA_TO_DEVICE) {
		if (end_page < (ms_card->page_off + 1)) {
			/* Write ended mid-block: defer copying the tail
			 * pages until the next request (delay write). */
			delay_write->delay_write_flag = 1;
			delay_write->old_phyblock = old_blk;
			delay_write->new_phyblock = new_blk;
			delay_write->logblock = log_blk;
			delay_write->pageoff = end_page;
		}
	}

	scsi_set_resid(srb, 0);

	return STATUS_SUCCESS;
}

/*
 * rts51x_ms_rw - top-level read/write dispatch: MS Pro cards use the
 * direct sector interface, legacy cards go through the L2P translation
 * layer.
 */
int rts51x_ms_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip,
		 u32 start_sector, u16 sector_cnt)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;

	if (CHK_MSPRO(ms_card))
		retval = mspro_rw_multi_sector(srb, chip, start_sector,
					       sector_cnt);
	else
		retval = rts51x_ms_rw_multi_sector(srb, chip, start_sector,
						   sector_cnt);

	return retval;
}

/*
 * rts51x_ms_free_l2p_tbl - release every segment's vmalloc'ed L2P and
 * free tables.  (Continues beyond this view.)
 */
void rts51x_ms_free_l2p_tbl(struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int i = 0;

	if (ms_card->segment != NULL) {
		for (i = 0; i < ms_card->segment_cnt; i++) {
			if (ms_card->segment[i].l2p_table != NULL) {
				vfree(ms_card->segment[i].l2p_table);
				ms_card->segment[i].l2p_table = NULL;
			}
			if (ms_card->segment[i].free_table !=
NULL) { vfree(ms_card->segment[i].free_table); ms_card->segment[i].free_table = NULL; } } vfree(ms_card->segment); ms_card->segment = NULL; } } void rts51x_ms_cleanup_work(struct rts51x_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); if (CHK_MSPRO(ms_card)) { if (ms_card->seq_mode) { RTS51X_DEBUGP("MS Pro: stop transmission\n"); mspro_stop_seq_mode(chip); ms_card->counter = 0; } if (CHK_MSHG(ms_card)) { u8 value; rts51x_read_register(chip, MS_CFG, &value); if (value & MS_2K_SECTOR_MODE) rts51x_write_register(chip, MS_CFG, MS_2K_SECTOR_MODE, 0x00); } } else if ((!CHK_MSPRO(ms_card)) && ms_card->delay_write.delay_write_flag) { RTS51X_DEBUGP("MS: delay write\n"); rts51x_ms_delay_write(chip); ms_card->counter = 0; } } static int ms_power_off_card3v3(struct rts51x_chip *chip) { int retval; rts51x_init_cmd(chip); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0); if (chip->asic_code) ms_pull_ctl_disable(chip); else rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, FPGA_MS_PULL_CTL_BIT | 0x20, FPGA_MS_PULL_CTL_BIT); rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0); if (!chip->option.FT2_fast_mode) { rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK, POWER_OFF); } retval = rts51x_send_cmd(chip, MODE_C, 100); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); return STATUS_SUCCESS; } int rts51x_release_ms_card(struct rts51x_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; RTS51X_DEBUGP("rts51x_release_ms_card\n"); ms_card->delay_write.delay_write_flag = 0; ms_card->pro_under_formatting = 0; chip->card_ready &= ~MS_CARD; chip->card_fail &= ~MS_CARD; chip->card_wp &= ~MS_CARD; rts51x_ms_free_l2p_tbl(chip); rts51x_write_register(chip, SFSM_ED, HW_CMD_STOP, HW_CMD_STOP); memset(ms_card->raw_sys_info, 0, 96); #ifdef SUPPORT_PCGL_1P18 memset(ms_card->raw_model_name, 0, 48); #endif retval = ms_power_off_card3v3(chip); if (retval != STATUS_SUCCESS) TRACE_RET(chip, retval); return STATUS_SUCCESS; }
gpl-2.0
Flinny/kernel_htc_msm8994
drivers/net/ethernet/dec/tulip/media.c
2481
16702
/* drivers/net/ethernet/dec/tulip/media.c Copyright 2000,2001 The Linux Kernel Team Written/copyright 1994-2001 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Please submit bugs to http://bugzilla.kernel.org/ . */ #include <linux/kernel.h> #include <linux/mii.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include "tulip.h" /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back PCI I/O cycles, but we insert a delay to avoid "overclocking" issues or future 66Mhz PCI. */ #define mdio_delay() ioread32(mdio_addr) /* Read and write the MII registers using software-generated serial MDIO protocol. It is just different enough from the EEPROM protocol to not share code. The maxium data clock rate is 2.5 Mhz. */ #define MDIO_SHIFT_CLK 0x10000 #define MDIO_DATA_WRITE0 0x00000 #define MDIO_DATA_WRITE1 0x20000 #define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */ #define MDIO_ENB_IN 0x40000 #define MDIO_DATA_READ 0x80000 static const unsigned char comet_miireg2offset[32] = { 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0, 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, }; /* MII transceiver control section. Read and write the MII registers using software-generated serial MDIO protocol. See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management functions") or DP83840A data sheet for more details. 
*/ int tulip_mdio_read(struct net_device *dev, int phy_id, int location) { struct tulip_private *tp = netdev_priv(dev); int i; int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location; int retval = 0; void __iomem *ioaddr = tp->base_addr; void __iomem *mdio_addr = ioaddr + CSR9; unsigned long flags; if (location & ~0x1f) return 0xffff; if (tp->chip_id == COMET && phy_id == 30) { if (comet_miireg2offset[location]) return ioread32(ioaddr + comet_miireg2offset[location]); return 0xffff; } spin_lock_irqsave(&tp->mii_lock, flags); if (tp->chip_id == LC82C168) { iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0); ioread32(ioaddr + 0xA0); ioread32(ioaddr + 0xA0); for (i = 1000; i >= 0; --i) { barrier(); if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000)) break; } spin_unlock_irqrestore(&tp->mii_lock, flags); return retval & 0xffff; } /* Establish sync by sending at least 32 logic ones. */ for (i = 32; i >= 0; i--) { iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Shift the read command bits out. */ for (i = 15; i >= 0; i--) { int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; iowrite32(MDIO_ENB | dataval, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Read the two transition, 16 data, and wire-idle bits. */ for (i = 19; i > 0; i--) { iowrite32(MDIO_ENB_IN, mdio_addr); mdio_delay(); retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 
1 : 0); iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } spin_unlock_irqrestore(&tp->mii_lock, flags); return (retval>>1) & 0xffff; } void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val) { struct tulip_private *tp = netdev_priv(dev); int i; int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff); void __iomem *ioaddr = tp->base_addr; void __iomem *mdio_addr = ioaddr + CSR9; unsigned long flags; if (location & ~0x1f) return; if (tp->chip_id == COMET && phy_id == 30) { if (comet_miireg2offset[location]) iowrite32(val, ioaddr + comet_miireg2offset[location]); return; } spin_lock_irqsave(&tp->mii_lock, flags); if (tp->chip_id == LC82C168) { iowrite32(cmd, ioaddr + 0xA0); for (i = 1000; i >= 0; --i) { barrier(); if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000)) break; } spin_unlock_irqrestore(&tp->mii_lock, flags); return; } /* Establish sync by sending 32 logic ones. */ for (i = 32; i >= 0; i--) { iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; iowrite32(MDIO_ENB | dataval, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Clear out extra bits. */ for (i = 2; i > 0; i--) { iowrite32(MDIO_ENB_IN, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } spin_unlock_irqrestore(&tp->mii_lock, flags); } /* Set up the transceiver control registers for the selected media type. 
*/ void tulip_select_media(struct net_device *dev, int startup) { struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; struct mediatable *mtable = tp->mtable; u32 new_csr6; int i; if (mtable) { struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index]; unsigned char *p = mleaf->leafdata; switch (mleaf->type) { case 0: /* 21140 non-MII xcvr. */ if (tulip_debug > 1) netdev_dbg(dev, "Using a 21140 non-MII transceiver with control setting %02x\n", p[1]); dev->if_port = p[0]; if (startup) iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); iowrite32(p[1], ioaddr + CSR12); new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18); break; case 2: case 4: { u16 setup[5]; u32 csr13val, csr14val, csr15dir, csr15val; for (i = 0; i < 5; i++) setup[i] = get_u16(&p[i*2 + 1]); dev->if_port = p[0] & MEDIA_MASK; if (tulip_media_cap[dev->if_port] & MediaAlwaysFD) tp->full_duplex = 1; if (startup && mtable->has_reset) { struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; unsigned char *rst = rleaf->leafdata; if (tulip_debug > 1) netdev_dbg(dev, "Resetting the transceiver\n"); for (i = 0; i < rst[0]; i++) iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); } if (tulip_debug > 1) netdev_dbg(dev, "21143 non-MII %s transceiver control %04x/%04x\n", medianame[dev->if_port], setup[0], setup[1]); if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. 
*/ csr13val = setup[0]; csr14val = setup[1]; csr15dir = (setup[3]<<16) | setup[2]; csr15val = (setup[4]<<16) | setup[2]; iowrite32(0, ioaddr + CSR13); iowrite32(csr14val, ioaddr + CSR14); iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ iowrite32(csr15val, ioaddr + CSR15); /* Data */ iowrite32(csr13val, ioaddr + CSR13); } else { csr13val = 1; csr14val = 0; csr15dir = (setup[0]<<16) | 0x0008; csr15val = (setup[1]<<16) | 0x0008; if (dev->if_port <= 4) csr14val = t21142_csr14[dev->if_port]; if (startup) { iowrite32(0, ioaddr + CSR13); iowrite32(csr14val, ioaddr + CSR14); } iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ iowrite32(csr15val, ioaddr + CSR15); /* Data */ if (startup) iowrite32(csr13val, ioaddr + CSR13); } if (tulip_debug > 1) netdev_dbg(dev, "Setting CSR15 to %08x/%08x\n", csr15dir, csr15val); if (mleaf->type == 4) new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18); else new_csr6 = 0x82420000; break; } case 1: case 3: { int phy_num = p[0]; int init_length = p[1]; u16 *misc_info, tmp_info; dev->if_port = 11; new_csr6 = 0x020E0000; if (mleaf->type == 3) { /* 21142 */ u16 *init_sequence = (u16*)(p+2); u16 *reset_sequence = &((u16*)(p+3))[init_length]; int reset_length = p[2 + init_length*2]; misc_info = reset_sequence + reset_length; if (startup) { int timeout = 10; /* max 1 ms */ for (i = 0; i < reset_length; i++) iowrite32(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15); /* flush posted writes */ ioread32(ioaddr + CSR15); /* Sect 3.10.3 in DP83840A.pdf (p39) */ udelay(500); /* Section 4.2 in DP83840A.pdf (p43) */ /* and IEEE 802.3 "22.2.4.1.1 Reset" */ while (timeout-- && (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) udelay(100); } for (i = 0; i < init_length; i++) iowrite32(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15); ioread32(ioaddr + CSR15); /* flush posted writes */ } else { u8 *init_sequence = p + 2; u8 *reset_sequence = p + 3 + init_length; int reset_length = p[2 + init_length]; misc_info = (u16*)(reset_sequence + 
reset_length); if (startup) { int timeout = 10; /* max 1 ms */ iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); for (i = 0; i < reset_length; i++) iowrite32(reset_sequence[i], ioaddr + CSR12); /* flush posted writes */ ioread32(ioaddr + CSR12); /* Sect 3.10.3 in DP83840A.pdf (p39) */ udelay(500); /* Section 4.2 in DP83840A.pdf (p43) */ /* and IEEE 802.3 "22.2.4.1.1 Reset" */ while (timeout-- && (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) udelay(100); } for (i = 0; i < init_length; i++) iowrite32(init_sequence[i], ioaddr + CSR12); ioread32(ioaddr + CSR12); /* flush posted writes */ } tmp_info = get_u16(&misc_info[1]); if (tmp_info) tp->advertising[phy_num] = tmp_info | 1; if (tmp_info && startup < 2) { if (tp->mii_advertise == 0) tp->mii_advertise = tp->advertising[phy_num]; if (tulip_debug > 1) netdev_dbg(dev, " Advertising %04x on MII %d\n", tp->mii_advertise, tp->phys[phy_num]); tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise); } break; } case 5: case 6: { u16 setup[5]; new_csr6 = 0; /* FIXME */ for (i = 0; i < 5; i++) setup[i] = get_u16(&p[i*2 + 1]); if (startup && mtable->has_reset) { struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; unsigned char *rst = rleaf->leafdata; if (tulip_debug > 1) netdev_dbg(dev, "Resetting the transceiver\n"); for (i = 0; i < rst[0]; i++) iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); } break; } default: netdev_dbg(dev, " Invalid media table selection %d\n", mleaf->type); new_csr6 = 0x020E0000; } if (tulip_debug > 1) netdev_dbg(dev, "Using media type %s, CSR12 is %02x\n", medianame[dev->if_port], ioread32(ioaddr + CSR12) & 0xff); } else if (tp->chip_id == LC82C168) { if (startup && ! tp->medialock) dev->if_port = tp->mii_cnt ? 
11 : 0; if (tulip_debug > 1) netdev_dbg(dev, "PNIC PHY status is %3.3x, media %s\n", ioread32(ioaddr + 0xB8), medianame[dev->if_port]); if (tp->mii_cnt) { new_csr6 = 0x810C0000; iowrite32(0x0001, ioaddr + CSR15); iowrite32(0x0201B07A, ioaddr + 0xB8); } else if (startup) { /* Start with 10mbps to do autonegotiation. */ iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; iowrite32(0x0001B078, ioaddr + 0xB8); iowrite32(0x0201B078, ioaddr + 0xB8); } else if (dev->if_port == 3 || dev->if_port == 5) { iowrite32(0x33, ioaddr + CSR12); new_csr6 = 0x01860000; /* Trigger autonegotiation. */ iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8); } else { iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; iowrite32(0x1F078, ioaddr + 0xB8); } } else { /* Unknown chip type with no media table. */ if (tp->default_port == 0) dev->if_port = tp->mii_cnt ? 11 : 3; if (tulip_media_cap[dev->if_port] & MediaIsMII) { new_csr6 = 0x020E0000; } else if (tulip_media_cap[dev->if_port] & MediaIsFx) { new_csr6 = 0x02860000; } else new_csr6 = 0x03860000; if (tulip_debug > 1) netdev_dbg(dev, "No media description table, assuming %s transceiver, CSR12 %02x\n", medianame[dev->if_port], ioread32(ioaddr + CSR12)); } tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); mdelay(1); } /* Check the MII negotiated duplex and change the CSR6 setting if required. Return 0 if everything is OK. Return < 0 if the transceiver is missing or has no link beat. 
 */
/*
 * Check the MII-negotiated duplex/speed and update CSR6 if it no longer
 * matches the current setting.
 *
 * Return 0 if CSR6 already matches, 1 if CSR6 was changed (rx/tx engines
 * are restarted), -1 if there is no link beat, and -2 if the transceiver
 * is missing (BMSR reads back as all ones).
 */
int tulip_check_duplex(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	unsigned int bmsr, lpa, negotiated, new_csr6;

	/* Read link status and the partner's advertised abilities from the
	 * primary PHY (tp->phys[0]). */
	bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
	lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
	if (tulip_debug > 1)
		dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n",
			 bmsr, lpa);
	/* All ones means nothing responded on the MDIO bus. */
	if (bmsr == 0xffff)
		return -2;
	if ((bmsr & BMSR_LSTATUS) == 0) {
		/* BMSR's link-status bit latches a past link drop (802.3
		 * clause 22), so re-read once before declaring link loss. */
		int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
		if ((new_bmsr & BMSR_LSTATUS) == 0) {
			if (tulip_debug > 1)
				dev_info(&dev->dev,
					 "No link beat on the MII interface, status %04x\n",
					 new_bmsr);
			return -1;
		}
	}
	/* Negotiated abilities = what the partner offers AND what we
	 * advertised. */
	negotiated = lpa & tp->advertising[0];
	tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated);

	/* Rebuild CSR6: drop the Tx threshold at 100 Mb/s, raise it at
	 * 10 Mb/s, and mirror the negotiated duplex into the FD bit. */
	new_csr6 = tp->csr6;
	if (negotiated & LPA_100)
		new_csr6 &= ~TxThreshold;
	else
		new_csr6 |= TxThreshold;
	if (tp->full_duplex)
		new_csr6 |= FullDuplex;
	else
		new_csr6 &= ~FullDuplex;

	if (new_csr6 != tp->csr6) {
		/* Setting changed: write it back and restart rx/tx so the
		 * new mode takes effect. */
		tp->csr6 = new_csr6;
		tulip_restart_rxtx(tp);
		if (tulip_debug > 0)
			dev_info(&dev->dev,
				 "Setting %s-duplex based on MII#%d link partner capability of %04x\n",
				 tp->full_duplex ? "full" : "half",
				 tp->phys[0], lpa);
		return 1;
	}

	return 0;
}

/*
 * Probe the MDIO bus for attached MII transceivers, recording each one
 * found in tp->phys[] and fixing up its advertising/autonegotiation
 * state.  (Function body continues beyond this chunk.)
 */
void tulip_find_mii(struct net_device *dev, int board_idx)
{
	struct tulip_private *tp = netdev_priv(dev);
	int phyn, phy_idx = 0;
	int mii_reg0;
	int mii_advert;
	unsigned int to_advert, new_bmcr, ane_switch;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later,
	   but takes much time.
*/ for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) { int phy = phyn & 0x1f; int mii_status = tulip_mdio_read (dev, phy, MII_BMSR); if ((mii_status & 0x8301) == 0x8001 || ((mii_status & BMSR_100BASE4) == 0 && (mii_status & 0x7800) != 0)) { /* preserve Becker logic, gain indentation level */ } else { continue; } mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR); mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE); ane_switch = 0; /* if not advertising at all, gen an * advertising value from the capability * bits in BMSR */ if ((mii_advert & ADVERTISE_ALL) == 0) { unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR); mii_advert = ((tmpadv >> 6) & 0x3e0) | 1; } if (tp->mii_advertise) { tp->advertising[phy_idx] = to_advert = tp->mii_advertise; } else if (tp->advertising[phy_idx]) { to_advert = tp->advertising[phy_idx]; } else { tp->advertising[phy_idx] = tp->mii_advertise = to_advert = mii_advert; } tp->phys[phy_idx++] = phy; pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n", board_idx, phy, mii_reg0, mii_status, mii_advert); /* Fixup for DLink with miswired PHY. */ if (mii_advert != to_advert) { pr_debug("tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n", board_idx, to_advert, phy, mii_advert); tulip_mdio_write (dev, phy, 4, to_advert); } /* Enable autonegotiation: some boards default to off. 
*/ if (tp->default_port == 0) { new_bmcr = mii_reg0 | BMCR_ANENABLE; if (new_bmcr != mii_reg0) { new_bmcr |= BMCR_ANRESTART; ane_switch = 1; } } /* ...or disable nway, if forcing media */ else { new_bmcr = mii_reg0 & ~BMCR_ANENABLE; if (new_bmcr != mii_reg0) ane_switch = 1; } /* clear out bits we never want at this point */ new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE | BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK | BMCR_RESET); if (tp->full_duplex) new_bmcr |= BMCR_FULLDPLX; if (tulip_media_cap[tp->default_port] & MediaIs100) new_bmcr |= BMCR_SPEED100; if (new_bmcr != mii_reg0) { /* some phys need the ANE switch to * happen before forced media settings * will "take." However, we write the * same value twice in order not to * confuse the sane phys. */ if (ane_switch) { tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); udelay (10); } tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); } } tp->mii_cnt = phy_idx; if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) { pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n", board_idx); tp->phys[0] = 1; } }
gpl-2.0
SM-G920P/SM-G920P-Kernel
arch/arm/mach-imx/devices/platform-mx2-emma.c
2737
1090
/*
 * Copyright (C) 2010 Pengutronix
 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */
#include "../hardware.h"
#include "devices-common.h"

/* Build an imx_mx2_emma_data initializer from the SoC-prefixed register
 * base and interrupt macros (e.g. MX27_EMMAPRP_BASE_ADDR). */
#define imx_mx2_emmaprp_data_entry_single(soc) \
	{ \
		.iobase = soc ## _EMMAPRP_BASE_ADDR, \
		.iosize = SZ_32, \
		.irq = soc ## _INT_EMMAPRP, \
	}

#ifdef CONFIG_SOC_IMX27
/* Static description of the i.MX27 eMMA-PrP (pre-processor) instance. */
const struct imx_mx2_emma_data imx27_mx2_emmaprp_data __initconst =
	imx_mx2_emmaprp_data_entry_single(MX27);
#endif /* ifdef CONFIG_SOC_IMX27 */

/*
 * Register the "m2m-emmaprp" platform device described by @data.
 *
 * Builds one MEM resource covering the controller's register window and
 * one IRQ resource, then registers the device with a 32-bit DMA mask.
 * Error handling is delegated to imx_add_platform_device_dmamask();
 * its return value is passed straight back to the caller.
 */
struct platform_device *__init imx_add_mx2_emmaprp(
		const struct imx_mx2_emma_data *data)
{
	struct resource res[] = {
		{
			.start = data->iobase,
			.end = data->iobase + data->iosize - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irq,
			.end = data->irq,
			.flags = IORESOURCE_IRQ,
		},
	};

	/* No platform data; device id 0; DMA-capable up to 32 bits. */
	return imx_add_platform_device_dmamask("m2m-emmaprp", 0,
			res, 2, NULL, 0, DMA_BIT_MASK(32));
}
gpl-2.0
TheTypoMaster/e980-zeKrnl
drivers/edac/i7core_edac.c
2737
65636
/* Intel i7 core/Nehalem Memory Controller kernel module * * This driver supports the memory controllers found on the Intel * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx, * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield * and Westmere-EP. * * This file may be distributed under the terms of the * GNU General Public License version 2 only. * * Copyright (c) 2009-2010 by: * Mauro Carvalho Chehab <mchehab@redhat.com> * * Red Hat Inc. http://www.redhat.com * * Forked and adapted from the i5400_edac driver * * Based on the following public Intel datasheets: * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor * Datasheet, Volume 2: * http://download.intel.com/design/processor/datashts/320835.pdf * Intel Xeon Processor 5500 Series Datasheet Volume 2 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf * also available at: * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/edac.h> #include <linux/mmzone.h> #include <linux/smp.h> #include <asm/mce.h> #include <asm/processor.h> #include <asm/div64.h> #include "edac_core.h" /* Static vars */ static LIST_HEAD(i7core_edac_list); static DEFINE_MUTEX(i7core_edac_lock); static int probed; static int use_pci_fixup; module_param(use_pci_fixup, int, 0444); MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices"); /* * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core * registers start at bus 255, and are not reported by BIOS. * We currently find devices with only 2 sockets. In order to support more QPI * Quick Path Interconnect, just increment this number. 
*/ #define MAX_SOCKET_BUSES 2 /* * Alter this version for the module when modifications are made */ #define I7CORE_REVISION " Ver: 1.0.0" #define EDAC_MOD_STR "i7core_edac" /* * Debug macros */ #define i7core_printk(level, fmt, arg...) \ edac_printk(level, "i7core", fmt, ##arg) #define i7core_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg) /* * i7core Memory Controller Registers */ /* OFFSETS for Device 0 Function 0 */ #define MC_CFG_CONTROL 0x90 #define MC_CFG_UNLOCK 0x02 #define MC_CFG_LOCK 0x00 /* OFFSETS for Device 3 Function 0 */ #define MC_CONTROL 0x48 #define MC_STATUS 0x4c #define MC_MAX_DOD 0x64 /* * OFFSETS for Device 3 Function 4, as inicated on Xeon 5500 datasheet: * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf */ #define MC_TEST_ERR_RCV1 0x60 #define DIMM2_COR_ERR(r) ((r) & 0x7fff) #define MC_TEST_ERR_RCV0 0x64 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff) #define DIMM0_COR_ERR(r) ((r) & 0x7fff) /* OFFSETS for Device 3 Function 2, as inicated on Xeon 5500 datasheet */ #define MC_SSRCONTROL 0x48 #define SSR_MODE_DISABLE 0x00 #define SSR_MODE_ENABLE 0x01 #define SSR_MODE_MASK 0x03 #define MC_SCRUB_CONTROL 0x4c #define STARTSCRUB (1 << 24) #define SCRUBINTERVAL_MASK 0xffffff #define MC_COR_ECC_CNT_0 0x80 #define MC_COR_ECC_CNT_1 0x84 #define MC_COR_ECC_CNT_2 0x88 #define MC_COR_ECC_CNT_3 0x8c #define MC_COR_ECC_CNT_4 0x90 #define MC_COR_ECC_CNT_5 0x94 #define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff) #define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff) /* OFFSETS for Devices 4,5 and 6 Function 0 */ #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58 #define THREE_DIMMS_PRESENT (1 << 24) #define SINGLE_QUAD_RANK_PRESENT (1 << 23) #define QUAD_RANK_PRESENT (1 << 22) #define REGISTERED_DIMM (1 << 15) #define MC_CHANNEL_MAPPER 0x60 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1) #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1) #define MC_CHANNEL_RANK_PRESENT 0x7c #define 
RANK_PRESENT_MASK 0xffff #define MC_CHANNEL_ADDR_MATCH 0xf0 #define MC_CHANNEL_ERROR_MASK 0xf8 #define MC_CHANNEL_ERROR_INJECT 0xfc #define INJECT_ADDR_PARITY 0x10 #define INJECT_ECC 0x08 #define MASK_CACHELINE 0x06 #define MASK_FULL_CACHELINE 0x06 #define MASK_MSB32_CACHELINE 0x04 #define MASK_LSB32_CACHELINE 0x02 #define NO_MASK_CACHELINE 0x00 #define REPEAT_EN 0x01 /* OFFSETS for Devices 4,5 and 6 Function 1 */ #define MC_DOD_CH_DIMM0 0x48 #define MC_DOD_CH_DIMM1 0x4c #define MC_DOD_CH_DIMM2 0x50 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10)) #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10) #define DIMM_PRESENT_MASK (1 << 9) #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9) #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7)) #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7) #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5)) #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5) #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2)) #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2) #define MC_DOD_NUMCOL_MASK 3 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK) #define MC_RANK_PRESENT 0x7c #define MC_SAG_CH_0 0x80 #define MC_SAG_CH_1 0x84 #define MC_SAG_CH_2 0x88 #define MC_SAG_CH_3 0x8c #define MC_SAG_CH_4 0x90 #define MC_SAG_CH_5 0x94 #define MC_SAG_CH_6 0x98 #define MC_SAG_CH_7 0x9c #define MC_RIR_LIMIT_CH_0 0x40 #define MC_RIR_LIMIT_CH_1 0x44 #define MC_RIR_LIMIT_CH_2 0x48 #define MC_RIR_LIMIT_CH_3 0x4C #define MC_RIR_LIMIT_CH_4 0x50 #define MC_RIR_LIMIT_CH_5 0x54 #define MC_RIR_LIMIT_CH_6 0x58 #define MC_RIR_LIMIT_CH_7 0x5C #define MC_RIR_LIMIT_MASK ((1 << 10) - 1) #define MC_RIR_WAY_CH 0x80 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7) #define MC_RIR_WAY_RANK_MASK 0x7 /* * i7core structs */ #define NUM_CHANS 3 #define MAX_DIMMS 3 /* Max DIMMS per channel */ #define MAX_MCR_FUNC 4 #define MAX_CHAN_FUNC 3 struct i7core_info { u32 mc_control; u32 mc_status; u32 max_dod; u32 ch_map; }; struct 
i7core_inject {
	int	enable;		/* non-zero once injection has been armed */

	u32	section;	/* half-cacheline select, bits 0-1 */
	u32	type;		/* repeat/ECC/parity injection flags */
	u32	eccmask;	/* Error address mask */

	/* Injection address match criteria; -1 in a field means "any" */
	int	channel, dimm, rank, bank, page, col;
};

/* Per-channel DIMM population, read back from the channel registers */
struct i7core_channel {
	u32	ranks;
	u32	dimms;
};

/* One entry of a PCI scan list: expected slot/function plus device id */
struct pci_id_descr {
	int	dev;
	int	func;
	int	dev_id;
	int	optional;	/* device may legitimately be absent (RDIMM-only) */
};

/* A complete scan list; one table per supported processor family */
struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs;
};

/*
 * Per-socket bookkeeping: every PCI device that belongs to one memory
 * controller, indexed the same way as the pci_id_table used to find them.
 */
struct i7core_dev {
	struct list_head	list;
	u8			socket;
	struct pci_dev		**pdev;
	int			n_devs;
	struct mem_ctl_info	*mci;
};

/* Driver-private state, hung off mem_ctl_info->pvt_info */
struct i7core_pvt {
	struct pci_dev	*pci_noncore;
	struct pci_dev	*pci_mcr[MAX_MCR_FUNC + 1];
	struct pci_dev	*pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];

	struct i7core_dev	*i7core_dev;

	struct i7core_info	info;
	struct i7core_inject	inject;
	struct i7core_channel	channel[NUM_CHANS];

	int		ce_count_available;

	/* Maps a (channel, dimm) pair to the csrow index assigned to it */
	int		csrow_map[NUM_CHANS][MAX_DIMMS];

	/* ECC corrected errors counts per udimm */
	unsigned long	udimm_ce_count[MAX_DIMMS];
	int		udimm_last_ce_count[MAX_DIMMS];
	/* ECC corrected errors counts per rdimm */
	unsigned long	rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];

	bool		is_registered, enable_scrub;

	/* Fifo double buffers */
	struct mce	mce_entry[MCE_LOG_LEN];
	struct mce	mce_outentry[MCE_LOG_LEN];

	/* Fifo in/out counters */
	unsigned	mce_in, mce_out;

	/* Count indicator to show errors not got */
	unsigned	mce_overrun;

	/* DCLK Frequency used for computing scrub rate */
	int		dclk_freq;

	/* Struct to control EDAC polling */
	struct edac_pci_ctl_info *i7core_pci;
};

/* Shorthand initializer for one struct pci_id_descr entry */
#define PCI_DESCR(device, function, device_id)	\
	.dev = (device),			\
	.func = (function),			\
	.dev_id = (device_id)

static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC)   },

		/* Generic Non-core registers */
	/*
	 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
	 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
	 * the probing code needs to test for the other address in case of
	 * failure of this one
	 */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE)  },

};

static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
	{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR)         },
	{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD)      },
	{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST)     },

	{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
	{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
	{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
	{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC)   },

	{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
	{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
	{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
	{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC)   },

	/*
	 * This is the PCI device has an alternate address on some
	 * processors like Core i7 860
	 */
	{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE)     },
};

static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2)   },

		/* Generic Non-core registers */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2)  },
};

#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
static const struct pci_id_table pci_dev_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
	{0,}			/* 0 terminated list. */
};

/*
 * pci_device_id table for which devices we are looking for
 */
static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
	{0,}			/* 0 terminated list. */
};

/****************************************************************************
			Anciliary status routines
 ****************************************************************************/

	/* MC_CONTROL bits */
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))

	/* MC_STATUS bits */
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << ch))

	/* MC_MAX_DOD read functions */
static inline int numdimms(u32 dimms)
{
	/* 2-bit encoded DIMM count, stored as (count - 1) */
	return (dimms & 0x3) + 1;
}

static inline int numrank(u32 rank)
{
	/* 2-bit encoded rank count; encoding 3 is reserved */
	static int ranks[4] = { 1, 2, 4, -EINVAL };

	return ranks[rank & 0x3];
}

static inline int numbank(u32 bank)
{
	/* 2-bit encoded bank count; encoding 3 is reserved */
	static int banks[4] = { 4, 8, 16, -EINVAL };

	return banks[bank & 0x3];
}

static inline int numrow(u32 row)
{
	/* 3-bit encoded number of row address bits (2^12 .. 2^16) */
	static int rows[8] = {
		1 << 12, 1 << 13, 1 << 14, 1 << 15,
		1 << 16, -EINVAL, -EINVAL, -EINVAL,
	};

	return rows[row & 0x7];
}

static inline int numcol(u32 col)
{
	/*
	 * 2-bit encoded number of column address bits. NOTE(review): the
	 * array is sized 8 but only indices 0-3 are initialized/used
	 * (the index is masked with 0x3) — presumably a leftover from a
	 * 3-bit field; harmless as written.
	 */
	static int cols[8] = {
		1 << 10, 1 << 11, 1 << 12, -EINVAL,
	};
	return cols[col & 0x3];
}

/* Find the per-socket device bundle for @socket, or NULL if not yet seen */
static struct i7core_dev *get_i7core_dev(u8 socket)
{
	struct i7core_dev *i7core_dev;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		if (i7core_dev->socket == socket)
			return i7core_dev;
	}

	return NULL;
}

/*
 * Allocate a new per-socket bundle with room for table->n_devs PCI devices
 * and link it onto the global i7core_edac_list. Returns NULL on OOM.
 */
static struct i7core_dev *alloc_i7core_dev(u8 socket,
					   const struct pci_id_table *table)
{
	struct i7core_dev *i7core_dev;

	i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
	if (!i7core_dev)
		return NULL;

	i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
				   GFP_KERNEL);
	if (!i7core_dev->pdev) {
		kfree(i7core_dev);
		return NULL;
	}

	i7core_dev->socket = socket;
	i7core_dev->n_devs = table->n_devs;
	list_add_tail(&i7core_dev->list, &i7core_edac_list);

	return i7core_dev;
}

/* Unlink and free a bundle; caller must have put the pdev references */
static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
	list_del(&i7core_dev->list);
	kfree(i7core_dev->pdev);
	kfree(i7core_dev);
}
/****************************************************************************
			Memory check routines
 ****************************************************************************/

/*
 * Look up the already-'got' PCI device for @socket that sits at the given
 * PCI slot/function, or NULL if it was not found during the scan.
 */
static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
					  unsigned func)
{
	struct i7core_dev *i7core_dev = get_i7core_dev(socket);
	int i;

	if (!i7core_dev)
		return NULL;

	for (i = 0; i < i7core_dev->n_devs; i++) {
		if (!i7core_dev->pdev[i])
			continue;

		if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
		    PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
			return i7core_dev->pdev[i];
		}
	}

	return NULL;
}

/**
 * i7core_get_active_channels() - gets the number of channels and csrows
 * @socket:	Quick Path Interconnect socket
 * @channels:	Number of channels that will be returned
 * @csrows:	Number of csrows found
 *
 * Since EDAC core needs to know in advance the number of available channels
 * and csrows, in order to allocate memory for csrows/channels, it is needed
 * to run two similar steps. At the first step, implemented on this function,
 * it checks the number of csrows/channels present at one socket.
 * this is used in order to properly allocate the size of mci components.
 *
 * It should be noticed that none of the current available datasheets explain
 * or even mention how csrows are seen by the memory controller. So, we need
 * to add a fake description for csrows.
 * So, this driver is attributing one DIMM memory for one csrow.
 */
static int i7core_get_active_channels(const u8 socket, unsigned *channels,
				      unsigned *csrows)
{
	struct pci_dev *pdev = NULL;
	int i, j;
	u32 status, control;

	*channels = 0;
	*csrows = 0;

	pdev = get_pdev_slot_func(socket, 3, 0);
	if (!pdev) {
		i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
			      socket);
		return -ENODEV;
	}

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_STATUS, &status);
	pci_read_config_dword(pdev, MC_CONTROL, &control);

	for (i = 0; i < NUM_CHANS; i++) {
		u32 dimm_dod[3];
		/* Check if the channel is active */
		if (!(control & (1 << (8 + i))))
			continue;

		/* Check if the channel is disabled */
		if (status & (1 << i))
			continue;

		pdev = get_pdev_slot_func(socket, i + 4, 1);
		if (!pdev) {
			i7core_printk(KERN_ERR, "Couldn't find socket %d "
						"fn %d.%d!!!\n",
						socket, i + 4, 1);
			return -ENODEV;
		}
		/* Devices 4-6 function 1 */
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		(*channels)++;

		/* Each present DIMM is reported to EDAC as one csrow */
		for (j = 0; j < 3; j++) {
			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;
			(*csrows)++;
		}
	}

	debugf0("Number of active channels on socket %d: %d\n",
		socket, *channels);

	return 0;
}

/*
 * Read the full DIMM layout from the memory controller registers and fill
 * in the EDAC csrow descriptors (one csrow per present DIMM). Also decides
 * the EDAC mode (S4ECD4ED/S8ECD8ED/none) and memory type (DDR3/RDDR3).
 */
static int get_dimm_config(const struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct csrow_info *csr;
	struct pci_dev *pdev;
	int i, j;
	int csrow = 0;
	unsigned long last_page = 0;
	enum edac_type mode;
	enum mem_type mtype;

	/* Get data from the MC register, function 0 */
	pdev = pvt->pci_mcr[0];
	if (!pdev)
		return -ENODEV;

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
	pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
	pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
	pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);

	debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
		pvt->i7core_dev->socket, pvt->info.mc_control,
		pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);

	if (ECC_ENABLED(pvt)) {
		debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
		if (ECCx8(pvt))
			mode = EDAC_S8ECD8ED;
		else
			mode = EDAC_S4ECD4ED;
	} else {
		debugf0("ECC disabled\n");
		mode = EDAC_NONE;
	}

	/* FIXME: need to handle the error codes */
	debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
		"x%x x 0x%x\n",
		numdimms(pvt->info.max_dod),
		numrank(pvt->info.max_dod >> 2),
		numbank(pvt->info.max_dod >> 4),
		numrow(pvt->info.max_dod >> 6),
		numcol(pvt->info.max_dod >> 9));

	for (i = 0; i < NUM_CHANS; i++) {
		u32 data, dimm_dod[3], value[8];

		if (!pvt->pci_ch[i][0])
			continue;

		if (!CH_ACTIVE(pvt, i)) {
			debugf0("Channel %i is not active\n", i);
			continue;
		}
		if (CH_DISABLED(pvt, i)) {
			debugf0("Channel %i is disabled\n", i);
			continue;
		}

		/* Devices 4-6 function 0 */
		pci_read_config_dword(pvt->pci_ch[i][0],
				MC_CHANNEL_DIMM_INIT_PARAMS, &data);

		pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
					4 : 2;

		if (data & REGISTERED_DIMM)
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
#if 0
		if (data & THREE_DIMMS_PRESENT)
			pvt->channel[i].dimms = 3;
		else if (data & SINGLE_QUAD_RANK_PRESENT)
			pvt->channel[i].dimms = 1;
		else
			pvt->channel[i].dimms = 2;
#endif

		/* Devices 4-6 function 1 */
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
			"%d ranks, %cDIMMs\n",
			i,
			RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
			data,
			pvt->channel[i].ranks,
			(data & REGISTERED_DIMM) ? 'R' : 'U');

		for (j = 0; j < 3; j++) {
			u32 banks, ranks, rows, cols;
			u32 size, npages;

			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;

			banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
			ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
			rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
			cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));

			/* DDR3 has 8 I/O banks */
			size = (rows * cols * banks * ranks) >> (20 - 3);

			pvt->channel[i].dimms++;

			debugf0("\tdimm %d %d Mb offset: %x, "
				"bank: %d, rank: %d, row: %#x, col: %#x\n",
				j, size,
				RANKOFFSET(dimm_dod[j]),
				banks, ranks, rows, cols);

			npages = MiB_TO_PAGES(size);

			csr = &mci->csrows[csrow];
			csr->first_page = last_page + 1;
			last_page += npages;
			csr->last_page = last_page;
			csr->nr_pages = npages;

			csr->page_mask = 0;
			csr->grain = 8;
			csr->csrow_idx = csrow;
			csr->nr_channels = 1;

			csr->channels[0].chan_idx = i;
			csr->channels[0].ce_count = 0;

			/* Remember which csrow this (channel, dimm) maps to */
			pvt->csrow_map[i][j] = csrow;

			switch (banks) {
			case 4:
				csr->dtype = DEV_X4;
				break;
			case 8:
				csr->dtype = DEV_X8;
				break;
			case 16:
				csr->dtype = DEV_X16;
				break;
			default:
				csr->dtype = DEV_UNKNOWN;
			}

			csr->edac_mode = mode;
			csr->mtype = mtype;
			snprintf(csr->channels[0].label,
					sizeof(csr->channels[0].label),
					"CPU#%uChannel#%u_DIMM#%u",
					pvt->i7core_dev->socket, i, j);

			csrow++;
		}

		/* Dump the channel's System Address Generation registers */
		pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
		pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
		pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
		pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
		pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
		pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
		pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
		pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
		debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
		for (j = 0; j < 8; j++)
			debugf1("\t\t%#x\t%#x\t%#x\n",
				(value[j] >> 27) & 0x1,
				(value[j] >> 24) & 0x7,
				(value[j] & ((1 << 24) - 1)));
	}

	return 0;
}

/****************************************************************************
			Error insertion routines
 ****************************************************************************/

/*
 * The i7core has independent error injection features per channel.
 * However, to have a simpler code, we don't allow enabling error injection
 * on more than one channel. Also, since a change at an inject parameter will
 * be applied only at enable, we're disabling error injection on all write
 * calls to the sysfs nodes that controls the error code injection.
 */
static int disable_inject(const struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;

	pvt->inject.enable = 0;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return -ENODEV;

	/* Clearing the inject register disarms the hardware */
	pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
				MC_CHANNEL_ERROR_INJECT, 0);

	return 0;
}

/*
 * i7core inject inject.section
 *
 *	accept and store error injection inject.section value
 *	bit 0 - refers to the lower 32-byte half cacheline
 *	bit 1 - refers to the upper 32-byte half cacheline
 */
static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
					   const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if ((rc < 0) || (value > 3))
		return -EIO;

	pvt->inject.section = (u32) value;
	return count;
}

static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
					  char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.section);
}

/*
 * i7core inject.type
 *
 *	accept and store error injection inject.section value
 *	bit 0 - repeat enable - Enable error repetition
 *	bit 1 - inject ECC error
 *	bit 2 - inject parity error
 */
static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
					const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if ((rc < 0) || (value > 7))
		return -EIO;

	pvt->inject.type = (u32) value;
	return count;
}

static ssize_t
i7core_inject_type_show(struct mem_ctl_info *mci, char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.type);
}

/*
 * i7core_inject_inject.eccmask_store
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */
static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
					   const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	/* Any value fits: every set bit flips the matching ECC bit */
	rc = strict_strtoul(data, 10, &value);
	if (rc < 0)
		return -EIO;

	pvt->inject.eccmask = (u32) value;
	return count;
}

static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
					  char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
}

/*
 * i7core_addrmatch
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */

/*
 * Generates the sysfs store/show pair for one address-match criterion.
 * Writing "any" stores -1 ("match anything"); otherwise the value must be
 * below @limit. NOTE(review): strict_strtoul() is called with a 'long *'
 * where it expects 'unsigned long *' — works in practice on these inputs
 * but relies on identical representation; verify against kstrtol usage.
 */
#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
		struct mem_ctl_info *mci,			\
		const char *data, size_t count)			\
{								\
	struct i7core_pvt *pvt;					\
	long value;						\
	int rc;							\
								\
	debugf1("%s()\n", __func__);				\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1;					\
	else {							\
		rc = strict_strtoul(data, 10, &value);		\
		if ((rc < 0) || (value >= limit))		\
			return -EIO;				\
	}							\
								\
	pvt->inject.param = value;				\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
		struct mem_ctl_info *mci,			\
		char *data)					\
{								\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	debugf1("%s() pvt=%p\n", __func__, pvt);		\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}

/* Sysfs attribute entry wired to the pair generated above */
#define ATTR_ADDR_MATCH(param)					\
	{							\
		.attr = {					\
			.name = #param,				\
			.mode = (S_IRUGO | S_IWUSR)		\
		},						\
		.show  = i7core_inject_show_##param,		\
		.store = i7core_inject_store_##param,		\
	}

DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);

/*
 * Write @val to config register @where and read it back, retrying up to
 * 10 times with 100 ms pauses — some MC registers do not latch instantly.
 * Returns 0 once the readback matches, -EINVAL otherwise.
 */
static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
{
	u32 read;
	int count;

	debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val);

	for (count = 0; count < 10; count++) {
		if (count)
			msleep(100);
		pci_write_config_dword(dev, where, val);
		pci_read_config_dword(dev, where, &read);

		if (read == val)
			return 0;
	}

	i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
		"write=%08x. Read=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val, read);

	return -EINVAL;
}

/*
 * This routine prepares the Memory Controller for error injection.
 * The error will be injected when some process tries to write to the
 * memory that matches the given criteria.
 * The criteria can be set in terms of a mask where dimm, rank, bank, page
 * and col can be specified.
 * A -1 value for any of the mask items will make the MCU to ignore
 * that matching criteria for error injection.
 *
 * It should be noticed that the error will only happen after a write operation
 * on a memory that matches the condition. if REPEAT_EN is not enabled at
 * inject mask, then it will produce just one error. Otherwise, it will repeat
 * until the injectmask would be cleaned.
 *
 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
 *    is reliable enough to check if the MC is using the
 *    three channels. However, this is not clear at the datasheet.
 */
static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
					  const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;
	u64 mask = 0;
	int  rc;
	long enable;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	rc = strict_strtoul(data, 10, &enable);
	if ((rc < 0))
		return 0;

	if (enable) {
		pvt->inject.enable = 1;
	} else {
		disable_inject(mci);
		return count;
	}

	/* Sets pvt->inject.dimm mask */
	if (pvt->inject.dimm < 0)
		mask |= 1LL << 41;	/* "ignore dimm" bit */
	else {
		/* Bit layout differs for 3-DIMM vs 2-DIMM channels */
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
		else
			mask |= (pvt->inject.dimm & 0x1LL) << 36;
	}

	/* Sets pvt->inject.rank mask */
	if (pvt->inject.rank < 0)
		mask |= 1LL << 40;	/* "ignore rank" bit */
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
		else
			mask |= (pvt->inject.rank & 0x3LL) << 34;
	}

	/*
	 * Sets pvt->inject.bank mask.
	 * NOTE(review): the mask 0x15LL looks suspicious — banks go up to
	 * 31 (see DECLARE_ADDR_MATCH(bank, 32)), which would need 0x1fLL.
	 * Left as-is; confirm against the 5500 datasheet before changing.
	 */
	if (pvt->inject.bank < 0)
		mask |= 1LL << 39;	/* "ignore bank" bit */
	else
		mask |= (pvt->inject.bank & 0x15LL) << 30;

	/* Sets pvt->inject.page mask */
	if (pvt->inject.page < 0)
		mask |= 1LL << 38;	/* "ignore page" bit */
	else
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask */
	if (pvt->inject.col < 0)
		mask |= 1LL << 37;	/* "ignore column" bit */
	else
		mask |= (pvt->inject.col &
			 0x3fff);

	/*
	 * bit    0: REPEAT_EN
	 * bits 1-2: MASK_HALF_CACHELINE
	 * bit    3: INJECT_ECC
	 * bit    4: INJECT_ADDR_PARITY
	 */
	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, injectmask);

	/*
	 * This is something undocumented, based on my tests
	 * Without writing 8 to this register, errors aren't injected. Not sure
	 * why.
	 */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 8);

	debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
		" inject 0x%08x\n",
		mask, pvt->inject.eccmask, injectmask);

	return count;
}

/*
 * Reads back the channel's inject register; if either injection bit is
 * still set the cached enable flag is refreshed before being reported.
 */
static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
					 char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, &injectmask);

	debugf0("Inject error read: 0x%018x\n", injectmask);

	if (injectmask & 0x0c)
		pvt->inject.enable = 1;

	return sprintf(data, "%d\n", pvt->inject.enable);
}

/*
 * Generates a sysfs show routine for one per-UDIMM corrected-error counter.
 * Counters are only meaningful for unregistered DIMMs after the first poll.
 */
#define DECLARE_COUNTER(param)					\
static ssize_t i7core_show_counter_##param(			\
		struct mem_ctl_info *mci,			\
		char *data)					\
{								\
	struct i7core_pvt *pvt = mci->pvt_info;			\
								\
	debugf1("%s() \n", __func__);				\
	if (!pvt->ce_count_available || (pvt->is_registered))	\
		return sprintf(data, "data unavailable\n");	\
	return sprintf(data, "%lu\n",				\
			pvt->udimm_ce_count[param]);		\
}

#define ATTR_COUNTER(param)					\
	{							\
		.attr = {					\
			.name = __stringify(udimm##param),	\
			.mode = (S_IRUGO | S_IWUSR)		\
		},						\
		.show  = i7core_show_counter_##param		\
	}

DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);

/*
 * Sysfs struct
 */

static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
	ATTR_ADDR_MATCH(channel),
	ATTR_ADDR_MATCH(dimm),
	ATTR_ADDR_MATCH(rank),
	ATTR_ADDR_MATCH(bank),
	ATTR_ADDR_MATCH(page),
	ATTR_ADDR_MATCH(col),
	{ } /* End of list */
};

static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
	.name  = "inject_addrmatch",
	.mcidev_attr = i7core_addrmatch_attrs,
};

static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
	ATTR_COUNTER(0),
	ATTR_COUNTER(1),
	ATTR_COUNTER(2),
	{ .attr = { .name = NULL } }
};

static const struct mcidev_sysfs_group i7core_udimm_counters = {
	.name  = "all_channel_counts",
	.mcidev_attr = i7core_udimm_counters_attrs,
};

/* Attribute set exposed for registered-DIMM (RDIMM) configurations */
static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	},
	{ }	/* End of list */
};

/* UDIMM set: same as rdimm plus the per-UDIMM counter group */
static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	}, {
		.grp = &i7core_udimm_counters,
	},
	{ }	/* End of list */
};

/****************************************************************************
	Device initialization routines: put/get, init/exit
 ****************************************************************************/

/*
 *	i7core_put_all_devices	'put' all the devices that we have
 *				reserved via 'get'
 */
static void i7core_put_devices(struct i7core_dev *i7core_dev)
{
	int i;

	debugf0(__FILE__ ": %s()\n", __func__);
	for (i = 0; i < i7core_dev->n_devs; i++) {
		struct pci_dev *pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;
		debugf0("Removing dev %02x:%02x.%d\n",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		pci_dev_put(pdev);
	}
}

/* Drop every socket bundle and the pdev references they hold */
static void i7core_put_all_devices(void)
{
	struct i7core_dev *i7core_dev, *tmp;

	list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
		i7core_put_devices(i7core_dev);
		free_i7core_dev(i7core_dev);
	}
}

static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
{
	struct pci_dev *pdev = NULL;
	int i;

	/*
	 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
	 * aren't announced by acpi.
	 * So, we need to use a legacy scan probing
	 * to detect them
	 */
	while (table && table->descr) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      table->descr[0].dev_id, NULL);
		if (unlikely(!pdev)) {
			/* First device missing: force-scan the top buses */
			for (i = 0; i < MAX_SOCKET_BUSES; i++)
				pcibios_scan_specific_bus(255-i);
		}
		pci_dev_put(pdev);
		table++;
	}
}

/* Returns the highest PCI bus number currently known to the system */
static unsigned i7core_pci_lastbus(void)
{
	int last_bus = 0, bus;
	struct pci_bus *b = NULL;

	while ((b = pci_find_next_bus(b)) != NULL) {
		bus = b->number;
		debugf0("Found bus %d\n", bus);
		if (bus > last_bus)
			last_bus = bus;
	}

	debugf0("Last bus %d\n", last_bus);

	return last_bus;
}

/*
 *	i7core_get_all_devices	Find and perform 'get' operation on the MCH's
 *			device/functions we want to reference for this driver
 *
 *			Need to 'get' device 16 func 1 and func 2
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	/*
	 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs
	 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
	 * to probe for the alternate address in case of failure
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT,
				      *prev);

	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
				      *prev);

	if (!pdev) {
		if (*prev) {
			/* End of the iteration over multiple sockets */
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	/* The socket index is derived from the distance to the last bus */
	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	i7core_dev->pdev[devno] = pdev;

	/* Sanity check */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
			PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		socket, bus, dev_descr->dev,
		dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated on drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}

/* Walk every family table and 'get' each listed device, per socket */
static int i7core_get_all_devices(void)
{
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;

	last_bus = i7core_pci_lastbus();

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					if (i == 0) {
						/* Family absent: skip table */
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
		table++;
	}

	return 0;
}

/*
 * Wire the gathered PCI devices into the pvt lookup arrays by slot/func
 * and detect the processor family from the non-core device id (which
 * also decides whether scrub-rate control is supported).
 */
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;
	char *family;

	pvt->is_registered = false;
	pvt->enable_scrub  = false;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func) {
			pvt->pci_noncore = pdev;

			/* Detect the processor family */
			switch (pdev->device) {
			case PCI_DEVICE_ID_INTEL_I7_NONCORE:
				family = "Xeon 35xx/ i7core";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
				family = "i7-800/i5-700";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
				family = "Xeon 34xx";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
				family = "Xeon 55xx";
				pvt->enable_scrub = true;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
				family = "Xeon 56xx / i7-900";
				pvt->enable_scrub = true;
				break;
			default:
				family = "unknown";
				pvt->enable_scrub = false;
			}
			debugf0("Detected a processor type %s\n", family);
		} else
			goto error;

		debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			pdev, i7core_dev->socket);

		/* Presence of dev 3 func 2 (MC_RAS) implies RDIMMs */
		if (PCI_SLOT(pdev->devfn) == 3 &&
			PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = true;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}

/****************************************************************************
			Error check routines
 ****************************************************************************/

/*
 * Report @add corrected errors against the csrow mapped to (chan, dimm),
 * one edac_mc_handle_fbd_ce() call per error.
 */
static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
				      const int chan,
				      const int dimm,
				      const int add)
{
	char *msg;
	struct i7core_pvt *pvt = mci->pvt_info;
	int row = pvt->csrow_map[chan][dimm], i;

	for (i = 0; i < add; i++) {
		msg = kasprintf(GFP_KERNEL, "Corrected error "
				"(Socket=%d channel=%d dimm=%d)",
				pvt->i7core_dev->socket, chan, dimm);

		edac_mc_handle_fbd_ce(mci, row, 0, msg);
		kfree (msg);
	}
}

/*
 * Compute the delta of the hardware CE counters against the last snapshot
 * and feed the increments to the EDAC core. The counters are 15-bit, so a
 * negative delta is fixed up by adding 0x7fff (counter wrap-around).
 */
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
					 const int chan,
					 const int new0,
					 const int new1,
					 const int new2)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int add0 = 0, add1 = 0, add2 = 0;
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */
		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];

		if (add2 < 0)
			add2 += 0x7fff;
		pvt->rdimm_ce_count[chan][2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->rdimm_ce_count[chan][1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->rdimm_ce_count[chan][0] += add0;
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->rdimm_last_ce_count[chan][2] = new2;
	pvt->rdimm_last_ce_count[chan][1] = new1;
	pvt->rdimm_last_ce_count[chan][0] = new0;

	/*updated the edac core */
	if (add0 != 0)
		i7core_rdimm_update_csrow(mci, chan, 0, add0);
	if (add1 != 0)
		i7core_rdimm_update_csrow(mci, chan, 1, add1);
	if (add2 != 0)
		i7core_rdimm_update_csrow(mci, chan, 2, add2);
}

/*
 * Poll the six MC_COR_ECC_CNT registers (two DIMM counters per dword) and
 * update the per-channel RDIMM CE counts. With 3 DIMMs per channel each
 * counter half maps to one DIMM; with 2 DIMMs both halves of a dword
 * belong to the same DIMM and are summed.
 */
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/*Read DEV 3: FUN 2:  MC_COR_ECC_CNT regs directly*/
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
								&rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
								&rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
								&rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
								&rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
								&rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
								&rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/*if the channel has 3 dimms*/
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
				DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
				DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
	}
}

/* This function is based on the device 3 function 4 registers as described on:
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 *	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv1, rcv0;
	int new0, new1, new2;

	if (!pvt->pci_mcr[4]) {
		debugf0("%s MCR registers not found\n", __func__);
		return;
	}

	/* Corrected test errors */
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);

	/* Store the new values */
	new2 = DIMM2_COR_ERR(rcv1);
	new1 =
DIMM1_COR_ERR(rcv0); new0 = DIMM0_COR_ERR(rcv0); /* Updates CE counters if it is not the first time here */ if (pvt->ce_count_available) { /* Updates CE counters */ int add0, add1, add2; add2 = new2 - pvt->udimm_last_ce_count[2]; add1 = new1 - pvt->udimm_last_ce_count[1]; add0 = new0 - pvt->udimm_last_ce_count[0]; if (add2 < 0) add2 += 0x7fff; pvt->udimm_ce_count[2] += add2; if (add1 < 0) add1 += 0x7fff; pvt->udimm_ce_count[1] += add1; if (add0 < 0) add0 += 0x7fff; pvt->udimm_ce_count[0] += add0; if (add0 | add1 | add2) i7core_printk(KERN_ERR, "New Corrected error(s): " "dimm0: +%d, dimm1: +%d, dimm2 +%d\n", add0, add1, add2); } else pvt->ce_count_available = 1; /* Store the new values */ pvt->udimm_last_ce_count[2] = new2; pvt->udimm_last_ce_count[1] = new1; pvt->udimm_last_ce_count[0] = new0; } /* * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32 * Architectures Software Developer’s Manual Volume 3B. * Nehalem are defined as family 0x06, model 0x1a * * The MCA registers used here are the following ones: * struct mce field MCA Register * m->status MSR_IA32_MC8_STATUS * m->addr MSR_IA32_MC8_ADDR * m->misc MSR_IA32_MC8_MISC * In the case of Nehalem, the error information is masked at .status and .misc * fields */ static void i7core_mce_output_error(struct mem_ctl_info *mci, const struct mce *m) { struct i7core_pvt *pvt = mci->pvt_info; char *type, *optype, *err, *msg; unsigned long error = m->status & 0x1ff0000l; u32 optypenum = (m->status >> 4) & 0x07; u32 core_err_cnt = (m->status >> 38) & 0x7fff; u32 dimm = (m->misc >> 16) & 0x3; u32 channel = (m->misc >> 18) & 0x3; u32 syndrome = m->misc >> 32; u32 errnum = find_first_bit(&error, 32); int csrow; if (m->mcgstatus & 1) type = "FATAL"; else type = "NON_FATAL"; switch (optypenum) { case 0: optype = "generic undef request"; break; case 1: optype = "read error"; break; case 2: optype = "write error"; break; case 3: optype = "addr/cmd error"; break; case 4: optype = "scrubbing error"; break; 
default: optype = "reserved"; break; } switch (errnum) { case 16: err = "read ECC error"; break; case 17: err = "RAS ECC error"; break; case 18: err = "write parity error"; break; case 19: err = "redundacy loss"; break; case 20: err = "reserved"; break; case 21: err = "memory range error"; break; case 22: err = "RTID out of range"; break; case 23: err = "address parity error"; break; case 24: err = "byte enable parity error"; break; default: err = "unknown"; } /* FIXME: should convert addr into bank and rank information */ msg = kasprintf(GFP_ATOMIC, "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, " "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n", type, (long long) m->addr, m->cpu, dimm, channel, syndrome, core_err_cnt, (long long)m->status, (long long)m->misc, optype, err); debugf0("%s", msg); csrow = pvt->csrow_map[channel][dimm]; /* Call the helper to output message */ if (m->mcgstatus & 1) edac_mc_handle_fbd_ue(mci, csrow, 0, 0 /* FIXME: should be channel here */, msg); else if (!pvt->is_registered) edac_mc_handle_fbd_ce(mci, csrow, 0 /* FIXME: should be channel here */, msg); kfree(msg); } /* * i7core_check_error Retrieve and process errors reported by the * hardware. Called by the Core module. */ static void i7core_check_error(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; int i; unsigned count = 0; struct mce *m; /* * MCE first step: Copy all mce errors into a temporary buffer * We use a double buffering here, to reduce the risk of * losing an error. 
*/ smp_rmb(); count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in) % MCE_LOG_LEN; if (!count) goto check_ce_error; m = pvt->mce_outentry; if (pvt->mce_in + count > MCE_LOG_LEN) { unsigned l = MCE_LOG_LEN - pvt->mce_in; memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l); smp_wmb(); pvt->mce_in = 0; count -= l; m += l; } memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count); smp_wmb(); pvt->mce_in += count; smp_rmb(); if (pvt->mce_overrun) { i7core_printk(KERN_ERR, "Lost %d memory errors\n", pvt->mce_overrun); smp_wmb(); pvt->mce_overrun = 0; } /* * MCE second step: parse errors and display */ for (i = 0; i < count; i++) i7core_mce_output_error(mci, &pvt->mce_outentry[i]); /* * Now, let's increment CE error counts */ check_ce_error: if (!pvt->is_registered) i7core_udimm_check_mc_ecc_err(mci); else i7core_rdimm_check_mc_ecc_err(mci); } /* * i7core_mce_check_error Replicates mcelog routine to get errors * This routine simply queues mcelog errors, and * return. The error itself should be handled later * by i7core_check_error. * WARNING: As this routine should be called at NMI time, extra care should * be taken to avoid deadlocks, and to be as fast as possible. 
*/ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val, void *data) { struct mce *mce = (struct mce *)data; struct i7core_dev *i7_dev; struct mem_ctl_info *mci; struct i7core_pvt *pvt; i7_dev = get_i7core_dev(mce->socketid); if (!i7_dev) return NOTIFY_BAD; mci = i7_dev->mci; pvt = mci->pvt_info; /* * Just let mcelog handle it if the error is * outside the memory controller */ if (((mce->status & 0xffff) >> 7) != 1) return NOTIFY_DONE; /* Bank 8 registers are the only ones that we know how to handle */ if (mce->bank != 8) return NOTIFY_DONE; #ifdef CONFIG_SMP /* Only handle if it is the right mc controller */ if (mce->socketid != pvt->i7core_dev->socket) return NOTIFY_DONE; #endif smp_rmb(); if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { smp_wmb(); pvt->mce_overrun++; return NOTIFY_DONE; } /* Copy memory error at the ringbuffer */ memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce)); smp_wmb(); pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN; /* Handle fatal errors immediately */ if (mce->mcgstatus & 1) i7core_check_error(mci); /* Advise mcelog that the errors were handled */ return NOTIFY_STOP; } static struct notifier_block i7_mce_dec = { .notifier_call = i7core_mce_check_error, }; struct memdev_dmi_entry { u8 type; u8 length; u16 handle; u16 phys_mem_array_handle; u16 mem_err_info_handle; u16 total_width; u16 data_width; u16 size; u8 form; u8 device_set; u8 device_locator; u8 bank_locator; u8 memory_type; u16 type_detail; u16 speed; u8 manufacturer; u8 serial_number; u8 asset_tag; u8 part_number; u8 attributes; u32 extended_size; u16 conf_mem_clk_speed; } __attribute__((__packed__)); /* * Decode the DRAM Clock Frequency, be paranoid, make sure that all * memory devices show the same speed, and if they don't then consider * all speeds to be invalid. 
*/ static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq) { int *dclk_freq = _dclk_freq; u16 dmi_mem_clk_speed; if (*dclk_freq == -1) return; if (dh->type == DMI_ENTRY_MEM_DEVICE) { struct memdev_dmi_entry *memdev_dmi_entry = (struct memdev_dmi_entry *)dh; unsigned long conf_mem_clk_speed_offset = (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed - (unsigned long)&memdev_dmi_entry->type; unsigned long speed_offset = (unsigned long)&memdev_dmi_entry->speed - (unsigned long)&memdev_dmi_entry->type; /* Check that a DIMM is present */ if (memdev_dmi_entry->size == 0) return; /* * Pick the configured speed if it's available, otherwise * pick the DIMM speed, or we don't have a speed. */ if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) { dmi_mem_clk_speed = memdev_dmi_entry->conf_mem_clk_speed; } else if (memdev_dmi_entry->length > speed_offset) { dmi_mem_clk_speed = memdev_dmi_entry->speed; } else { *dclk_freq = -1; return; } if (*dclk_freq == 0) { /* First pass, speed was 0 */ if (dmi_mem_clk_speed > 0) { /* Set speed if a valid speed is read */ *dclk_freq = dmi_mem_clk_speed; } else { /* Otherwise we don't have a valid speed */ *dclk_freq = -1; } } else if (*dclk_freq > 0 && *dclk_freq != dmi_mem_clk_speed) { /* * If we have a speed, check that all DIMMS are the same * speed, otherwise set the speed as invalid. */ *dclk_freq = -1; } } } /* * The default DCLK frequency is used as a fallback if we * fail to find anything reliable in the DMI. The value * is taken straight from the datasheet. */ #define DEFAULT_DCLK_FREQ 800 static int get_dclk_freq(void) { int dclk_freq = 0; dmi_walk(decode_dclk, (void *)&dclk_freq); if (dclk_freq < 1) return DEFAULT_DCLK_FREQ; return dclk_freq; } /* * set_sdram_scrub_rate This routine sets byte/sec bandwidth scrub rate * to hardware according to SCRUBINTERVAL formula * found in datasheet. 
*/ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw) { struct i7core_pvt *pvt = mci->pvt_info; struct pci_dev *pdev; u32 dw_scrub; u32 dw_ssr; /* Get data from the MC register, function 2 */ pdev = pvt->pci_mcr[2]; if (!pdev) return -ENODEV; pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub); if (new_bw == 0) { /* Prepare to disable petrol scrub */ dw_scrub &= ~STARTSCRUB; /* Stop the patrol scrub engine */ write_and_test(pdev, MC_SCRUB_CONTROL, dw_scrub & ~SCRUBINTERVAL_MASK); /* Get current status of scrub rate and set bit to disable */ pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr); dw_ssr &= ~SSR_MODE_MASK; dw_ssr |= SSR_MODE_DISABLE; } else { const int cache_line_size = 64; const u32 freq_dclk_mhz = pvt->dclk_freq; unsigned long long scrub_interval; /* * Translate the desired scrub rate to a register value and * program the corresponding register value. */ scrub_interval = (unsigned long long)freq_dclk_mhz * cache_line_size * 1000000; do_div(scrub_interval, new_bw); if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK) return -EINVAL; dw_scrub = SCRUBINTERVAL_MASK & scrub_interval; /* Start the patrol scrub engine */ pci_write_config_dword(pdev, MC_SCRUB_CONTROL, STARTSCRUB | dw_scrub); /* Get current status of scrub rate and set bit to enable */ pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr); dw_ssr &= ~SSR_MODE_MASK; dw_ssr |= SSR_MODE_ENABLE; } /* Disable or enable scrubbing */ pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr); return new_bw; } /* * get_sdram_scrub_rate This routine convert current scrub rate value * into byte/sec bandwidth accourding to * SCRUBINTERVAL formula found in datasheet. 
*/ static int get_sdram_scrub_rate(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; struct pci_dev *pdev; const u32 cache_line_size = 64; const u32 freq_dclk_mhz = pvt->dclk_freq; unsigned long long scrub_rate; u32 scrubval; /* Get data from the MC register, function 2 */ pdev = pvt->pci_mcr[2]; if (!pdev) return -ENODEV; /* Get current scrub control data */ pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval); /* Mask highest 8-bits to 0 */ scrubval &= SCRUBINTERVAL_MASK; if (!scrubval) return 0; /* Calculate scrub rate value into byte/sec bandwidth */ scrub_rate = (unsigned long long)freq_dclk_mhz * 1000000 * cache_line_size; do_div(scrub_rate, scrubval); return (int)scrub_rate; } static void enable_sdram_scrub_setting(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; u32 pci_lock; /* Unlock writes to pci registers */ pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock); pci_lock &= ~0x3; pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, pci_lock | MC_CFG_UNLOCK); mci->set_sdram_scrub_rate = set_sdram_scrub_rate; mci->get_sdram_scrub_rate = get_sdram_scrub_rate; } static void disable_sdram_scrub_setting(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; u32 pci_lock; /* Lock writes to pci registers */ pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock); pci_lock &= ~0x3; pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, pci_lock | MC_CFG_LOCK); } static void i7core_pci_ctl_create(struct i7core_pvt *pvt) { pvt->i7core_pci = edac_pci_create_generic_ctl( &pvt->i7core_dev->pdev[0]->dev, EDAC_MOD_STR); if (unlikely(!pvt->i7core_pci)) i7core_printk(KERN_WARNING, "Unable to setup PCI error report via EDAC\n"); } static void i7core_pci_ctl_release(struct i7core_pvt *pvt) { if (likely(pvt->i7core_pci)) edac_pci_release_generic_ctl(pvt->i7core_pci); else i7core_printk(KERN_ERR, "Couldn't find mem_ctl_info for socket %d\n", pvt->i7core_dev->socket); pvt->i7core_pci = NULL; } 
static void i7core_unregister_mci(struct i7core_dev *i7core_dev) { struct mem_ctl_info *mci = i7core_dev->mci; struct i7core_pvt *pvt; if (unlikely(!mci || !mci->pvt_info)) { debugf0("MC: " __FILE__ ": %s(): dev = %p\n", __func__, &i7core_dev->pdev[0]->dev); i7core_printk(KERN_ERR, "Couldn't find mci handler\n"); return; } pvt = mci->pvt_info; debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", __func__, mci, &i7core_dev->pdev[0]->dev); /* Disable scrubrate setting */ if (pvt->enable_scrub) disable_sdram_scrub_setting(mci); mce_unregister_decode_chain(&i7_mce_dec); /* Disable EDAC polling */ i7core_pci_ctl_release(pvt); /* Remove MC sysfs nodes */ edac_mc_del_mc(mci->dev); debugf1("%s: free mci struct\n", mci->ctl_name); kfree(mci->ctl_name); edac_mc_free(mci); i7core_dev->mci = NULL; } static int i7core_register_mci(struct i7core_dev *i7core_dev) { struct mem_ctl_info *mci; struct i7core_pvt *pvt; int rc, channels, csrows; /* Check the number of active and not disabled channels */ rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows); if (unlikely(rc < 0)) return rc; /* allocate a new MC control structure */ mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket); if (unlikely(!mci)) return -ENOMEM; debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", __func__, mci, &i7core_dev->pdev[0]->dev); pvt = mci->pvt_info; memset(pvt, 0, sizeof(*pvt)); /* Associates i7core_dev and mci for future usage */ pvt->i7core_dev = i7core_dev; i7core_dev->mci = mci; /* * FIXME: how to handle RDDR3 at MCI level? 
It is possible to have * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different * memory channels */ mci->mtype_cap = MEM_FLAG_DDR3; mci->edac_ctl_cap = EDAC_FLAG_NONE; mci->edac_cap = EDAC_FLAG_NONE; mci->mod_name = "i7core_edac.c"; mci->mod_ver = I7CORE_REVISION; mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket); mci->dev_name = pci_name(i7core_dev->pdev[0]); mci->ctl_page_to_phys = NULL; /* Store pci devices at mci for faster access */ rc = mci_bind_devs(mci, i7core_dev); if (unlikely(rc < 0)) goto fail0; if (pvt->is_registered) mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs; else mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs; /* Get dimm basic config */ get_dimm_config(mci); /* record ptr to the generic device */ mci->dev = &i7core_dev->pdev[0]->dev; /* Set the function pointer to an actual operation function */ mci->edac_check = i7core_check_error; /* Enable scrubrate setting */ if (pvt->enable_scrub) enable_sdram_scrub_setting(mci); /* add this new MC control structure to EDAC's list of MCs */ if (unlikely(edac_mc_add_mc(mci))) { debugf0("MC: " __FILE__ ": %s(): failed edac_mc_add_mc()\n", __func__); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ rc = -EINVAL; goto fail0; } /* Default error mask is any memory */ pvt->inject.channel = 0; pvt->inject.dimm = -1; pvt->inject.rank = -1; pvt->inject.bank = -1; pvt->inject.page = -1; pvt->inject.col = -1; /* allocating generic PCI control info */ i7core_pci_ctl_create(pvt); /* DCLK for scrub rate setting */ pvt->dclk_freq = get_dclk_freq(); mce_register_decode_chain(&i7_mce_dec); return 0; fail0: kfree(mci->ctl_name); edac_mc_free(mci); i7core_dev->mci = NULL; return rc; } /* * i7core_probe Probe for ONE instance of device to see if it is * present. 
* return: * 0 for FOUND a device * < 0 for error code */ static int __devinit i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int rc, count = 0; struct i7core_dev *i7core_dev; /* get the pci devices we want to reserve for our use */ mutex_lock(&i7core_edac_lock); /* * All memory controllers are allocated at the first pass. */ if (unlikely(probed >= 1)) { mutex_unlock(&i7core_edac_lock); return -ENODEV; } probed++; rc = i7core_get_all_devices(); if (unlikely(rc < 0)) goto fail0; list_for_each_entry(i7core_dev, &i7core_edac_list, list) { count++; rc = i7core_register_mci(i7core_dev); if (unlikely(rc < 0)) goto fail1; } /* * Nehalem-EX uses a different memory controller. However, as the * memory controller is not visible on some Nehalem/Nehalem-EP, we * need to indirectly probe via a X58 PCI device. The same devices * are found on (some) Nehalem-EX. So, on those machines, the * probe routine needs to return -ENODEV, as the actual Memory * Controller registers won't be detected. */ if (!count) { rc = -ENODEV; goto fail1; } i7core_printk(KERN_INFO, "Driver loaded, %d memory controller(s) found.\n", count); mutex_unlock(&i7core_edac_lock); return 0; fail1: list_for_each_entry(i7core_dev, &i7core_edac_list, list) i7core_unregister_mci(i7core_dev); i7core_put_all_devices(); fail0: mutex_unlock(&i7core_edac_lock); return rc; } /* * i7core_remove destructor for one instance of device * */ static void __devexit i7core_remove(struct pci_dev *pdev) { struct i7core_dev *i7core_dev; debugf0(__FILE__ ": %s()\n", __func__); /* * we have a trouble here: pdev value for removal will be wrong, since * it will point to the X58 register used to detect that the machine * is a Nehalem or upper design. 
However, due to the way several PCI * devices are grouped together to provide MC functionality, we need * to use a different method for releasing the devices */ mutex_lock(&i7core_edac_lock); if (unlikely(!probed)) { mutex_unlock(&i7core_edac_lock); return; } list_for_each_entry(i7core_dev, &i7core_edac_list, list) i7core_unregister_mci(i7core_dev); /* Release PCI resources */ i7core_put_all_devices(); probed--; mutex_unlock(&i7core_edac_lock); } MODULE_DEVICE_TABLE(pci, i7core_pci_tbl); /* * i7core_driver pci_driver structure for this module * */ static struct pci_driver i7core_driver = { .name = "i7core_edac", .probe = i7core_probe, .remove = __devexit_p(i7core_remove), .id_table = i7core_pci_tbl, }; /* * i7core_init Module entry function * Try to initialize this module for its devices */ static int __init i7core_init(void) { int pci_rc; debugf2("MC: " __FILE__ ": %s()\n", __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); if (use_pci_fixup) i7core_xeon_pci_fixup(pci_dev_table); pci_rc = pci_register_driver(&i7core_driver); if (pci_rc >= 0) return 0; i7core_printk(KERN_ERR, "Failed to register device with error %d.\n", pci_rc); return pci_rc; } /* * i7core_exit() Module exit function * Unregister the driver */ static void __exit i7core_exit(void) { debugf2("MC: " __FILE__ ": %s()\n", __func__); pci_unregister_driver(&i7core_driver); } module_init(i7core_init); module_exit(i7core_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - " I7CORE_REVISION); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
artefvck/android_kernel_asus_T00F
arch/powerpc/mm/stab.c
2737
7612
/* * PowerPC64 Segment Translation Support. * * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com * Copyright (c) 2001 Dave Engebretsen * * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/memblock.h> #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/paca.h> #include <asm/cputable.h> #include <asm/prom.h> struct stab_entry { unsigned long esid_data; unsigned long vsid_data; }; #define NR_STAB_CACHE_ENTRIES 8 static DEFINE_PER_CPU(long, stab_cache_ptr); static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache); /* * Create a segment table entry for the given esid/vsid pair. */ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid) { unsigned long esid_data, vsid_data; unsigned long entry, group, old_esid, castout_entry, i; unsigned int global_entry; struct stab_entry *ste, *castout_ste; unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET; vsid_data = vsid << STE_VSID_SHIFT; esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V; if (! kernel_segment) esid_data |= STE_ESID_KS; /* Search the primary group first. */ global_entry = (esid & 0x1f) << 3; ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7)); /* Find an empty entry, if one exists. */ for (group = 0; group < 2; group++) { for (entry = 0; entry < 8; entry++, ste++) { if (!(ste->esid_data & STE_ESID_V)) { ste->vsid_data = vsid_data; eieio(); ste->esid_data = esid_data; return (global_entry | entry); } } /* Now search the secondary group. */ global_entry = ((~esid) & 0x1f) << 3; ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7)); } /* * Could not find empty entry, pick one with a round robin selection. 
* Search all entries in the two groups. */ castout_entry = get_paca()->stab_rr; for (i = 0; i < 16; i++) { if (castout_entry < 8) { global_entry = (esid & 0x1f) << 3; ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7)); castout_ste = ste + castout_entry; } else { global_entry = ((~esid) & 0x1f) << 3; ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7)); castout_ste = ste + (castout_entry - 8); } /* Dont cast out the first kernel segment */ if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET) break; castout_entry = (castout_entry + 1) & 0xf; } get_paca()->stab_rr = (castout_entry + 1) & 0xf; /* Modify the old entry to the new value. */ /* Force previous translations to complete. DRENG */ asm volatile("isync" : : : "memory"); old_esid = castout_ste->esid_data >> SID_SHIFT; castout_ste->esid_data = 0; /* Invalidate old entry */ asm volatile("sync" : : : "memory"); /* Order update */ castout_ste->vsid_data = vsid_data; eieio(); /* Order update */ castout_ste->esid_data = esid_data; asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT)); /* Ensure completion of slbie */ asm volatile("sync" : : : "memory"); return (global_entry | (castout_entry & 0x7)); } /* * Allocate a segment table entry for the given ea and mm */ static int __ste_allocate(unsigned long ea, struct mm_struct *mm) { unsigned long vsid; unsigned char stab_entry; unsigned long offset; /* Kernel or user address? */ if (is_kernel_addr(ea)) { vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); } else { if ((ea >= TASK_SIZE_USER64) || (! 
mm)) return 1; vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M); } stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid); if (!is_kernel_addr(ea)) { offset = __get_cpu_var(stab_cache_ptr); if (offset < NR_STAB_CACHE_ENTRIES) __get_cpu_var(stab_cache[offset++]) = stab_entry; else offset = NR_STAB_CACHE_ENTRIES+1; __get_cpu_var(stab_cache_ptr) = offset; /* Order update */ asm volatile("sync":::"memory"); } return 0; } int ste_allocate(unsigned long ea) { return __ste_allocate(ea, current->mm); } /* * Do the segment table work for a context switch: flush all user * entries from the table, then preload some probably useful entries * for the new task */ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) { struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; struct stab_entry *ste; unsigned long offset; unsigned long pc = KSTK_EIP(tsk); unsigned long stack = KSTK_ESP(tsk); unsigned long unmapped_base; /* Force previous translations to complete. DRENG */ asm volatile("isync" : : : "memory"); /* * We need interrupts hard-disabled here, not just soft-disabled, * so that a PMU interrupt can't occur, which might try to access * user memory (to get a stack trace) and possible cause an STAB miss * which would update the stab_cache/stab_cache_ptr per-cpu variables. */ hard_irq_disable(); offset = __get_cpu_var(stab_cache_ptr); if (offset <= NR_STAB_CACHE_ENTRIES) { int i; for (i = 0; i < offset; i++) { ste = stab + __get_cpu_var(stab_cache[i]); ste->esid_data = 0; /* invalidate entry */ } } else { unsigned long entry; /* Invalidate all entries. */ ste = stab; /* Never flush the first entry. 
*/ ste += 1; for (entry = 1; entry < (HW_PAGE_SIZE / sizeof(struct stab_entry)); entry++, ste++) { unsigned long ea; ea = ste->esid_data & ESID_MASK; if (!is_kernel_addr(ea)) { ste->esid_data = 0; } } } asm volatile("sync; slbia; sync":::"memory"); __get_cpu_var(stab_cache_ptr) = 0; /* Now preload some entries for the new task */ if (test_tsk_thread_flag(tsk, TIF_32BIT)) unmapped_base = TASK_UNMAPPED_BASE_USER32; else unmapped_base = TASK_UNMAPPED_BASE_USER64; __ste_allocate(pc, mm); if (GET_ESID(pc) == GET_ESID(stack)) return; __ste_allocate(stack, mm); if ((GET_ESID(pc) == GET_ESID(unmapped_base)) || (GET_ESID(stack) == GET_ESID(unmapped_base))) return; __ste_allocate(unmapped_base, mm); /* Order update */ asm volatile("sync" : : : "memory"); } /* * Allocate segment tables for secondary CPUs. These must all go in * the first (bolted) segment, so that do_stab_bolted won't get a * recursive segment miss on the segment table itself. */ void __init stabs_alloc(void) { int cpu; if (mmu_has_feature(MMU_FTR_SLB)) return; for_each_possible_cpu(cpu) { unsigned long newstab; if (cpu == 0) continue; /* stab for CPU 0 is statically allocated */ newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE, 1<<SID_SHIFT); newstab = (unsigned long)__va(newstab); memset((void *)newstab, 0, HW_PAGE_SIZE); paca[cpu].stab_addr = newstab; paca[cpu].stab_real = __pa(newstab); printk(KERN_INFO "Segment table for CPU %d at 0x%llx " "virtual, 0x%llx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real); } } /* * Build an entry for the base kernel segment and put it into * the segment table or SLB. All other segment table or SLB * entries are faulted in. 
*/ void stab_initialize(unsigned long stab) { unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M); unsigned long stabreal; asm volatile("isync; slbia; isync":::"memory"); make_ste(stab, GET_ESID(PAGE_OFFSET), vsid); /* Order update */ asm volatile("sync":::"memory"); /* Set ASR */ stabreal = get_paca()->stab_real | 0x1ul; mtspr(SPRN_ASR, stabreal); }
gpl-2.0
DirtyUnicorns/android_kernel_lge_gee
lib/crc32.c
4529
42819
/* * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin * cleaned up code to current version of sparse and added the slicing-by-8 * algorithm to the closely similar existing slicing-by-4 algorithm. * * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com> * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks! * Code was from the public domain, copyright abandoned. Code was * subsequently included in the kernel, thus was re-licensed under the * GNU GPL v2. * * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com> * Same crc32 function was used in 5 other places in the kernel. * I made one version, and deleted the others. * There are various incantations of crc32(). Some use a seed of 0 or ~0. * Some xor at the end with ~0. The generic crc32() function takes * seed as an argument, and doesn't xor at the end. Then individual * users can do whatever they need. * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0. * fs/jffs2 uses seed 0, doesn't xor with ~0. * fs/partitions/efi.c uses seed ~0, xor's with ~0. * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. 
*/ /* see: Documentation/crc32.txt for a description of algorithms */ #include <linux/crc32.h> #include <linux/module.h> #include <linux/types.h> #include "crc32defs.h" #if CRC_LE_BITS > 8 # define tole(x) ((__force u32) __constant_cpu_to_le32(x)) #else # define tole(x) (x) #endif #if CRC_BE_BITS > 8 # define tobe(x) ((__force u32) __constant_cpu_to_be32(x)) #else # define tobe(x) (x) #endif #include "crc32table.h" MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); MODULE_DESCRIPTION("Various CRC32 calculations"); MODULE_LICENSE("GPL"); #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8 /* implements slicing-by-4 or slicing-by-8 algorithm */ static inline u32 crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) { # ifdef __LITTLE_ENDIAN # define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8) # define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \ t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255]) # define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \ t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255]) # else # define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) # define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \ t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255]) # define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \ t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255]) # endif const u32 *b; size_t rem_len; # ifdef CONFIG_X86 size_t i; # endif const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3]; const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7]; u32 q; /* Align it */ if (unlikely((long)buf & 3 && len)) { do { DO_CRC(*buf++); } while ((--len) && ((long)buf)&3); } # if CRC_LE_BITS == 32 rem_len = len & 3; len = len >> 2; # else rem_len = len & 7; len = len >> 3; # endif b = (const u32 *)buf; # ifdef CONFIG_X86 --b; for (i = 0; i < len; i++) { # else for (--b; len; --len) { # endif q = crc ^ *++b; /* use pre increment for speed */ # if CRC_LE_BITS == 32 crc = DO_CRC4; # else crc = DO_CRC8; q = *++b; crc ^= DO_CRC4; # endif } len = rem_len; 
/* And the last few bytes */ if (len) { u8 *p = (u8 *)(b + 1) - 1; # ifdef CONFIG_X86 for (i = 0; i < len; i++) DO_CRC(*++p); /* use pre increment for speed */ # else do { DO_CRC(*++p); /* use pre increment for speed */ } while (--len); # endif } return crc; #undef DO_CRC #undef DO_CRC4 #undef DO_CRC8 } #endif /** * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for * other uses, or the previous crc32 value if computing incrementally. * @p: pointer to buffer over which CRC is run * @len: length of buffer @p */ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, size_t len, const u32 (*tab)[256], u32 polynomial) { #if CRC_LE_BITS == 1 int i; while (len--) { crc ^= *p++; for (i = 0; i < 8; i++) crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0); } # elif CRC_LE_BITS == 2 while (len--) { crc ^= *p++; crc = (crc >> 2) ^ tab[0][crc & 3]; crc = (crc >> 2) ^ tab[0][crc & 3]; crc = (crc >> 2) ^ tab[0][crc & 3]; crc = (crc >> 2) ^ tab[0][crc & 3]; } # elif CRC_LE_BITS == 4 while (len--) { crc ^= *p++; crc = (crc >> 4) ^ tab[0][crc & 15]; crc = (crc >> 4) ^ tab[0][crc & 15]; } # elif CRC_LE_BITS == 8 /* aka Sarwate algorithm */ while (len--) { crc ^= *p++; crc = (crc >> 8) ^ tab[0][crc & 255]; } # else crc = (__force u32) __cpu_to_le32(crc); crc = crc32_body(crc, p, len, tab); crc = __le32_to_cpu((__force __le32)crc); #endif return crc; } #if CRC_LE_BITS == 1 u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE); } u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE); } #else u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE); } u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, 
crc32ctable_le, CRC32C_POLY_LE); } #endif EXPORT_SYMBOL(crc32_le); EXPORT_SYMBOL(__crc32c_le); /** * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for * other uses, or the previous crc32 value if computing incrementally. * @p: pointer to buffer over which CRC is run * @len: length of buffer @p */ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p, size_t len, const u32 (*tab)[256], u32 polynomial) { #if CRC_BE_BITS == 1 int i; while (len--) { crc ^= *p++ << 24; for (i = 0; i < 8; i++) crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial : 0); } # elif CRC_BE_BITS == 2 while (len--) { crc ^= *p++ << 24; crc = (crc << 2) ^ tab[0][crc >> 30]; crc = (crc << 2) ^ tab[0][crc >> 30]; crc = (crc << 2) ^ tab[0][crc >> 30]; crc = (crc << 2) ^ tab[0][crc >> 30]; } # elif CRC_BE_BITS == 4 while (len--) { crc ^= *p++ << 24; crc = (crc << 4) ^ tab[0][crc >> 28]; crc = (crc << 4) ^ tab[0][crc >> 28]; } # elif CRC_BE_BITS == 8 while (len--) { crc ^= *p++ << 24; crc = (crc << 8) ^ tab[0][crc >> 24]; } # else crc = (__force u32) __cpu_to_be32(crc); crc = crc32_body(crc, p, len, tab); crc = __be32_to_cpu((__force __be32)crc); # endif return crc; } #if CRC_LE_BITS == 1 u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) { return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE); } #else u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) { return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE); } #endif EXPORT_SYMBOL(crc32_be); #ifdef CONFIG_CRC32_SELFTEST /* 4096 random bytes */ static u8 __attribute__((__aligned__(8))) test_buf[] = { 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30, 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4, 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60, 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c, 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4, 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a, 0x56, 0xa1, 0x5c, 0x27, 
0x03, 0x67, 0x9f, 0x3a, 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4, 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9, 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4, 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca, 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61, 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e, 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a, 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f, 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd, 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c, 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88, 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53, 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f, 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4, 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74, 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60, 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09, 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07, 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1, 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f, 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2, 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0, 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95, 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22, 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93, 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86, 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d, 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40, 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b, 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35, 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40, 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63, 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b, 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8, 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72, 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86, 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff, 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed, 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c, 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed, 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30, 0xdc, 
0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99, 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4, 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80, 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37, 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04, 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e, 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd, 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c, 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09, 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb, 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b, 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53, 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b, 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f, 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff, 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40, 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6, 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb, 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73, 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f, 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4, 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66, 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1, 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80, 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f, 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5, 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7, 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce, 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff, 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48, 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26, 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72, 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88, 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9, 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc, 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8, 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09, 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8, 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c, 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48, 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d, 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 
0x95, 0x0f, 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae, 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97, 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8, 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75, 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc, 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27, 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf, 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7, 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0, 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8, 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c, 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44, 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54, 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38, 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f, 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b, 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7, 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef, 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e, 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c, 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c, 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0, 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37, 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf, 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e, 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4, 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60, 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe, 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61, 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3, 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe, 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40, 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec, 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f, 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7, 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79, 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c, 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f, 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21, 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9, 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30, 0x0a, 0xde, 0x48, 
0x7f, 0xdb, 0x0d, 0xd1, 0x2b, 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee, 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6, 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3, 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09, 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd, 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f, 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9, 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc, 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59, 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60, 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5, 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1, 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8, 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9, 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab, 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80, 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01, 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e, 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d, 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35, 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38, 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a, 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac, 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca, 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57, 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed, 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20, 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef, 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c, 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a, 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64, 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4, 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54, 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16, 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26, 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc, 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87, 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60, 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d, 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54, 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13, 
0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59, 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb, 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f, 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15, 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78, 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93, 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e, 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31, 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1, 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37, 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15, 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78, 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f, 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31, 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f, 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc, 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9, 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3, 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe, 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4, 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24, 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1, 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85, 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8, 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09, 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c, 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46, 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5, 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39, 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2, 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc, 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35, 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde, 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80, 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15, 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63, 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58, 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d, 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf, 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12, 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c, 0x44, 0x79, 0x16, 0x30, 0x4e, 
0xf4, 0xf6, 0x9b, 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1, 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6, 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73, 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9, 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e, 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22, 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb, 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2, 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c, 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c, 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93, 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f, 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38, 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57, 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03, 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90, 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8, 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4, 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36, 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7, 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47, 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46, 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73, 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72, 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23, 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a, 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58, 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f, 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96, 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9, 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b, 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c, 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef, 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3, 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4, 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f, 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17, 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18, 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8, 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98, 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42, 0x8f, 0xc5, 
0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97, 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97, 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1, 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77, 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb, 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c, 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb, 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56, 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04, 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48, 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe, 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d, 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97, 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8, 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f, 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e, 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca, 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44, 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f, 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6, 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63, 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19, 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58, 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b, 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28, 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf, 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6, 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3, 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe, 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f, 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf, 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9, 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e, 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7, 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70, 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0, 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d, 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4, 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5, 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85, 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc, 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 
0x2f, 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56, 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb, 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b, 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5, 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03, 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23, 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03, 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87, 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4, 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43, 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11, 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40, 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59, 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9, 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30, 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd, 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45, 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83, 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b, 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5, 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3, 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84, 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8, 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34, 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b, 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31, 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b, 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40, 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b, 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e, 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38, 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb, 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2, 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c, 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1, 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc, 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec, 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34, 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95, 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92, 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f, 0xb1, 0xaf, 0x26, 0xba, 
0x7c, 0xfc, 0x7e, 0x1c, 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b, 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c, 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5, 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb, 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4, 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9, 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4, 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41, 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a, 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8, 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06, 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62, 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47, 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4, 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00, 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67, 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81, 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0, 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10, 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79, 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19, 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8, 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1, 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83, 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86, 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55, 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66, 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0, 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49, 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea, 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24, 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e, 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88, 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87, 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34, 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f, 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a, 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a, 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93, 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37, 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38, 0x5a, 
0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4, 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48, 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65, 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09, 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e, 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5, 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b, 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4, 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e, 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d, 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0, 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5, 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48, 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e, 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f, 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a, 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d, 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14, 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69, 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53, 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56, 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48, 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4, 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26, 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e, 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40, 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7, 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62, 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe, 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf, 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2, 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d, 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32, 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa, 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45, 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04, 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33, 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad, 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4, 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c, 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b, 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 
0x83, 0x36, 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa, 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9, 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28, 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b, 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03, 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d, 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff, 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39, 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b, 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2, 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34, 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe, 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0, 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27, 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86, 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90, 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03, 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb, 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57, 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9, 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5, 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16, 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5, 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a, 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d, 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0, 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f, 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48, 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1, 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09, 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51, 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b, 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf, 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe, 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad, 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e, 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57, 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f, 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef, 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8, 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69, 0x3c, 0xf1, 0xf2, 
0x05, 0x46, 0xdc, 0x2f, 0x9d, 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59, 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9, 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d, 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea, 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56, 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4, 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8, 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78, 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f, 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4, 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91, 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f, 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c, 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57, 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4, 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23, 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17, 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66, 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39, 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36, 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00, 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7, 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60, 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c, 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e, 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7, 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a, 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d, 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37, 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82, 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8, 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e, 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85, 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98, 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22, 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7, 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49, 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33, 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc, 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8, 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f, 
0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3, 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98, 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c, 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6, 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc, 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d, }; /* 100 test cases */ static struct crc_test { u32 crc; /* random starting crc */ u32 start; /* random 6 bit offset in buf */ u32 length; /* random 11 bit length of test */ u32 crc_le; /* expected crc32_le result */ u32 crc_be; /* expected crc32_be result */ u32 crc32c_le; /* expected crc32c_le result */ } test[] = { {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c}, {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca}, {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8}, {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a}, {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152}, {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7}, {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc}, {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2}, {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d}, {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5}, {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f}, {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a}, {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8}, {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa}, {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801}, {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597}, {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b}, {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a}, {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 
0x40aae25b, 0x7eabae4d}, {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982}, {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18}, {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7}, {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3}, {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5}, {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59}, {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e}, {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603}, {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060}, {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072}, {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59}, {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213}, {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41}, {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5}, {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2}, {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a}, {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2}, {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b}, {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1}, {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba}, {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62}, {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe}, {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988}, {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be}, {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546}, {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc}, {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 
0x3a3ebea0, 0x4af1dd69}, {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a}, {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2}, {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd}, {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb}, {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b}, {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76}, {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339}, {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9}, {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548}, {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de}, {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59}, {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b}, {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73}, {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11}, {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c}, {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b}, {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb}, {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc}, {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196}, {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a}, {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de}, {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9}, {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0}, {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60}, {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6}, {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c}, {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 
0xa4107605, 0xaa9d6c73}, {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7}, {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf}, {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83}, {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867}, {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211}, {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2}, {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874}, {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f}, {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff}, {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95}, {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd}, {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06}, {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784}, {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616}, {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c}, {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c}, {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d}, {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d}, {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272}, {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb}, {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b}, {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e}, {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23}, {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672}, {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86}, {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd}, {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 
0x8fa9ad6a, 0x9dc0bb48}, }; #include <linux/time.h> static int __init crc32c_test(void) { int i; int errors = 0; int bytes = 0; struct timespec start, stop; u64 nsec; unsigned long flags; /* keep static to prevent cache warming code from * getting eliminated by the compiler */ static u32 crc; /* pre-warm the cache */ for (i = 0; i < 100; i++) { bytes += 2*test[i].length; crc ^= __crc32c_le(test[i].crc, test_buf + test[i].start, test[i].length); } /* reduce OS noise */ local_irq_save(flags); local_irq_disable(); getnstimeofday(&start); for (i = 0; i < 100; i++) { if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf + test[i].start, test[i].length)) errors++; } getnstimeofday(&stop); local_irq_restore(flags); local_irq_enable(); nsec = stop.tv_nsec - start.tv_nsec + 1000000000 * (stop.tv_sec - start.tv_sec); pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); if (errors) pr_warn("crc32c: %d self tests failed\n", errors); else { pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n", bytes, nsec); } return 0; } static int __init crc32_test(void) { int i; int errors = 0; int bytes = 0; struct timespec start, stop; u64 nsec; unsigned long flags; /* keep static to prevent cache warming code from * getting eliminated by the compiler */ static u32 crc; /* pre-warm the cache */ for (i = 0; i < 100; i++) { bytes += 2*test[i].length; crc ^= crc32_le(test[i].crc, test_buf + test[i].start, test[i].length); crc ^= crc32_be(test[i].crc, test_buf + test[i].start, test[i].length); } /* reduce OS noise */ local_irq_save(flags); local_irq_disable(); getnstimeofday(&start); for (i = 0; i < 100; i++) { if (test[i].crc_le != crc32_le(test[i].crc, test_buf + test[i].start, test[i].length)) errors++; if (test[i].crc_be != crc32_be(test[i].crc, test_buf + test[i].start, test[i].length)) errors++; } getnstimeofday(&stop); local_irq_restore(flags); local_irq_enable(); nsec = stop.tv_nsec - start.tv_nsec + 1000000000 * (stop.tv_sec - start.tv_sec); pr_info("crc32: 
CRC_LE_BITS = %d, CRC_BE BITS = %d\n", CRC_LE_BITS, CRC_BE_BITS); if (errors) pr_warn("crc32: %d self tests failed\n", errors); else { pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n", bytes, nsec); } return 0; } static int __init crc32test_init(void) { crc32_test(); crc32c_test(); return 0; } static void __exit crc32_exit(void) { } module_init(crc32test_init); module_exit(crc32_exit); #endif /* CONFIG_CRC32_SELFTEST */
gpl-2.0
Fechinator/FechdaKernelReloaded6.0
drivers/net/ethernet/atheros/atlx/atl1.c
4785
100915
/* * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * The full GNU General Public License is included in this distribution in the * file called COPYING. * * Contact Information: * Xiong Huang <xiong.huang@atheros.com> * Jie Yang <jie.yang@atheros.com> * Chris Snook <csnook@redhat.com> * Jay Cliburn <jcliburn@gmail.com> * * This version is adapted from the Attansic reference driver. * * TODO: * Add more ethtool functions. 
* Fix abstruse irq enable/disable condition described here: * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 * * NEEDS TESTING: * VLAN * multicast * promiscuous mode * interrupt coalescing * SMP torture testing */ #include <linux/atomic.h> #include <asm/byteorder.h> #include <linux/compiler.h> #include <linux/crc32.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/hardirq.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/irqflags.h> #include <linux/irqreturn.h> #include <linux/jiffies.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/pm.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/tcp.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> #include <net/checksum.h> #include "atl1.h" #define ATLX_DRIVER_VERSION "2.1.3" MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, " "Chris Snook <csnook@redhat.com>, " "Jay Cliburn <jcliburn@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION(ATLX_DRIVER_VERSION); /* Temporary hack for merging atl1 and atl2 */ #include "atlx.c" static const struct ethtool_ops atl1_ethtool_ops; /* * This is the only thing that needs to be changed to adjust the * maximum number of ports that the driver can manage. */ #define ATL1_MAX_NIC 4 #define OPTION_UNSET -1 #define OPTION_DISABLED 0 #define OPTION_ENABLED 1 #define ATL1_PARAM_INIT { [0 ... 
ATL1_MAX_NIC] = OPTION_UNSET } /* * Interrupt Moderate Timer in units of 2 us * * Valid Range: 10-65535 * * Default Value: 100 (200us) */ static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; static unsigned int num_int_mod_timer; module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0); MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer"); #define DEFAULT_INT_MOD_CNT 100 /* 200us */ #define MAX_INT_MOD_CNT 65000 #define MIN_INT_MOD_CNT 50 struct atl1_option { enum { enable_option, range_option, list_option } type; char *name; char *err; int def; union { struct { /* range_option info */ int min; int max; } r; struct { /* list_option info */ int nr; struct atl1_opt_list { int i; char *str; } *p; } l; } arg; }; static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, struct pci_dev *pdev) { if (*value == OPTION_UNSET) { *value = opt->def; return 0; } switch (opt->type) { case enable_option: switch (*value) { case OPTION_ENABLED: dev_info(&pdev->dev, "%s enabled\n", opt->name); return 0; case OPTION_DISABLED: dev_info(&pdev->dev, "%s disabled\n", opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value); return 0; } break; case list_option:{ int i; struct atl1_opt_list *ent; for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') dev_info(&pdev->dev, "%s\n", ent->str); return 0; } } } break; default: break; } dev_info(&pdev->dev, "invalid %s specified (%i) %s\n", opt->name, *value, opt->err); *value = opt->def; return -1; } /* * atl1_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * * This routine checks all command line parameters for valid user * input. If an invalid value is given, or if no user specified * value exists, a default value is used. 
The final value is stored * in a variable in the adapter structure. */ static void __devinit atl1_check_options(struct atl1_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int bd = adapter->bd_number; if (bd >= ATL1_MAX_NIC) { dev_notice(&pdev->dev, "no configuration for board#%i\n", bd); dev_notice(&pdev->dev, "using defaults for all values\n"); } { /* Interrupt Moderate Timer */ struct atl1_option opt = { .type = range_option, .name = "Interrupt Moderator Timer", .err = "using default of " __MODULE_STRING(DEFAULT_INT_MOD_CNT), .def = DEFAULT_INT_MOD_CNT, .arg = {.r = {.min = MIN_INT_MOD_CNT, .max = MAX_INT_MOD_CNT} } }; int val; if (num_int_mod_timer > bd) { val = int_mod_timer[bd]; atl1_validate_option(&val, &opt, pdev); adapter->imt = (u16) val; } else adapter->imt = (u16) (opt.def); } } /* * atl1_pci_tbl - PCI Device ID Table */ static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, /* required last entry */ {0,} }; MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)"); /* * Reset the transmit and receive units; mask and clear all interrupts. * hw - Struct containing variables accessed by shared code * return : 0 or idle status (if error) */ static s32 atl1_reset_hw(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; u32 icr; int i; /* * Clear Interrupt mask to stop board from generating * interrupts & Clear any pending interrupt events */ /* * iowrite32(0, hw->hw_addr + REG_IMR); * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); */ /* * Issue Soft Reset to the MAC. This will reset the chip's * transmit, receive, DMA. It will not effect * the current PCI configuration. 
The global reset bit is self- * clearing, and should clear within a microsecond. */ iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); ioread32(hw->hw_addr + REG_MASTER_CTRL); iowrite16(1, hw->hw_addr + REG_PHY_ENABLE); ioread16(hw->hw_addr + REG_PHY_ENABLE); /* delay about 1ms */ msleep(1); /* Wait at least 10ms for All module to be Idle */ for (i = 0; i < 10; i++) { icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); if (!icr) break; /* delay 1 ms */ msleep(1); /* FIXME: still the right way to do this? */ cpu_relax(); } if (icr) { if (netif_msg_hw(adapter)) dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); return icr; } return 0; } /* function about EEPROM * * check_eeprom_exist * return 0 if eeprom exist */ static int atl1_check_eeprom_exist(struct atl1_hw *hw) { u32 value; value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); if (value & SPI_FLASH_CTRL_EN_VPD) { value &= ~SPI_FLASH_CTRL_EN_VPD; iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); } value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); return ((value & 0xFF00) == 0x6C00) ? 
0 : 1;
}

/*
 * atl1_read_eeprom - read one 32-bit word from the VPD/EEPROM interface
 * hw      - Struct containing variables accessed by shared code
 * offset  - byte offset to read; must be 4-byte aligned
 * p_value - out: the word read
 * Returns true on success, false on misaligned offset or poll timeout.
 */
static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
{
	int i;
	u32 control;

	if (offset & 3)
		/* address do not align */
		return false;

	iowrite32(0, hw->hw_addr + REG_VPD_DATA);
	control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
	iowrite32(control, hw->hw_addr + REG_VPD_CAP);
	/* read back to flush the posted write before polling */
	ioread32(hw->hw_addr + REG_VPD_CAP);

	/* poll up to ~20ms for the VPD flag to signal completion */
	for (i = 0; i < 10; i++) {
		msleep(2);
		control = ioread32(hw->hw_addr + REG_VPD_CAP);
		if (control & VPD_CAP_VPD_FLAG)
			break;
	}
	if (control & VPD_CAP_VPD_FLAG) {
		*p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
		return true;
	}
	/* timeout */
	return false;
}

/*
 * Reads the value from a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to read
 * Returns 0 on success (result in *phy_data) or ATLX_ERR_PHY on
 * MDIO-busy timeout.
 */
static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
	u32 val;
	int i;

	val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
		MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	/* flush the posted write before the busy-wait below */
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	/* wait for the MDIO transaction to finish (START and BUSY clear) */
	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}
	if (!(val & (MDIO_START | MDIO_BUSY))) {
		/* low 16 bits of the control register hold the read data */
		*phy_data = (u16) val;
		return 0;
	}
	return ATLX_ERR_PHY;
}

/* SPI flash timing parameters, in controller clock units
 * (NOTE(review): exact units per the L1 datasheet - not visible here) */
#define CUSTOM_SPI_CS_SETUP	2
#define CUSTOM_SPI_CLK_HI	2
#define CUSTOM_SPI_CLK_LO	2
#define CUSTOM_SPI_CS_HOLD	2
#define CUSTOM_SPI_CS_HI	3

/*
 * atl1_spi_read - read one 32-bit word from the SPI flash at @addr.
 * Programs the flash controller timing fields, kicks off the
 * transaction, and polls for completion.
 */
static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
{
	int i;
	u32 value;

	iowrite32(0, hw->hw_addr + REG_SPI_DATA);
	iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);

	value = SPI_FLASH_CTRL_WAIT_READY |
	    (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
	    SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
	    SPI_FLASH_CTRL_CLK_HI_MASK) <<
	    SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
	    SPI_FLASH_CTRL_CLK_LO_MASK) <<
	    SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
	    SPI_FLASH_CTRL_CS_HOLD_MASK) <<
SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) << SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT; iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); value |= SPI_FLASH_CTRL_START; iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); for (i = 0; i < 10; i++) { msleep(1); value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); if (!(value & SPI_FLASH_CTRL_START)) break; } if (value & SPI_FLASH_CTRL_START) return false; *buf = ioread32(hw->hw_addr + REG_SPI_DATA); return true; } /* * get_permanent_address * return 0 if get valid mac address, */ static int atl1_get_permanent_address(struct atl1_hw *hw) { u32 addr[2]; u32 i, control; u16 reg; u8 eth_addr[ETH_ALEN]; bool key_valid; if (is_valid_ether_addr(hw->perm_mac_addr)) return 0; /* init */ addr[0] = addr[1] = 0; if (!atl1_check_eeprom_exist(hw)) { reg = 0; key_valid = false; /* Read out all EEPROM content */ i = 0; while (1) { if (atl1_read_eeprom(hw, i + 0x100, &control)) { if (key_valid) { if (reg == REG_MAC_STA_ADDR) addr[0] = control; else if (reg == (REG_MAC_STA_ADDR + 4)) addr[1] = control; key_valid = false; } else if ((control & 0xff) == 0x5A) { key_valid = true; reg = (u16) (control >> 16); } else break; } else /* read error */ break; i += 4; } *(u32 *) &eth_addr[2] = swab32(addr[0]); *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); if (is_valid_ether_addr(eth_addr)) { memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } } /* see if SPI FLAGS exist ? 
*/ addr[0] = addr[1] = 0; reg = 0; key_valid = false; i = 0; while (1) { if (atl1_spi_read(hw, i + 0x1f000, &control)) { if (key_valid) { if (reg == REG_MAC_STA_ADDR) addr[0] = control; else if (reg == (REG_MAC_STA_ADDR + 4)) addr[1] = control; key_valid = false; } else if ((control & 0xff) == 0x5A) { key_valid = true; reg = (u16) (control >> 16); } else /* data end */ break; } else /* read error */ break; i += 4; } *(u32 *) &eth_addr[2] = swab32(addr[0]); *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); if (is_valid_ether_addr(eth_addr)) { memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } /* * On some motherboards, the MAC address is written by the * BIOS directly to the MAC register during POST, and is * not stored in eeprom. If all else thus far has failed * to fetch the permanent MAC address, try reading it directly. */ addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR); addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4)); *(u32 *) &eth_addr[2] = swab32(addr[0]); *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); if (is_valid_ether_addr(eth_addr)) { memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } return 1; } /* * Reads the adapter's MAC address from the EEPROM * hw - Struct containing variables accessed by shared code */ static s32 atl1_read_mac_addr(struct atl1_hw *hw) { s32 ret = 0; u16 i; if (atl1_get_permanent_address(hw)) { random_ether_addr(hw->perm_mac_addr); ret = 1; } for (i = 0; i < ETH_ALEN; i++) hw->mac_addr[i] = hw->perm_mac_addr[i]; return ret; } /* * Hashes an address to determine its location in the multicast table * hw - Struct containing variables accessed by shared code * mc_addr - the multicast address to hash * * atl1_hash_mc_addr * purpose * set hash value for a multicast address * hash calcu processing : * 1. calcu 32bit CRC for multicast address * 2. 
reverse crc with MSB to LSB */
static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
	u32 crc32, value = 0;
	int i;

	/* CRC of the 6-byte MAC, then bit-reverse the 32-bit result */
	crc32 = ether_crc_le(6, mc_addr);
	for (i = 0; i < 32; i++)
		value |= (((crc32 >> i) & 1) << (31 - i));

	return value;
}

/*
 * Sets the bit in the multicast table corresponding to the hash value.
 * hw - Struct containing variables accessed by shared code
 * hash_value - Multicast address hash value
 */
static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg;
	u32 mta;

	/*
	 * The HASH Table is a register array of 2 32-bit registers.
	 * It is treated like an array of 64 bits.  We want to set
	 * bit BitArray[hash_value].  So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  Bit 31 of the hash value selects the
	 * register, and bits 30:26 select the bit within that
	 * register (read-modify-write, no locking here).
	 */
	hash_reg = (hash_value >> 31) & 0x1;
	hash_bit = (hash_value >> 26) & 0x1F;
	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
	mta |= (1 << hash_bit);
	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}

/*
 * Writes a value to a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to write
 * data - data to write to the PHY
 * Returns 0 on success or ATLX_ERR_PHY on MDIO-busy timeout.
 */
static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
{
	int i;
	u32 val;

	val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
	    (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
	    MDIO_SUP_PREAMBLE |
	    MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	/* flush the posted write before polling for completion */
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}

	if (!(val & (MDIO_START | MDIO_BUSY)))
		return 0;

	return ATLX_ERR_PHY;
}

/*
 * Make L001's PHY out of Power Saving State (bug)
 * hw - 
Struct containing variables accessed by shared code * when power on, L001's PHY always on Power saving State * (Gigabit Link forbidden) */ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) { s32 ret; ret = atl1_write_phy_reg(hw, 29, 0x0029); if (ret) return ret; return atl1_write_phy_reg(hw, 30, 0); } /* * Resets the PHY and make all config validate * hw - Struct containing variables accessed by shared code * * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) */ static s32 atl1_phy_reset(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; s32 ret_val; u16 phy_data; if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; else { switch (hw->media_type) { case MEDIA_TYPE_100M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_100M_HALF: phy_data = MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_10M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; break; default: /* MEDIA_TYPE_10M_HALF: */ phy_data = MII_CR_SPEED_10 | MII_CR_RESET; break; } } ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); if (ret_val) { u32 val; int i; /* pcie serdes link may be down! */ if (netif_msg_hw(adapter)) dev_dbg(&pdev->dev, "pcie phy link down\n"); for (i = 0; i < 25; i++) { msleep(1); val = ioread32(hw->hw_addr + REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; } if ((val & (MDIO_START | MDIO_BUSY)) != 0) { if (netif_msg_hw(adapter)) dev_warn(&pdev->dev, "pcie link down at least 25ms\n"); return ret_val; } } return 0; } /* * Configures PHY autoneg and flow control advertisement settings * hw - Struct containing variables accessed by shared code */ static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) { s32 ret_val; s16 mii_autoneg_adv_reg; s16 mii_1000t_ctrl_reg; /* Read the MII Auto-Neg Advertisement Register (Address 4). 
*/ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; /* Read the MII 1000Base-T Control Register (Address 9). */ mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK; /* * First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). */ mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK; /* * Need to parse media_type and set up * the appropriate PHY registers. */ switch (hw->media_type) { case MEDIA_TYPE_AUTO_SENSOR: mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | MII_AR_10T_FD_CAPS | MII_AR_100TX_HD_CAPS | MII_AR_100TX_FD_CAPS); mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; break; case MEDIA_TYPE_1000M_FULL: mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; break; case MEDIA_TYPE_100M_FULL: mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; break; case MEDIA_TYPE_100M_HALF: mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; break; case MEDIA_TYPE_10M_FULL: mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; break; default: mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; break; } /* flow control fixed to enable all */ mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); if (ret_val) return ret_val; ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg); if (ret_val) return ret_val; return 0; } /* * Configures link settings. * hw - Struct containing variables accessed by shared code * Assumes the hardware has previously been reset and the * transmitter and receiver are not enabled. */ static s32 atl1_setup_link(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; s32 ret_val; /* * Options: * PHY will advertise value(s) parsed from * autoneg_advertised and fc * no matter what autoneg is , We will not wait link result. 
*/ ret_val = atl1_phy_setup_autoneg_adv(hw); if (ret_val) { if (netif_msg_link(adapter)) dev_dbg(&pdev->dev, "error setting up autonegotiation\n"); return ret_val; } /* SW.Reset , En-Auto-Neg if needed */ ret_val = atl1_phy_reset(hw); if (ret_val) { if (netif_msg_link(adapter)) dev_dbg(&pdev->dev, "error resetting phy\n"); return ret_val; } hw->phy_configured = true; return ret_val; } static void atl1_init_flash_opcode(struct atl1_hw *hw) { if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) /* Atmel */ hw->flash_vendor = 0; /* Init OP table */ iowrite8(flash_table[hw->flash_vendor].cmd_program, hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); iowrite8(flash_table[hw->flash_vendor].cmd_rdid, hw->hw_addr + REG_SPI_FLASH_OP_RDID); iowrite8(flash_table[hw->flash_vendor].cmd_wren, hw->hw_addr + REG_SPI_FLASH_OP_WREN); iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, hw->hw_addr + REG_SPI_FLASH_OP_RDSR); iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, hw->hw_addr + REG_SPI_FLASH_OP_WRSR); iowrite8(flash_table[hw->flash_vendor].cmd_read, hw->hw_addr + REG_SPI_FLASH_OP_READ); } /* * Performs basic configuration of the adapter. * hw - Struct containing variables accessed by shared code * Assumes that the controller has previously been reset and is in a * post-reset uninitialized state. Initializes multicast table, * and Calls routines to setup link * Leaves the transmit and receive units disabled and uninitialized. 
*/ static s32 atl1_init_hw(struct atl1_hw *hw) { u32 ret_val = 0; /* Zero out the Multicast HASH table */ iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); /* clear the old settings from the multicast hash table */ iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); atl1_init_flash_opcode(hw); if (!hw->phy_configured) { /* enable GPHY LinkChange Interrupt */ ret_val = atl1_write_phy_reg(hw, 18, 0xC00); if (ret_val) return ret_val; /* make PHY out of power-saving state */ ret_val = atl1_phy_leave_power_saving(hw); if (ret_val) return ret_val; /* Call a subroutine to configure the link */ ret_val = atl1_setup_link(hw); } return ret_val; } /* * Detects the current speed and duplex settings of the hardware. * hw - Struct containing variables accessed by shared code * speed - Speed of the connection * duplex - Duplex setting of the connection */ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; s32 ret_val; u16 phy_data; /* ; --- Read PHY Specific Status Register (17) */ ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); if (ret_val) return ret_val; if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) return ATLX_ERR_PHY_RES; switch (phy_data & MII_ATLX_PSSR_SPEED) { case MII_ATLX_PSSR_1000MBS: *speed = SPEED_1000; break; case MII_ATLX_PSSR_100MBS: *speed = SPEED_100; break; case MII_ATLX_PSSR_10MBS: *speed = SPEED_10; break; default: if (netif_msg_hw(adapter)) dev_dbg(&pdev->dev, "error getting speed\n"); return ATLX_ERR_PHY_SPEED; break; } if (phy_data & MII_ATLX_PSSR_DPLX) *duplex = FULL_DUPLEX; else *duplex = HALF_DUPLEX; return 0; } static void atl1_set_mac_addr(struct atl1_hw *hw) { u32 value; /* * 00-0B-6A-F6-00-DC * 0: 6AF600DC 1: 000B * low dword */ value = (((u32) hw->mac_addr[2]) << 24) | (((u32) hw->mac_addr[3]) << 16) | (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); /* 
high dword */ value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); } /* * atl1_sw_init - Initialize general software structures (struct atl1_adapter) * @adapter: board private structure to initialize * * atl1_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). */ static int __devinit atl1_sw_init(struct atl1_adapter *adapter) { struct atl1_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; adapter->wol = 0; device_set_wakeup_enable(&adapter->pdev->dev, false); adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; adapter->ict = 50000; /* 100ms */ adapter->link_speed = SPEED_0; /* hardware init */ adapter->link_duplex = FULL_DUPLEX; hw->phy_configured = false; hw->preamble_len = 7; hw->ipgt = 0x60; hw->min_ifg = 0x50; hw->ipgr1 = 0x40; hw->ipgr2 = 0x60; hw->max_retry = 0xf; hw->lcol = 0x37; hw->jam_ipg = 7; hw->rfd_burst = 8; hw->rrd_burst = 8; hw->rfd_fetch_gap = 1; hw->rx_jumbo_th = adapter->rx_buffer_len / 8; hw->rx_jumbo_lkah = 1; hw->rrd_ret_timer = 16; hw->tpd_burst = 4; hw->tpd_fetch_th = 16; hw->txf_burst = 0x100; hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; hw->tpd_fetch_gap = 1; hw->rcb_value = atl1_rcb_64; hw->dma_ord = atl1_dma_ord_enh; hw->dmar_block = atl1_dma_req_256; hw->dmaw_block = atl1_dma_req_256; hw->cmb_rrd = 4; hw->cmb_tpd = 4; hw->cmb_rx_timer = 1; /* about 2us */ hw->cmb_tx_timer = 1; /* about 2us */ hw->smb_timer = 100000; /* about 200ms */ spin_lock_init(&adapter->lock); spin_lock_init(&adapter->mb_lock); return 0; } static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) { struct atl1_adapter *adapter = netdev_priv(netdev); u16 result; atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); return 
result; } static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val) { struct atl1_adapter *adapter = netdev_priv(netdev); atl1_write_phy_reg(&adapter->hw, reg_num, val); } /* * atl1_mii_ioctl - * @netdev: * @ifreq: * @cmd: */ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct atl1_adapter *adapter = netdev_priv(netdev); unsigned long flags; int retval; if (!netif_running(netdev)) return -EINVAL; spin_lock_irqsave(&adapter->lock, flags); retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); spin_unlock_irqrestore(&adapter->lock, flags); return retval; } /* * atl1_setup_mem_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * * Return 0 on success, negative on failure */ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_ring_header *ring_header = &adapter->ring_header; struct pci_dev *pdev = adapter->pdev; int size; u8 offset = 0; size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); if (unlikely(!tpd_ring->buffer_info)) { if (netif_msg_drv(adapter)) dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size); goto err_nomem; } rfd_ring->buffer_info = (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); /* * real ring DMA buffer * each ring/block may need up to 8 bytes for alignment, hence the * additional 40 bytes tacked onto the end. 
*/ ring_header->size = size = sizeof(struct tx_packet_desc) * tpd_ring->count + sizeof(struct rx_free_desc) * rfd_ring->count + sizeof(struct rx_return_desc) * rrd_ring->count + sizeof(struct coals_msg_block) + sizeof(struct stats_msg_block) + 40; ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, &ring_header->dma); if (unlikely(!ring_header->desc)) { if (netif_msg_drv(adapter)) dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); goto err_nomem; } memset(ring_header->desc, 0, ring_header->size); /* init TPD ring */ tpd_ring->dma = ring_header->dma; offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; tpd_ring->dma += offset; tpd_ring->desc = (u8 *) ring_header->desc + offset; tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; /* init RFD ring */ rfd_ring->dma = tpd_ring->dma + tpd_ring->size; offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0; rfd_ring->dma += offset; rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; /* init RRD ring */ rrd_ring->dma = rfd_ring->dma + rfd_ring->size; offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; rrd_ring->dma += offset; rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; /* init CMB */ adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; adapter->cmb.dma += offset; adapter->cmb.cmb = (struct coals_msg_block *) ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); /* init SMB */ adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); offset = (adapter->smb.dma & 0x7) ? 
(8 - (adapter->smb.dma & 0x7)) : 0; adapter->smb.dma += offset; adapter->smb.smb = (struct stats_msg_block *) ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset)); return 0; err_nomem: kfree(tpd_ring->buffer_info); return -ENOMEM; } static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; atomic_set(&tpd_ring->next_to_use, 0); atomic_set(&tpd_ring->next_to_clean, 0); rfd_ring->next_to_clean = 0; atomic_set(&rfd_ring->next_to_use, 0); rrd_ring->next_to_use = 0; atomic_set(&rrd_ring->next_to_clean, 0); } /* * atl1_clean_rx_ring - Free RFD Buffers * @adapter: board private structure */ static void atl1_clean_rx_ring(struct atl1_adapter *adapter) { struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; /* Free all the Rx ring sk_buffs */ for (i = 0; i < rfd_ring->count; i++) { buffer_info = &rfd_ring->buffer_info[i]; if (buffer_info->dma) { pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, PCI_DMA_FROMDEVICE); buffer_info->dma = 0; } if (buffer_info->skb) { dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; } } size = sizeof(struct atl1_buffer) * rfd_ring->count; memset(rfd_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ memset(rfd_ring->desc, 0, rfd_ring->size); rfd_ring->next_to_clean = 0; atomic_set(&rfd_ring->next_to_use, 0); rrd_ring->next_to_use = 0; atomic_set(&rrd_ring->next_to_clean, 0); } /* * atl1_clean_tx_ring - Free Tx Buffers * @adapter: board private structure */ static void atl1_clean_tx_ring(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; /* 
Free all the Tx ring sk_buffs */ for (i = 0; i < tpd_ring->count; i++) { buffer_info = &tpd_ring->buffer_info[i]; if (buffer_info->dma) { pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, PCI_DMA_TODEVICE); buffer_info->dma = 0; } } for (i = 0; i < tpd_ring->count; i++) { buffer_info = &tpd_ring->buffer_info[i]; if (buffer_info->skb) { dev_kfree_skb_any(buffer_info->skb); buffer_info->skb = NULL; } } size = sizeof(struct atl1_buffer) * tpd_ring->count; memset(tpd_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ memset(tpd_ring->desc, 0, tpd_ring->size); atomic_set(&tpd_ring->next_to_use, 0); atomic_set(&tpd_ring->next_to_clean, 0); } /* * atl1_free_ring_resources - Free Tx / RX descriptor Resources * @adapter: board private structure * * Free all transmit software resources */ static void atl1_free_ring_resources(struct atl1_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_ring_header *ring_header = &adapter->ring_header; atl1_clean_tx_ring(adapter); atl1_clean_rx_ring(adapter); kfree(tpd_ring->buffer_info); pci_free_consistent(pdev, ring_header->size, ring_header->desc, ring_header->dma); tpd_ring->buffer_info = NULL; tpd_ring->desc = NULL; tpd_ring->dma = 0; rfd_ring->buffer_info = NULL; rfd_ring->desc = NULL; rfd_ring->dma = 0; rrd_ring->desc = NULL; rrd_ring->dma = 0; adapter->cmb.dma = 0; adapter->cmb.cmb = NULL; adapter->smb.dma = 0; adapter->smb.smb = NULL; } static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) { u32 value; struct atl1_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; /* Config MAC CTRL Register */ value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; /* duplex */ if (FULL_DUPLEX == adapter->link_duplex) value |= MAC_CTRL_DUPLX; /* speed */ value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? 
MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
		MAC_CTRL_SPEED_SHIFT);
	/* flow control */
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
	/* PAD & CRC */
	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	/* preamble length */
	value |= (((u32) adapter->hw.preamble_len
		& MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
	/* vlan */
	__atlx_vlan_mode(netdev->features, &value);
	/* rx checksum
	   if (adapter->rx_csum)
	   value |= MAC_CTRL_RX_CHKSUM_EN;
	 */
	/* filter mode */
	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;
	/* value |= MAC_CTRL_LOOPBACK; */
	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}

/*
 * atl1_check_link - examine PHY link state and reconcile it with the
 * configured media type.
 *
 * Returns 0 on success (including "link down") or the error from
 * atl1_get_speed_and_duplex().  On a mismatch with a forced media type
 * it reprograms MII_BMCR directly; for autoneg it arms
 * phy_config_timer to re-run negotiation ~3 s later.
 */
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must read twice */
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data & BMSR_LSTATUS)) {
		/* link down */
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev, "link is down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;

	/* does the negotiated result match the configured media type? */
	switch (hw->media_type) {
	case MEDIA_TYPE_1000M_FULL:
		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}

	/* link result is our setting */
	if (!reconfig) {
		if (adapter->link_speed != speed ||
		    adapter->link_duplex != duplex)
		{
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1_setup_mac_ctrl(adapter);
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev,
					"%s link is up %d Mbps %s\n",
					netdev->name, adapter->link_speed,
					adapter->link_duplex == FULL_DUPLEX ?
					"full duplex" : "half duplex");
		}
		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
		}
		return 0;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
		/* forced media type: push the matching BMCR setting */
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 |
				MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		return 0;
	}

	/* auto-neg, insert timer to re-config phy */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer,
			round_jiffies(jiffies + 3 * HZ));
	}

	return 0;
}

/*
 * set_flow_ctrl_old - program RXF/RRD pause thresholds for early
 * silicon revisions, derived from the software ring sizes (the new
 * variant below reads the SRAM lengths from the chip instead).
 */
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
	u32 hi, lo, value;

	/* RFD Flow Control */
	value = adapter->rfd_ring.count;
	hi = value / 16;
	if (hi < 2)
		hi = 2;
	lo = value * 7 / 8;

	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = adapter->rrd_ring.count;
	lo = value / 16;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

/*
 * set_flow_ctrl_new - pause thresholds for newer revisions, sized from
 * the on-chip SRAM RXF/RRD lengths.  (Definition continues below.)
 */
static void
set_flow_ctrl_new(struct atl1_hw *hw)
{
	u32 hi, lo, value;

	/* RXF Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
	lo = value / 16;
	if (lo < 192)
		lo = 192;
	hi = value * 7 / 8;
	if (hi < lo)
		hi = lo + 16;
	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
	lo = value / 8;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	if (hi < lo)
		hi = lo + 3;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

/*
 * atl1_configure - Configure Transmit&Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx /Rx unit of the MAC after a reset.
 * Returns 0 on success, 1 if the ISR reports PHY link down afterwards
 * (treated as configuration failure by the caller).
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	u32 value;

	/* clear interrupt status */
	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

	/* set MAC Address */
	value = (((u32) hw->mac_addr[2]) << 24) |
		(((u32) hw->mac_addr[3]) << 16) |
		(((u32) hw->mac_addr[4]) << 8) |
		(((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

	/* tx / rx ring */

	/*
	 * HI base address -- a single register holds the upper 32 bits
	 * for all rings, so they must live in the same 4 GB region.
	 */
	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
	/* LO base address */
	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_SMB_ADDR_LO);

	/* element count */
	value = adapter->rrd_ring.count;
	value <<= 16;
	value += adapter->rfd_ring.count;
	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
		REG_DESC_TPD_RING_SIZE);

	/* Load Ptr */
	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

	/* config Mailbox */
	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
		& MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
		((atomic_read(&adapter->rrd_ring.next_to_clean)
		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
		((atomic_read(&adapter->rfd_ring.next_to_use)
		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAILBOX);

	/* config IPG/IFG */
	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
		<< MAC_IPG_IFG_IPGT_SHIFT) |
		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
		<< MAC_IPG_IFG_MIFG_SHIFT) |
		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
		<< MAC_IPG_IFG_IPGR1_SHIFT) |
		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
		<< MAC_IPG_IFG_IPGR2_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

	/* config Half-Duplex Control */
	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

	/* set Interrupt Moderator Timer */
	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

	/* set Interrupt Clear Timer */
	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

	/* set max frame size hw will accept */
	iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

	/* jumbo size & rrd retirement timer */
	value = (((u32) hw->rx_jumbo_th &
		RXQ_JMBOSZ_TH_MASK) << RXQ_JMBOSZ_TH_SHIFT) |
		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
		<< RXQ_JMBO_LKAH_SHIFT) |
		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
		<< RXQ_RRD_TIMER_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

	/* Flow Control -- threshold scheme depends on silicon revision */
	switch (hw->dev_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		set_flow_ctrl_old(adapter);
		break;
	default:
		set_flow_ctrl_new(hw);
		break;
	}

	/* config TXQ */
	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
		<< TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
		TXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

	/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
		<< TX_JUMBO_TASK_TH_SHIFT) |
		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
		<< TX_TPD_MIN_IPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

	/* config RXQ */
	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
		RXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

	/* config DMA Engine */
	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
		((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
		DMA_CTRL_DMAW_EN;
	value |= (u32) hw->dma_ord;
	if (atl1_rcb_128 == hw->rcb_value)
		value |= DMA_CTRL_RCB_VALUE;
	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

	/* config CMB / SMB */
	value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
		hw->cmb_tpd : adapter->tpd_ring.count;
	value <<= 16;
	value |= hw->cmb_rrd;
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

	/* --- enable CMB / SMB */
	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

	value = ioread32(adapter->hw.hw_addr + REG_ISR);
	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
		value = 1;	/* config failed */
	else
		value = 0;

	/* clear all interrupt status */
	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
	return value;
}

/*
 * atl1_pcie_patch - Patch for PCIE module
 *
 * Writes undocumented registers 0x12FC and 0x1008; values come from
 * the vendor and are not explained by any public datasheet.
 */
static void atl1_pcie_patch(struct atl1_adapter *adapter)
{
	u32 value;

	/* much vendor magic here */
	value = 0x6500;
	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
	/* pcie flow control mode change */
	value = ioread32(adapter->hw.hw_addr + 0x1008);
	value |= 0x8000;
	iowrite32(value, adapter->hw.hw_addr + 0x1008);
}

/*
 * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400
 * on PCI Command register is disable.
 * The function enable this bit.
 * Brackett, 2006/03/15
 */
static void atl1_via_workaround(struct atl1_adapter *adapter)
{
	unsigned long value;

	/*
	 * NOTE(review): PCI_COMMAND is accessed through the MMIO BAR
	 * here (ioread16/iowrite32), not via pci_read_config_word() --
	 * presumably the chip mirrors config space into its register
	 * window; confirm against the vendor datasheet.
	 */
	value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
	if (value & PCI_COMMAND_INTX_DISABLE)
		value &= ~PCI_COMMAND_INTX_DISABLE;
	iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
}

/*
 * atl1_inc_smb - fold the hardware Statistics Message Block counters
 * into the driver's soft_stats and mirror them into netdev->stats.
 * Called from the interrupt handler when ISR_SMB is set.
 */
static void atl1_inc_smb(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct stats_msg_block *smb = adapter->smb.smb;

	/* Fill out the OS statistics structure */
	adapter->soft_stats.rx_packets += smb->rx_ok;
	adapter->soft_stats.tx_packets += smb->tx_ok;
	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
	adapter->soft_stats.multicast += smb->rx_mcast;
	adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
		smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

	/* Rx Errors */
	adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
		smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
		smb->rx_rrd_ov + smb->rx_align_err);
	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
	adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
		smb->rx_rxf_ov);

	adapter->soft_stats.rx_pause += smb->rx_pause;
	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

	/* Tx Errors */
	adapter->soft_stats.tx_errors += (smb->tx_late_col +
		smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
	adapter->soft_stats.tx_window_errors += smb->tx_late_col;

	adapter->soft_stats.excecol += smb->tx_abort_col;
	adapter->soft_stats.deffer += smb->tx_defer;
	adapter->soft_stats.scc += smb->tx_1_col;
	adapter->soft_stats.mcc += smb->tx_2_col;
	adapter->soft_stats.latecol += smb->tx_late_col;
	adapter->soft_stats.tx_underun += smb->tx_underrun;
	adapter->soft_stats.tx_trunc += smb->tx_trunc;
	adapter->soft_stats.tx_pause += smb->tx_pause;

	/* publish accumulated totals to the generic netdev counters */
	netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
	netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
	netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
	netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
	netdev->stats.multicast = adapter->soft_stats.multicast;
	netdev->stats.collisions = adapter->soft_stats.collisions;
	netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
	netdev->stats.rx_over_errors = adapter->soft_stats.rx_missed_errors;
	netdev->stats.rx_length_errors = adapter->soft_stats.rx_length_errors;
	netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
	netdev->stats.rx_frame_errors = adapter->soft_stats.rx_frame_errors;
	netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
	netdev->stats.rx_missed_errors = adapter->soft_stats.rx_missed_errors;
	netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
	netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
	netdev->stats.tx_aborted_errors = adapter->soft_stats.tx_aborted_errors;
	netdev->stats.tx_window_errors = adapter->soft_stats.tx_window_errors;
	netdev->stats.tx_carrier_errors = adapter->soft_stats.tx_carrier_errors;
}

/*
 * atl1_update_mailbox - write current TPD producer / RFD producer /
 * RRD consumer indices to REG_MAILBOX under mb_lock, notifying the
 * hardware of new descriptors.
 */
static void atl1_update_mailbox(struct atl1_adapter *adapter)
{
	unsigned long flags;
	u32 tpd_next_to_use;
	u32 rfd_next_to_use;
	u32 rrd_next_to_clean;
	u32 value;

	spin_lock_irqsave(&adapter->mb_lock, flags);

	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);

	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
		MB_RFD_PROD_INDX_SHIFT) |
		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
		MB_RRD_CONS_INDX_SHIFT) |
		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
		MB_TPD_PROD_INDX_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
	spin_unlock_irqrestore(&adapter->mb_lock, flags);
}

/*
 * atl1_clean_alloc_flag - clear the 'alloced' flag of every RFD buffer
 * up to (rrd->buf_indx + offset), advancing rfd_ring->next_to_clean
 * with wrap-around.
 */
static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, u16 offset)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;

	while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
		rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
		if (++rfd_ring->next_to_clean == rfd_ring->count) {
			rfd_ring->next_to_clean = 0;
		}
	}
}

/*
 * atl1_update_rfd_index - for a multi-buffer RRD, recompute how many
 * RFD buffers the packet consumed and release them if the count
 * matches what the RRD reports.
 */
static void atl1_update_rfd_index(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd)
{
	u16 num_buf;

	num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
		adapter->rx_buffer_len;
	if (rrd->num_buf == num_buf)
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, num_buf);
}

/*
 * atl1_rx_checksum - set skb->ip_summed from the RRD checksum flags.
 * Defaults to CHECKSUM_NONE; upgrades to CHECKSUM_UNNECESSARY only for
 * IPv4 packets with clean IP/L4 checksum flags.
 */
static void atl1_rx_checksum(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, struct sk_buff *skb)
{
	struct pci_dev *pdev = adapter->pdev;

	/*
	 * The L1 hardware contains a bug that erroneously sets the
	 * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
	 * fragmented IP packet is received, even though the packet
	 * is perfectly valid and its checksum is correct. There's
	 * no way to distinguish between one of these good packets
	 * and a packet that actually contains a TCP/UDP checksum
	 * error, so all we can do is allow it to be handed up to
	 * the higher layers and let it be sorted out there.
	 */
	skb_checksum_none_assert(skb);

	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
					ERR_FLAG_CODE | ERR_FLAG_OV)) {
			adapter->hw_csum_err++;
			if (netif_msg_rx_err(adapter))
				dev_printk(KERN_DEBUG, &pdev->dev,
					"rx checksum error\n");
			return;
		}
	}

	/* not IPv4 */
	if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
		/* checksum is invalid, but it's not an IPv4 pkt, so ok */
		return;

	/* IPv4 packet */
	if (likely(!(rrd->err_flg &
		(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_good++;
		return;
	}
}

/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 *
 * Allocates an skb and a streaming DMA mapping for every free RFD slot
 * until it catches up with an already-allocated one, then publishes the
 * new producer index (after wmb()).  Returns the number of buffers
 * allocated; 0 means "no free RX buffer" to the caller (see atl1_up).
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct page *page;
	unsigned long offset;
	struct atl1_buffer *buffer_info, *next_info;
	struct sk_buff *skb;
	u16 num_alloc = 0;
	u16 rfd_next_to_use, next_next;
	struct rx_free_desc *rfd_desc;

	next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
	if (++next_next == rfd_ring->count)
		next_next = 0;
	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
	next_info = &rfd_ring->buffer_info[next_next];

	while (!buffer_info->alloced && !next_info->alloced) {
		if (buffer_info->skb) {
			/* slot still holds an skb: just re-arm it */
			buffer_info->alloced = 1;
			goto next;
		}

		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

		skb = netdev_alloc_skb_ip_align(adapter->netdev,
						adapter->rx_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->netdev->stats.rx_dropped++;
			break;
		}

		buffer_info->alloced = 1;
		buffer_info->skb = skb;
		buffer_info->length = (u16) adapter->rx_buffer_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(pdev, page, offset,
						adapter->rx_buffer_len,
						PCI_DMA_FROMDEVICE);
		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
		rfd_desc->coalese = 0;

next:
		rfd_next_to_use = next_next;
		if (unlikely(++next_next == rfd_ring->count))
			next_next = 0;

		buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
		next_info = &rfd_ring->buffer_info[next_next];
		num_alloc++;
	}

	if (num_alloc) {
		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
	}
	return num_alloc;
}

/*
 * atl1_intr_rx - receive path, called from the interrupt handler.
 * Walks the RRD ring, hands good packets to the stack via netif_rx(),
 * recycles buffers for bad RRDs, refills the RFD ring and (below)
 * updates the hardware mailbox.
 */
static void atl1_intr_rx(struct atl1_adapter *adapter)
{
	int i, count;
	u16 length;
	u16 rrd_next_to_clean;
	u32 value;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct rx_return_desc *rrd;
	struct sk_buff *skb;

	count = 0;

	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

	while (1) {
		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
		i = 1;
		if (likely(rrd->xsz.valid)) {	/* packet valid */
chk_rrd:
			/* check rrd status */
			if (likely(rrd->num_buf == 1))
				goto rrd_ok;
			else if (netif_msg_rx_err(adapter)) {
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"unexpected RRD buffer count\n");
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"rx_buf_len = %d\n",
					adapter->rx_buffer_len);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD num_buf = %d\n",
					rrd->num_buf);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD pkt_len = %d\n",
					rrd->xsz.xsum_sz.pkt_size);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD pkt_flg = 0x%08X\n",
					rrd->pkt_flg);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD err_flg = 0x%08X\n",
					rrd->err_flg);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD vlan_tag = 0x%08X\n",
					rrd->vlan_tag);
			}

			/* rrd seems to be bad -- retry once (i starts at 1) */
			if (unlikely(i-- > 0)) {
				/* rrd may not be DMAed completely */
				udelay(1);
				goto chk_rrd;
			}
			/* bad rrd */
			if (netif_msg_rx_err(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"bad RRD\n");
			/* see if update RFD index */
			if (rrd->num_buf > 1)
				atl1_update_rfd_index(adapter, rrd);

			/* update rrd */
			rrd->xsz.valid = 0;
			if (++rrd_next_to_clean == rrd_ring->count)
				rrd_next_to_clean = 0;
			count++;
			continue;
		} else {	/* current rrd still not be updated */

			break;
		}
rrd_ok:
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, 0);

		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
		if (++rfd_ring->next_to_clean == rfd_ring->count)
			rfd_ring->next_to_clean = 0;

		/* update rrd next to clean */
		if (++rrd_next_to_clean == rrd_ring->count)
			rrd_next_to_clean = 0;
		count++;

		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
			if (!(rrd->err_flg &
				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
				| ERR_FLAG_LEN))) {
				/* packet error, don't need upstream */
				buffer_info->alloced = 0;
				rrd->xsz.valid = 0;
				continue;
			}
		}

		/* Good Receive */
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			buffer_info->length, PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;
		skb = buffer_info->skb;
		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

		/* hardware length includes the FCS; strip it */
		skb_put(skb, length - ETH_FCS_LEN);

		/* Receive Checksum Offload */
		atl1_rx_checksum(adapter, rrd, skb);
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		if (rrd->pkt_flg & PACKET_FLAG_VLAN_INS) {
			/* undo the chip's bit-swizzled VLAN tag layout */
			u16 vlan_tag = (rrd->vlan_tag >> 4) |
				((rrd->vlan_tag & 7) << 13) |
				((rrd->vlan_tag & 8) << 9);

			__vlan_hwaccel_put_tag(skb, vlan_tag);
		}
		netif_rx(skb);

		/* let protocol layer free skb */
		buffer_info->skb = NULL;
		buffer_info->alloced = 0;
		rrd->xsz.valid = 0;
	}

	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

	atl1_alloc_rx_buffers(adapter);

	/* update mailbox ?
	 */
	if (count) {
		u32 tpd_next_to_use;
		u32 rfd_next_to_use;

		/* in irq context: plain spin_lock, not irqsave */
		spin_lock(&adapter->mb_lock);

		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
		rfd_next_to_use =
		    atomic_read(&adapter->rfd_ring.next_to_use);
		rrd_next_to_clean =
		    atomic_read(&adapter->rrd_ring.next_to_clean);
		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
			((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
			((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
		spin_unlock(&adapter->mb_lock);
	}
}

/*
 * atl1_intr_tx - transmit completion, called from the interrupt
 * handler.  Unmaps and frees every descriptor up to the consumer index
 * reported by the hardware in the CMB, then wakes the queue if it was
 * stopped while the carrier is still up.
 */
static void atl1_intr_tx(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 sw_tpd_next_to_clean;
	u16 cmb_tpd_next_to_clean;

	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

	while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
		buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
		if (buffer_info->dma) {
			pci_unmap_page(adapter->pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb_irq(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (++sw_tpd_next_to_clean == tpd_ring->count)
			sw_tpd_next_to_clean = 0;
	}
	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

	if (netif_queue_stopped(adapter->netdev) &&
	    netif_carrier_ok(adapter->netdev))
		netif_wake_queue(adapter->netdev);
}

/*
 * atl1_tpd_avail - number of free TPD descriptors (one slot is always
 * kept unused to distinguish full from empty).
 */
static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
	return (next_to_clean > next_to_use) ?
		next_to_clean - next_to_use - 1 :
		tpd_ring->count + next_to_clean - next_to_use - 1;
}

/*
 * atl1_tso - set up TSO fields in the TPD for an IPv4/TCP GSO skb.
 *
 * Returns:
 *   0  - not a GSO skb (or not IPv4): no TSO, caller tries checksum
 *        offload instead
 *   1  - header-only frame: checksums computed, no segmentation
 *   3  - TSO enabled in ptpd->word3
 *  -1  - pskb_expand_head() failed
 *  -2  - unexpected IP header offset (caller drops the skb)
 */
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	u8 hdr_len, ip_off;
	u32 real_len;
	int err;

	if (skb_shinfo(skb)->gso_size) {
		if (skb_header_cloned(skb)) {
			/* must own the header before editing iph/tcph */
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (unlikely(err))
				return -1;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			/* trim padding beyond the IP total length */
			real_len = (((unsigned char *)iph - skb->data) +
				ntohs(iph->tot_len));
			if (real_len < skb->len)
				pskb_trim(skb, real_len);
			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
			if (skb->len == hdr_len) {
				/* headers only: offload checksums, no TSO */
				iph->check = 0;
				tcp_hdr(skb)->check =
					~csum_tcpudp_magic(iph->saddr,
					iph->daddr, tcp_hdrlen(skb),
					IPPROTO_TCP, 0);
				ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
					TPD_IPHL_SHIFT;
				ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
					TPD_TCPHDRLEN_MASK) <<
					TPD_TCPHDRLEN_SHIFT;
				ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
				ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
				return 1;
			}

			/* pseudo-header checksum; hw fills in the rest */
			iph->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(iph->saddr, iph->daddr,
				0, IPPROTO_TCP, 0);
			ip_off = (unsigned char *)iph -
				(unsigned char *) skb_network_header(skb);
			if (ip_off == 8) /* 802.3-SNAP frame */
				ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
			else if (ip_off != 0)
				return -2;

			ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
				TPD_IPHL_SHIFT;
			ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
				TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
			ptpd->word3 |= (skb_shinfo(skb)->gso_size &
				TPD_MSS_MASK) << TPD_MSS_SHIFT;
			ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
			return 3;
		}
	}
	return false;
}

/*
 * atl1_tx_csum - set up partial-checksum offload fields in the TPD.
 * Returns non-zero if offload was programmed, 0 if not needed, -1 if
 * the checksum start offset is odd (hardware limitation).
 */
static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		css = skb_checksum_start_offset(skb);
		cso = css + (u8) skb->csum_offset;
		if (unlikely(css & 0x1)) {
			/* L1 hardware requires an even number here */
			if (netif_msg_tx_err(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"payload offset not an even number\n");
			return -1;
		}
		ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
			TPD_PLOADOFFSET_SHIFT;
		ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
			TPD_CCSUMOFFSET_SHIFT;
		ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
		return true;
	}
	return 0;
}

/*
 * atl1_tx_map - DMA-map the skb into one or more TPD buffer slots.
 *
 * For TSO the header gets its own slot and the linear payload is
 * split into ATL1_MAX_TX_BUF_LEN pieces; page fragments are mapped
 * with skb_frag_dma_map().  The skb pointer is stored only in the
 * LAST slot so atl1_intr_tx() frees it exactly once.
 */
static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 buf_len = skb->len;
	struct page *page;
	unsigned long offset;
	unsigned int nr_frags;
	unsigned int f;
	int retval;
	u16 next_to_use;
	u16 data_len;
	u8 hdr_len;

	buf_len -= skb->data_len;
	nr_frags = skb_shinfo(skb)->nr_frags;
	next_to_use = atomic_read(&tpd_ring->next_to_use);
	buffer_info = &tpd_ring->buffer_info[next_to_use];
	BUG_ON(buffer_info->skb);
	/* put skb in last TPD */
	buffer_info->skb = NULL;

	retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
	if (retval) {
		/* TSO */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		buffer_info->length = hdr_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
						offset, hdr_len,
						PCI_DMA_TODEVICE);

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;

		if (buf_len > hdr_len) {
			int i, nseg;

			data_len = buf_len - hdr_len;
			nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
			for (i = 0; i < nseg; i++) {
				buffer_info =
				    &tpd_ring->buffer_info[next_to_use];
				buffer_info->skb = NULL;
				buffer_info->length =
				    (ATL1_MAX_TX_BUF_LEN >=
				     data_len) ? ATL1_MAX_TX_BUF_LEN : data_len;
				data_len -= buffer_info->length;
				page = virt_to_page(skb->data +
					(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
				offset = (unsigned long)(skb->data +
					(hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
					~PAGE_MASK;
				buffer_info->dma = pci_map_page(adapter->pdev,
					page, offset, buffer_info->length,
					PCI_DMA_TODEVICE);
				if (++next_to_use == tpd_ring->count)
					next_to_use = 0;
			}
		}
	} else {
		/* not TSO */
		buffer_info->length = buf_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, buf_len, PCI_DMA_TODEVICE);
		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;
		u16 i, nseg;

		frag = &skb_shinfo(skb)->frags[f];
		buf_len = skb_frag_size(frag);

		nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
			ATL1_MAX_TX_BUF_LEN;
		for (i = 0; i < nseg; i++) {
			buffer_info = &tpd_ring->buffer_info[next_to_use];
			BUG_ON(buffer_info->skb);

			buffer_info->skb = NULL;
			buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
				ATL1_MAX_TX_BUF_LEN : buf_len;
			buf_len -= buffer_info->length;
			buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
				frag, i * ATL1_MAX_TX_BUF_LEN,
				buffer_info->length, DMA_TO_DEVICE);

			if (++next_to_use == tpd_ring->count)
				next_to_use = 0;
		}
	}

	/* last tpd's buffer-info */
	buffer_info->skb = skb;
}

/*
 * atl1_tx_queue - copy the template TPD into 'count' ring slots,
 * filling in per-slot DMA address/length, TSO header flag for the
 * first slot and EOP for the last, then advance the producer index.
 */
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
	struct tx_packet_desc *ptpd)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct tx_packet_desc *tpd;
	u16 j;
	u32 val;
	u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);

	for (j = 0; j < count; j++) {
		buffer_info = &tpd_ring->buffer_info[next_to_use];
		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
		if (tpd != ptpd)
			memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
		tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT);
		tpd->word2 |= (cpu_to_le16(buffer_info->length) &
			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;

		/*
		 * if this is the first packet in a TSO chain, set
		 * TPD_HDRFLAG, otherwise, clear it.
		 */
		val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
			TPD_SEGMENT_EN_MASK;
		if (val) {
			if (!j)
				tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
			else
				tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
		}

		if (j == (count - 1))
			tpd->word3 |= 1 << TPD_EOP_SHIFT;

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	atomic_set(&tpd_ring->next_to_use, next_to_use);
}

/*
 * atl1_xmit_frame - ndo_start_xmit handler.
 *
 * Counts the descriptors the frame needs (linear data, TSO split,
 * page fragments), stops the queue and returns NETDEV_TX_BUSY when the
 * ring is short, otherwise builds the template TPD (VLAN tag, TSO or
 * checksum offload), maps and queues the buffers and rings the
 * mailbox doorbell.
 */
static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	int len;
	int tso;
	int count = 1;
	int ret_val;
	struct tx_packet_desc *ptpd;
	u16 vlan_tag;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	unsigned int f;
	unsigned int proto_hdr_len;

	len = skb_headlen(skb);

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) /
			 ATL1_MAX_TX_BUF_LEN;
	}

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		if (skb->protocol == htons(ETH_P_IP)) {
			proto_hdr_len = (skb_transport_offset(skb) +
					 tcp_hdrlen(skb));
			if (unlikely(proto_hdr_len > len)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			/* need additional TPD ? */
			if (proto_hdr_len != len)
				count += (len - proto_hdr_len +
					ATL1_MAX_TX_BUF_LEN - 1) /
					ATL1_MAX_TX_BUF_LEN;
		}
	}

	if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
		/* not enough descriptors */
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(adapter))
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"tx busy\n");
		return NETDEV_TX_BUSY;
	}

	ptpd = ATL1_TPD_DESC(tpd_ring,
		(u16) atomic_read(&tpd_ring->next_to_use));
	memset(ptpd, 0, sizeof(struct tx_packet_desc));

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		/* chip expects the VLAN tag bit-swizzled (see rx path) */
		vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
			((vlan_tag >> 9) & 0x8);
		ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
		ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) <<
			TPD_VLANTAG_SHIFT;
	}

	tso = atl1_tso(adapter, skb, ptpd);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!tso) {
		ret_val = atl1_tx_csum(adapter, skb, ptpd);
		if (ret_val < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	atl1_tx_map(adapter, skb, ptpd);
	atl1_tx_queue(adapter, count, ptpd);
	atl1_update_mailbox(adapter);
	mmiowb();
	return NETDEV_TX_OK;
}

/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 * @pt_regs: CPU registers structure
 *
 * Interrupt status is read from the CMB (DMAed by the chip), not from
 * a register.  Loops up to max_ints times while new status keeps
 * arriving, dispatching SMB stats, link, tx and rx work; schedules a
 * full device reset on PCIE PHY link-down or DMA errors.
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
	struct atl1_adapter *adapter = netdev_priv(data);
	u32 status;
	int max_ints = 10;

	status = adapter->cmb.cmb->int_stats;
	if (!status)
		return IRQ_NONE;

	do {
		/* clear CMB interrupt status at once */
		adapter->cmb.cmb->int_stats = 0;

		if (status & ISR_GPHY)	/* clear phy status */
			atlx_clear_phy_int(adapter);

		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

		/* check if SMB intr */
		if (status & ISR_SMB)
			atl1_inc_smb(adapter);

		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			if (netif_msg_intr(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"pcie phy link down %x\n", status);
			if (netif_running(adapter->netdev)) {
				/* reset MAC */
				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
				schedule_work(&adapter->reset_dev_task);
				return IRQ_HANDLED;
			}
		}

		/* check if DMA read/write error ?
*/ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { if (netif_msg_intr(adapter)) dev_printk(KERN_DEBUG, &adapter->pdev->dev, "pcie DMA r/w error (status = 0x%x)\n", status); iowrite32(0, adapter->hw.hw_addr + REG_IMR); schedule_work(&adapter->reset_dev_task); return IRQ_HANDLED; } /* link event */ if (status & ISR_GPHY) { adapter->soft_stats.tx_carrier_errors++; atl1_check_for_link(adapter); } /* transmit event */ if (status & ISR_CMB_TX) atl1_intr_tx(adapter); /* rx exception */ if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | ISR_HOST_RRD_OV | ISR_CMB_RX))) { if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | ISR_HOST_RRD_OV)) if (netif_msg_intr(adapter)) dev_printk(KERN_DEBUG, &adapter->pdev->dev, "rx exception, ISR = 0x%x\n", status); atl1_intr_rx(adapter); } if (--max_ints < 0) break; } while ((status = adapter->cmb.cmb->int_stats)); /* re-enable Interrupt */ iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); return IRQ_HANDLED; } /* * atl1_phy_config - Timer Call-back * @data: pointer to netdev cast into an unsigned long */ static void atl1_phy_config(unsigned long data) { struct atl1_adapter *adapter = (struct atl1_adapter *)data; struct atl1_hw *hw = &adapter->hw; unsigned long flags; spin_lock_irqsave(&adapter->lock, flags); adapter->phy_timer_pending = false; atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg); atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); spin_unlock_irqrestore(&adapter->lock, flags); } /* * Orphaned vendor comment left intact here: * <vendor comment> * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT * will assert. We do soft reset <0x1400=1> according * with the SPEC. BUT, it seemes that PCIE or DMA * state-machine will not be reset. DMAR_TO_INT will * assert again and again. 
* </vendor comment> */ static int atl1_reset(struct atl1_adapter *adapter) { int ret; ret = atl1_reset_hw(&adapter->hw); if (ret) return ret; return atl1_init_hw(&adapter->hw); } static s32 atl1_up(struct atl1_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; int irq_flags = 0; /* hardware has been reset, we need to reload some things */ atlx_set_multi(netdev); atl1_init_ring_ptrs(adapter); atlx_restore_vlan(adapter); err = atl1_alloc_rx_buffers(adapter); if (unlikely(!err)) /* no RX BUFFER allocated */ return -ENOMEM; if (unlikely(atl1_configure(adapter))) { err = -EIO; goto err_up; } err = pci_enable_msi(adapter->pdev); if (err) { if (netif_msg_ifup(adapter)) dev_info(&adapter->pdev->dev, "Unable to enable MSI: %d\n", err); irq_flags |= IRQF_SHARED; } err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags, netdev->name, netdev); if (unlikely(err)) goto err_up; atlx_irq_enable(adapter); atl1_check_link(adapter); netif_start_queue(netdev); return 0; err_up: pci_disable_msi(adapter->pdev); /* free rx_buffers */ atl1_clean_rx_ring(adapter); return err; } static void atl1_down(struct atl1_adapter *adapter) { struct net_device *netdev = adapter->netdev; netif_stop_queue(netdev); del_timer_sync(&adapter->phy_config_timer); adapter->phy_timer_pending = false; atlx_irq_disable(adapter); free_irq(adapter->pdev->irq, netdev); pci_disable_msi(adapter->pdev); atl1_reset_hw(&adapter->hw); adapter->cmb.cmb->int_stats = 0; adapter->link_speed = SPEED_0; adapter->link_duplex = -1; netif_carrier_off(netdev); atl1_clean_tx_ring(adapter); atl1_clean_rx_ring(adapter); } static void atl1_reset_dev_task(struct work_struct *work) { struct atl1_adapter *adapter = container_of(work, struct atl1_adapter, reset_dev_task); struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); atl1_down(adapter); atl1_up(adapter); netif_device_attach(netdev); } /* * atl1_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure 
* @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure */ static int atl1_change_mtu(struct net_device *netdev, int new_mtu) { struct atl1_adapter *adapter = netdev_priv(netdev); int old_mtu = netdev->mtu; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { if (netif_msg_link(adapter)) dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); return -EINVAL; } adapter->hw.max_frame_size = max_frame; adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; adapter->rx_buffer_len = (max_frame + 7) & ~7; adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; netdev->mtu = new_mtu; if ((old_mtu != new_mtu) && netif_running(netdev)) { atl1_down(adapter); atl1_up(adapter); } return 0; } /* * atl1_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. */ static int atl1_open(struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); int err; netif_carrier_off(netdev); /* allocate transmit descriptors */ err = atl1_setup_ring_resources(adapter); if (err) return err; err = atl1_up(adapter); if (err) goto err_up; return 0; err_up: atl1_reset(adapter); return err; } /* * atl1_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. 
A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. */ static int atl1_close(struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); atl1_down(adapter); atl1_free_ring_resources(adapter); return 0; } #ifdef CONFIG_PM static int atl1_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; u32 ctrl = 0; u32 wufc = adapter->wol; u32 val; u16 speed; u16 duplex; netif_device_detach(netdev); if (netif_running(netdev)) atl1_down(adapter); atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); val = ctrl & BMSR_LSTATUS; if (val) wufc &= ~ATLX_WUFC_LNKC; if (!wufc) goto disable_wol; if (val) { val = atl1_get_speed_and_duplex(hw, &speed, &duplex); if (val) { if (netif_msg_ifdown(adapter)) dev_printk(KERN_DEBUG, &pdev->dev, "error getting speed/duplex\n"); goto disable_wol; } ctrl = 0; /* enable magic packet WOL */ if (wufc & ATLX_WUFC_MAG) ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); ioread32(hw->hw_addr + REG_WOL_CTRL); /* configure the mac */ ctrl = MAC_CTRL_RX_EN; ctrl |= ((u32)((speed == SPEED_1000) ? 
MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); if (duplex == FULL_DUPLEX) ctrl |= MAC_CTRL_DUPLX; ctrl |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); __atlx_vlan_mode(netdev->features, &ctrl); if (wufc & ATLX_WUFC_MAG) ctrl |= MAC_CTRL_BC_EN; iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); ioread32(hw->hw_addr + REG_MAC_CTRL); /* poke the PHY */ ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); ioread32(hw->hw_addr + REG_PCIE_PHYMISC); } else { ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); ioread32(hw->hw_addr + REG_WOL_CTRL); iowrite32(0, hw->hw_addr + REG_MAC_CTRL); ioread32(hw->hw_addr + REG_MAC_CTRL); hw->phy_configured = false; } return 0; disable_wol: iowrite32(0, hw->hw_addr + REG_WOL_CTRL); ioread32(hw->hw_addr + REG_WOL_CTRL); ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); ioread32(hw->hw_addr + REG_PCIE_PHYMISC); hw->phy_configured = false; return 0; } static int atl1_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct atl1_adapter *adapter = netdev_priv(netdev); iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); atl1_reset_hw(&adapter->hw); if (netif_running(netdev)) { adapter->cmb.cmb->int_stats = 0; atl1_up(adapter); } netif_device_attach(netdev); return 0; } static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume); #define ATL1_PM_OPS (&atl1_pm_ops) #else static int atl1_suspend(struct device *dev) { return 0; } #define ATL1_PM_OPS NULL #endif static void atl1_shutdown(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1_adapter *adapter = netdev_priv(netdev); atl1_suspend(&pdev->dev); pci_wake_from_d3(pdev, adapter->wol); pci_set_power_state(pdev, PCI_D3hot); } 
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll the device with interrupts masked (netconsole/netpoll path). */
static void atl1_poll_controller(struct net_device *netdev)
{
	disable_irq(netdev->irq);
	atl1_intr(netdev->irq, netdev);
	enable_irq(netdev->irq);
}
#endif

/* net_device_ops dispatch table; shared atlx_* helpers live in atlx.c. */
static const struct net_device_ops atl1_netdev_ops = {
	.ndo_open		= atl1_open,
	.ndo_stop		= atl1_close,
	.ndo_start_xmit		= atl1_xmit_frame,
	.ndo_set_rx_mode	= atlx_set_multi,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= atl1_set_mac,
	.ndo_change_mtu		= atl1_change_mtu,
	.ndo_fix_features	= atlx_fix_features,
	.ndo_set_features	= atlx_set_features,
	.ndo_do_ioctl		= atlx_ioctl,
	.ndo_tx_timeout		= atlx_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= atl1_poll_controller,
#endif
};

/*
 * atl1_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int __devinit atl1_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct atl1_adapter *adapter;
	static int cards_found = 0;	/* monotonic board index across probes */
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/*
	 * The atl1 chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used at a time.
	 *
	 * Supporting 64-bit DMA on this hardware is more trouble than it's
	 * worth.  It is far easier to limit to 32-bit DMA than update
	 * various kernel subsystems to support the mechanics required by a
	 * fixed-high-32-bit system.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_dma;
	}
	/*
	 * Mark all PCI regions associated with PCI device
	 * pdev as being reserved by owner atl1_driver_name
	 */
	err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
	if (err)
		goto err_request_regions;
	/*
	 * Enables bus-mastering on the device and calls
	 * pcibios_set_master to do the needed arch specific settings
	 */
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct atl1_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);

	adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_pci_iomap;
	}
	/* get device revision number */
	adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
		(REG_MASTER_CTRL + 2));
	if (netif_msg_probe(adapter))
		dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);

	/* set default ring resource counts */
	adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
	adapter->tpd_ring.count = ATL1_DEFAULT_TPD;

	adapter->mii.dev = netdev;
	adapter->mii.mdio_read = mdio_read;
	adapter->mii.mdio_write = mdio_write;
	adapter->mii.phy_id_mask = 0x1f;
	adapter->mii.reg_num_mask = 0x1f;

	netdev->netdev_ops = &atl1_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->ethtool_ops = &atl1_ethtool_ops;
	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = atl1_sw_init(adapter);
	if (err)
		goto err_common;

	netdev->features = NETIF_F_HW_CSUM;
	netdev->features |= NETIF_F_SG;
	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO |
			      NETIF_F_HW_VLAN_RX;

	/* is this valid? see atl1_setup_mac_ctrl() */
	netdev->features |= NETIF_F_RXCSUM;

	/*
	 * patch for some L1 of old version,
	 * the final version of L1 may not need these
	 * patches
	 */
	/* atl1_pcie_patch(adapter); */

	/* really reset GPHY core */
	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);

	/*
	 * reset the controller to
	 * put the device in a known good starting state
	 */
	if (atl1_reset_hw(&adapter->hw)) {
		err = -EIO;
		goto err_common;
	}

	/* copy the MAC address out of the EEPROM */
	if (atl1_read_mac_addr(&adapter->hw)) {
		/* mark random mac */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_common;
	}

	atl1_check_options(adapter);

	/* pre-init the MAC, and setup link */
	err = atl1_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		goto err_common;
	}

	atl1_pcie_patch(adapter);
	/* assume we have no link for now */
	netif_carrier_off(netdev);

	setup_timer(&adapter->phy_config_timer, atl1_phy_config,
		    (unsigned long)adapter);
	adapter->phy_timer_pending = false;

	INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);

	INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);

	err = register_netdev(netdev);
	if (err)
		goto err_common;

	cards_found++;
	atl1_via_workaround(adapter);
	return 0;

	/* unwind in reverse order of acquisition */
err_common:
	pci_iounmap(pdev, adapter->hw.hw_addr);
err_pci_iomap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_dma:
err_request_regions:
	pci_disable_device(pdev);
	return err;
}

/*
 * atl1_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void __devexit atl1_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter;
	/* Device not available. Return. */
	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	/*
	 * Some atl1 boards lack persistent storage for their MAC, and get it
	 * from the BIOS during POST.  If we've been messing with the MAC
	 * address, we need to save the permanent one.
	 */
	if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
		memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
			ETH_ALEN);
		atl1_set_mac_addr(&adapter->hw);
	}

	/* disable the GPHY before tearing down the netdev */
	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
	unregister_netdev(netdev);
	pci_iounmap(pdev, adapter->hw.hw_addr);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}

static struct pci_driver atl1_driver = {
	.name = ATLX_DRIVER_NAME,
	.id_table = atl1_pci_tbl,
	.probe = atl1_probe,
	.remove = __devexit_p(atl1_remove),
	.shutdown = atl1_shutdown,
	.driver.pm = ATL1_PM_OPS,
};

/*
 * atl1_exit_module - Driver Exit Cleanup Routine
 *
 * atl1_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit atl1_exit_module(void)
{
	pci_unregister_driver(&atl1_driver);
}

/*
 * atl1_init_module - Driver Registration Routine
 *
 * atl1_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init atl1_init_module(void)
{
	return pci_register_driver(&atl1_driver);
}

module_init(atl1_init_module);
module_exit(atl1_exit_module);

/* Descriptor for one ethtool statistic: name + size/offset into the adapter. */
struct atl1_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

/* Expands to the size and offset of member @m inside struct atl1_adapter. */
#define ATL1_STAT(m) \
	sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)

static struct atl1_stats atl1_gstrings_stats[] = {
	{"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
	{"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
	{"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
	{"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
	{"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
	{"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
	{"multicast", ATL1_STAT(soft_stats.multicast)},
	{"collisions", ATL1_STAT(soft_stats.collisions)},
	{"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
	/* NOTE(review): "rx_over_errors" intentionally(?) maps to
	 * rx_missed_errors, same field as the entry below — confirm. */
	{"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
	{"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
	{"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
	{"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
	{"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
	{"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
	{"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
	{"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
	{"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
	{"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
	{"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
	{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
	{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
	{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
	{"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
	{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
	{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
	{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
	{"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
	{"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
};

/* ethtool -S: copy each counter (u32 or u64) out of the adapter struct. */
static void atl1_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int i;
	char *p;

	for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
		/* byte-offset arithmetic into the adapter private struct */
		p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
		data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

}

/* Report how many strings/stats each ethtool string set contains. */
static int atl1_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(atl1_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool get_settings: report supported/advertised modes and link state. */
static int atl1_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	ecmd->supported = (SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_1000baseT_Full |
			   SUPPORTED_Autoneg | SUPPORTED_TP);
	ecmd->advertising = ADVERTISED_TP;
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
			/* NOTE(review): ADVERTISED_Autoneg set twice here */
			ecmd->advertising |= ADVERTISED_Autoneg;
			ecmd->advertising |=
			    (ADVERTISED_10baseT_Half |
			     ADVERTISED_10baseT_Full |
			     ADVERTISED_100baseT_Half |
			     ADVERTISED_100baseT_Full |
			     ADVERTISED_1000baseT_Full);
		} else
			ecmd->advertising |= (ADVERTISED_1000baseT_Full);
	}
	ecmd->port = PORT_TP;
	ecmd->phy_address = 0;
	ecmd->transceiver = XCVR_INTERNAL;

	if (netif_carrier_ok(adapter->netdev)) {
		u16 link_speed, link_duplex;
		atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
		ethtool_cmd_speed_set(ecmd, link_speed);
		if (link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		/* no carrier: speed/duplex are unknown (-1) */
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		ecmd->autoneg = AUTONEG_ENABLE;
	else
		ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

/*
 * ethtool set_settings: map requested speed/duplex/autoneg onto the
 * driver's media_type, reprogram PHY advertisement + BMCR, and bounce
 * the interface if it was running.  Restores the old media_type on error.
 */
static int atl1_set_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u16 phy_data;
	int ret_val = 0;
	u16 old_media_type = hw->media_type;

	if (netif_running(adapter->netdev)) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool shutting down adapter\n");
		atl1_down(adapter);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE)
		hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
	else {
		u32 speed = ethtool_cmd_speed(ecmd);
		if (speed == SPEED_1000) {
			if (ecmd->duplex != DUPLEX_FULL) {
				/* 1000 Mb half duplex is not a valid mode */
				if (netif_msg_link(adapter))
					dev_warn(&adapter->pdev->dev,
						"1000M half is invalid\n");
				ret_val = -EINVAL;
				goto exit_sset;
			}
			hw->media_type = MEDIA_TYPE_1000M_FULL;
		} else if (speed == SPEED_100) {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_100M_FULL;
			else
				hw->media_type = MEDIA_TYPE_100M_HALF;
		} else {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_10M_FULL;
			else
				hw->media_type = MEDIA_TYPE_10M_HALF;
		}
	}
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		ecmd->advertising =
		    ADVERTISED_10baseT_Half |
		    ADVERTISED_10baseT_Full |
		    ADVERTISED_100baseT_Half |
		    ADVERTISED_100baseT_Full |
		    ADVERTISED_1000baseT_Full |
		    ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	case MEDIA_TYPE_1000M_FULL:
		ecmd->advertising =
		    ADVERTISED_1000baseT_Full |
		    ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	default:
		ecmd->advertising = 0;
		break;
	}
	if (atl1_phy_setup_autoneg_adv(hw)) {
		ret_val = -EINVAL;
		if (netif_msg_link(adapter))
			dev_warn(&adapter->pdev->dev,
				"invalid ethtool speed/duplex setting\n");
		goto exit_sset;
	}
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
	else {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
			    MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
	}
	atl1_write_phy_reg(hw, MII_BMCR, phy_data);
exit_sset:
	if (ret_val)
		hw->media_type = old_media_type;

	if (netif_running(adapter->netdev)) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool starting adapter\n");
		atl1_up(adapter);
	} else if (!ret_val) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool resetting adapter\n");
		atl1_reset(adapter);
	}
	return ret_val;
}

/* ethtool -i: driver name/version and PCI bus location. */
static void atl1_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->eedump_len = ATL1_EEDUMP_LEN;
}

/* Report Wake-on-LAN capability: only magic packet is supported. */
static void atl1_get_wol(struct net_device *netdev,
	struct ethtool_wolinfo *wol)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_MAGIC;
	wol->wolopts = 0;
	if (adapter->wol & ATLX_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

/* Configure Wake-on-LAN; reject everything except magic packet. */
static int atl1_set_wol(struct net_device *netdev,
	struct ethtool_wolinfo *wol)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
		WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;
	adapter->wol = 0;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= ATLX_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

static u32 atl1_get_msglevel(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void atl1_set_msglevel(struct net_device *netdev, u32 value)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = value;
}

static int atl1_get_regs_len(struct net_device *netdev)
{
	return ATL1_REG_COUNT * sizeof(u32);
}

/*
 * ethtool register dump: read every 32-bit register, substituting 0
 * for reserved regions of the register space.
 */
static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
	void *p)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	unsigned int i;
	u32 *regbuf = p;

	for (i = 0; i < ATL1_REG_COUNT; i++) {
		/*
		 * This switch statement avoids reserved regions
		 * of register space.
		 */
		switch (i) {
		case 6 ... 9:
		case 14:
		case 29 ... 31:
		case 34 ... 63:
		case 75 ... 127:
		case 136 ... 1023:
		case 1027 ... 1087:
		case 1091 ... 1151:
		case 1194 ... 1195:
		case 1200 ... 1201:
		case 1206 ... 1213:
		case 1216 ... 1279:
		case 1290 ... 1311:
		case 1323 ... 1343:
		case 1358 ... 1359:
		case 1368 ... 1375:
		case 1378 ... 1383:
		case 1388 ... 1391:
		case 1393 ... 1395:
		case 1402 ... 1403:
		case 1410 ... 1471:
		case 1522 ... 1535:
			/* reserved region; don't read it */
			regbuf[i] = 0;
			break;
		default:
			/* unreserved region */
			regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
		}
	}
}

/* Report current/maximum ring sizes. */
static void atl1_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
	struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;

	ring->rx_max_pending = ATL1_MAX_RFD;
	ring->tx_max_pending = ATL1_MAX_TPD;
	ring->rx_pending = rxdr->count;
	ring->tx_pending = txdr->count;
}

/*
 * Resize the TX/RX descriptor rings.  If the interface is running,
 * new ring resources are allocated before the old ones are freed so
 * the old configuration can be restored if allocation fails.
 */
static int atl1_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
	struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
	struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;

	struct atl1_tpd_ring tpd_old, tpd_new;
	struct atl1_rfd_ring rfd_old, rfd_new;
	struct atl1_rrd_ring rrd_old, rrd_new;
	struct atl1_ring_header rhdr_old, rhdr_new;
	struct atl1_smb smb;
	struct atl1_cmb cmb;
	int err;

	tpd_old = adapter->tpd_ring;
	rfd_old = adapter->rfd_ring;
	rrd_old = adapter->rrd_ring;
	rhdr_old = adapter->ring_header;

	if (netif_running(adapter->netdev))
		atl1_down(adapter);

	/* clamp to [min, max] and round up to a multiple of 4 */
	rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
	rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
			rfdr->count;
	rfdr->count = (rfdr->count + 3) & ~3;

	rrdr->count = rfdr->count;

	tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
	tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
			tpdr->count;
	tpdr->count = (tpdr->count + 3) & ~3;

	if (netif_running(adapter->netdev)) {
		/* try to get new resources before deleting old */
		err = atl1_setup_ring_resources(adapter);
		if (err)
			goto err_setup_ring;

		/*
		 * save the new, restore the old in order to free it,
		 * then restore the new back again
		 */

		rfd_new = adapter->rfd_ring;
		rrd_new = adapter->rrd_ring;
		tpd_new = adapter->tpd_ring;
		rhdr_new = adapter->ring_header;
		adapter->rfd_ring = rfd_old;
		adapter->rrd_ring = rrd_old;
		adapter->tpd_ring = tpd_old;
		adapter->ring_header = rhdr_old;
		/*
		 * Save SMB and CMB, since atl1_free_ring_resources
		 * will clear them.
		 */
		smb = adapter->smb;
		cmb = adapter->cmb;
		atl1_free_ring_resources(adapter);
		adapter->rfd_ring = rfd_new;
		adapter->rrd_ring = rrd_new;
		adapter->tpd_ring = tpd_new;
		adapter->ring_header = rhdr_new;
		adapter->smb = smb;
		adapter->cmb = cmb;

		err = atl1_up(adapter);
		if (err)
			return err;
	}
	return 0;

err_setup_ring:
	/* allocation failed: fall back to the old ring configuration */
	adapter->rfd_ring = rfd_old;
	adapter->rrd_ring = rrd_old;
	adapter->tpd_ring = tpd_old;
	adapter->ring_header = rhdr_old;
	atl1_up(adapter);
	return err;
}

/* Report pause (flow-control) settings; rx/tx pause are always on. */
static void atl1_get_pauseparam(struct net_device *netdev,
	struct ethtool_pauseparam *epause)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		epause->autoneg = AUTONEG_ENABLE;
	} else {
		epause->autoneg = AUTONEG_DISABLE;
	}
	epause->rx_pause = 1;
	epause->tx_pause = 1;
}

/*
 * NOTE(review): this "set" handler only writes back the current state
 * into @epause (same body as atl1_get_pauseparam) and never applies the
 * requested values — apparently pause settings are not configurable on
 * this hardware; confirm before changing.
 */
static int atl1_set_pauseparam(struct net_device *netdev,
	struct ethtool_pauseparam *epause)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		epause->autoneg = AUTONEG_ENABLE;
	} else {
		epause->autoneg = AUTONEG_DISABLE;
	}

	epause->rx_pause = 1;
	epause->tx_pause = 1;

	return 0;
}

/* Copy the statistic name strings for ethtool -S. */
static void atl1_get_strings(struct net_device *netdev, u32 stringset,
	u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
			memcpy(p, atl1_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

/*
 * ethtool nway_reset: if the interface is up, take it down, rewrite
 * BMCR according to the configured media type (with MII_CR_RESET set
 * to restart the PHY), and bring it back up.
 */
static int atl1_nway_reset(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (netif_running(netdev)) {
		u16 phy_data;
		atl1_down(adapter);

		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
			hw->media_type == MEDIA_TYPE_1000M_FULL) {
			phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
		} else {
			switch (hw->media_type) {
			case MEDIA_TYPE_100M_FULL:
				phy_data = MII_CR_FULL_DUPLEX |
					MII_CR_SPEED_100 | MII_CR_RESET;
				break;
			case MEDIA_TYPE_100M_HALF:
				phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
				break;
			case MEDIA_TYPE_10M_FULL:
				phy_data = MII_CR_FULL_DUPLEX |
					MII_CR_SPEED_10 | MII_CR_RESET;
				break;
			default:
				/* MEDIA_TYPE_10M_HALF */
				phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			}
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		atl1_up(adapter);
	}
	return 0;
}

static const struct ethtool_ops atl1_ethtool_ops = {
	.get_settings		= atl1_get_settings,
	.set_settings		= atl1_set_settings,
	.get_drvinfo		= atl1_get_drvinfo,
	.get_wol		= atl1_get_wol,
	.set_wol		= atl1_set_wol,
	.get_msglevel		= atl1_get_msglevel,
	.set_msglevel		= atl1_set_msglevel,
	.get_regs_len		= atl1_get_regs_len,
	.get_regs		= atl1_get_regs,
	.get_ringparam		= atl1_get_ringparam,
	.set_ringparam		= atl1_set_ringparam,
	.get_pauseparam		= atl1_get_pauseparam,
	.set_pauseparam		= atl1_set_pauseparam,
	.get_link		= ethtool_op_get_link,
	.get_strings		= atl1_get_strings,
	.nway_reset		= atl1_nway_reset,
	.get_ethtool_stats	= atl1_get_ethtool_stats,
	.get_sset_count		= atl1_get_sset_count,
};
gpl-2.0
jfdsmabalot/kernel_mako
kernel/trace/trace_mmiotrace.c
5809
9198
/* * Memory mapped I/O tracing * * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi> */ #define DEBUG 1 #include <linux/kernel.h> #include <linux/mmiotrace.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/atomic.h> #include "trace.h" #include "trace_output.h" struct header_iter { struct pci_dev *dev; }; static struct trace_array *mmio_trace_array; static bool overrun_detected; static unsigned long prev_overruns; static atomic_t dropped_count; static void mmio_reset_data(struct trace_array *tr) { overrun_detected = false; prev_overruns = 0; tracing_reset_online_cpus(tr); } static int mmio_trace_init(struct trace_array *tr) { pr_debug("in %s\n", __func__); mmio_trace_array = tr; mmio_reset_data(tr); enable_mmiotrace(); return 0; } static void mmio_trace_reset(struct trace_array *tr) { pr_debug("in %s\n", __func__); disable_mmiotrace(); mmio_reset_data(tr); mmio_trace_array = NULL; } static void mmio_trace_start(struct trace_array *tr) { pr_debug("in %s\n", __func__); mmio_reset_data(tr); } static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) { int ret = 0; int i; resource_size_t start, end; const struct pci_driver *drv = pci_dev_driver(dev); /* XXX: incomplete checks for trace_seq_printf() return value */ ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x", dev->bus->number, dev->devfn, dev->vendor, dev->device, dev->irq); /* * XXX: is pci_resource_to_user() appropriate, since we are * supposed to interpret the __ioremap() phys_addr argument based on * these printed values? */ for (i = 0; i < 7; i++) { pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); ret += trace_seq_printf(s, " %llx", (unsigned long long)(start | (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); } for (i = 0; i < 7; i++) { pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); ret += trace_seq_printf(s, " %llx", dev->resource[i].start < dev->resource[i].end ? 
(unsigned long long)(end - start) + 1 : 0); } if (drv) ret += trace_seq_printf(s, " %s\n", drv->name); else ret += trace_seq_printf(s, " \n"); return ret; } static void destroy_header_iter(struct header_iter *hiter) { if (!hiter) return; pci_dev_put(hiter->dev); kfree(hiter); } static void mmio_pipe_open(struct trace_iterator *iter) { struct header_iter *hiter; struct trace_seq *s = &iter->seq; trace_seq_printf(s, "VERSION 20070824\n"); hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); if (!hiter) return; hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL); iter->private = hiter; } /* XXX: This is not called when the pipe is closed! */ static void mmio_close(struct trace_iterator *iter) { struct header_iter *hiter = iter->private; destroy_header_iter(hiter); iter->private = NULL; } static unsigned long count_overruns(struct trace_iterator *iter) { unsigned long cnt = atomic_xchg(&dropped_count, 0); unsigned long over = ring_buffer_overruns(iter->tr->buffer); if (over > prev_overruns) cnt += over - prev_overruns; prev_overruns = over; return cnt; } static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { ssize_t ret; struct header_iter *hiter = iter->private; struct trace_seq *s = &iter->seq; unsigned long n; n = count_overruns(iter); if (n) { /* XXX: This is later than where events were lost. */ trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n); if (!overrun_detected) pr_warning("mmiotrace has lost events.\n"); overrun_detected = true; goto print_out; } if (!hiter) return 0; mmio_print_pcidev(s, hiter->dev); hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev); if (!hiter->dev) { destroy_header_iter(hiter); iter->private = NULL; } print_out: ret = trace_seq_to_user(s, ubuf, cnt); return (ret == -EBUSY) ? 
0 : ret; } static enum print_line_t mmio_print_rw(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; struct trace_mmiotrace_rw *field; struct mmiotrace_rw *rw; struct trace_seq *s = &iter->seq; unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned secs = (unsigned long)t; int ret = 1; trace_assign_type(field, entry); rw = &field->rw; switch (rw->opcode) { case MMIO_READ: ret = trace_seq_printf(s, "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", rw->width, secs, usec_rem, rw->map_id, (unsigned long long)rw->phys, rw->value, rw->pc, 0); break; case MMIO_WRITE: ret = trace_seq_printf(s, "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", rw->width, secs, usec_rem, rw->map_id, (unsigned long long)rw->phys, rw->value, rw->pc, 0); break; case MMIO_UNKNOWN_OP: ret = trace_seq_printf(s, "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx," "%02lx 0x%lx %d\n", secs, usec_rem, rw->map_id, (unsigned long long)rw->phys, (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff, (rw->value >> 0) & 0xff, rw->pc, 0); break; default: ret = trace_seq_printf(s, "rw what?\n"); break; } if (ret) return TRACE_TYPE_HANDLED; return TRACE_TYPE_PARTIAL_LINE; } static enum print_line_t mmio_print_map(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; struct trace_mmiotrace_map *field; struct mmiotrace_map *m; struct trace_seq *s = &iter->seq; unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned secs = (unsigned long)t; int ret; trace_assign_type(field, entry); m = &field->map; switch (m->opcode) { case MMIO_PROBE: ret = trace_seq_printf(s, "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", secs, usec_rem, m->map_id, (unsigned long long)m->phys, m->virt, m->len, 0UL, 0); break; case MMIO_UNPROBE: ret = trace_seq_printf(s, "UNMAP %u.%06lu %d 0x%lx %d\n", secs, usec_rem, m->map_id, 0UL, 0); break; default: ret = trace_seq_printf(s, "map what?\n"); break; } if (ret) return TRACE_TYPE_HANDLED; 
return TRACE_TYPE_PARTIAL_LINE; } static enum print_line_t mmio_print_mark(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; struct print_entry *print = (struct print_entry *)entry; const char *msg = print->buf; struct trace_seq *s = &iter->seq; unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned secs = (unsigned long)t; int ret; /* The trailing newline must be in the message. */ ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); if (!ret) return TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_HANDLED; } static enum print_line_t mmio_print_line(struct trace_iterator *iter) { switch (iter->ent->type) { case TRACE_MMIO_RW: return mmio_print_rw(iter); case TRACE_MMIO_MAP: return mmio_print_map(iter); case TRACE_PRINT: return mmio_print_mark(iter); default: return TRACE_TYPE_HANDLED; /* ignore unknown entries */ } } static struct tracer mmio_tracer __read_mostly = { .name = "mmiotrace", .init = mmio_trace_init, .reset = mmio_trace_reset, .start = mmio_trace_start, .pipe_open = mmio_pipe_open, .close = mmio_close, .read = mmio_read, .print_line = mmio_print_line, }; __init static int init_mmio_trace(void) { return register_tracer(&mmio_tracer); } device_initcall(init_mmio_trace); static void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data, struct mmiotrace_rw *rw) { struct ftrace_event_call *call = &event_mmiotrace_rw; struct ring_buffer *buffer = tr->buffer; struct ring_buffer_event *event; struct trace_mmiotrace_rw *entry; int pc = preempt_count(); event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW, sizeof(*entry), 0, pc); if (!event) { atomic_inc(&dropped_count); return; } entry = ring_buffer_event_data(event); entry->rw = *rw; if (!filter_check_discard(call, entry, buffer, event)) trace_buffer_unlock_commit(buffer, event, 0, pc); } void mmio_trace_rw(struct mmiotrace_rw *rw) { struct trace_array *tr = mmio_trace_array; struct trace_array_cpu *data = 
tr->data[smp_processor_id()]; __trace_mmiotrace_rw(tr, data, rw); } static void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data, struct mmiotrace_map *map) { struct ftrace_event_call *call = &event_mmiotrace_map; struct ring_buffer *buffer = tr->buffer; struct ring_buffer_event *event; struct trace_mmiotrace_map *entry; int pc = preempt_count(); event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP, sizeof(*entry), 0, pc); if (!event) { atomic_inc(&dropped_count); return; } entry = ring_buffer_event_data(event); entry->map = *map; if (!filter_check_discard(call, entry, buffer, event)) trace_buffer_unlock_commit(buffer, event, 0, pc); } void mmio_trace_mapping(struct mmiotrace_map *map) { struct trace_array *tr = mmio_trace_array; struct trace_array_cpu *data; preempt_disable(); data = tr->data[smp_processor_id()]; __trace_mmiotrace_map(tr, data, map); preempt_enable(); } int mmio_trace_printk(const char *fmt, va_list args) { return trace_vprintk(0, fmt, args); }
gpl-2.0
icoolguy1995/Elixer-1
drivers/video/aty/mach64_cursor.c
7345
5780
/* * ATI Mach64 CT/VT/GT/LT Cursor Support */ #include <linux/fb.h> #include <linux/init.h> #include <linux/string.h> #include <asm/io.h> #ifdef __sparc__ #include <asm/fbio.h> #endif #include <video/mach64.h> #include "atyfb.h" /* * The hardware cursor definition requires 2 bits per pixel. The * Cursor size reguardless of the visible cursor size is 64 pixels * by 64 lines. The total memory required to define the cursor is * 16 bytes / line for 64 lines or 1024 bytes of data. The data * must be in a contigiuos format. The 2 bit cursor code values are * as follows: * * 00 - pixel colour = CURSOR_CLR_0 * 01 - pixel colour = CURSOR_CLR_1 * 10 - pixel colour = transparent (current display pixel) * 11 - pixel colour = 1's complement of current display pixel * * Cursor Offset 64 pixels Actual Displayed Area * \_________________________/ * | | | | * |<--------------->| | | * | CURS_HORZ_OFFSET| | | * | |_______| | 64 Lines * | ^ | | * | | | | * | CURS_VERT_OFFSET| | * | | | | * |____________________|____| | * * * The Screen position of the top left corner of the displayed * cursor is specificed by CURS_HORZ_VERT_POSN. Care must be taken * when the cursor hot spot is not the top left corner and the * physical cursor position becomes negative. It will be be displayed * if either the horizontal or vertical cursor position is negative * * If x becomes negative the cursor manager must adjust the CURS_HORZ_OFFSET * to a larger number and saturate CUR_HORZ_POSN to zero. * * if Y becomes negative, CUR_VERT_OFFSET must be adjusted to a larger number, * CUR_OFFSET must be adjusted to a point to the appropriate line in the cursor * definitation and CUR_VERT_POSN must be saturated to zero. */ /* * Hardware Cursor support. 
*/ static const u8 cursor_bits_lookup[16] = { 0x00, 0x40, 0x10, 0x50, 0x04, 0x44, 0x14, 0x54, 0x01, 0x41, 0x11, 0x51, 0x05, 0x45, 0x15, 0x55 }; static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct atyfb_par *par = (struct atyfb_par *) info->par; u16 xoff, yoff; int x, y, h; #ifdef __sparc__ if (par->mmaped) return -EPERM; #endif if (par->asleep) return -EPERM; wait_for_fifo(1, par); if (cursor->enable) aty_st_le32(GEN_TEST_CNTL, aty_ld_le32(GEN_TEST_CNTL, par) | HWCURSOR_ENABLE, par); else aty_st_le32(GEN_TEST_CNTL, aty_ld_le32(GEN_TEST_CNTL, par) & ~HWCURSOR_ENABLE, par); /* set position */ if (cursor->set & FB_CUR_SETPOS) { x = cursor->image.dx - cursor->hot.x - info->var.xoffset; if (x < 0) { xoff = -x; x = 0; } else { xoff = 0; } y = cursor->image.dy - cursor->hot.y - info->var.yoffset; if (y < 0) { yoff = -y; y = 0; } else { yoff = 0; } h = cursor->image.height; /* * In doublescan mode, the cursor location * and heigh also needs to be doubled. */ if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN) { y<<=1; h<<=1; } wait_for_fifo(3, par); aty_st_le32(CUR_OFFSET, (info->fix.smem_len >> 3) + (yoff << 1), par); aty_st_le32(CUR_HORZ_VERT_OFF, ((u32) (64 - h + yoff) << 16) | xoff, par); aty_st_le32(CUR_HORZ_VERT_POSN, ((u32) y << 16) | x, par); } /* Set color map */ if (cursor->set & FB_CUR_SETCMAP) { u32 fg_idx, bg_idx, fg, bg; fg_idx = cursor->image.fg_color; bg_idx = cursor->image.bg_color; fg = ((info->cmap.red[fg_idx] & 0xff) << 24) | ((info->cmap.green[fg_idx] & 0xff) << 16) | ((info->cmap.blue[fg_idx] & 0xff) << 8) | 0xff; bg = ((info->cmap.red[bg_idx] & 0xff) << 24) | ((info->cmap.green[bg_idx] & 0xff) << 16) | ((info->cmap.blue[bg_idx] & 0xff) << 8); wait_for_fifo(2, par); aty_st_le32(CUR_CLR0, bg, par); aty_st_le32(CUR_CLR1, fg, par); } if (cursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) { u8 *src = (u8 *)cursor->image.data; u8 *msk = (u8 *)cursor->mask; u8 __iomem *dst = (u8 __iomem *)info->sprite.addr; unsigned int width = 
(cursor->image.width + 7) >> 3; unsigned int height = cursor->image.height; unsigned int align = info->sprite.scan_align; unsigned int i, j, offset; u8 m, b; // Clear cursor image with 1010101010... fb_memset(dst, 0xaa, 1024); offset = align - width*2; for (i = 0; i < height; i++) { for (j = 0; j < width; j++) { b = *src++; m = *msk++; switch (cursor->rop) { case ROP_XOR: // Upper 4 bits of mask data fb_writeb(cursor_bits_lookup[(b ^ m) >> 4], dst++); // Lower 4 bits of mask fb_writeb(cursor_bits_lookup[(b ^ m) & 0x0f], dst++); break; case ROP_COPY: // Upper 4 bits of mask data fb_writeb(cursor_bits_lookup[(b & m) >> 4], dst++); // Lower 4 bits of mask fb_writeb(cursor_bits_lookup[(b & m) & 0x0f], dst++); break; } } dst += offset; } } return 0; } int __devinit aty_init_cursor(struct fb_info *info) { unsigned long addr; info->fix.smem_len -= PAGE_SIZE; #ifdef __sparc__ addr = (unsigned long) info->screen_base - 0x800000 + info->fix.smem_len; info->sprite.addr = (u8 *) addr; #else #ifdef __BIG_ENDIAN addr = info->fix.smem_start - 0x800000 + info->fix.smem_len; info->sprite.addr = (u8 *) ioremap(addr, 1024); #else addr = (unsigned long) info->screen_base + info->fix.smem_len; info->sprite.addr = (u8 *) addr; #endif #endif if (!info->sprite.addr) return -ENXIO; info->sprite.size = PAGE_SIZE; info->sprite.scan_align = 16; /* Scratch pad 64 bytes wide */ info->sprite.buf_align = 16; /* and 64 lines tall. */ info->sprite.flags = FB_PIXMAP_IO; info->fbops->fb_cursor = atyfb_cursor; return 0; }
gpl-2.0
czobor/android_kernel_msm
drivers/mtd/inftlcore.c
7345
25065
/* * inftlcore.c -- Linux driver for Inverse Flash Translation Layer (INFTL) * * Copyright © 2002, Greg Ungerer (gerg@snapgear.com) * * Based heavily on the nftlcore.c code which is: * Copyright © 1999 Machine Vision Holdings, Inc. * Copyright © 1999 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/hdreg.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nftl.h> #include <linux/mtd/inftl.h> #include <linux/mtd/nand.h> #include <asm/uaccess.h> #include <asm/errno.h> #include <asm/io.h> /* * Maximum number of loops while examining next block, to have a * chance to detect consistency problems (they should never happen * because of the checks done in the mounting. */ #define MAX_LOOPS 10000 static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) { struct INFTLrecord *inftl; unsigned long temp; if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX) return; /* OK, this is moderately ugly. But probably safe. Alternatives? 
*/ if (memcmp(mtd->name, "DiskOnChip", 10)) return; if (!mtd->_block_isbad) { printk(KERN_ERR "INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n" "Please use the new diskonchip driver under the NAND subsystem.\n"); return; } pr_debug("INFTL: add_mtd for %s\n", mtd->name); inftl = kzalloc(sizeof(*inftl), GFP_KERNEL); if (!inftl) return; inftl->mbd.mtd = mtd; inftl->mbd.devnum = -1; inftl->mbd.tr = tr; if (INFTL_mount(inftl) < 0) { printk(KERN_WARNING "INFTL: could not mount device\n"); kfree(inftl); return; } /* OK, it's a new one. Set up all the data structures. */ /* Calculate geometry */ inftl->cylinders = 1024; inftl->heads = 16; temp = inftl->cylinders * inftl->heads; inftl->sectors = inftl->mbd.size / temp; if (inftl->mbd.size % temp) { inftl->sectors++; temp = inftl->cylinders * inftl->sectors; inftl->heads = inftl->mbd.size / temp; if (inftl->mbd.size % temp) { inftl->heads++; temp = inftl->heads * inftl->sectors; inftl->cylinders = inftl->mbd.size / temp; } } if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) { /* Oh no we don't have mbd.size == heads * cylinders * sectors */ printk(KERN_WARNING "INFTL: cannot calculate a geometry to " "match size of 0x%lx.\n", inftl->mbd.size); printk(KERN_WARNING "INFTL: using C:%d H:%d S:%d " "(== 0x%lx sects)\n", inftl->cylinders, inftl->heads , inftl->sectors, (long)inftl->cylinders * (long)inftl->heads * (long)inftl->sectors ); } if (add_mtd_blktrans_dev(&inftl->mbd)) { kfree(inftl->PUtable); kfree(inftl->VUtable); kfree(inftl); return; } #ifdef PSYCHO_DEBUG printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a'); #endif return; } static void inftl_remove_dev(struct mtd_blktrans_dev *dev) { struct INFTLrecord *inftl = (void *)dev; pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum); del_mtd_blktrans_dev(dev); kfree(inftl->PUtable); kfree(inftl->VUtable); } /* * Actual INFTL access routines. 
*/ /* * Read oob data from flash */ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf) { struct mtd_oob_ops ops; int res; ops.mode = MTD_OPS_PLACE_OOB; ops.ooboffs = offs & (mtd->writesize - 1); ops.ooblen = len; ops.oobbuf = buf; ops.datbuf = NULL; res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops); *retlen = ops.oobretlen; return res; } /* * Write oob data to flash */ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf) { struct mtd_oob_ops ops; int res; ops.mode = MTD_OPS_PLACE_OOB; ops.ooboffs = offs & (mtd->writesize - 1); ops.ooblen = len; ops.oobbuf = buf; ops.datbuf = NULL; res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); *retlen = ops.oobretlen; return res; } /* * Write data and oob to flash */ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf, uint8_t *oob) { struct mtd_oob_ops ops; int res; ops.mode = MTD_OPS_PLACE_OOB; ops.ooboffs = offs; ops.ooblen = mtd->oobsize; ops.oobbuf = oob; ops.datbuf = buf; ops.len = len; res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); *retlen = ops.retlen; return res; } /* * INFTL_findfreeblock: Find a free Erase Unit on the INFTL partition. * This function is used when the give Virtual Unit Chain. */ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate) { u16 pot = inftl->LastFreeEUN; int silly = inftl->nb_blocks; pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n", inftl, desperate); /* * Normally, we force a fold to happen before we run out of free * blocks completely. */ if (!desperate && inftl->numfreeEUNs < 2) { pr_debug("INFTL: there are too few free EUNs (%d)\n", inftl->numfreeEUNs); return BLOCK_NIL; } /* Scan for a free block */ do { if (inftl->PUtable[pot] == BLOCK_FREE) { inftl->LastFreeEUN = pot; return pot; } if (++pot > inftl->lastEUN) pot = 0; if (!silly--) { printk(KERN_WARNING "INFTL: no free blocks found! 
" "EUN range = %d - %d\n", 0, inftl->LastFreeEUN); return BLOCK_NIL; } } while (pot != inftl->LastFreeEUN); return BLOCK_NIL; } static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned pendingblock) { u16 BlockMap[MAX_SECTORS_PER_UNIT]; unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT]; unsigned int thisEUN, prevEUN, status; struct mtd_info *mtd = inftl->mbd.mtd; int block, silly; unsigned int targetEUN; struct inftl_oob oob; size_t retlen; pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n", inftl, thisVUC, pendingblock); memset(BlockMap, 0xff, sizeof(BlockMap)); memset(BlockDeleted, 0, sizeof(BlockDeleted)); thisEUN = targetEUN = inftl->VUtable[thisVUC]; if (thisEUN == BLOCK_NIL) { printk(KERN_WARNING "INFTL: trying to fold non-existent " "Virtual Unit Chain %d!\n", thisVUC); return BLOCK_NIL; } /* * Scan to find the Erase Unit which holds the actual data for each * 512-byte block within the Chain. */ silly = MAX_LOOPS; while (thisEUN < inftl->nb_blocks) { for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) { if ((BlockMap[block] != BLOCK_NIL) || BlockDeleted[block]) continue; if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + (block * SECTORSIZE), 16, &retlen, (char *)&oob) < 0) status = SECTOR_IGNORE; else status = oob.b.Status | oob.b.Status1; switch(status) { case SECTOR_FREE: case SECTOR_IGNORE: break; case SECTOR_USED: BlockMap[block] = thisEUN; continue; case SECTOR_DELETED: BlockDeleted[block] = 1; continue; default: printk(KERN_WARNING "INFTL: unknown status " "for block %d in EUN %d: %x\n", block, thisEUN, status); break; } } if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in Virtual " "Unit Chain 0x%x\n", thisVUC); return BLOCK_NIL; } thisEUN = inftl->PUtable[thisEUN]; } /* * OK. We now know the location of every block in the Virtual Unit * Chain, and the Erase Unit into which we are supposed to be copying. * Go for it. 
*/ pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN); for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) { unsigned char movebuf[SECTORSIZE]; int ret; /* * If it's in the target EUN already, or if it's pending write, * do nothing. */ if (BlockMap[block] == targetEUN || (pendingblock == (thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) { continue; } /* * Copy only in non free block (free blocks can only * happen in case of media errors or deleted blocks). */ if (BlockMap[block] == BLOCK_NIL) continue; ret = mtd_read(mtd, (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE), SECTORSIZE, &retlen, movebuf); if (ret < 0 && !mtd_is_bitflip(ret)) { ret = mtd_read(mtd, (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE), SECTORSIZE, &retlen, movebuf); if (ret != -EIO) pr_debug("INFTL: error went away on retry?\n"); } memset(&oob, 0xff, sizeof(struct inftl_oob)); oob.b.Status = oob.b.Status1 = SECTOR_USED; inftl_write(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) + (block * SECTORSIZE), SECTORSIZE, &retlen, movebuf, (char *)&oob); } /* * Newest unit in chain now contains data from _all_ older units. * So go through and erase each unit in chain, oldest first. (This * is important, by doing oldest first if we crash/reboot then it * it is relatively simple to clean up the mess). */ pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC); for (;;) { /* Find oldest unit in chain. */ thisEUN = inftl->VUtable[thisVUC]; prevEUN = BLOCK_NIL; while (inftl->PUtable[thisEUN] != BLOCK_NIL) { prevEUN = thisEUN; thisEUN = inftl->PUtable[thisEUN]; } /* Check if we are all done */ if (thisEUN == targetEUN) break; /* Unlink the last block from the chain. */ inftl->PUtable[prevEUN] = BLOCK_NIL; /* Now try to erase it. */ if (INFTL_formatblock(inftl, thisEUN) < 0) { /* * Could not erase : mark block as reserved. 
*/ inftl->PUtable[thisEUN] = BLOCK_RESERVED; } else { /* Correctly erased : mark it as free */ inftl->PUtable[thisEUN] = BLOCK_FREE; inftl->numfreeEUNs++; } } return targetEUN; } static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock) { /* * This is the part that needs some cleverness applied. * For now, I'm doing the minimum applicable to actually * get the thing to work. * Wear-levelling and other clever stuff needs to be implemented * and we also need to do some assessment of the results when * the system loses power half-way through the routine. */ u16 LongestChain = 0; u16 ChainLength = 0, thislen; u16 chain, EUN; pr_debug("INFTL: INFTL_makefreeblock(inftl=%p," "pending=%d)\n", inftl, pendingblock); for (chain = 0; chain < inftl->nb_blocks; chain++) { EUN = inftl->VUtable[chain]; thislen = 0; while (EUN <= inftl->lastEUN) { thislen++; EUN = inftl->PUtable[EUN]; if (thislen > 0xff00) { printk(KERN_WARNING "INFTL: endless loop in " "Virtual Chain %d: Unit %x\n", chain, EUN); /* * Actually, don't return failure. * Just ignore this chain and get on with it. */ thislen = 0; break; } } if (thislen > ChainLength) { ChainLength = thislen; LongestChain = chain; } } if (ChainLength < 2) { printk(KERN_WARNING "INFTL: no Virtual Unit Chains available " "for folding. Failing request\n"); return BLOCK_NIL; } return INFTL_foldchain(inftl, LongestChain, pendingblock); } static int nrbits(unsigned int val, int bitcount) { int i, total = 0; for (i = 0; (i < bitcount); i++) total += (((0x1 << i) & val) ? 1 : 0); return total; } /* * INFTL_findwriteunit: Return the unit number into which we can write * for this block. Make it available if it isn't already. 
*/ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block) { unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE); unsigned int thisEUN, writeEUN, prev_block, status; unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1); struct mtd_info *mtd = inftl->mbd.mtd; struct inftl_oob oob; struct inftl_bci bci; unsigned char anac, nacs, parity; size_t retlen; int silly, silly2 = 3; pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n", inftl, block); do { /* * Scan the media to find a unit in the VUC which has * a free space for the block in question. */ writeEUN = BLOCK_NIL; thisEUN = inftl->VUtable[thisVUC]; silly = MAX_LOOPS; while (thisEUN <= inftl->lastEUN) { inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + blockofs, 8, &retlen, (char *)&bci); status = bci.Status | bci.Status1; pr_debug("INFTL: status of block %d in EUN %d is %x\n", block , writeEUN, status); switch(status) { case SECTOR_FREE: writeEUN = thisEUN; break; case SECTOR_DELETED: case SECTOR_USED: /* Can't go any further */ goto hitused; case SECTOR_IGNORE: break; default: /* * Invalid block. Don't use it any more. * Must implement. */ break; } if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in " "Virtual Unit Chain 0x%x\n", thisVUC); return BLOCK_NIL; } /* Skip to next block in chain */ thisEUN = inftl->PUtable[thisEUN]; } hitused: if (writeEUN != BLOCK_NIL) return writeEUN; /* * OK. We didn't find one in the existing chain, or there * is no existing chain. Allocate a new one. */ writeEUN = INFTL_findfreeblock(inftl, 0); if (writeEUN == BLOCK_NIL) { /* * That didn't work - there were no free blocks just * waiting to be picked up. We're going to have to fold * a chain to make room. */ thisEUN = INFTL_makefreeblock(inftl, block); /* * Hopefully we free something, lets try again. * This time we are desperate... 
*/ pr_debug("INFTL: using desperate==1 to find free EUN " "to accommodate write to VUC %d\n", thisVUC); writeEUN = INFTL_findfreeblock(inftl, 1); if (writeEUN == BLOCK_NIL) { /* * Ouch. This should never happen - we should * always be able to make some room somehow. * If we get here, we've allocated more storage * space than actual media, or our makefreeblock * routine is missing something. */ printk(KERN_WARNING "INFTL: cannot make free " "space.\n"); #ifdef DEBUG INFTL_dumptables(inftl); INFTL_dumpVUchains(inftl); #endif return BLOCK_NIL; } } /* * Insert new block into virtual chain. Firstly update the * block headers in flash... */ anac = 0; nacs = 0; thisEUN = inftl->VUtable[thisVUC]; if (thisEUN != BLOCK_NIL) { inftl_read_oob(mtd, thisEUN * inftl->EraseSize + 8, 8, &retlen, (char *)&oob.u); anac = oob.u.a.ANAC + 1; nacs = oob.u.a.NACs + 1; } prev_block = inftl->VUtable[thisVUC]; if (prev_block < inftl->nb_blocks) prev_block -= inftl->firstEUN; parity = (nrbits(thisVUC, 16) & 0x1) ? 0x1 : 0; parity |= (nrbits(prev_block, 16) & 0x1) ? 0x2 : 0; parity |= (nrbits(anac, 8) & 0x1) ? 0x4 : 0; parity |= (nrbits(nacs, 8) & 0x1) ? 0x8 : 0; oob.u.a.virtualUnitNo = cpu_to_le16(thisVUC); oob.u.a.prevUnitNo = cpu_to_le16(prev_block); oob.u.a.ANAC = anac; oob.u.a.NACs = nacs; oob.u.a.parityPerField = parity; oob.u.a.discarded = 0xaa; inftl_write_oob(mtd, writeEUN * inftl->EraseSize + 8, 8, &retlen, (char *)&oob.u); /* Also back up header... 
*/ oob.u.b.virtualUnitNo = cpu_to_le16(thisVUC); oob.u.b.prevUnitNo = cpu_to_le16(prev_block); oob.u.b.ANAC = anac; oob.u.b.NACs = nacs; oob.u.b.parityPerField = parity; oob.u.b.discarded = 0xaa; inftl_write_oob(mtd, writeEUN * inftl->EraseSize + SECTORSIZE * 4 + 8, 8, &retlen, (char *)&oob.u); inftl->PUtable[writeEUN] = inftl->VUtable[thisVUC]; inftl->VUtable[thisVUC] = writeEUN; inftl->numfreeEUNs--; return writeEUN; } while (silly2--); printk(KERN_WARNING "INFTL: error folding to make room for Virtual " "Unit Chain 0x%x\n", thisVUC); return BLOCK_NIL; } /* * Given a Virtual Unit Chain, see if it can be deleted, and if so do it. */ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC) { struct mtd_info *mtd = inftl->mbd.mtd; unsigned char BlockUsed[MAX_SECTORS_PER_UNIT]; unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT]; unsigned int thisEUN, status; int block, silly; struct inftl_bci bci; size_t retlen; pr_debug("INFTL: INFTL_trydeletechain(inftl=%p," "thisVUC=%d)\n", inftl, thisVUC); memset(BlockUsed, 0, sizeof(BlockUsed)); memset(BlockDeleted, 0, sizeof(BlockDeleted)); thisEUN = inftl->VUtable[thisVUC]; if (thisEUN == BLOCK_NIL) { printk(KERN_WARNING "INFTL: trying to delete non-existent " "Virtual Unit Chain %d!\n", thisVUC); return; } /* * Scan through the Erase Units to determine whether any data is in * each of the 512-byte blocks within the Chain. 
*/ silly = MAX_LOOPS; while (thisEUN < inftl->nb_blocks) { for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) { if (BlockUsed[block] || BlockDeleted[block]) continue; if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + (block * SECTORSIZE), 8 , &retlen, (char *)&bci) < 0) status = SECTOR_IGNORE; else status = bci.Status | bci.Status1; switch(status) { case SECTOR_FREE: case SECTOR_IGNORE: break; case SECTOR_USED: BlockUsed[block] = 1; continue; case SECTOR_DELETED: BlockDeleted[block] = 1; continue; default: printk(KERN_WARNING "INFTL: unknown status " "for block %d in EUN %d: 0x%x\n", block, thisEUN, status); } } if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in Virtual " "Unit Chain 0x%x\n", thisVUC); return; } thisEUN = inftl->PUtable[thisEUN]; } for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) if (BlockUsed[block]) return; /* * For each block in the chain free it and make it available * for future use. Erase from the oldest unit first. */ pr_debug("INFTL: deleting empty VUC %d\n", thisVUC); for (;;) { u16 *prevEUN = &inftl->VUtable[thisVUC]; thisEUN = *prevEUN; /* If the chain is all gone already, we're done */ if (thisEUN == BLOCK_NIL) { pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN); return; } /* Find oldest unit in chain. */ while (inftl->PUtable[thisEUN] != BLOCK_NIL) { BUG_ON(thisEUN >= inftl->nb_blocks); prevEUN = &inftl->PUtable[thisEUN]; thisEUN = *prevEUN; } pr_debug("Deleting EUN %d from VUC %d\n", thisEUN, thisVUC); if (INFTL_formatblock(inftl, thisEUN) < 0) { /* * Could not erase : mark block as reserved. */ inftl->PUtable[thisEUN] = BLOCK_RESERVED; } else { /* Correctly erased : mark it as free */ inftl->PUtable[thisEUN] = BLOCK_FREE; inftl->numfreeEUNs++; } /* Now sort out whatever was pointing to it... */ *prevEUN = BLOCK_NIL; /* Ideally we'd actually be responsive to new requests while we're doing this -- if there's free space why should others be made to wait? 
*/ cond_resched(); } inftl->VUtable[thisVUC] = BLOCK_NIL; } static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block) { unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)]; unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1); struct mtd_info *mtd = inftl->mbd.mtd; unsigned int status; int silly = MAX_LOOPS; size_t retlen; struct inftl_bci bci; pr_debug("INFTL: INFTL_deleteblock(inftl=%p," "block=%d)\n", inftl, block); while (thisEUN < inftl->nb_blocks) { if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + blockofs, 8, &retlen, (char *)&bci) < 0) status = SECTOR_IGNORE; else status = bci.Status | bci.Status1; switch (status) { case SECTOR_FREE: case SECTOR_IGNORE: break; case SECTOR_DELETED: thisEUN = BLOCK_NIL; goto foundit; case SECTOR_USED: goto foundit; default: printk(KERN_WARNING "INFTL: unknown status for " "block %d in EUN %d: 0x%x\n", block, thisEUN, status); break; } if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in Virtual " "Unit Chain 0x%x\n", block / (inftl->EraseSize / SECTORSIZE)); return 1; } thisEUN = inftl->PUtable[thisEUN]; } foundit: if (thisEUN != BLOCK_NIL) { loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs; if (inftl_read_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0) return -EIO; bci.Status = bci.Status1 = SECTOR_DELETED; if (inftl_write_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0) return -EIO; INFTL_trydeletechain(inftl, block / (inftl->EraseSize / SECTORSIZE)); } return 0; } static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block, char *buffer) { struct INFTLrecord *inftl = (void *)mbd; unsigned int writeEUN; unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1); size_t retlen; struct inftl_oob oob; char *p, *pend; pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld," "buffer=%p)\n", inftl, block, buffer); /* Is block all zero? 
*/ pend = buffer + SECTORSIZE; for (p = buffer; p < pend && !*p; p++) ; if (p < pend) { writeEUN = INFTL_findwriteunit(inftl, block); if (writeEUN == BLOCK_NIL) { printk(KERN_WARNING "inftl_writeblock(): cannot find " "block to write to\n"); /* * If we _still_ haven't got a block to use, * we're screwed. */ return 1; } memset(&oob, 0xff, sizeof(struct inftl_oob)); oob.b.Status = oob.b.Status1 = SECTOR_USED; inftl_write(inftl->mbd.mtd, (writeEUN * inftl->EraseSize) + blockofs, SECTORSIZE, &retlen, (char *)buffer, (char *)&oob); /* * need to write SECTOR_USED flags since they are not written * in mtd_writeecc */ } else { INFTL_deleteblock(inftl, block); } return 0; } static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block, char *buffer) { struct INFTLrecord *inftl = (void *)mbd; unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)]; unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1); struct mtd_info *mtd = inftl->mbd.mtd; unsigned int status; int silly = MAX_LOOPS; struct inftl_bci bci; size_t retlen; pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld," "buffer=%p)\n", inftl, block, buffer); while (thisEUN < inftl->nb_blocks) { if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + blockofs, 8, &retlen, (char *)&bci) < 0) status = SECTOR_IGNORE; else status = bci.Status | bci.Status1; switch (status) { case SECTOR_DELETED: thisEUN = BLOCK_NIL; goto foundit; case SECTOR_USED: goto foundit; case SECTOR_FREE: case SECTOR_IGNORE: break; default: printk(KERN_WARNING "INFTL: unknown status for " "block %ld in EUN %d: 0x%04x\n", block, thisEUN, status); break; } if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in " "Virtual Unit Chain 0x%lx\n", block / (inftl->EraseSize / SECTORSIZE)); return 1; } thisEUN = inftl->PUtable[thisEUN]; } foundit: if (thisEUN == BLOCK_NIL) { /* The requested block is not on the media, return all 0x00 */ memset(buffer, 0, SECTORSIZE); } else { size_t retlen; loff_t ptr = 
(thisEUN * inftl->EraseSize) + blockofs; int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer); /* Handle corrected bit flips gracefully */ if (ret < 0 && !mtd_is_bitflip(ret)) return -EIO; } return 0; } static int inftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo) { struct INFTLrecord *inftl = (void *)dev; geo->heads = inftl->heads; geo->sectors = inftl->sectors; geo->cylinders = inftl->cylinders; return 0; } static struct mtd_blktrans_ops inftl_tr = { .name = "inftl", .major = INFTL_MAJOR, .part_bits = INFTL_PARTN_BITS, .blksize = 512, .getgeo = inftl_getgeo, .readsect = inftl_readblock, .writesect = inftl_writeblock, .add_mtd = inftl_add_mtd, .remove_dev = inftl_remove_dev, .owner = THIS_MODULE, }; static int __init init_inftl(void) { return register_mtd_blktrans(&inftl_tr); } static void __exit cleanup_inftl(void) { deregister_mtd_blktrans(&inftl_tr); } module_init(init_inftl); module_exit(cleanup_inftl); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>, David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al."); MODULE_DESCRIPTION("Support code for Inverse Flash Translation Layer, used on M-Systems DiskOnChip 2000, Millennium and Millennium Plus");
gpl-2.0
I8552-CM/android_kernel_arubaslim
drivers/mtd/inftlcore.c
7345
25065
/* * inftlcore.c -- Linux driver for Inverse Flash Translation Layer (INFTL) * * Copyright © 2002, Greg Ungerer (gerg@snapgear.com) * * Based heavily on the nftlcore.c code which is: * Copyright © 1999 Machine Vision Holdings, Inc. * Copyright © 1999 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/hdreg.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nftl.h> #include <linux/mtd/inftl.h> #include <linux/mtd/nand.h> #include <asm/uaccess.h> #include <asm/errno.h> #include <asm/io.h> /* * Maximum number of loops while examining next block, to have a * chance to detect consistency problems (they should never happen * because of the checks done in the mounting. */ #define MAX_LOOPS 10000 static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) { struct INFTLrecord *inftl; unsigned long temp; if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX) return; /* OK, this is moderately ugly. But probably safe. Alternatives? 
*/ if (memcmp(mtd->name, "DiskOnChip", 10)) return; if (!mtd->_block_isbad) { printk(KERN_ERR "INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n" "Please use the new diskonchip driver under the NAND subsystem.\n"); return; } pr_debug("INFTL: add_mtd for %s\n", mtd->name); inftl = kzalloc(sizeof(*inftl), GFP_KERNEL); if (!inftl) return; inftl->mbd.mtd = mtd; inftl->mbd.devnum = -1; inftl->mbd.tr = tr; if (INFTL_mount(inftl) < 0) { printk(KERN_WARNING "INFTL: could not mount device\n"); kfree(inftl); return; } /* OK, it's a new one. Set up all the data structures. */ /* Calculate geometry */ inftl->cylinders = 1024; inftl->heads = 16; temp = inftl->cylinders * inftl->heads; inftl->sectors = inftl->mbd.size / temp; if (inftl->mbd.size % temp) { inftl->sectors++; temp = inftl->cylinders * inftl->sectors; inftl->heads = inftl->mbd.size / temp; if (inftl->mbd.size % temp) { inftl->heads++; temp = inftl->heads * inftl->sectors; inftl->cylinders = inftl->mbd.size / temp; } } if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) { /* Oh no we don't have mbd.size == heads * cylinders * sectors */ printk(KERN_WARNING "INFTL: cannot calculate a geometry to " "match size of 0x%lx.\n", inftl->mbd.size); printk(KERN_WARNING "INFTL: using C:%d H:%d S:%d " "(== 0x%lx sects)\n", inftl->cylinders, inftl->heads , inftl->sectors, (long)inftl->cylinders * (long)inftl->heads * (long)inftl->sectors ); } if (add_mtd_blktrans_dev(&inftl->mbd)) { kfree(inftl->PUtable); kfree(inftl->VUtable); kfree(inftl); return; } #ifdef PSYCHO_DEBUG printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a'); #endif return; } static void inftl_remove_dev(struct mtd_blktrans_dev *dev) { struct INFTLrecord *inftl = (void *)dev; pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum); del_mtd_blktrans_dev(dev); kfree(inftl->PUtable); kfree(inftl->VUtable); } /* * Actual INFTL access routines. 
*/ /* * Read oob data from flash */ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf) { struct mtd_oob_ops ops; int res; ops.mode = MTD_OPS_PLACE_OOB; ops.ooboffs = offs & (mtd->writesize - 1); ops.ooblen = len; ops.oobbuf = buf; ops.datbuf = NULL; res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops); *retlen = ops.oobretlen; return res; } /* * Write oob data to flash */ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf) { struct mtd_oob_ops ops; int res; ops.mode = MTD_OPS_PLACE_OOB; ops.ooboffs = offs & (mtd->writesize - 1); ops.ooblen = len; ops.oobbuf = buf; ops.datbuf = NULL; res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); *retlen = ops.oobretlen; return res; } /* * Write data and oob to flash */ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf, uint8_t *oob) { struct mtd_oob_ops ops; int res; ops.mode = MTD_OPS_PLACE_OOB; ops.ooboffs = offs; ops.ooblen = mtd->oobsize; ops.oobbuf = oob; ops.datbuf = buf; ops.len = len; res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); *retlen = ops.retlen; return res; } /* * INFTL_findfreeblock: Find a free Erase Unit on the INFTL partition. * This function is used when the give Virtual Unit Chain. */ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate) { u16 pot = inftl->LastFreeEUN; int silly = inftl->nb_blocks; pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n", inftl, desperate); /* * Normally, we force a fold to happen before we run out of free * blocks completely. */ if (!desperate && inftl->numfreeEUNs < 2) { pr_debug("INFTL: there are too few free EUNs (%d)\n", inftl->numfreeEUNs); return BLOCK_NIL; } /* Scan for a free block */ do { if (inftl->PUtable[pot] == BLOCK_FREE) { inftl->LastFreeEUN = pot; return pot; } if (++pot > inftl->lastEUN) pot = 0; if (!silly--) { printk(KERN_WARNING "INFTL: no free blocks found! 
" "EUN range = %d - %d\n", 0, inftl->LastFreeEUN); return BLOCK_NIL; } } while (pot != inftl->LastFreeEUN); return BLOCK_NIL; } static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned pendingblock) { u16 BlockMap[MAX_SECTORS_PER_UNIT]; unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT]; unsigned int thisEUN, prevEUN, status; struct mtd_info *mtd = inftl->mbd.mtd; int block, silly; unsigned int targetEUN; struct inftl_oob oob; size_t retlen; pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n", inftl, thisVUC, pendingblock); memset(BlockMap, 0xff, sizeof(BlockMap)); memset(BlockDeleted, 0, sizeof(BlockDeleted)); thisEUN = targetEUN = inftl->VUtable[thisVUC]; if (thisEUN == BLOCK_NIL) { printk(KERN_WARNING "INFTL: trying to fold non-existent " "Virtual Unit Chain %d!\n", thisVUC); return BLOCK_NIL; } /* * Scan to find the Erase Unit which holds the actual data for each * 512-byte block within the Chain. */ silly = MAX_LOOPS; while (thisEUN < inftl->nb_blocks) { for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) { if ((BlockMap[block] != BLOCK_NIL) || BlockDeleted[block]) continue; if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + (block * SECTORSIZE), 16, &retlen, (char *)&oob) < 0) status = SECTOR_IGNORE; else status = oob.b.Status | oob.b.Status1; switch(status) { case SECTOR_FREE: case SECTOR_IGNORE: break; case SECTOR_USED: BlockMap[block] = thisEUN; continue; case SECTOR_DELETED: BlockDeleted[block] = 1; continue; default: printk(KERN_WARNING "INFTL: unknown status " "for block %d in EUN %d: %x\n", block, thisEUN, status); break; } } if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in Virtual " "Unit Chain 0x%x\n", thisVUC); return BLOCK_NIL; } thisEUN = inftl->PUtable[thisEUN]; } /* * OK. We now know the location of every block in the Virtual Unit * Chain, and the Erase Unit into which we are supposed to be copying. * Go for it. 
*/ pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN); for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) { unsigned char movebuf[SECTORSIZE]; int ret; /* * If it's in the target EUN already, or if it's pending write, * do nothing. */ if (BlockMap[block] == targetEUN || (pendingblock == (thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) { continue; } /* * Copy only in non free block (free blocks can only * happen in case of media errors or deleted blocks). */ if (BlockMap[block] == BLOCK_NIL) continue; ret = mtd_read(mtd, (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE), SECTORSIZE, &retlen, movebuf); if (ret < 0 && !mtd_is_bitflip(ret)) { ret = mtd_read(mtd, (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE), SECTORSIZE, &retlen, movebuf); if (ret != -EIO) pr_debug("INFTL: error went away on retry?\n"); } memset(&oob, 0xff, sizeof(struct inftl_oob)); oob.b.Status = oob.b.Status1 = SECTOR_USED; inftl_write(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) + (block * SECTORSIZE), SECTORSIZE, &retlen, movebuf, (char *)&oob); } /* * Newest unit in chain now contains data from _all_ older units. * So go through and erase each unit in chain, oldest first. (This * is important, by doing oldest first if we crash/reboot then it * it is relatively simple to clean up the mess). */ pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC); for (;;) { /* Find oldest unit in chain. */ thisEUN = inftl->VUtable[thisVUC]; prevEUN = BLOCK_NIL; while (inftl->PUtable[thisEUN] != BLOCK_NIL) { prevEUN = thisEUN; thisEUN = inftl->PUtable[thisEUN]; } /* Check if we are all done */ if (thisEUN == targetEUN) break; /* Unlink the last block from the chain. */ inftl->PUtable[prevEUN] = BLOCK_NIL; /* Now try to erase it. */ if (INFTL_formatblock(inftl, thisEUN) < 0) { /* * Could not erase : mark block as reserved. 
*/ inftl->PUtable[thisEUN] = BLOCK_RESERVED; } else { /* Correctly erased : mark it as free */ inftl->PUtable[thisEUN] = BLOCK_FREE; inftl->numfreeEUNs++; } } return targetEUN; } static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock) { /* * This is the part that needs some cleverness applied. * For now, I'm doing the minimum applicable to actually * get the thing to work. * Wear-levelling and other clever stuff needs to be implemented * and we also need to do some assessment of the results when * the system loses power half-way through the routine. */ u16 LongestChain = 0; u16 ChainLength = 0, thislen; u16 chain, EUN; pr_debug("INFTL: INFTL_makefreeblock(inftl=%p," "pending=%d)\n", inftl, pendingblock); for (chain = 0; chain < inftl->nb_blocks; chain++) { EUN = inftl->VUtable[chain]; thislen = 0; while (EUN <= inftl->lastEUN) { thislen++; EUN = inftl->PUtable[EUN]; if (thislen > 0xff00) { printk(KERN_WARNING "INFTL: endless loop in " "Virtual Chain %d: Unit %x\n", chain, EUN); /* * Actually, don't return failure. * Just ignore this chain and get on with it. */ thislen = 0; break; } } if (thislen > ChainLength) { ChainLength = thislen; LongestChain = chain; } } if (ChainLength < 2) { printk(KERN_WARNING "INFTL: no Virtual Unit Chains available " "for folding. Failing request\n"); return BLOCK_NIL; } return INFTL_foldchain(inftl, LongestChain, pendingblock); } static int nrbits(unsigned int val, int bitcount) { int i, total = 0; for (i = 0; (i < bitcount); i++) total += (((0x1 << i) & val) ? 1 : 0); return total; } /* * INFTL_findwriteunit: Return the unit number into which we can write * for this block. Make it available if it isn't already. 
*/ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block) { unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE); unsigned int thisEUN, writeEUN, prev_block, status; unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1); struct mtd_info *mtd = inftl->mbd.mtd; struct inftl_oob oob; struct inftl_bci bci; unsigned char anac, nacs, parity; size_t retlen; int silly, silly2 = 3; pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n", inftl, block); do { /* * Scan the media to find a unit in the VUC which has * a free space for the block in question. */ writeEUN = BLOCK_NIL; thisEUN = inftl->VUtable[thisVUC]; silly = MAX_LOOPS; while (thisEUN <= inftl->lastEUN) { inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + blockofs, 8, &retlen, (char *)&bci); status = bci.Status | bci.Status1; pr_debug("INFTL: status of block %d in EUN %d is %x\n", block , writeEUN, status); switch(status) { case SECTOR_FREE: writeEUN = thisEUN; break; case SECTOR_DELETED: case SECTOR_USED: /* Can't go any further */ goto hitused; case SECTOR_IGNORE: break; default: /* * Invalid block. Don't use it any more. * Must implement. */ break; } if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in " "Virtual Unit Chain 0x%x\n", thisVUC); return BLOCK_NIL; } /* Skip to next block in chain */ thisEUN = inftl->PUtable[thisEUN]; } hitused: if (writeEUN != BLOCK_NIL) return writeEUN; /* * OK. We didn't find one in the existing chain, or there * is no existing chain. Allocate a new one. */ writeEUN = INFTL_findfreeblock(inftl, 0); if (writeEUN == BLOCK_NIL) { /* * That didn't work - there were no free blocks just * waiting to be picked up. We're going to have to fold * a chain to make room. */ thisEUN = INFTL_makefreeblock(inftl, block); /* * Hopefully we free something, lets try again. * This time we are desperate... 
*/ pr_debug("INFTL: using desperate==1 to find free EUN " "to accommodate write to VUC %d\n", thisVUC); writeEUN = INFTL_findfreeblock(inftl, 1); if (writeEUN == BLOCK_NIL) { /* * Ouch. This should never happen - we should * always be able to make some room somehow. * If we get here, we've allocated more storage * space than actual media, or our makefreeblock * routine is missing something. */ printk(KERN_WARNING "INFTL: cannot make free " "space.\n"); #ifdef DEBUG INFTL_dumptables(inftl); INFTL_dumpVUchains(inftl); #endif return BLOCK_NIL; } } /* * Insert new block into virtual chain. Firstly update the * block headers in flash... */ anac = 0; nacs = 0; thisEUN = inftl->VUtable[thisVUC]; if (thisEUN != BLOCK_NIL) { inftl_read_oob(mtd, thisEUN * inftl->EraseSize + 8, 8, &retlen, (char *)&oob.u); anac = oob.u.a.ANAC + 1; nacs = oob.u.a.NACs + 1; } prev_block = inftl->VUtable[thisVUC]; if (prev_block < inftl->nb_blocks) prev_block -= inftl->firstEUN; parity = (nrbits(thisVUC, 16) & 0x1) ? 0x1 : 0; parity |= (nrbits(prev_block, 16) & 0x1) ? 0x2 : 0; parity |= (nrbits(anac, 8) & 0x1) ? 0x4 : 0; parity |= (nrbits(nacs, 8) & 0x1) ? 0x8 : 0; oob.u.a.virtualUnitNo = cpu_to_le16(thisVUC); oob.u.a.prevUnitNo = cpu_to_le16(prev_block); oob.u.a.ANAC = anac; oob.u.a.NACs = nacs; oob.u.a.parityPerField = parity; oob.u.a.discarded = 0xaa; inftl_write_oob(mtd, writeEUN * inftl->EraseSize + 8, 8, &retlen, (char *)&oob.u); /* Also back up header... 
*/ oob.u.b.virtualUnitNo = cpu_to_le16(thisVUC); oob.u.b.prevUnitNo = cpu_to_le16(prev_block); oob.u.b.ANAC = anac; oob.u.b.NACs = nacs; oob.u.b.parityPerField = parity; oob.u.b.discarded = 0xaa; inftl_write_oob(mtd, writeEUN * inftl->EraseSize + SECTORSIZE * 4 + 8, 8, &retlen, (char *)&oob.u); inftl->PUtable[writeEUN] = inftl->VUtable[thisVUC]; inftl->VUtable[thisVUC] = writeEUN; inftl->numfreeEUNs--; return writeEUN; } while (silly2--); printk(KERN_WARNING "INFTL: error folding to make room for Virtual " "Unit Chain 0x%x\n", thisVUC); return BLOCK_NIL; } /* * Given a Virtual Unit Chain, see if it can be deleted, and if so do it. */ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC) { struct mtd_info *mtd = inftl->mbd.mtd; unsigned char BlockUsed[MAX_SECTORS_PER_UNIT]; unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT]; unsigned int thisEUN, status; int block, silly; struct inftl_bci bci; size_t retlen; pr_debug("INFTL: INFTL_trydeletechain(inftl=%p," "thisVUC=%d)\n", inftl, thisVUC); memset(BlockUsed, 0, sizeof(BlockUsed)); memset(BlockDeleted, 0, sizeof(BlockDeleted)); thisEUN = inftl->VUtable[thisVUC]; if (thisEUN == BLOCK_NIL) { printk(KERN_WARNING "INFTL: trying to delete non-existent " "Virtual Unit Chain %d!\n", thisVUC); return; } /* * Scan through the Erase Units to determine whether any data is in * each of the 512-byte blocks within the Chain. 
*/ silly = MAX_LOOPS; while (thisEUN < inftl->nb_blocks) { for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) { if (BlockUsed[block] || BlockDeleted[block]) continue; if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + (block * SECTORSIZE), 8 , &retlen, (char *)&bci) < 0) status = SECTOR_IGNORE; else status = bci.Status | bci.Status1; switch(status) { case SECTOR_FREE: case SECTOR_IGNORE: break; case SECTOR_USED: BlockUsed[block] = 1; continue; case SECTOR_DELETED: BlockDeleted[block] = 1; continue; default: printk(KERN_WARNING "INFTL: unknown status " "for block %d in EUN %d: 0x%x\n", block, thisEUN, status); } } if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in Virtual " "Unit Chain 0x%x\n", thisVUC); return; } thisEUN = inftl->PUtable[thisEUN]; } for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) if (BlockUsed[block]) return; /* * For each block in the chain free it and make it available * for future use. Erase from the oldest unit first. */ pr_debug("INFTL: deleting empty VUC %d\n", thisVUC); for (;;) { u16 *prevEUN = &inftl->VUtable[thisVUC]; thisEUN = *prevEUN; /* If the chain is all gone already, we're done */ if (thisEUN == BLOCK_NIL) { pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN); return; } /* Find oldest unit in chain. */ while (inftl->PUtable[thisEUN] != BLOCK_NIL) { BUG_ON(thisEUN >= inftl->nb_blocks); prevEUN = &inftl->PUtable[thisEUN]; thisEUN = *prevEUN; } pr_debug("Deleting EUN %d from VUC %d\n", thisEUN, thisVUC); if (INFTL_formatblock(inftl, thisEUN) < 0) { /* * Could not erase : mark block as reserved. */ inftl->PUtable[thisEUN] = BLOCK_RESERVED; } else { /* Correctly erased : mark it as free */ inftl->PUtable[thisEUN] = BLOCK_FREE; inftl->numfreeEUNs++; } /* Now sort out whatever was pointing to it... */ *prevEUN = BLOCK_NIL; /* Ideally we'd actually be responsive to new requests while we're doing this -- if there's free space why should others be made to wait? 
*/ cond_resched(); } inftl->VUtable[thisVUC] = BLOCK_NIL; } static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block) { unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)]; unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1); struct mtd_info *mtd = inftl->mbd.mtd; unsigned int status; int silly = MAX_LOOPS; size_t retlen; struct inftl_bci bci; pr_debug("INFTL: INFTL_deleteblock(inftl=%p," "block=%d)\n", inftl, block); while (thisEUN < inftl->nb_blocks) { if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + blockofs, 8, &retlen, (char *)&bci) < 0) status = SECTOR_IGNORE; else status = bci.Status | bci.Status1; switch (status) { case SECTOR_FREE: case SECTOR_IGNORE: break; case SECTOR_DELETED: thisEUN = BLOCK_NIL; goto foundit; case SECTOR_USED: goto foundit; default: printk(KERN_WARNING "INFTL: unknown status for " "block %d in EUN %d: 0x%x\n", block, thisEUN, status); break; } if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in Virtual " "Unit Chain 0x%x\n", block / (inftl->EraseSize / SECTORSIZE)); return 1; } thisEUN = inftl->PUtable[thisEUN]; } foundit: if (thisEUN != BLOCK_NIL) { loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs; if (inftl_read_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0) return -EIO; bci.Status = bci.Status1 = SECTOR_DELETED; if (inftl_write_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0) return -EIO; INFTL_trydeletechain(inftl, block / (inftl->EraseSize / SECTORSIZE)); } return 0; } static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block, char *buffer) { struct INFTLrecord *inftl = (void *)mbd; unsigned int writeEUN; unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1); size_t retlen; struct inftl_oob oob; char *p, *pend; pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld," "buffer=%p)\n", inftl, block, buffer); /* Is block all zero? 
*/ pend = buffer + SECTORSIZE; for (p = buffer; p < pend && !*p; p++) ; if (p < pend) { writeEUN = INFTL_findwriteunit(inftl, block); if (writeEUN == BLOCK_NIL) { printk(KERN_WARNING "inftl_writeblock(): cannot find " "block to write to\n"); /* * If we _still_ haven't got a block to use, * we're screwed. */ return 1; } memset(&oob, 0xff, sizeof(struct inftl_oob)); oob.b.Status = oob.b.Status1 = SECTOR_USED; inftl_write(inftl->mbd.mtd, (writeEUN * inftl->EraseSize) + blockofs, SECTORSIZE, &retlen, (char *)buffer, (char *)&oob); /* * need to write SECTOR_USED flags since they are not written * in mtd_writeecc */ } else { INFTL_deleteblock(inftl, block); } return 0; } static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block, char *buffer) { struct INFTLrecord *inftl = (void *)mbd; unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)]; unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1); struct mtd_info *mtd = inftl->mbd.mtd; unsigned int status; int silly = MAX_LOOPS; struct inftl_bci bci; size_t retlen; pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld," "buffer=%p)\n", inftl, block, buffer); while (thisEUN < inftl->nb_blocks) { if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + blockofs, 8, &retlen, (char *)&bci) < 0) status = SECTOR_IGNORE; else status = bci.Status | bci.Status1; switch (status) { case SECTOR_DELETED: thisEUN = BLOCK_NIL; goto foundit; case SECTOR_USED: goto foundit; case SECTOR_FREE: case SECTOR_IGNORE: break; default: printk(KERN_WARNING "INFTL: unknown status for " "block %ld in EUN %d: 0x%04x\n", block, thisEUN, status); break; } if (!silly--) { printk(KERN_WARNING "INFTL: infinite loop in " "Virtual Unit Chain 0x%lx\n", block / (inftl->EraseSize / SECTORSIZE)); return 1; } thisEUN = inftl->PUtable[thisEUN]; } foundit: if (thisEUN == BLOCK_NIL) { /* The requested block is not on the media, return all 0x00 */ memset(buffer, 0, SECTORSIZE); } else { size_t retlen; loff_t ptr = 
(thisEUN * inftl->EraseSize) + blockofs; int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer); /* Handle corrected bit flips gracefully */ if (ret < 0 && !mtd_is_bitflip(ret)) return -EIO; } return 0; } static int inftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo) { struct INFTLrecord *inftl = (void *)dev; geo->heads = inftl->heads; geo->sectors = inftl->sectors; geo->cylinders = inftl->cylinders; return 0; } static struct mtd_blktrans_ops inftl_tr = { .name = "inftl", .major = INFTL_MAJOR, .part_bits = INFTL_PARTN_BITS, .blksize = 512, .getgeo = inftl_getgeo, .readsect = inftl_readblock, .writesect = inftl_writeblock, .add_mtd = inftl_add_mtd, .remove_dev = inftl_remove_dev, .owner = THIS_MODULE, }; static int __init init_inftl(void) { return register_mtd_blktrans(&inftl_tr); } static void __exit cleanup_inftl(void) { deregister_mtd_blktrans(&inftl_tr); } module_init(init_inftl); module_exit(cleanup_inftl); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>, David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al."); MODULE_DESCRIPTION("Support code for Inverse Flash Translation Layer, used on M-Systems DiskOnChip 2000, Millennium and Millennium Plus");
gpl-2.0
TeamEOS/kernel_google_msm
drivers/char/nwbutton.c
8113
8048
/* * NetWinder Button Driver- * Copyright (C) Alex Holden <alex@linuxhacker.org> 1998, 1999. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/timer.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <asm/uaccess.h> #include <asm/irq.h> #include <asm/mach-types.h> #define __NWBUTTON_C /* Tell the header file who we are */ #include "nwbutton.h" static void button_sequence_finished (unsigned long parameters); static int button_press_count; /* The count of button presses */ /* Times for the end of a sequence */ static DEFINE_TIMER(button_timer, button_sequence_finished, 0, 0); static DECLARE_WAIT_QUEUE_HEAD(button_wait_queue); /* Used for blocking read */ static char button_output_buffer[32]; /* Stores data to write out of device */ static int bcount; /* The number of bytes in the buffer */ static int bdelay = BUTTON_DELAY; /* The delay, in jiffies */ static struct button_callback button_callback_list[32]; /* The callback list */ static int callback_count; /* The number of callbacks registered */ static int reboot_count = NUM_PRESSES_REBOOT; /* Number of presses to reboot */ /* * This function is called by other drivers to register a callback function * to be called when a particular number of button presses occurs. * The callback list is a static array of 32 entries (I somehow doubt many * people are ever going to want to register more than 32 different actions * to be performed by the kernel on different numbers of button presses ;). * However, if an attempt to register a 33rd entry (perhaps a stuck loop * somewhere registering the same entry over and over?) it will fail to * do so and return -ENOMEM. If an attempt is made to register a null pointer, * it will fail to do so and return -EINVAL. 
* Because callbacks can be unregistered at random the list can become * fragmented, so we need to search through the list until we find the first * free entry. * * FIXME: Has anyone spotted any locking functions int his code recently ?? */ int button_add_callback (void (*callback) (void), int count) { int lp = 0; if (callback_count == 32) { return -ENOMEM; } if (!callback) { return -EINVAL; } callback_count++; for (; (button_callback_list [lp].callback); lp++); button_callback_list [lp].callback = callback; button_callback_list [lp].count = count; return 0; } /* * This function is called by other drivers to deregister a callback function. * If you attempt to unregister a callback which does not exist, it will fail * with -EINVAL. If there is more than one entry with the same address, * because it searches the list from end to beginning, it will unregister the * last one to be registered first (FILO- First In Last Out). * Note that this is not necessarily true if the entries are not submitted * at the same time, because another driver could have unregistered a callback * between the submissions creating a gap earlier in the list, which would * be filled first at submission time. */ int button_del_callback (void (*callback) (void)) { int lp = 31; if (!callback) { return -EINVAL; } while (lp >= 0) { if ((button_callback_list [lp].callback) == callback) { button_callback_list [lp].callback = NULL; button_callback_list [lp].count = 0; callback_count--; return 0; }; lp--; }; return -EINVAL; } /* * This function is called by button_sequence_finished to search through the * list of callback functions, and call any of them whose count argument * matches the current count of button presses. It starts at the beginning * of the list and works up to the end. It will refuse to follow a null * pointer (which should never happen anyway). 
*/ static void button_consume_callbacks (int bpcount) { int lp = 0; for (; lp <= 31; lp++) { if ((button_callback_list [lp].count) == bpcount) { if (button_callback_list [lp].callback) { button_callback_list[lp].callback(); } } } } /* * This function is called when the button_timer times out. * ie. When you don't press the button for bdelay jiffies, this is taken to * mean you have ended the sequence of key presses, and this function is * called to wind things up (write the press_count out to /dev/button, call * any matching registered function callbacks, initiate reboot, etc.). */ static void button_sequence_finished (unsigned long parameters) { #ifdef CONFIG_NWBUTTON_REBOOT /* Reboot using button is enabled */ if (button_press_count == reboot_count) kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */ #endif /* CONFIG_NWBUTTON_REBOOT */ button_consume_callbacks (button_press_count); bcount = sprintf (button_output_buffer, "%d\n", button_press_count); button_press_count = 0; /* Reset the button press counter */ wake_up_interruptible (&button_wait_queue); } /* * This handler is called when the orange button is pressed (GPIO 10 of the * SuperIO chip, which maps to logical IRQ 26). If the press_count is 0, * this is the first press, so it starts a timer and increments the counter. * If it is higher than 0, it deletes the old timer, starts a new one, and * increments the counter. */ static irqreturn_t button_handler (int irq, void *dev_id) { button_press_count++; mod_timer(&button_timer, jiffies + bdelay); return IRQ_HANDLED; } /* * This function is called when a user space program attempts to read * /dev/nwbutton. It puts the device to sleep on the wait queue until * button_sequence_finished writes some data to the buffer and flushes * the queue, at which point it writes the data out to the device and * returns the number of characters it has written. This function is * reentrant, so that many processes can be attempting to read from the * device at any one time. 
*/ static int button_read (struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { interruptible_sleep_on (&button_wait_queue); return (copy_to_user (buffer, &button_output_buffer, bcount)) ? -EFAULT : bcount; } /* * This structure is the file operations structure, which specifies what * callbacks functions the kernel should call when a user mode process * attempts to perform these operations on the device. */ static const struct file_operations button_fops = { .owner = THIS_MODULE, .read = button_read, .llseek = noop_llseek, }; /* * This structure is the misc device structure, which specifies the minor * device number (158 in this case), the name of the device (for /proc/misc), * and the address of the above file operations structure. */ static struct miscdevice button_misc_device = { BUTTON_MINOR, "nwbutton", &button_fops, }; /* * This function is called to initialise the driver, either from misc.c at * bootup if the driver is compiled into the kernel, or from init_module * below at module insert time. It attempts to register the device node * and the IRQ and fails with a warning message if either fails, though * neither ever should because the device number and IRQ are unique to * this driver. 
*/ static int __init nwbutton_init(void) { if (!machine_is_netwinder()) return -ENODEV; printk (KERN_INFO "NetWinder Button Driver Version %s (C) Alex Holden " "<alex@linuxhacker.org> 1998.\n", VERSION); if (misc_register (&button_misc_device)) { printk (KERN_WARNING "nwbutton: Couldn't register device 10, " "%d.\n", BUTTON_MINOR); return -EBUSY; } if (request_irq (IRQ_NETWINDER_BUTTON, button_handler, IRQF_DISABLED, "nwbutton", NULL)) { printk (KERN_WARNING "nwbutton: IRQ %d is not free.\n", IRQ_NETWINDER_BUTTON); misc_deregister (&button_misc_device); return -EIO; } return 0; } static void __exit nwbutton_exit (void) { free_irq (IRQ_NETWINDER_BUTTON, NULL); misc_deregister (&button_misc_device); } MODULE_AUTHOR("Alex Holden"); MODULE_LICENSE("GPL"); module_init(nwbutton_init); module_exit(nwbutton_exit);
gpl-2.0
jrior001/Torched_kernel
drivers/s390/char/tape_class.c
9137
3017
/* * (C) Copyright IBM Corp. 2004 * tape_class.c * * Tape class device support * * Author: Stefan Bader <shbader@de.ibm.com> * Based on simple class device code by Greg K-H */ #define KMSG_COMPONENT "tape" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/slab.h> #include "tape_class.h" MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>"); MODULE_DESCRIPTION( "(C) Copyright IBM Corp. 2004 All Rights Reserved.\n" "tape_class.c" ); MODULE_LICENSE("GPL"); static struct class *tape_class; /* * Register a tape device and return a pointer to the cdev structure. * * device * The pointer to the struct device of the physical (base) device. * drivername * The pointer to the drivers name for it's character devices. * dev * The intended major/minor number. The major number may be 0 to * get a dynamic major number. * fops * The pointer to the drivers file operations for the tape device. * devname * The pointer to the name of the character device. */ struct tape_class_device *register_tape_dev( struct device * device, dev_t dev, const struct file_operations *fops, char * device_name, char * mode_name) { struct tape_class_device * tcd; int rc; char * s; tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL); if (!tcd) return ERR_PTR(-ENOMEM); strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN); for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/')) *s = '!'; strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN); for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/')) *s = '!'; tcd->char_device = cdev_alloc(); if (!tcd->char_device) { rc = -ENOMEM; goto fail_with_tcd; } tcd->char_device->owner = fops->owner; tcd->char_device->ops = fops; tcd->char_device->dev = dev; rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1); if (rc) goto fail_with_cdev; tcd->class_device = device_create(tape_class, device, tcd->char_device->dev, NULL, "%s", tcd->device_name); rc = IS_ERR(tcd->class_device) ? 
PTR_ERR(tcd->class_device) : 0; if (rc) goto fail_with_cdev; rc = sysfs_create_link( &device->kobj, &tcd->class_device->kobj, tcd->mode_name ); if (rc) goto fail_with_class_device; return tcd; fail_with_class_device: device_destroy(tape_class, tcd->char_device->dev); fail_with_cdev: cdev_del(tcd->char_device); fail_with_tcd: kfree(tcd); return ERR_PTR(rc); } EXPORT_SYMBOL(register_tape_dev); void unregister_tape_dev(struct device *device, struct tape_class_device *tcd) { if (tcd != NULL && !IS_ERR(tcd)) { sysfs_remove_link(&device->kobj, tcd->mode_name); device_destroy(tape_class, tcd->char_device->dev); cdev_del(tcd->char_device); kfree(tcd); } } EXPORT_SYMBOL(unregister_tape_dev); static int __init tape_init(void) { tape_class = class_create(THIS_MODULE, "tape390"); return 0; } static void __exit tape_exit(void) { class_destroy(tape_class); tape_class = NULL; } postcore_initcall(tape_init); module_exit(tape_exit);
gpl-2.0
NaokiXie/android_kernel_samsung_wilcox
lib/mpi/generic_mpih-lshift.c
9905
2151
/* mpihelp-lshift.c - MPI helper functions * Copyright (C) 1994, 1996, 1998, 2001 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. * The GNU MP Library itself is published under the LGPL; * however I decided to publish this code under the plain GPL. */ #include "mpi-internal.h" /* Shift U (pointed to by UP and USIZE digits long) CNT bits to the left * and store the USIZE least significant digits of the result at WP. * Return the bits shifted out from the most significant digit. * * Argument constraints: * 1. 0 < CNT < BITS_PER_MP_LIMB * 2. If the result is to be written over the input, WP must be >= UP. 
*/ mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned int cnt) { mpi_limb_t high_limb, low_limb; unsigned sh_1, sh_2; mpi_size_t i; mpi_limb_t retval; sh_1 = cnt; wp += 1; sh_2 = BITS_PER_MPI_LIMB - sh_1; i = usize - 1; low_limb = up[i]; retval = low_limb >> sh_2; high_limb = low_limb; while (--i >= 0) { low_limb = up[i]; wp[i] = (high_limb << sh_1) | (low_limb >> sh_2); high_limb = low_limb; } wp[i] = high_limb << sh_1; return retval; }
gpl-2.0
drod2169/Linux-3.10.x
arch/unicore32/kernel/ptrace.c
11953
3501
/* * linux/arch/unicore32/kernel/ptrace.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * By Ross Biro 1/23/92 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/signal.h> #include <linux/uaccess.h> /* * this routine will get a word off of the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. * this routine assumes that all the privileged stacks are in our * data space. */ static inline long get_user_reg(struct task_struct *task, int offset) { return task_pt_regs(task)->uregs[offset]; } /* * this routine will put a word on the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. * this routine assumes that all the privileged stacks are in our * data space. */ static inline int put_user_reg(struct task_struct *task, int offset, long data) { struct pt_regs newregs, *regs = task_pt_regs(task); int ret = -EINVAL; newregs = *regs; newregs.uregs[offset] = data; if (valid_user_regs(&newregs)) { regs->uregs[offset] = data; ret = 0; } return ret; } /* * Called by kernel/ptrace.c when detaching.. */ void ptrace_disable(struct task_struct *child) { } /* * We actually access the pt_regs stored on the kernel stack. */ static int ptrace_read_user(struct task_struct *tsk, unsigned long off, unsigned long __user *ret) { unsigned long tmp; tmp = 0; if (off < sizeof(struct pt_regs)) tmp = get_user_reg(tsk, off >> 2); return put_user(tmp, ret); } /* * We actually access the pt_regs stored on the kernel stack. 
*/ static int ptrace_write_user(struct task_struct *tsk, unsigned long off, unsigned long val) { if (off >= sizeof(struct pt_regs)) return 0; return put_user_reg(tsk, off >> 2, val); } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret; unsigned long __user *datap = (unsigned long __user *) data; switch (request) { case PTRACE_PEEKUSR: ret = ptrace_read_user(child, addr, datap); break; case PTRACE_POKEUSR: ret = ptrace_write_user(child, addr, data); break; case PTRACE_GET_THREAD_AREA: ret = put_user(task_pt_regs(child)->UCreg_16, datap); break; default: ret = ptrace_request(child, request, addr, data); break; } return ret; } asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) { unsigned long ip; if (!test_thread_flag(TIF_SYSCALL_TRACE)) return scno; if (!(current->ptrace & PT_PTRACED)) return scno; /* * Save IP. IP is used to denote syscall entry/exit: * IP = 0 -> entry, = 1 -> exit */ ip = regs->UCreg_ip; regs->UCreg_ip = why; current_thread_info()->syscall = scno; /* the 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. -brl */ if (current->exit_code) { send_sig(current->exit_code, current, 1); current->exit_code = 0; } regs->UCreg_ip = ip; return current_thread_info()->syscall; }
gpl-2.0
GuoqingJiang/SLE12-clustermd
drivers/scsi/arm/queue.c
12977
8113
/*
 * linux/drivers/acorn/scsi/queue.c: queue handling primitives
 *
 * Copyright (C) 1997-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Changelog:
 *  15-Sep-1997 RMK	Created.
 *  11-Oct-1997 RMK	Corrected problem with queue_remove_exclude
 *			not updating internal linked list properly
 *			(was causing commands to go missing).
 *  30-Aug-2000 RMK	Use Linux list handling and spinlocks
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/init.h>

#include "../scsi.h"

#define DEBUG

/*
 * One queue entry: a list node carrying a SCSI command pointer.  When
 * DEBUG is defined, the magic word records whether the entry currently
 * lives on the free or the used list so list corruption trips a BUG_ON
 * instead of silently losing commands.
 */
typedef struct queue_entry {
	struct list_head   list;
	struct scsi_cmnd   *SCpnt;
#ifdef DEBUG
	unsigned long	   magic;
#endif
} QE_t;

#ifdef DEBUG
#define QUEUE_MAGIC_FREE	0xf7e1c9a3
#define QUEUE_MAGIC_USED	0xf7e1cc33

#define SET_MAGIC(q,m)	((q)->magic = (m))
#define BAD_MAGIC(q,m)	((q)->magic != (m))
#else
#define SET_MAGIC(q,m)	do { } while (0)
#define BAD_MAGIC(q,m)	(0)
#endif

#include "queue.h"

/* Fixed pool size: each queue pre-allocates this many entries at
 * initialise time; __queue_add fails once they are all in use. */
#define NR_QE	32

/*
 * Function: void queue_initialise (Queue_t *queue)
 * Purpose : initialise a queue
 * Params  : queue - queue to initialise
 * Returns : !0 on success, 0 if the entry pool allocation failed
 */
int queue_initialise (Queue_t *queue)
{
	unsigned int nqueues = NR_QE;
	QE_t *q;

	spin_lock_init(&queue->queue_lock);
	INIT_LIST_HEAD(&queue->head);
	INIT_LIST_HEAD(&queue->free);

	/*
	 * If life was easier, then SCpnt would have a
	 * host-available list head, and we wouldn't
	 * need to keep free lists or allocate this
	 * memory.
	 */
	queue->alloc = q = kmalloc(sizeof(QE_t) * nqueues, GFP_KERNEL);
	if (q) {
		for (; nqueues; q++, nqueues--) {
			SET_MAGIC(q, QUEUE_MAGIC_FREE);
			q->SCpnt = NULL;
			list_add(&q->list, &queue->free);
		}
	}

	return queue->alloc != NULL;
}

/*
 * Function: void queue_free (Queue_t *queue)
 * Purpose : free a queue
 * Params  : queue - queue to free
 * Note    : releases the whole entry pool in one kfree of queue->alloc;
 *	     warns (but proceeds) if commands are still queued.
 */
void queue_free (Queue_t *queue)
{
	if (!list_empty(&queue->head))
		printk(KERN_WARNING "freeing non-empty queue %p\n", queue);
	kfree(queue->alloc);
}


/*
 * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
 * Purpose : Add a new command onto a queue, adding REQUEST_SENSE to head.
 * Params  : queue - destination queue
 *	     SCpnt - command to add
 *	     head  - add command to head of queue
 * Returns : 0 on error (entry pool exhausted), !0 on success
 */
int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
{
	unsigned long flags;
	struct list_head *l;
	QE_t *q;
	int ret = 0;

	spin_lock_irqsave(&queue->queue_lock, flags);
	if (list_empty(&queue->free))
		goto empty;

	/* take an entry off the free list and mark it as in use */
	l = queue->free.next;
	list_del(l);

	q = list_entry(l, QE_t, list);
	BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_FREE));

	SET_MAGIC(q, QUEUE_MAGIC_USED);
	q->SCpnt = SCpnt;

	if (head)
		list_add(l, &queue->head);
	else
		list_add_tail(l, &queue->head);

	ret = 1;
empty:
	spin_unlock_irqrestore(&queue->queue_lock, flags);
	return ret;
}

/*
 * Detach an entry from the used list, recycle it onto the free list and
 * return the command it carried.  Caller must hold queue->queue_lock.
 */
static struct scsi_cmnd *__queue_remove(Queue_t *queue, struct list_head *ent)
{
	QE_t *q;

	/*
	 * Move the entry from the "used" list onto the "free" list
	 */
	list_del(ent);
	q = list_entry(ent, QE_t, list);
	BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_USED));

	SET_MAGIC(q, QUEUE_MAGIC_FREE);
	list_add(ent, &queue->free);

	return q->SCpnt;
}

/*
 * Function: struct scsi_cmnd *queue_remove_exclude (queue, exclude)
 * Purpose : remove a SCSI command from a queue
 * Params  : queue   - queue to remove command from
 *	     exclude - bit array of target&lun which is busy
 * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available
 */
struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude)
{
	unsigned long flags;
	struct list_head *l;
	struct scsi_cmnd *SCpnt = NULL;

	spin_lock_irqsave(&queue->queue_lock, flags);
	list_for_each(l, &queue->head) {
		QE_t *q = list_entry(l, QE_t, list);
		/* bit index (id * 8 + lun) assumes at most 8 LUNs per
		 * target id */
		if (!test_bit(q->SCpnt->device->id * 8 + q->SCpnt->device->lun,
			      exclude)) {
			SCpnt = __queue_remove(queue, l);
			break;
		}
	}
	spin_unlock_irqrestore(&queue->queue_lock, flags);

	return SCpnt;
}

/*
 * Function: struct scsi_cmnd *queue_remove (queue)
 * Purpose : removes first SCSI command from a queue
 * Params  : queue   - queue to remove command from
 * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available
 */
struct scsi_cmnd *queue_remove(Queue_t *queue)
{
	unsigned long flags;
	struct scsi_cmnd *SCpnt = NULL;

	spin_lock_irqsave(&queue->queue_lock, flags);
	if (!list_empty(&queue->head))
		SCpnt = __queue_remove(queue, queue->head.next);
	spin_unlock_irqrestore(&queue->queue_lock, flags);

	return SCpnt;
}

/*
 * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag)
 * Purpose : remove a SCSI command from the queue for a specified target/lun/tag
 * Params  : queue  - queue to remove command from
 *	     target - target that we want
 *	     lun    - lun on device
 *	     tag    - tag on device
 * Returns : struct scsi_cmnd if successful, or NULL if no command satisfies requirements
 */
struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
					 int tag)
{
	unsigned long flags;
	struct list_head *l;
	struct scsi_cmnd *SCpnt = NULL;

	spin_lock_irqsave(&queue->queue_lock, flags);
	list_for_each(l, &queue->head) {
		QE_t *q = list_entry(l, QE_t, list);
		if (q->SCpnt->device->id == target &&
		    q->SCpnt->device->lun == lun &&
		    q->SCpnt->tag == tag) {
			SCpnt = __queue_remove(queue, l);
			break;
		}
	}
	spin_unlock_irqrestore(&queue->queue_lock, flags);

	return SCpnt;
}

/*
 * Function: queue_remove_all_target(queue, target)
 * Purpose : remove all SCSI commands from the queue for a specified target
 * Params  : queue  - queue to remove command from
 *	     target - target device id
 * Returns : nothing
 * Note    : safe with list_for_each because __queue_remove only moves the
 *	     current entry; the removed commands are dropped, not returned.
 */
void queue_remove_all_target(Queue_t *queue, int target)
{
	unsigned long flags;
	struct list_head *l;

	spin_lock_irqsave(&queue->queue_lock, flags);
	list_for_each(l, &queue->head) {
		QE_t *q = list_entry(l, QE_t, list);
		if (q->SCpnt->device->id == target)
			__queue_remove(queue, l);
	}
	spin_unlock_irqrestore(&queue->queue_lock, flags);
}

/*
 * Function: int queue_probetgtlun (queue, target, lun)
 * Purpose : check to see if we have a command in the queue for the specified
 *	     target/lun.
 * Params  : queue  - queue to look in
 *	     target - target we want to probe
 *	     lun    - lun on target
 * Returns : 0 if not found, != 0 if found
 */
int queue_probetgtlun (Queue_t *queue, int target, int lun)
{
	unsigned long flags;
	struct list_head *l;
	int found = 0;

	spin_lock_irqsave(&queue->queue_lock, flags);
	list_for_each(l, &queue->head) {
		QE_t *q = list_entry(l, QE_t, list);
		if (q->SCpnt->device->id == target &&
		    q->SCpnt->device->lun == lun) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&queue->queue_lock, flags);

	return found;
}

/*
 * Function: int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt)
 * Purpose : remove a specific command from the queues
 * Params  : queue - queue to look in
 *	     SCpnt - command to find
 * Returns : 0 if not found
 */
int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt)
{
	unsigned long flags;
	struct list_head *l;
	int found = 0;

	spin_lock_irqsave(&queue->queue_lock, flags);
	list_for_each(l, &queue->head) {
		QE_t *q = list_entry(l, QE_t, list);
		if (q->SCpnt == SCpnt) {
			__queue_remove(queue, l);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&queue->queue_lock, flags);

	return found;
}

EXPORT_SYMBOL(queue_initialise);
EXPORT_SYMBOL(queue_free);
EXPORT_SYMBOL(__queue_add);
EXPORT_SYMBOL(queue_remove);
EXPORT_SYMBOL(queue_remove_exclude);
EXPORT_SYMBOL(queue_remove_tgtluntag);
EXPORT_SYMBOL(queue_remove_cmd);
EXPORT_SYMBOL(queue_remove_all_target);
EXPORT_SYMBOL(queue_probetgtlun);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SCSI command queueing");
MODULE_LICENSE("GPL");
gpl-2.0
webos-internals/webos-linux-kernel
sound/core/seq/oss/seq_oss_rw.c
14769
5346
/*
 * OSS compatible sequencer driver
 *
 * read/write/select interface to device file
 *
 * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include "seq_oss_device.h"
#include "seq_oss_readq.h"
#include "seq_oss_writeq.h"
#include "seq_oss_synth.h"
#include <sound/seq_oss_legacy.h>
#include "seq_oss_event.h"
#include "seq_oss_timer.h"
#include "../seq_clientmgr.h"


/*
 * prototypes
 */
static int insert_queue(struct seq_oss_devinfo *dp, union evrec *rec, struct file *opt);


/*
 * read interface
 */

/*
 * Copy queued sequencer events to user space, at most `count` bytes.
 * Blocks (unless O_NONBLOCK) while the read queue is empty and nothing
 * has been delivered yet.  Returns the byte count delivered, or a
 * negative errno when nothing was copied.
 */
int
snd_seq_oss_read(struct seq_oss_devinfo *dp, char __user *buf, int count)
{
	struct seq_oss_readq *readq = dp->readq;
	int result = 0, err = 0;
	int ev_len;
	union evrec rec;
	unsigned long flags;

	if (readq == NULL || !is_read_mode(dp->file_mode))
		return -ENXIO;

	while (count >= SHORT_EVENT_SIZE) {
		snd_seq_oss_readq_lock(readq, flags);
		err = snd_seq_oss_readq_pick(readq, &rec);
		/* only block for the first event; once something has been
		 * delivered (result > 0) an empty queue ends the read */
		if (err == -EAGAIN &&
		    !is_nonblock_mode(dp->file_mode) && result == 0) {
			snd_seq_oss_readq_unlock(readq, flags);
			snd_seq_oss_readq_wait(readq);
			snd_seq_oss_readq_lock(readq, flags);
			if (signal_pending(current))
				err = -ERESTARTSYS;
			else
				err = snd_seq_oss_readq_pick(readq, &rec);
		}
		if (err < 0) {
			snd_seq_oss_readq_unlock(readq, flags);
			break;
		}
		ev_len = ev_length(&rec);
		/*
		 * NOTE(review): this condition looks inverted -- it stops
		 * the loop exactly when the event *would* fit in the
		 * remaining buffer (ev_len < count) and proceeds to copy
		 * when ev_len >= count.  Verify against the upstream ALSA
		 * tree before changing; preserved here as-is.
		 */
		if (ev_len < count) {
			snd_seq_oss_readq_unlock(readq, flags);
			break;
		}
		/* consume the event from the queue before dropping the lock */
		snd_seq_oss_readq_free(readq);
		snd_seq_oss_readq_unlock(readq, flags);
		if (copy_to_user(buf, &rec, ev_len)) {
			err = -EFAULT;
			break;
		}
		result += ev_len;
		buf += ev_len;
		count -= ev_len;
	}
	return result > 0 ? result : err;
}


/*
 * write interface
 */

/*
 * Parse user-space event records and enqueue them for dispatch.
 * Handles SEQ_FULLSIZE patch loading as a special case (whole buffer is
 * handed to the synth layer and the call returns immediately).  Returns
 * the number of bytes consumed, or a negative errno when none were.
 */
int
snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int count, struct file *opt)
{
	int result = 0, err = 0;
	int ev_size, fmt;
	union evrec rec;

	if (!is_write_mode(dp->file_mode) || dp->writeq == NULL)
		return -ENXIO;

	while (count >= SHORT_EVENT_SIZE) {
		if (copy_from_user(&rec, buf, SHORT_EVENT_SIZE)) {
			err = -EFAULT;
			break;
		}
		if (rec.s.code == SEQ_FULLSIZE) {
			/* load patch */
			if (result > 0) {
				err = -EINVAL;
				break;
			}
			fmt = (*(unsigned short *)rec.c) & 0xffff;
			/* FIXME the return value isn't correct */
			return snd_seq_oss_synth_load_patch(dp, rec.s.dev,
							    fmt, buf, 0, count);
		}
		if (ev_is_long(&rec)) {
			/* extended code */
			if (rec.s.code == SEQ_EXTENDED &&
			    dp->seq_mode == SNDRV_SEQ_OSS_MODE_MUSIC) {
				err = -EINVAL;
				break;
			}
			ev_size = LONG_EVENT_SIZE;
			if (count < ev_size)
				break;
			/* copy the rest 4 bytes */
			if (copy_from_user(rec.c + SHORT_EVENT_SIZE,
					   buf + SHORT_EVENT_SIZE,
					   LONG_EVENT_SIZE - SHORT_EVENT_SIZE)) {
				err = -EFAULT;
				break;
			}
		} else {
			/* old-type code */
			if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_MUSIC) {
				err = -EINVAL;
				break;
			}
			ev_size = SHORT_EVENT_SIZE;
		}
		/* insert queue */
		if ((err = insert_queue(dp, &rec, opt)) < 0)
			break;

		result += ev_size;
		buf += ev_size;
		count -= ev_size;
	}
	return result > 0 ? result : err;
}


/*
 * insert event record to write queue
 * return: 0 = OK, non-zero = NG
 */
static int insert_queue(struct seq_oss_devinfo *dp, union evrec *rec, struct file *opt)
{
	int rc = 0;
	struct snd_seq_event event;

	/* if this is a timing event, process the current time */
	if (snd_seq_oss_process_timer_event(dp->timer, rec))
		return 0; /* no need to insert queue */

	/* parse this event */
	memset(&event, 0, sizeof(event));
	/* set dummy -- to be sure */
	event.type = SNDRV_SEQ_EVENT_NOTEOFF;
	snd_seq_oss_fill_addr(dp, &event, dp->addr.port, dp->addr.client);

	if (snd_seq_oss_process_event(dp, rec, &event))
		return 0; /* invalid event - no need to insert queue */

	/* stamp the event with the current tick from the OSS timer */
	event.time.tick = snd_seq_oss_timer_cur_tick(dp->timer);
	if (dp->timer->realtime || !dp->timer->running) {
		/* timer not running: dispatch immediately */
		snd_seq_oss_dispatch(dp, &event, 0, 0);
	} else {
		/* queue it; blocking variant unless the file is O_NONBLOCK */
		if (is_nonblock_mode(dp->file_mode))
			rc = snd_seq_kernel_client_enqueue(dp->cseq, &event, 0, 0);
		else
			rc = snd_seq_kernel_client_enqueue_blocking(dp->cseq, &event, opt, 0, 0);
	}
	return rc;
}


/*
 * select / poll
 */

/*
 * Poll callback: POLLIN when the read queue has events (read mode),
 * POLLOUT when the sequencer client can accept writes (write mode).
 */
unsigned int
snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait)
{
	unsigned int mask = 0;

	/* input */
	if (dp->readq && is_read_mode(dp->file_mode)) {
		if (snd_seq_oss_readq_poll(dp->readq, file, wait))
			mask |= POLLIN | POLLRDNORM;
	}

	/* output */
	if (dp->writeq && is_write_mode(dp->file_mode)) {
		if (snd_seq_kernel_client_write_poll(dp->cseq, file, wait))
			mask |= POLLOUT | POLLWRNORM;
	}
	return mask;
}
gpl-2.0
ka6sox/nook_kernel
arch/mn10300/unit-asb2305/unit-init.c
178
1466
/* ASB2305 Initialisation * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/pci.h> #include <asm/io.h> #include <asm/setup.h> #include <asm/processor.h> #include <asm/cpu/intctl-regs.h> #include <asm/cpu/rtc-regs.h> #include <asm/cpu/serial-regs.h> #include <asm/unit/serial.h> /* * initialise some of the unit hardware before gdbstub is set up */ asmlinkage void __init unit_init(void) { #ifndef CONFIG_GDBSTUB_ON_TTYSx /* set the 16550 interrupt line to level 3 if not being used for GDB */ set_intr_level(XIRQ0, GxICR_LEVEL_3); #endif } /* * initialise the rest of the unit hardware after gdbstub is ready */ void __init unit_setup(void) { #ifdef CONFIG_PCI unit_pci_init(); #endif } /* * initialise the external interrupts used by a unit of this type */ void __init unit_init_IRQ(void) { unsigned int extnum; for (extnum = 0; extnum < NR_XIRQS; extnum++) { switch (GET_XIRQ_TRIGGER(extnum)) { case XIRQ_TRIGGER_HILEVEL: case XIRQ_TRIGGER_LOWLEVEL: set_intr_postackable(XIRQ2IRQ(extnum)); break; default: break; } } }
gpl-2.0
gao-feng/net
drivers/net/hyperv/netvsc.c
178
25974
/* * Copyright (c) 2009, Microsoft Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Authors: * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include "hyperv_net.h" static struct netvsc_device *alloc_net_device(struct hv_device *device) { struct netvsc_device *net_device; struct net_device *ndev = hv_get_drvdata(device); net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL); if (!net_device) return NULL; init_waitqueue_head(&net_device->wait_drain); net_device->start_remove = false; net_device->destroy = false; net_device->dev = device; net_device->ndev = ndev; hv_set_drvdata(device, net_device); return net_device; } static struct netvsc_device *get_outbound_net_device(struct hv_device *device) { struct netvsc_device *net_device; net_device = hv_get_drvdata(device); if (net_device && net_device->destroy) net_device = NULL; return net_device; } static struct netvsc_device *get_inbound_net_device(struct hv_device *device) { struct netvsc_device *net_device; net_device = hv_get_drvdata(device); if (!net_device) goto get_in_err; if (net_device->destroy && 
atomic_read(&net_device->num_outstanding_sends) == 0) net_device = NULL; get_in_err: return net_device; } static int netvsc_destroy_recv_buf(struct netvsc_device *net_device) { struct nvsp_message *revoke_packet; int ret = 0; struct net_device *ndev = net_device->ndev; /* * If we got a section count, it means we received a * SendReceiveBufferComplete msg (ie sent * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need * to send a revoke msg here */ if (net_device->recv_section_cnt) { /* Send the revoke receive buffer */ revoke_packet = &net_device->revoke_packet; memset(revoke_packet, 0, sizeof(struct nvsp_message)); revoke_packet->hdr.msg_type = NVSP_MSG1_TYPE_REVOKE_RECV_BUF; revoke_packet->msg.v1_msg. revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID; ret = vmbus_sendpacket(net_device->dev->channel, revoke_packet, sizeof(struct nvsp_message), (unsigned long)revoke_packet, VM_PKT_DATA_INBAND, 0); /* * If we failed here, we might as well return and * have a leak rather than continue and a bugchk */ if (ret != 0) { netdev_err(ndev, "unable to send " "revoke receive buffer to netvsp\n"); return ret; } } /* Teardown the gpadl on the vsp end */ if (net_device->recv_buf_gpadl_handle) { ret = vmbus_teardown_gpadl(net_device->dev->channel, net_device->recv_buf_gpadl_handle); /* If we failed here, we might as well return and have a leak * rather than continue and a bugchk */ if (ret != 0) { netdev_err(ndev, "unable to teardown receive buffer's gpadl\n"); return ret; } net_device->recv_buf_gpadl_handle = 0; } if (net_device->recv_buf) { /* Free up the receive buffer */ free_pages((unsigned long)net_device->recv_buf, get_order(net_device->recv_buf_size)); net_device->recv_buf = NULL; } if (net_device->recv_section) { net_device->recv_section_cnt = 0; kfree(net_device->recv_section); net_device->recv_section = NULL; } return ret; } static int netvsc_init_recv_buf(struct hv_device *device) { int ret = 0; int t; struct netvsc_device *net_device; struct nvsp_message 
*init_packet; struct net_device *ndev; net_device = get_outbound_net_device(device); if (!net_device) return -ENODEV; ndev = net_device->ndev; net_device->recv_buf = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, get_order(net_device->recv_buf_size)); if (!net_device->recv_buf) { netdev_err(ndev, "unable to allocate receive " "buffer of size %d\n", net_device->recv_buf_size); ret = -ENOMEM; goto cleanup; } /* * Establish the gpadl handle for this buffer on this * channel. Note: This call uses the vmbus connection rather * than the channel to establish the gpadl handle. */ ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf, net_device->recv_buf_size, &net_device->recv_buf_gpadl_handle); if (ret != 0) { netdev_err(ndev, "unable to establish receive buffer's gpadl\n"); goto cleanup; } /* Notify the NetVsp of the gpadl handle */ init_packet = &net_device->channel_init_pkt; memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF; init_packet->msg.v1_msg.send_recv_buf. gpadl_handle = net_device->recv_buf_gpadl_handle; init_packet->msg.v1_msg. send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID; /* Send the gpadl notification request */ ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) { netdev_err(ndev, "unable to send receive buffer's gpadl to netvsp\n"); goto cleanup; } t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); BUG_ON(t == 0); /* Check the response */ if (init_packet->msg.v1_msg. send_recv_buf_complete.status != NVSP_STAT_SUCCESS) { netdev_err(ndev, "Unable to complete receive buffer " "initialization with NetVsp - status %d\n", init_packet->msg.v1_msg. send_recv_buf_complete.status); ret = -EINVAL; goto cleanup; } /* Parse the response */ net_device->recv_section_cnt = init_packet->msg. 
v1_msg.send_recv_buf_complete.num_sections; net_device->recv_section = kmemdup( init_packet->msg.v1_msg.send_recv_buf_complete.sections, net_device->recv_section_cnt * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL); if (net_device->recv_section == NULL) { ret = -EINVAL; goto cleanup; } /* * For 1st release, there should only be 1 section that represents the * entire receive buffer */ if (net_device->recv_section_cnt != 1 || net_device->recv_section->offset != 0) { ret = -EINVAL; goto cleanup; } goto exit; cleanup: netvsc_destroy_recv_buf(net_device); exit: return ret; } /* Negotiate NVSP protocol version */ static int negotiate_nvsp_ver(struct hv_device *device, struct netvsc_device *net_device, struct nvsp_message *init_packet, u32 nvsp_ver) { int ret, t; memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT; init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver; init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver; /* Send the init request */ ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) return ret; t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); if (t == 0) return -ETIMEDOUT; if (init_packet->msg.init_msg.init_complete.status != NVSP_STAT_SUCCESS) return -EINVAL; if (nvsp_ver != NVSP_PROTOCOL_VERSION_2) return 0; /* NVSPv2 only: Send NDIS config */ memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG; init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu; init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1; ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, VM_PKT_DATA_INBAND, 0); return ret; } static int netvsc_connect_vsp(struct hv_device *device) { int ret; struct netvsc_device 
*net_device; struct nvsp_message *init_packet; int ndis_version; struct net_device *ndev; net_device = get_outbound_net_device(device); if (!net_device) return -ENODEV; ndev = net_device->ndev; init_packet = &net_device->channel_init_pkt; /* Negotiate the latest NVSP protocol supported */ if (negotiate_nvsp_ver(device, net_device, init_packet, NVSP_PROTOCOL_VERSION_2) == 0) { net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2; } else if (negotiate_nvsp_ver(device, net_device, init_packet, NVSP_PROTOCOL_VERSION_1) == 0) { net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1; } else { ret = -EPROTO; goto cleanup; } pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version); /* Send the ndis version */ memset(init_packet, 0, sizeof(struct nvsp_message)); ndis_version = 0x00050001; init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER; init_packet->msg.v1_msg. send_ndis_ver.ndis_major_ver = (ndis_version & 0xFFFF0000) >> 16; init_packet->msg.v1_msg. send_ndis_ver.ndis_minor_ver = ndis_version & 0xFFFF; /* Send the init request */ ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, VM_PKT_DATA_INBAND, 0); if (ret != 0) goto cleanup; /* Post the big receive buffer to NetVSP */ ret = netvsc_init_recv_buf(device); cleanup: return ret; } static void netvsc_disconnect_vsp(struct netvsc_device *net_device) { netvsc_destroy_recv_buf(net_device); } /* * netvsc_device_remove - Callback when the root bus device is removed */ int netvsc_device_remove(struct hv_device *device) { struct netvsc_device *net_device; struct hv_netvsc_packet *netvsc_packet, *pos; unsigned long flags; net_device = hv_get_drvdata(device); netvsc_disconnect_vsp(net_device); /* * Since we have already drained, we don't need to busy wait * as was done in final_release_stor_device() * Note that we cannot set the ext pointer to NULL until * we have drained - to drain the outgoing packets, we need to * allow incoming packets. 
*/ spin_lock_irqsave(&device->channel->inbound_lock, flags); hv_set_drvdata(device, NULL); spin_unlock_irqrestore(&device->channel->inbound_lock, flags); /* * At this point, no one should be accessing net_device * except in here */ dev_notice(&device->device, "net device safe to remove\n"); /* Now, we can close the channel safely */ vmbus_close(device->channel); /* Release all resources */ list_for_each_entry_safe(netvsc_packet, pos, &net_device->recv_pkt_list, list_ent) { list_del(&netvsc_packet->list_ent); kfree(netvsc_packet); } kfree(net_device); return 0; } #define RING_AVAIL_PERCENT_HIWATER 20 #define RING_AVAIL_PERCENT_LOWATER 10 /* * Get the percentage of available bytes to write in the ring. * The return value is in range from 0 to 100. */ static inline u32 hv_ringbuf_avail_percent( struct hv_ring_buffer_info *ring_info) { u32 avail_read, avail_write; hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write); return avail_write * 100 / ring_info->ring_datasize; } static void netvsc_send_completion(struct hv_device *device, struct vmpacket_descriptor *packet) { struct netvsc_device *net_device; struct nvsp_message *nvsp_packet; struct hv_netvsc_packet *nvsc_packet; struct net_device *ndev; net_device = get_inbound_net_device(device); if (!net_device) return; ndev = net_device->ndev; nvsp_packet = (struct nvsp_message *)((unsigned long)packet + (packet->offset8 << 3)); if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) || (nvsp_packet->hdr.msg_type == NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) || (nvsp_packet->hdr.msg_type == NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) { /* Copy the response back */ memcpy(&net_device->channel_init_pkt, nvsp_packet, sizeof(struct nvsp_message)); complete(&net_device->channel_init_wait); } else if (nvsp_packet->hdr.msg_type == NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) { int num_outstanding_sends; /* Get the send context */ nvsc_packet = (struct hv_netvsc_packet *)(unsigned long) packet->trans_id; /* Notify the 
layer above us */ nvsc_packet->completion.send.send_completion( nvsc_packet->completion.send.send_completion_ctx); num_outstanding_sends = atomic_dec_return(&net_device->num_outstanding_sends); if (net_device->destroy && num_outstanding_sends == 0) wake_up(&net_device->wait_drain); if (netif_queue_stopped(ndev) && !net_device->start_remove && (hv_ringbuf_avail_percent(&device->channel->outbound) > RING_AVAIL_PERCENT_HIWATER || num_outstanding_sends < 1)) netif_wake_queue(ndev); } else { netdev_err(ndev, "Unknown send completion packet type- " "%d received!!\n", nvsp_packet->hdr.msg_type); } } int netvsc_send(struct hv_device *device, struct hv_netvsc_packet *packet) { struct netvsc_device *net_device; int ret = 0; struct nvsp_message sendMessage; struct net_device *ndev; net_device = get_outbound_net_device(device); if (!net_device) return -ENODEV; ndev = net_device->ndev; sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT; if (packet->is_data_pkt) { /* 0 is RMC_DATA; */ sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0; } else { /* 1 is RMC_CONTROL; */ sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1; } /* Not using send buffer section */ sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index = 0xFFFFFFFF; sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; if (packet->page_buf_cnt) { ret = vmbus_sendpacket_pagebuffer(device->channel, packet->page_buf, packet->page_buf_cnt, &sendMessage, sizeof(struct nvsp_message), (unsigned long)packet); } else { ret = vmbus_sendpacket(device->channel, &sendMessage, sizeof(struct nvsp_message), (unsigned long)packet, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); } if (ret == 0) { atomic_inc(&net_device->num_outstanding_sends); if (hv_ringbuf_avail_percent(&device->channel->outbound) < RING_AVAIL_PERCENT_LOWATER) { netif_stop_queue(ndev); if (atomic_read(&net_device-> num_outstanding_sends) < 1) netif_wake_queue(ndev); } } else if (ret == -EAGAIN) { netif_stop_queue(ndev); 
if (atomic_read(&net_device->num_outstanding_sends) < 1) { netif_wake_queue(ndev); ret = -ENOSPC; } } else { netdev_err(ndev, "Unable to send packet %p ret %d\n", packet, ret); } return ret; } static void netvsc_send_recv_completion(struct hv_device *device, u64 transaction_id, u32 status) { struct nvsp_message recvcompMessage; int retries = 0; int ret; struct net_device *ndev; struct netvsc_device *net_device = hv_get_drvdata(device); ndev = net_device->ndev; recvcompMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE; recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status; retry_send_cmplt: /* Send the completion */ ret = vmbus_sendpacket(device->channel, &recvcompMessage, sizeof(struct nvsp_message), transaction_id, VM_PKT_COMP, 0); if (ret == 0) { /* success */ /* no-op */ } else if (ret == -EAGAIN) { /* no more room...wait a bit and attempt to retry 3 times */ retries++; netdev_err(ndev, "unable to send receive completion pkt" " (tid %llx)...retrying %d\n", transaction_id, retries); if (retries < 4) { udelay(100); goto retry_send_cmplt; } else { netdev_err(ndev, "unable to send receive " "completion pkt (tid %llx)...give up retrying\n", transaction_id); } } else { netdev_err(ndev, "unable to send receive " "completion pkt - %llx\n", transaction_id); } } /* Send a receive completion packet to RNDIS device (ie NetVsp) */ static void netvsc_receive_completion(void *context) { struct hv_netvsc_packet *packet = context; struct hv_device *device = packet->device; struct netvsc_device *net_device; u64 transaction_id = 0; bool fsend_receive_comp = false; unsigned long flags; struct net_device *ndev; u32 status = NVSP_STAT_NONE; /* * Even though it seems logical to do a GetOutboundNetDevice() here to * send out receive completion, we are using GetInboundNetDevice() * since we may have disable outbound traffic already. */ net_device = get_inbound_net_device(device); if (!net_device) return; ndev = net_device->ndev; /* Overloading use of the lock. 
*/ spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags); if (packet->status != NVSP_STAT_SUCCESS) packet->xfer_page_pkt->status = NVSP_STAT_FAIL; packet->xfer_page_pkt->count--; /* * Last one in the line that represent 1 xfer page packet. * Return the xfer page packet itself to the freelist */ if (packet->xfer_page_pkt->count == 0) { fsend_receive_comp = true; transaction_id = packet->completion.recv.recv_completion_tid; status = packet->xfer_page_pkt->status; list_add_tail(&packet->xfer_page_pkt->list_ent, &net_device->recv_pkt_list); } /* Put the packet back */ list_add_tail(&packet->list_ent, &net_device->recv_pkt_list); spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags); /* Send a receive completion for the xfer page packet */ if (fsend_receive_comp) netvsc_send_recv_completion(device, transaction_id, status); } static void netvsc_receive(struct hv_device *device, struct vmpacket_descriptor *packet) { struct netvsc_device *net_device; struct vmtransfer_page_packet_header *vmxferpage_packet; struct nvsp_message *nvsp_packet; struct hv_netvsc_packet *netvsc_packet = NULL; /* struct netvsc_driver *netvscDriver; */ struct xferpage_packet *xferpage_packet = NULL; int i; int count = 0; unsigned long flags; struct net_device *ndev; LIST_HEAD(listHead); net_device = get_inbound_net_device(device); if (!net_device) return; ndev = net_device->ndev; /* * All inbound packets other than send completion should be xfer page * packet */ if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) { netdev_err(ndev, "Unknown packet type received - %d\n", packet->type); return; } nvsp_packet = (struct nvsp_message *)((unsigned long)packet + (packet->offset8 << 3)); /* Make sure this is a valid nvsp packet */ if (nvsp_packet->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT) { netdev_err(ndev, "Unknown nvsp packet type received-" " %d\n", nvsp_packet->hdr.msg_type); return; } vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet; if 
(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) { netdev_err(ndev, "Invalid xfer page set id - " "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID, vmxferpage_packet->xfer_pageset_id); return; } /* * Grab free packets (range count + 1) to represent this xfer * page packet. +1 to represent the xfer page packet itself. * We grab it here so that we know exactly how many we can * fulfil */ spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags); while (!list_empty(&net_device->recv_pkt_list)) { list_move_tail(net_device->recv_pkt_list.next, &listHead); if (++count == vmxferpage_packet->range_cnt + 1) break; } spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags); /* * We need at least 2 netvsc pkts (1 to represent the xfer * page and at least 1 for the range) i.e. we can handled * some of the xfer page packet ranges... */ if (count < 2) { netdev_err(ndev, "Got only %d netvsc pkt...needed " "%d pkts. Dropping this xfer page packet completely!\n", count, vmxferpage_packet->range_cnt + 1); /* Return it to the freelist */ spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags); for (i = count; i != 0; i--) { list_move_tail(listHead.next, &net_device->recv_pkt_list); } spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags); netvsc_send_recv_completion(device, vmxferpage_packet->d.trans_id, NVSP_STAT_FAIL); return; } /* Remove the 1st packet to represent the xfer page packet itself */ xferpage_packet = (struct xferpage_packet *)listHead.next; list_del(&xferpage_packet->list_ent); xferpage_packet->status = NVSP_STAT_SUCCESS; /* This is how much we can satisfy */ xferpage_packet->count = count - 1; if (xferpage_packet->count != vmxferpage_packet->range_cnt) { netdev_err(ndev, "Needed %d netvsc pkts to satisfy " "this xfer page...got %d\n", vmxferpage_packet->range_cnt, xferpage_packet->count); } /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ for (i = 0; i < (count - 1); i++) { netvsc_packet = (struct 
hv_netvsc_packet *)listHead.next; list_del(&netvsc_packet->list_ent); /* Initialize the netvsc packet */ netvsc_packet->status = NVSP_STAT_SUCCESS; netvsc_packet->xfer_page_pkt = xferpage_packet; netvsc_packet->completion.recv.recv_completion = netvsc_receive_completion; netvsc_packet->completion.recv.recv_completion_ctx = netvsc_packet; netvsc_packet->device = device; /* Save this so that we can send it back */ netvsc_packet->completion.recv.recv_completion_tid = vmxferpage_packet->d.trans_id; netvsc_packet->data = (void *)((unsigned long)net_device-> recv_buf + vmxferpage_packet->ranges[i].byte_offset); netvsc_packet->total_data_buflen = vmxferpage_packet->ranges[i].byte_count; /* Pass it to the upper layer */ rndis_filter_receive(device, netvsc_packet); netvsc_receive_completion(netvsc_packet-> completion.recv.recv_completion_ctx); } } static void netvsc_channel_cb(void *context) { int ret; struct hv_device *device = context; struct netvsc_device *net_device; u32 bytes_recvd; u64 request_id; unsigned char *packet; struct vmpacket_descriptor *desc; unsigned char *buffer; int bufferlen = NETVSC_PACKET_SIZE; struct net_device *ndev; packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char), GFP_ATOMIC); if (!packet) return; buffer = packet; net_device = get_inbound_net_device(device); if (!net_device) goto out; ndev = net_device->ndev; do { ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen, &bytes_recvd, &request_id); if (ret == 0) { if (bytes_recvd > 0) { desc = (struct vmpacket_descriptor *)buffer; switch (desc->type) { case VM_PKT_COMP: netvsc_send_completion(device, desc); break; case VM_PKT_DATA_USING_XFER_PAGES: netvsc_receive(device, desc); break; default: netdev_err(ndev, "unhandled packet type %d, " "tid %llx len %d\n", desc->type, request_id, bytes_recvd); break; } /* reset */ if (bufferlen > NETVSC_PACKET_SIZE) { kfree(buffer); buffer = packet; bufferlen = NETVSC_PACKET_SIZE; } } else { /* reset */ if (bufferlen > NETVSC_PACKET_SIZE) { 
kfree(buffer); buffer = packet; bufferlen = NETVSC_PACKET_SIZE; } break; } } else if (ret == -ENOBUFS) { /* Handle large packet */ buffer = kmalloc(bytes_recvd, GFP_ATOMIC); if (buffer == NULL) { /* Try again next time around */ netdev_err(ndev, "unable to allocate buffer of size " "(%d)!!\n", bytes_recvd); break; } bufferlen = bytes_recvd; } } while (1); out: kfree(buffer); return; } /* * netvsc_device_add - Callback when the device belonging to this * driver is added */ int netvsc_device_add(struct hv_device *device, void *additional_info) { int ret = 0; int i; int ring_size = ((struct netvsc_device_info *)additional_info)->ring_size; struct netvsc_device *net_device; struct hv_netvsc_packet *packet, *pos; struct net_device *ndev; net_device = alloc_net_device(device); if (!net_device) { ret = -ENOMEM; goto cleanup; } /* * Coming into this function, struct net_device * is * registered as the driver private data. * In alloc_net_device(), we register struct netvsc_device * * as the driver private data and stash away struct net_device * * in struct netvsc_device *. 
*/ ndev = net_device->ndev; /* Initialize the NetVSC channel extension */ net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; spin_lock_init(&net_device->recv_pkt_list_lock); INIT_LIST_HEAD(&net_device->recv_pkt_list); for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) { packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL); if (!packet) break; list_add_tail(&packet->list_ent, &net_device->recv_pkt_list); } init_completion(&net_device->channel_init_wait); /* Open the channel */ ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, ring_size * PAGE_SIZE, NULL, 0, netvsc_channel_cb, device); if (ret != 0) { netdev_err(ndev, "unable to open channel: %d\n", ret); goto cleanup; } /* Channel is opened */ pr_info("hv_netvsc channel opened successfully\n"); /* Connect with the NetVsp */ ret = netvsc_connect_vsp(device); if (ret != 0) { netdev_err(ndev, "unable to connect to NetVSP - %d\n", ret); goto close; } return ret; close: /* Now, we can close the channel safely */ vmbus_close(device->channel); cleanup: if (net_device) { list_for_each_entry_safe(packet, pos, &net_device->recv_pkt_list, list_ent) { list_del(&packet->list_ent); kfree(packet); } kfree(net_device); } return ret; }
gpl-2.0
matanb10/linux
drivers/watchdog/cpwd.c
178
16264
/* cpwd.c - driver implementation for hardware watchdog * timers found on Sun Microsystems CP1400 and CP1500 boards. * * This device supports both the generic Linux watchdog * interface and Solaris-compatible ioctls as best it is * able. * * NOTE: CP1400 systems appear to have a defective intr_mask * register on the PLD, preventing the disabling of * timer interrupts. We use a timer to periodically * reset 'stopped' watchdogs on affected platforms. * * Copyright (c) 2000 Eric Brower (ebrower@usa.net) * Copyright (C) 2008 David S. Miller <davem@davemloft.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/major.h> #include <linux/miscdevice.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/uaccess.h> #include <asm/irq.h> #include <asm/watchdog.h> #define DRIVER_NAME "cpwd" #define WD_OBPNAME "watchdog" #define WD_BADMODEL "SUNW,501-5336" #define WD_BTIMEOUT (jiffies + (HZ * 1000)) #define WD_BLIMIT 0xFFFF #define WD0_MINOR 212 #define WD1_MINOR 213 #define WD2_MINOR 214 /* Internal driver definitions. 
*/ #define WD0_ID 0 #define WD1_ID 1 #define WD2_ID 2 #define WD_NUMDEVS 3 #define WD_INTR_OFF 0 #define WD_INTR_ON 1 #define WD_STAT_INIT 0x01 /* Watchdog timer is initialized */ #define WD_STAT_BSTOP 0x02 /* Watchdog timer is brokenstopped */ #define WD_STAT_SVCD 0x04 /* Watchdog interrupt occurred */ /* Register value definitions */ #define WD0_INTR_MASK 0x01 /* Watchdog device interrupt masks */ #define WD1_INTR_MASK 0x02 #define WD2_INTR_MASK 0x04 #define WD_S_RUNNING 0x01 /* Watchdog device status running */ #define WD_S_EXPIRED 0x02 /* Watchdog device status expired */ struct cpwd { void __iomem *regs; spinlock_t lock; unsigned int irq; unsigned long timeout; bool enabled; bool reboot; bool broken; bool initialized; struct { struct miscdevice misc; void __iomem *regs; u8 intr_mask; u8 runstatus; u16 timeout; } devs[WD_NUMDEVS]; }; static DEFINE_MUTEX(cpwd_mutex); static struct cpwd *cpwd_device; /* Sun uses Altera PLD EPF8820ATC144-4 * providing three hardware watchdogs: * * 1) RIC - sends an interrupt when triggered * 2) XIR - asserts XIR_B_RESET when triggered, resets CPU * 3) POR - asserts POR_B_RESET when triggered, resets CPU, backplane, board * *** Timer register block definition (struct wd_timer_regblk) * * dcntr and limit registers (halfword access): * ------------------- * | 15 | ...| 1 | 0 | * ------------------- * |- counter val -| * ------------------- * dcntr - Current 16-bit downcounter value. * When downcounter reaches '0' watchdog expires. * Reading this register resets downcounter with * 'limit' value. * limit - 16-bit countdown value in 1/10th second increments. * Writing this register begins countdown with input value. * Reading from this register does not affect counter. * NOTES: After watchdog reset, dcntr and limit contain '1' * * status register (byte access): * --------------------------- * | 7 | ... 
| 2 | 1 | 0 | * --------------+------------ * |- UNUSED -| EXP | RUN | * --------------------------- * status- Bit 0 - Watchdog is running * Bit 1 - Watchdog has expired * *** PLD register block definition (struct wd_pld_regblk) * * intr_mask register (byte access): * --------------------------------- * | 7 | ... | 3 | 2 | 1 | 0 | * +-------------+------------------ * |- UNUSED -| WD3 | WD2 | WD1 | * --------------------------------- * WD3 - 1 == Interrupt disabled for watchdog 3 * WD2 - 1 == Interrupt disabled for watchdog 2 * WD1 - 1 == Interrupt disabled for watchdog 1 * * pld_status register (byte access): * UNKNOWN, MAGICAL MYSTERY REGISTER * */ #define WD_TIMER_REGSZ 16 #define WD0_OFF 0 #define WD1_OFF (WD_TIMER_REGSZ * 1) #define WD2_OFF (WD_TIMER_REGSZ * 2) #define PLD_OFF (WD_TIMER_REGSZ * 3) #define WD_DCNTR 0x00 #define WD_LIMIT 0x04 #define WD_STATUS 0x08 #define PLD_IMASK (PLD_OFF + 0x00) #define PLD_STATUS (PLD_OFF + 0x04) static struct timer_list cpwd_timer; static int wd0_timeout; static int wd1_timeout; static int wd2_timeout; module_param(wd0_timeout, int, 0); MODULE_PARM_DESC(wd0_timeout, "Default watchdog0 timeout in 1/10secs"); module_param(wd1_timeout, int, 0); MODULE_PARM_DESC(wd1_timeout, "Default watchdog1 timeout in 1/10secs"); module_param(wd2_timeout, int, 0); MODULE_PARM_DESC(wd2_timeout, "Default watchdog2 timeout in 1/10secs"); MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); MODULE_DESCRIPTION("Hardware watchdog driver for Sun Microsystems CP1400/1500"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("watchdog"); static void cpwd_writew(u16 val, void __iomem *addr) { writew(cpu_to_le16(val), addr); } static u16 cpwd_readw(void __iomem *addr) { u16 val = readw(addr); return le16_to_cpu(val); } static void cpwd_writeb(u8 val, void __iomem *addr) { writeb(val, addr); } static u8 cpwd_readb(void __iomem *addr) { return readb(addr); } /* Enable or disable watchdog interrupts * Because of the CP1400 defect this should only be * called 
during initialzation or by wd_[start|stop]timer() * * index - sub-device index, or -1 for 'all' * enable - non-zero to enable interrupts, zero to disable */ static void cpwd_toggleintr(struct cpwd *p, int index, int enable) { unsigned char curregs = cpwd_readb(p->regs + PLD_IMASK); unsigned char setregs = (index == -1) ? (WD0_INTR_MASK | WD1_INTR_MASK | WD2_INTR_MASK) : (p->devs[index].intr_mask); if (enable == WD_INTR_ON) curregs &= ~setregs; else curregs |= setregs; cpwd_writeb(curregs, p->regs + PLD_IMASK); } /* Restarts timer with maximum limit value and * does not unset 'brokenstop' value. */ static void cpwd_resetbrokentimer(struct cpwd *p, int index) { cpwd_toggleintr(p, index, WD_INTR_ON); cpwd_writew(WD_BLIMIT, p->devs[index].regs + WD_LIMIT); } /* Timer method called to reset stopped watchdogs-- * because of the PLD bug on CP1400, we cannot mask * interrupts within the PLD so me must continually * reset the timers ad infinitum. */ static void cpwd_brokentimer(unsigned long data) { struct cpwd *p = (struct cpwd *) data; int id, tripped = 0; /* kill a running timer instance, in case we * were called directly instead of by kernel timer */ if (timer_pending(&cpwd_timer)) del_timer(&cpwd_timer); for (id = 0; id < WD_NUMDEVS; id++) { if (p->devs[id].runstatus & WD_STAT_BSTOP) { ++tripped; cpwd_resetbrokentimer(p, id); } } if (tripped) { /* there is at least one timer brokenstopped-- reschedule */ cpwd_timer.expires = WD_BTIMEOUT; add_timer(&cpwd_timer); } } /* Reset countdown timer with 'limit' value and continue countdown. * This will not start a stopped timer. */ static void cpwd_pingtimer(struct cpwd *p, int index) { if (cpwd_readb(p->devs[index].regs + WD_STATUS) & WD_S_RUNNING) cpwd_readw(p->devs[index].regs + WD_DCNTR); } /* Stop a running watchdog timer-- the timer actually keeps * running, but the interrupt is masked so that no action is * taken upon expiration. 
*/ static void cpwd_stoptimer(struct cpwd *p, int index) { if (cpwd_readb(p->devs[index].regs + WD_STATUS) & WD_S_RUNNING) { cpwd_toggleintr(p, index, WD_INTR_OFF); if (p->broken) { p->devs[index].runstatus |= WD_STAT_BSTOP; cpwd_brokentimer((unsigned long) p); } } } /* Start a watchdog timer with the specified limit value * If the watchdog is running, it will be restarted with * the provided limit value. * * This function will enable interrupts on the specified * watchdog. */ static void cpwd_starttimer(struct cpwd *p, int index) { if (p->broken) p->devs[index].runstatus &= ~WD_STAT_BSTOP; p->devs[index].runstatus &= ~WD_STAT_SVCD; cpwd_writew(p->devs[index].timeout, p->devs[index].regs + WD_LIMIT); cpwd_toggleintr(p, index, WD_INTR_ON); } static int cpwd_getstatus(struct cpwd *p, int index) { unsigned char stat = cpwd_readb(p->devs[index].regs + WD_STATUS); unsigned char intr = cpwd_readb(p->devs[index].regs + PLD_IMASK); unsigned char ret = WD_STOPPED; /* determine STOPPED */ if (!stat) return ret; /* determine EXPIRED vs FREERUN vs RUNNING */ else if (WD_S_EXPIRED & stat) { ret = WD_EXPIRED; } else if (WD_S_RUNNING & stat) { if (intr & p->devs[index].intr_mask) { ret = WD_FREERUN; } else { /* Fudge WD_EXPIRED status for defective CP1400-- * IF timer is running * AND brokenstop is set * AND an interrupt has been serviced * we are WD_EXPIRED. * * IF timer is running * AND brokenstop is set * AND no interrupt has been serviced * we are WD_FREERUN. */ if (p->broken && (p->devs[index].runstatus & WD_STAT_BSTOP)) { if (p->devs[index].runstatus & WD_STAT_SVCD) { ret = WD_EXPIRED; } else { /* we could as well pretend * we are expired */ ret = WD_FREERUN; } } else { ret = WD_RUNNING; } } } /* determine SERVICED */ if (p->devs[index].runstatus & WD_STAT_SVCD) ret |= WD_SERVICED; return ret; } static irqreturn_t cpwd_interrupt(int irq, void *dev_id) { struct cpwd *p = dev_id; /* Only WD0 will interrupt-- others are NMI and we won't * see them here.... 
*/ spin_lock_irq(&p->lock); cpwd_stoptimer(p, WD0_ID); p->devs[WD0_ID].runstatus |= WD_STAT_SVCD; spin_unlock_irq(&p->lock); return IRQ_HANDLED; } static int cpwd_open(struct inode *inode, struct file *f) { struct cpwd *p = cpwd_device; mutex_lock(&cpwd_mutex); switch (iminor(inode)) { case WD0_MINOR: case WD1_MINOR: case WD2_MINOR: break; default: mutex_unlock(&cpwd_mutex); return -ENODEV; } /* Register IRQ on first open of device */ if (!p->initialized) { if (request_irq(p->irq, &cpwd_interrupt, IRQF_SHARED, DRIVER_NAME, p)) { pr_err("Cannot register IRQ %d\n", p->irq); mutex_unlock(&cpwd_mutex); return -EBUSY; } p->initialized = true; } mutex_unlock(&cpwd_mutex); return nonseekable_open(inode, f); } static int cpwd_release(struct inode *inode, struct file *file) { return 0; } static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { static const struct watchdog_info info = { .options = WDIOF_SETTIMEOUT, .firmware_version = 1, .identity = DRIVER_NAME, }; void __user *argp = (void __user *)arg; struct inode *inode = file_inode(file); int index = iminor(inode) - WD0_MINOR; struct cpwd *p = cpwd_device; int setopt = 0; switch (cmd) { /* Generic Linux IOCTLs */ case WDIOC_GETSUPPORT: if (copy_to_user(argp, &info, sizeof(struct watchdog_info))) return -EFAULT; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: if (put_user(0, (int __user *)argp)) return -EFAULT; break; case WDIOC_KEEPALIVE: cpwd_pingtimer(p, index); break; case WDIOC_SETOPTIONS: if (copy_from_user(&setopt, argp, sizeof(unsigned int))) return -EFAULT; if (setopt & WDIOS_DISABLECARD) { if (p->enabled) return -EINVAL; cpwd_stoptimer(p, index); } else if (setopt & WDIOS_ENABLECARD) { cpwd_starttimer(p, index); } else { return -EINVAL; } break; /* Solaris-compatible IOCTLs */ case WIOCGSTAT: setopt = cpwd_getstatus(p, index); if (copy_to_user(argp, &setopt, sizeof(unsigned int))) return -EFAULT; break; case WIOCSTART: cpwd_starttimer(p, index); break; case WIOCSTOP: if 
(p->enabled) return -EINVAL; cpwd_stoptimer(p, index); break; default: return -EINVAL; } return 0; } static long cpwd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int rval = -ENOIOCTLCMD; switch (cmd) { /* solaris ioctls are specific to this driver */ case WIOCSTART: case WIOCSTOP: case WIOCGSTAT: mutex_lock(&cpwd_mutex); rval = cpwd_ioctl(file, cmd, arg); mutex_unlock(&cpwd_mutex); break; /* everything else is handled by the generic compat layer */ default: break; } return rval; } static ssize_t cpwd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(file); struct cpwd *p = cpwd_device; int index = iminor(inode); if (count) { cpwd_pingtimer(p, index); return 1; } return 0; } static ssize_t cpwd_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { return -EINVAL; } static const struct file_operations cpwd_fops = { .owner = THIS_MODULE, .unlocked_ioctl = cpwd_ioctl, .compat_ioctl = cpwd_compat_ioctl, .open = cpwd_open, .write = cpwd_write, .read = cpwd_read, .release = cpwd_release, .llseek = no_llseek, }; static int cpwd_probe(struct platform_device *op) { struct device_node *options; const char *str_prop; const void *prop_val; int i, err = -EINVAL; struct cpwd *p; if (cpwd_device) return -EINVAL; p = kzalloc(sizeof(*p), GFP_KERNEL); err = -ENOMEM; if (!p) { pr_err("Unable to allocate struct cpwd\n"); goto out; } p->irq = op->archdata.irqs[0]; spin_lock_init(&p->lock); p->regs = of_ioremap(&op->resource[0], 0, 4 * WD_TIMER_REGSZ, DRIVER_NAME); if (!p->regs) { pr_err("Unable to map registers\n"); goto out_free; } options = of_find_node_by_path("/options"); err = -ENODEV; if (!options) { pr_err("Unable to find /options node\n"); goto out_iounmap; } prop_val = of_get_property(options, "watchdog-enable?", NULL); p->enabled = (prop_val ? true : false); prop_val = of_get_property(options, "watchdog-reboot?", NULL); p->reboot = (prop_val ? 
true : false); str_prop = of_get_property(options, "watchdog-timeout", NULL); if (str_prop) p->timeout = simple_strtoul(str_prop, NULL, 10); /* CP1400s seem to have broken PLD implementations-- the * interrupt_mask register cannot be written, so no timer * interrupts can be masked within the PLD. */ str_prop = of_get_property(op->dev.of_node, "model", NULL); p->broken = (str_prop && !strcmp(str_prop, WD_BADMODEL)); if (!p->enabled) cpwd_toggleintr(p, -1, WD_INTR_OFF); for (i = 0; i < WD_NUMDEVS; i++) { static const char *cpwd_names[] = { "RIC", "XIR", "POR" }; static int *parms[] = { &wd0_timeout, &wd1_timeout, &wd2_timeout }; struct miscdevice *mp = &p->devs[i].misc; mp->minor = WD0_MINOR + i; mp->name = cpwd_names[i]; mp->fops = &cpwd_fops; p->devs[i].regs = p->regs + (i * WD_TIMER_REGSZ); p->devs[i].intr_mask = (WD0_INTR_MASK << i); p->devs[i].runstatus &= ~WD_STAT_BSTOP; p->devs[i].runstatus |= WD_STAT_INIT; p->devs[i].timeout = p->timeout; if (*parms[i]) p->devs[i].timeout = *parms[i]; err = misc_register(&p->devs[i].misc); if (err) { pr_err("Could not register misc device for dev %d\n", i); goto out_unregister; } } if (p->broken) { setup_timer(&cpwd_timer, cpwd_brokentimer, (unsigned long)p); cpwd_timer.expires = WD_BTIMEOUT; pr_info("PLD defect workaround enabled for model %s\n", WD_BADMODEL); } platform_set_drvdata(op, p); cpwd_device = p; err = 0; out: return err; out_unregister: for (i--; i >= 0; i--) misc_deregister(&p->devs[i].misc); out_iounmap: of_iounmap(&op->resource[0], p->regs, 4 * WD_TIMER_REGSZ); out_free: kfree(p); goto out; } static int cpwd_remove(struct platform_device *op) { struct cpwd *p = platform_get_drvdata(op); int i; for (i = 0; i < WD_NUMDEVS; i++) { misc_deregister(&p->devs[i].misc); if (!p->enabled) { cpwd_stoptimer(p, i); if (p->devs[i].runstatus & WD_STAT_BSTOP) cpwd_resetbrokentimer(p, i); } } if (p->broken) del_timer_sync(&cpwd_timer); if (p->initialized) free_irq(p->irq, p); of_iounmap(&op->resource[0], p->regs, 4 * 
WD_TIMER_REGSZ); kfree(p); cpwd_device = NULL; return 0; } static const struct of_device_id cpwd_match[] = { { .name = "watchdog", }, {}, }; MODULE_DEVICE_TABLE(of, cpwd_match); static struct platform_driver cpwd_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = cpwd_match, }, .probe = cpwd_probe, .remove = cpwd_remove, }; module_platform_driver(cpwd_driver);
gpl-2.0
agat63/AGAT_R970_USC
drivers/video/msm/mdp4_video_enhance.c
178
19254
/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/wait.h> #include <linux/fs.h> #include <linux/irq.h> #include <linux/mm.h> #include <linux/fb.h> #include <linux/ctype.h> #include <linux/miscdevice.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/fb.h> #include <linux/msm_mdp.h> #include <linux/ioctl.h> #include "mdp4_video_enhance.h" #include "mdp4_video_tuning.h" #include "msm_fb.h" #include "mdp.h" #include "mdp4.h" #define MDP4_VIDEO_ENHANCE_TUNING #define VIDEO_ENHANCE_DEBUG #ifdef VIDEO_ENHANCE_DEBUG #define DPRINT(x...) printk(KERN_ERR "mdnie " x) #else #define DPRINT(x...) 
#endif #define MAX_LUT_SIZE 256 unsigned int mDNIe_data[MAX_LUT_SIZE * 3]; int play_speed_1_5; int mDNIe_data_sharpness; enum Lcd_mDNIe_UI { mDNIe_UI_MODE, mDNIe_VIDEO_MODE, mDNIe_CAMERA_MODE, mDNIe_NAVI, mDNIe_GALLERY, mDNIe_BYPASS, mDNIe_DMB_MODE, #ifdef BROWSER_COLOR_TONE_SET mDNIe_BROWSER_TONE1 = 40, mDNIe_BROWSER_TONE2, mDNIe_BROWSER_TONE3, #endif }; enum Lcd_mDNIe_User_Set { mDNIe_DYNAMIC, mDNIe_STANDARD, mDNIe_MOVIE, }; enum Lcd_mDNIe_Negative { mDNIe_NEGATIVE_OFF = 0, mDNIe_NEGATIVE_ON, }; static struct class *mdnie_class; struct device *tune_mdnie_dev; enum Lcd_mDNIe_UI current_mDNIe_Mode = mDNIe_UI_MODE; enum Lcd_mDNIe_Negative current_Negative_Mode = mDNIe_NEGATIVE_OFF; static bool g_mdine_enable ; #ifdef MDP4_VIDEO_ENHANCE_TUNING #define MAX_FILE_NAME 128 #define TUNING_FILE_PATH "/sdcard/tuning/" static int tuning_enable ; static char tuning_filename[MAX_FILE_NAME]; static int load_tuning_data(char *filename); static int parse_text(char *src, int len); #endif #ifdef MDP4_VIDEO_ENHANCE_TUNING static int parse_text(char *src, int len) { int i, count, ret; int index = 0; int j = 0; char *str_line[300]; char *sstart; char *c; unsigned int data1, data2, data3; int sharpvalue; c = src; count = 0; sstart = c; sharpvalue = 0; for (i = 0; i < len; i++, c++) { char a = *c; if (a == '\r' || a == '\n') { if (c > sstart) { str_line[count] = sstart; count++; } *c = '\0'; sstart = c + 1; } } if (c > sstart) { str_line[count] = sstart; count++; } ret = sscanf(str_line[0], "%d\n", &sharpvalue); DPRINT("sharp ret %d, sharpvalue %d\n", ret, sharpvalue); mDNIe_data_sharpness = sharpvalue; for (i = 1; i < count; i++) { DPRINT("line:%d, [start]%s[end]\n", i, str_line[i]); ret = sscanf(str_line[i], "0x%x, 0x%x, 0x%x\n", &data1, &data2, &data3); DPRINT("Result => [0x%2x 0x%2x 0x%2x] %s\n", data1, data2, data3, (ret == 3) ? 
"Ok" : "Not available"); DPRINT("ret => %d\n", ret); if (ret == 3) { mDNIe_data[j++] = data1; mDNIe_data[j++] = data2; mDNIe_data[j++] = data3; index++; } } return index; } static int load_tuning_data(char *filename) { struct file *filp; char *dp; long l; loff_t pos; int ret, num; mm_segment_t fs; DPRINT("[CMC623:INFO]:%s called loading file name : [%s]\n", __func__, filename); fs = get_fs(); set_fs(get_ds()); filp = filp_open(filename, O_RDONLY, 0); if (IS_ERR(filp)) { printk(KERN_ERR "[CMC623:ERROR]:File open failed\n"); return -1; } l = filp->f_path.dentry->d_inode->i_size; DPRINT("[CMC623:INFO]: Loading File Size : %ld(bytes)", l); dp = kmalloc(l + 10, GFP_KERNEL); if (dp == NULL) { DPRINT ("[CMC623:ERROR]:Can't not alloc memory"\ "for tuning file load\n"); filp_close(filp, current->files); return -1; } pos = 0; memset(dp, 0, l); DPRINT("[CMC623:INFO] : before vfs_read()\n"); ret = vfs_read(filp, (char __user *)dp, l, &pos); DPRINT("[CMC623:INFO] : after vfs_read()\n"); if (ret != l) { DPRINT("[CMC623:ERROR] : vfs_read() filed ret : %d\n", ret); kfree(dp); filp_close(filp, current->files); return -1; } filp_close(filp, current->files); set_fs(fs); num = parse_text(dp, l); if (!num) { DPRINT("[CMC623:ERROR]:Nothing to parse\n"); kfree(dp); return -1; } DPRINT("[CMC623:INFO] : Loading Tuning Value's Count : %d", num); lut_tune(num, mDNIe_data); sharpness_tune(mDNIe_data_sharpness); kfree(dp); return num; } static ssize_t tuning_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret = 0; ret = sprintf(buf, "Tunned File Name : %s\n", tuning_filename); return ret; } static ssize_t tuning_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { char *pt; memset(tuning_filename, 0, sizeof(tuning_filename)); sprintf(tuning_filename, "%s%s", TUNING_FILE_PATH, buf); pt = tuning_filename; while (*pt) { if (*pt == '\r' || *pt == '\n') { *pt = 0; break; } pt++; } DPRINT("[CMC623:INFO]:%s:%s\n", __func__, 
tuning_filename); if (load_tuning_data(tuning_filename) <= 0) { DPRINT("[CMC623:ERROR]:load_tunig_data() failed\n"); return size; } tuning_enable = 1; return size; } static DEVICE_ATTR(tuning, 0664, tuning_show, tuning_store); #endif void free_cmap(struct fb_cmap *cmap) { kfree(cmap->red); kfree(cmap->green); kfree(cmap->blue); } void lut_tune(int num, unsigned int *pLutTable) { __u16 *r, *g, *b, i; int j; struct fb_info *info; struct fb_cmap test_cmap; struct fb_cmap *cmap; struct msm_fb_data_type *mfd; uint32_t out; /*for final assignment*/ u16 r_1, g_1, b_1; info = registered_fb[0]; cmap = &test_cmap; /*===================================== * cmap allocation =====================================*/ cmap->red = 0; cmap->green = 0; cmap->blue = 0; cmap->transp = 0; cmap->start = 0; cmap->len = num; /*MAX_LUT_SIZE;//LUT has 256 entries*/ cmap->red = kmalloc(cmap->len * sizeof(__u16), GFP_KERNEL); if (!cmap->red) { printk(KERN_ERR "can't malloc cmap!"); goto fail_rest; } cmap->green = kmalloc(cmap->len * sizeof(__u16), GFP_KERNEL); if (!cmap->green) { printk(KERN_ERR "can't malloc cmap!"); goto fail_rest; } cmap->blue = kmalloc(cmap->len * sizeof(__u16), GFP_KERNEL); if (!cmap->blue) { printk(KERN_ERR "can't malloc cmap!"); goto fail_rest; } r = cmap->red; g = cmap->green; b = cmap->blue; j = 0; DPRINT("cmap->len %d\n", cmap->len); /* Assigning the cmap */ for (i = 0; i < cmap->len; i++) { *r++ = pLutTable[j++]; *g++ = pLutTable[j++]; *b++ = pLutTable[j++]; } /*instead of an ioctl */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); j = 0; for (i = 0; i < cmap->len; i++) { r_1 = pLutTable[j++]; g_1 = pLutTable[j++]; b_1 = pLutTable[j++]; #ifdef CONFIG_FB_MSM_MDP40 MDP_OUTP(MDP_BASE + 0x94800 + #else MDP_OUTP(MDP_BASE + 0x93800 + #endif (0x400 * mdp_lut_i) + cmap->start * 4 + i * 4, ((g_1 & 0xff) | ((b_1 & 0xff) << 8) | ((r_1 & 0xff) << 16))); } mfd = (struct msm_fb_data_type *) registered_fb[0]->par; if (mfd->panel.type == MIPI_CMD_PANEL) { 
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); mutex_lock(&mdp_lut_push_sem); mdp_lut_push = 1; mdp_lut_push_i = mdp_lut_i; mutex_unlock(&mdp_lut_push_sem); } else { /*mask off non LUT select bits*/ out = inpdw(MDP_BASE + 0x90070) & ~((0x1 << 10) | 0x7); MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x7 | out); mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); } mdp_lut_i = (mdp_lut_i + 1)%2; fail_rest: free_cmap(cmap); /*close(fb);*/ } void sharpness_tune(int num) { char *vg_base; pr_info("%s num : %d", __func__, num); mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); vg_base = MDP_BASE + MDP4_VIDEO_BASE; outpdw(vg_base + 0x8200, mdp4_ss_table_value((int8_t) num, 0)); outpdw(vg_base + 0x8204, mdp4_ss_table_value((int8_t) num, 1)); mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); } int s3c_mdnie_start() { g_mdine_enable = 1; return 0; } int s3c_mdnie_off() { g_mdine_enable = 0; return 0; } void mDNIe_Set_Mode(enum Lcd_mDNIe_UI mode) { unsigned int *pLut; int sharpvalue = 0; static int isSetDMBMode; DPRINT("[mdnie set] mDNIe_Set_Mode\n"); if (!g_mdine_enable) { printk(KERN_ERR "[mDNIE WARNING] mDNIE engine is OFF. 
So you cannot set mDnie Mode correctly.\n"); return; } play_speed_1_5 = 0; switch (mode) { case mDNIe_UI_MODE: if (isSetDMBMode == 1) { mdp4_vg_qseed_init_VideoPlay(0); /*mdp4_vg_qseed_init_VideoPlay(1);*/ isSetDMBMode = 0; } pLut = UI_LUT; sharpvalue = SHARPNESS_BYPASS; break; case mDNIe_VIDEO_MODE: /*case mDNIe_VIDEO_WARM_MODE:*/ /*case mDNIe_VIDEO_COLD_MODE:*/ if (isSetDMBMode == 1) { mdp4_vg_qseed_init_VideoPlay(0); /*mdp4_vg_qseed_init_VideoPlay(1);*/ isSetDMBMode = 0; } pLut = VIDEO_LUT; sharpvalue = SHARPNESS_VIDEO; break; case mDNIe_CAMERA_MODE: pLut = BYPASS_LUT; sharpvalue = SHARPNESS_BYPASS; break; case mDNIe_NAVI: pLut = BYPASS_LUT; sharpvalue = SHARPNESS_BYPASS; break; case mDNIe_GALLERY: pLut = GALLERY_LUT; sharpvalue = SHARPNESS_BYPASS; break; case mDNIe_BYPASS: pLut = BYPASS_LUT; sharpvalue = SHARPNESS_BYPASS; break; case mDNIe_DMB_MODE: /*warm, clod not distinguish*/ if (isSetDMBMode == 0) { mdp4_vg_qseed_init_DMB(0); /*mdp4_vg_qseed_init_DMB(1);*/ isSetDMBMode = 1; } pLut = DMB_LUT; sharpvalue = SHARPNESS_DMB; break; #ifdef BROWSER_COLOR_TONE_SET case mDNIe_BROWSER_TONE1: pLut = BROWSER_TONE1_LUT; sharpvalue = SHARPNESS_BYPASS; break; case mDNIe_BROWSER_TONE2: pLut = BROWSER_TONE2_LUT; sharpvalue = SHARPNESS_BYPASS; break; case mDNIe_BROWSER_TONE3: pLut = BROWSER_TONE3_LUT; sharpvalue = SHARPNESS_BYPASS; break; #endif default: pLut = BYPASS_LUT; sharpvalue = SHARPNESS_BYPASS; break; } lut_tune(MAX_LUT_SIZE, pLut); sharpness_tune(sharpvalue); current_mDNIe_Mode = mode; #ifdef CONFIG_FB_S3C_MDNIE_TUNINGMODE_FOR_BACKLIGHT pre_val = -1; #endif /* CONFIG_FB_S3C_MDNIE_TUNINGMODE_FOR_BACKLIGHT */ DPRINT("[mDNIe] mDNIe_Set_Mode : Current_mDNIe_mode (%d)\n", current_mDNIe_Mode); DPRINT("[mDNIe] Sharpness value : (%d)\n", sharpvalue); } void mDNIe_set_negative(enum Lcd_mDNIe_Negative negative) { unsigned int *pLut; int sharpvalue = 0; if (negative == 0) { DPRINT("[mdnie set] mDNIe_Set_mDNIe_Mode = %d\n", current_mDNIe_Mode); 
mDNIe_Set_Mode(current_mDNIe_Mode); return; } else { DPRINT("[mdnie set] mDNIe_Set_Negative = %d\n", negative); pLut = NEGATIVE_LUT; sharpvalue = SHARPNESS_NEGATIVE; lut_tune(MAX_LUT_SIZE, pLut); sharpness_tune(sharpvalue); } DPRINT("[mdnie set] mDNIe_Set_Negative END\n"); } int is_negativeMode_on(void) { pr_info("is negative Mode On = %d\n", current_Negative_Mode); if (current_Negative_Mode) mDNIe_set_negative(current_Negative_Mode); else return 0; return 1; } void is_play_speed_1_5(int enable) { play_speed_1_5 = enable; } static ssize_t scenario_show(struct device *dev, struct device_attribute *attr, char *buf) { int mdnie_ui = 0; DPRINT("called %s\n", __func__); switch (current_mDNIe_Mode) { case mDNIe_UI_MODE: default: mdnie_ui = 0; break; case mDNIe_VIDEO_MODE: mdnie_ui = 1; break; case mDNIe_CAMERA_MODE: mdnie_ui = 2; break; case mDNIe_NAVI: mdnie_ui = 3; break; case mDNIe_GALLERY: mdnie_ui = 4; break; case mDNIe_BYPASS: mdnie_ui = 5; break; #if defined(CONFIG_TDMB) || defined(CONFIG_TDMB_MODULE) case mDNIe_DMB_MODE: mdnie_ui = mDNIe_DMB_MODE; break; #endif #ifdef BROWSER_COLOR_TONE_SET case mDNIe_BROWSER_TONE1: mdnie_ui = mDNIe_BROWSER_TONE1; break; case mDNIe_BROWSER_TONE2: mdnie_ui = mDNIe_BROWSER_TONE2; break; case mDNIe_BROWSER_TONE3: mdnie_ui = mDNIe_BROWSER_TONE3; break; #endif } return sprintf(buf, "%u\n", mdnie_ui); } static ssize_t scenario_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int value; sscanf(buf, "%d", &value); switch (value) { case SIG_MDNIE_UI_MODE: current_mDNIe_Mode = mDNIe_UI_MODE; break; case SIG_MDNIE_VIDEO_MODE: current_mDNIe_Mode = mDNIe_VIDEO_MODE; break; case SIG_MDNIE_CAMERA_MODE: current_mDNIe_Mode = mDNIe_CAMERA_MODE; break; case SIG_MDNIE_NAVI: current_mDNIe_Mode = mDNIe_NAVI; break; case SIG_MDNIE_GALLERY: current_mDNIe_Mode = mDNIe_GALLERY; break; case SIG_MDNIE_BYPASS: current_mDNIe_Mode = mDNIe_BYPASS; break; case SIG_MDNIE_DMB_MODE: current_mDNIe_Mode = mDNIe_DMB_MODE; break; 
#ifdef BROWSER_COLOR_TONE_SET case SIG_MDNIE_BROWSER_TONE1: current_mDNIe_Mode = mDNIe_BROWSER_TONE1; break; case SIG_MDNIE_BROWSER_TONE2: current_mDNIe_Mode = mDNIe_BROWSER_TONE2; break; case SIG_MDNIE_BROWSER_TONE3: current_mDNIe_Mode = mDNIe_BROWSER_TONE3; break; #endif default: printk(KERN_ERR "scenario_store value is wrong : value(%d)\n", value); break; } if (current_Negative_Mode) { DPRINT("[mdnie set] already negative mode = %d\n", current_Negative_Mode); } else { DPRINT("[mdnie set] in scenario_store, input value = %d\n", value); mDNIe_Set_Mode(current_mDNIe_Mode); } return size; } static DEVICE_ATTR(scenario, 0664, scenario_show, scenario_store); static ssize_t mdnieset_user_select_file_cmd_show(struct device *dev, struct device_attribute *attr, char *buf) { int mdnie_ui = 0; DPRINT("called %s\n", __func__); return sprintf(buf, "%u\n", mdnie_ui); } static ssize_t mdnieset_user_select_file_cmd_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int value; sscanf(buf, "%d", &value); DPRINT ("[mdnie set]inmdnieset_user_select_file_cmd_store, input value = %d\n", value); return size; } static DEVICE_ATTR(mdnieset_user_select_file_cmd, 0664, mdnieset_user_select_file_cmd_show, mdnieset_user_select_file_cmd_store); static ssize_t mdnieset_init_file_cmd_show(struct device *dev, struct device_attribute *attr, char *buf) { char temp[] = "mdnieset_init_file_cmd_show\n\0"; DPRINT("called %s\n", __func__); strcat(buf, temp); return strlen(buf); } static ssize_t mdnieset_init_file_cmd_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int value; sscanf(buf, "%d", &value); DPRINT("mdnieset_init_file_cmd_store : value(%d)\n", value); switch (value) { case 0: current_mDNIe_Mode = mDNIe_UI_MODE; break; default: printk(KERN_ERR "mdnieset_init_file_cmd_store value is wrong : value(%d)\n", value); break; } mDNIe_Set_Mode(current_mDNIe_Mode); return size; } static DEVICE_ATTR(mdnieset_init_file_cmd, 
0664, mdnieset_init_file_cmd_show, mdnieset_init_file_cmd_store); static ssize_t outdoor_show(struct device *dev, struct device_attribute *attr, char *buf) { DPRINT("called %s\n", __func__); return sprintf(buf, "0\n"); } static ssize_t outdoor_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int value; sscanf(buf, "%d", &value); DPRINT ("[mdnie set]inmdnieset_outdoor_file_cmd_store, input value = %d\n", value); return size; } static DEVICE_ATTR(outdoor, 0664, outdoor_show, outdoor_store); static ssize_t negative_show(struct device *dev, struct device_attribute *attr, char *buf) { DPRINT("called %s\n", __func__); return sprintf(buf, "0\n"); } static ssize_t negative_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int value; sscanf(buf, "%d", &value); DPRINT ("[mdnie set]negative_store, input value = %d\n", value); pr_info("[negative] value = %d\n", value); if (value == 0) current_Negative_Mode = mDNIe_NEGATIVE_OFF; else current_Negative_Mode = mDNIe_NEGATIVE_ON; mDNIe_set_negative(current_Negative_Mode); return size; } static DEVICE_ATTR(negative, 0664, negative_show, negative_store); static ssize_t playspeed_show(struct device *dev, struct device_attribute *attr, char *buf) { DPRINT("called %s\n", __func__); return sprintf(buf, "%d\n", play_speed_1_5); } static ssize_t playspeed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int value; sscanf(buf, "%d", &value); DPRINT("[Play Speed Set]play speed value = %d\n", value); is_play_speed_1_5(value); return size; } static DEVICE_ATTR(playspeed, 0664, playspeed_show, playspeed_store); void init_mdnie_class(void) { mdnie_class = class_create(THIS_MODULE, "mdnie"); if (IS_ERR(mdnie_class)) pr_err("Failed to create class(mdnie)!\n"); tune_mdnie_dev = device_create(mdnie_class, NULL, 0, NULL, "mdnie"); if (IS_ERR(tune_mdnie_dev)) pr_err("Failed to create device(mdnie)!\n"); if (device_create_file 
(tune_mdnie_dev, &dev_attr_scenario) < 0) pr_err("Failed to create device file(%s)!\n", dev_attr_scenario.attr.name); if (device_create_file (tune_mdnie_dev, &dev_attr_mdnieset_user_select_file_cmd) < 0) pr_err("Failed to create device file(%s)!\n", dev_attr_mdnieset_user_select_file_cmd.attr.name); if (device_create_file (tune_mdnie_dev, &dev_attr_mdnieset_init_file_cmd) < 0) pr_err("Failed to create device file(%s)!\n", dev_attr_mdnieset_init_file_cmd.attr.name); /* mdnieset_outdoor_class = class_create(THIS_MODULE, "mdnieset_outdoor"); if (IS_ERR(mdnieset_outdoor_class)) pr_err("Failed to create class( mdnieset_outdoor_class)!\n"); switch_mdnieset_outdoor_dev = device_create(mdnieset_outdoor_class, NULL, 0, NULL, "outdoor"); if (IS_ERR(switch_mdnieset_outdoor_dev)) pr_err ("Failed to create device( switch_mdnieset_outdoor_dev)!\n"); */ if (device_create_file (tune_mdnie_dev, &dev_attr_outdoor) < 0) pr_err("Failed to create device file(%s)!\n", dev_attr_outdoor.attr.name); if (device_create_file (tune_mdnie_dev, &dev_attr_negative) < 0) pr_err("Failed to create device file(%s)!\n", dev_attr_negative.attr.name); if (device_create_file (tune_mdnie_dev, &dev_attr_playspeed) < 0) pr_err("Failed to create device file(%s)!=n", dev_attr_playspeed.attr.name); #ifdef MDP4_VIDEO_ENHANCE_TUNING if (device_create_file(tune_mdnie_dev, &dev_attr_tuning) < 0) { pr_err("Failed to create device file(%s)!\n", dev_attr_tuning.attr.name); } #endif s3c_mdnie_start(); sharpness_tune(0); }
gpl-2.0
agayev/linux
arch/alpha/kernel/osf_sys.c
434
35221
/* * linux/arch/alpha/kernel/osf_sys.c * * Copyright (C) 1995 Linus Torvalds */ /* * This file handles some of the stranger OSF/1 system call interfaces. * Some of the system calls expect a non-C calling standard, others have * special parameter blocks.. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/utsname.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/major.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/shm.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/types.h> #include <linux/ipc.h> #include <linux/namei.h> #include <linux/uio.h> #include <linux/vfs.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <asm/fpu.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/sysinfo.h> #include <asm/thread_info.h> #include <asm/hwrpb.h> #include <asm/processor.h> /* * Brk needs to return an error. Still support Linux's brk(0) query idiom, * which OSF programs just shouldn't be doing. We're still not quite * identical to OSF as we don't return 0 on success, but doing otherwise * would require changes to libc. Hopefully this is good enough. */ SYSCALL_DEFINE1(osf_brk, unsigned long, brk) { unsigned long retval = sys_brk(brk); if (brk && brk != retval) retval = -ENOMEM; return retval; } /* * This is pure guess-work.. */ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start, unsigned long, text_len, unsigned long, bss_start, unsigned long, bss_len) { struct mm_struct *mm; mm = current->mm; mm->end_code = bss_start + bss_len; mm->start_brk = bss_start + bss_len; mm->brk = bss_start + bss_len; #if 0 printk("set_program_attributes(%lx %lx %lx %lx)\n", text_start, text_len, bss_start, bss_len); #endif return 0; } /* * OSF/1 directory handling functions... 
* * The "getdents()" interface is much more sane: the "basep" stuff is * braindamage (it can't really handle filesystems where the directory * offset differences aren't the same as "d_reclen"). */ #define NAME_OFFSET offsetof (struct osf_dirent, d_name) struct osf_dirent { unsigned int d_ino; unsigned short d_reclen; unsigned short d_namlen; char d_name[1]; }; struct osf_dirent_callback { struct dir_context ctx; struct osf_dirent __user *dirent; long __user *basep; unsigned int count; int error; }; static int osf_filldir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct osf_dirent __user *dirent; struct osf_dirent_callback *buf = container_of(ctx, struct osf_dirent_callback, ctx); unsigned int reclen = ALIGN(NAME_OFFSET + namlen + 1, sizeof(u32)); unsigned int d_ino; buf->error = -EINVAL; /* only used if we fail */ if (reclen > buf->count) return -EINVAL; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return -EOVERFLOW; } if (buf->basep) { if (put_user(offset, buf->basep)) goto Efault; buf->basep = NULL; } dirent = buf->dirent; if (put_user(d_ino, &dirent->d_ino) || put_user(namlen, &dirent->d_namlen) || put_user(reclen, &dirent->d_reclen) || copy_to_user(dirent->d_name, name, namlen) || put_user(0, dirent->d_name + namlen)) goto Efault; dirent = (void __user *)dirent + reclen; buf->dirent = dirent; buf->count -= reclen; return 0; Efault: buf->error = -EFAULT; return -EFAULT; } SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd, struct osf_dirent __user *, dirent, unsigned int, count, long __user *, basep) { int error; struct fd arg = fdget(fd); struct osf_dirent_callback buf = { .ctx.actor = osf_filldir, .dirent = dirent, .basep = basep, .count = count }; if (!arg.file) return -EBADF; error = iterate_dir(arg.file, &buf.ctx); if (error >= 0) error = buf.error; if (count != buf.count) error = count - buf.count; fdput(arg); return error; } #undef NAME_OFFSET 
SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { unsigned long ret = -EINVAL; #if 0 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) printk("%s: unimplemented OSF mmap flags %04lx\n", current->comm, flags); #endif if ((off + PAGE_ALIGN(len)) < off) goto out; if (off & ~PAGE_MASK) goto out; ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return ret; } struct osf_stat { int st_dev; int st_pad1; unsigned st_mode; unsigned short st_nlink; short st_nlink_reserved; unsigned st_uid; unsigned st_gid; int st_rdev; int st_ldev; long st_size; int st_pad2; int st_uatime; int st_pad3; int st_umtime; int st_pad4; int st_uctime; int st_pad5; int st_pad6; unsigned st_flags; unsigned st_gen; long st_spare[4]; unsigned st_ino; int st_ino_reserved; int st_atime; int st_atime_reserved; int st_mtime; int st_mtime_reserved; int st_ctime; int st_ctime_reserved; long st_blksize; long st_blocks; }; /* * The OSF/1 statfs structure is much larger, but this should * match the beginning, at least. 
*/ struct osf_statfs { short f_type; short f_flags; int f_fsize; int f_bsize; int f_blocks; int f_bfree; int f_bavail; int f_files; int f_ffree; __kernel_fsid_t f_fsid; }; struct osf_statfs64 { short f_type; short f_flags; int f_pad1; int f_pad2; int f_pad3; int f_pad4; int f_pad5; int f_pad6; int f_pad7; __kernel_fsid_t f_fsid; u_short f_namemax; short f_reserved1; int f_spare[8]; char f_pad8[90]; char f_pad9[90]; long mount_info[10]; u_long f_flags2; long f_spare2[14]; long f_fsize; long f_bsize; long f_blocks; long f_bfree; long f_bavail; long f_files; long f_ffree; }; static int linux_to_osf_stat(struct kstat *lstat, struct osf_stat __user *osf_stat) { struct osf_stat tmp = { 0 }; tmp.st_dev = lstat->dev; tmp.st_mode = lstat->mode; tmp.st_nlink = lstat->nlink; tmp.st_uid = from_kuid_munged(current_user_ns(), lstat->uid); tmp.st_gid = from_kgid_munged(current_user_ns(), lstat->gid); tmp.st_rdev = lstat->rdev; tmp.st_ldev = lstat->rdev; tmp.st_size = lstat->size; tmp.st_uatime = lstat->atime.tv_nsec / 1000; tmp.st_umtime = lstat->mtime.tv_nsec / 1000; tmp.st_uctime = lstat->ctime.tv_nsec / 1000; tmp.st_ino = lstat->ino; tmp.st_atime = lstat->atime.tv_sec; tmp.st_mtime = lstat->mtime.tv_sec; tmp.st_ctime = lstat->ctime.tv_sec; tmp.st_blksize = lstat->blksize; tmp.st_blocks = lstat->blocks; return copy_to_user(osf_stat, &tmp, sizeof(tmp)) ? 
-EFAULT : 0; } static int linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat, unsigned long bufsiz) { struct osf_statfs tmp_stat; tmp_stat.f_type = linux_stat->f_type; tmp_stat.f_flags = 0; /* mount flags */ tmp_stat.f_fsize = linux_stat->f_frsize; tmp_stat.f_bsize = linux_stat->f_bsize; tmp_stat.f_blocks = linux_stat->f_blocks; tmp_stat.f_bfree = linux_stat->f_bfree; tmp_stat.f_bavail = linux_stat->f_bavail; tmp_stat.f_files = linux_stat->f_files; tmp_stat.f_ffree = linux_stat->f_ffree; tmp_stat.f_fsid = linux_stat->f_fsid; if (bufsiz > sizeof(tmp_stat)) bufsiz = sizeof(tmp_stat); return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0; } static int linux_to_osf_statfs64(struct kstatfs *linux_stat, struct osf_statfs64 __user *osf_stat, unsigned long bufsiz) { struct osf_statfs64 tmp_stat = { 0 }; tmp_stat.f_type = linux_stat->f_type; tmp_stat.f_fsize = linux_stat->f_frsize; tmp_stat.f_bsize = linux_stat->f_bsize; tmp_stat.f_blocks = linux_stat->f_blocks; tmp_stat.f_bfree = linux_stat->f_bfree; tmp_stat.f_bavail = linux_stat->f_bavail; tmp_stat.f_files = linux_stat->f_files; tmp_stat.f_ffree = linux_stat->f_ffree; tmp_stat.f_fsid = linux_stat->f_fsid; if (bufsiz > sizeof(tmp_stat)) bufsiz = sizeof(tmp_stat); return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? 
-EFAULT : 0; } SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = user_statfs(pathname, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } SYSCALL_DEFINE2(osf_stat, char __user *, name, struct osf_stat __user *, buf) { struct kstat stat; int error; error = vfs_stat(name, &stat); if (error) return error; return linux_to_osf_stat(&stat, buf); } SYSCALL_DEFINE2(osf_lstat, char __user *, name, struct osf_stat __user *, buf) { struct kstat stat; int error; error = vfs_lstat(name, &stat); if (error) return error; return linux_to_osf_stat(&stat, buf); } SYSCALL_DEFINE2(osf_fstat, int, fd, struct osf_stat __user *, buf) { struct kstat stat; int error; error = vfs_fstat(fd, &stat); if (error) return error; return linux_to_osf_stat(&stat, buf); } SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = fd_statfs(fd, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } SYSCALL_DEFINE3(osf_statfs64, char __user *, pathname, struct osf_statfs64 __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = user_statfs(pathname, &linux_stat); if (!error) error = linux_to_osf_statfs64(&linux_stat, buffer, bufsiz); return error; } SYSCALL_DEFINE3(osf_fstatfs64, unsigned long, fd, struct osf_statfs64 __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = fd_statfs(fd, &linux_stat); if (!error) error = linux_to_osf_statfs64(&linux_stat, buffer, bufsiz); return error; } /* * Uhh.. OSF/1 mount parameters aren't exactly obvious.. * * Although to be frank, neither are the native Linux/i386 ones.. 
*/ struct ufs_args { char __user *devname; int flags; uid_t exroot; }; struct cdfs_args { char __user *devname; int flags; uid_t exroot; /* This has lots more here, which Linux handles with the option block but I'm too lazy to do the translation into ASCII. */ }; struct procfs_args { char __user *devname; int flags; uid_t exroot; }; /* * We can't actually handle ufs yet, so we translate UFS mounts to * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS * layout is so braindead it's a major headache doing it. * * Just how long ago was it written? OTOH our UFS driver may be still * unhappy with OSF UFS. [CHECKME] */ static int osf_ufs_mount(const char __user *dirname, struct ufs_args __user *args, int flags) { int retval; struct cdfs_args tmp; struct filename *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname->name, dirname, "ext2", flags, NULL); putname(devname); out: return retval; } static int osf_cdfs_mount(const char __user *dirname, struct cdfs_args __user *args, int flags) { int retval; struct cdfs_args tmp; struct filename *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname->name, dirname, "iso9660", flags, NULL); putname(devname); out: return retval; } static int osf_procfs_mount(const char __user *dirname, struct procfs_args __user *args, int flags) { struct procfs_args tmp; if (copy_from_user(&tmp, args, sizeof(tmp))) return -EFAULT; return do_mount("", dirname, "proc", flags, NULL); } SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path, int, flag, void __user *, data) { int retval; switch (typenr) { case 1: retval = osf_ufs_mount(path, data, flag); break; case 6: retval = osf_cdfs_mount(path, data, flag); break; case 9: retval = 
osf_procfs_mount(path, data, flag); break; default: retval = -EINVAL; printk("osf_mount(%ld, %x)\n", typenr, flag); } return retval; } SYSCALL_DEFINE1(osf_utsname, char __user *, name) { int error; down_read(&uts_sem); error = -EFAULT; if (copy_to_user(name + 0, utsname()->sysname, 32)) goto out; if (copy_to_user(name + 32, utsname()->nodename, 32)) goto out; if (copy_to_user(name + 64, utsname()->release, 32)) goto out; if (copy_to_user(name + 96, utsname()->version, 32)) goto out; if (copy_to_user(name + 128, utsname()->machine, 32)) goto out; error = 0; out: up_read(&uts_sem); return error; } SYSCALL_DEFINE0(getpagesize) { return PAGE_SIZE; } SYSCALL_DEFINE0(getdtablesize) { return sysctl_nr_open; } /* * For compatibility with OSF/1 only. Use utsname(2) instead. */ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) { unsigned len; int i; if (!access_ok(VERIFY_WRITE, name, namelen)) return -EFAULT; len = namelen; if (len > 32) len = 32; down_read(&uts_sem); for (i = 0; i < len; ++i) { __put_user(utsname()->domainname[i], name + i); if (utsname()->domainname[i] == '\0') break; } up_read(&uts_sem); return 0; } /* * The following stuff should move into a header file should it ever * be labeled "officially supported." Right now, there is just enough * support to avoid applications (such as tar) printing error * messages. The attributes are not really implemented. 
*/ /* * Values for Property list entry flag */ #define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry by default */ #define PLE_FLAG_MASK 0x1 /* Valid flag values */ #define PLE_FLAG_ALL -1 /* All flag value */ struct proplistname_args { unsigned int pl_mask; unsigned int pl_numnames; char **pl_names; }; union pl_args { struct setargs { char __user *path; long follow; long nbytes; char __user *buf; } set; struct fsetargs { long fd; long nbytes; char __user *buf; } fset; struct getargs { char __user *path; long follow; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } get; struct fgetargs { long fd; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } fget; struct delargs { char __user *path; long follow; struct proplistname_args __user *name_args; } del; struct fdelargs { long fd; struct proplistname_args __user *name_args; } fdel; }; enum pl_code { PL_SET = 1, PL_FSET = 2, PL_GET = 3, PL_FGET = 4, PL_DEL = 5, PL_FDEL = 6 }; SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code, union pl_args __user *, args) { long error; int __user *min_buf_size_ptr; switch (code) { case PL_SET: if (get_user(error, &args->set.nbytes)) error = -EFAULT; break; case PL_FSET: if (get_user(error, &args->fset.nbytes)) error = -EFAULT; break; case PL_GET: error = get_user(min_buf_size_ptr, &args->get.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_FGET: error = get_user(min_buf_size_ptr, &args->fget.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_DEL: case PL_FDEL: error = 0; break; default: error = -EOPNOTSUPP; break; }; return error; } SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss, struct sigstack __user *, uoss) { unsigned long usp = rdusp(); unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; unsigned long oss_os = on_sig_stack(usp); int error; if (uss) { void __user 
*ss_sp; error = -EFAULT; if (get_user(ss_sp, &uss->ss_sp)) goto out; /* If the current stack was set with sigaltstack, don't swap stacks while we are on it. */ error = -EPERM; if (current->sas_ss_sp && on_sig_stack(usp)) goto out; /* Since we don't know the extent of the stack, and we don't track onstack-ness, but rather calculate it, we must presume a size. Ho hum this interface is lossy. */ current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current->sas_ss_size = SIGSTKSZ; } if (uoss) { error = -EFAULT; if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)) || __put_user(oss_sp, &uoss->ss_sp) || __put_user(oss_os, &uoss->ss_onstack)) goto out; } error = 0; out: return error; } SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) { const char *sysinfo_table[] = { utsname()->sysname, utsname()->nodename, utsname()->release, utsname()->version, utsname()->machine, "alpha", /* instruction set architecture */ "dummy", /* hardware serial number */ "dummy", /* hardware manufacturer */ "dummy", /* secure RPC domain */ }; unsigned long offset; const char *res; long len, err = -EINVAL; offset = command-1; if (offset >= ARRAY_SIZE(sysinfo_table)) { /* Digital UNIX has a few unpublished interfaces here */ printk("sysinfo(%d)", command); goto out; } down_read(&uts_sem); res = sysinfo_table[offset]; len = strlen(res)+1; if ((unsigned long)len > (unsigned long)count) len = count; if (copy_to_user(buf, res, len)) err = -EFAULT; else err = 0; up_read(&uts_sem); out: return err; } SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { unsigned long w; struct percpu_struct *cpu; switch (op) { case GSI_IEEE_FP_CONTROL: /* Return current software fp control & status bits. */ /* Note that DU doesn't verify available space here. 
*/ w = current_thread_info()->ieee_state & IEEE_SW_MASK; w = swcr_update_status(w, rdfpcr()); if (put_user(w, (unsigned long __user *) buffer)) return -EFAULT; return 0; case GSI_IEEE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case GSI_UACPROC: if (nbytes < sizeof(unsigned int)) return -EINVAL; w = current_thread_info()->status & UAC_BITMASK; if (put_user(w, (unsigned int __user *)buffer)) return -EFAULT; return 1; case GSI_PROC_TYPE: if (nbytes < sizeof(unsigned long)) return -EINVAL; cpu = (struct percpu_struct*) ((char*)hwrpb + hwrpb->processor_offset); w = cpu->type; if (put_user(w, (unsigned long __user*)buffer)) return -EFAULT; return 1; case GSI_GET_HWRPB: if (nbytes > sizeof(*hwrpb)) return -EINVAL; if (copy_to_user(buffer, hwrpb, nbytes) != 0) return -EFAULT; return 1; default: break; } return -EOPNOTSUPP; } SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { switch (op) { case SSI_IEEE_FP_CONTROL: { unsigned long swcr, fpcr; unsigned int *state; /* * Alpha Architecture Handbook 4.7.7.3: * To be fully IEEE compiant, we must track the current IEEE * exception state in software, because spurious bits can be * set in the trap shadow of a software-complete insn. */ if (get_user(swcr, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; /* Update softare trap enable bits. */ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK); /* Update the real fpcr. 
*/ fpcr = rdfpcr() & FPCR_DYN_MASK; fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); return 0; } case SSI_IEEE_RAISE_EXCEPTION: { unsigned long exc, swcr, fpcr, fex; unsigned int *state; if (get_user(exc, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; exc &= IEEE_STATUS_MASK; /* Update softare trap enable bits. */ swcr = (*state & IEEE_SW_MASK) | exc; *state |= exc; /* Update the real fpcr. */ fpcr = rdfpcr(); fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); /* If any exceptions set by this call, and are unmasked, send a signal. Old exceptions are not signaled. */ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr; if (fex) { siginfo_t info; int si_code = 0; if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES; if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF; if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV; if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; info.si_addr = NULL; /* FIXME */ send_sig_info(SIGFPE, &info, current); } return 0; } case SSI_IEEE_STATE_AT_SIGNAL: case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case SSI_NVPAIRS: { unsigned __user *p = buffer; unsigned i; for (i = 0, p = buffer; i < nbytes; ++i, p += 2) { unsigned v, w, status; if (get_user(v, p) || get_user(w, p + 1)) return -EFAULT; switch (v) { case SSIN_UACPROC: w &= UAC_BITMASK; status = current_thread_info()->status; status = (status & ~UAC_BITMASK) | w; current_thread_info()->status = status; break; default: return -EOPNOTSUPP; } } return 0; } case SSI_LMF: return 0; default: break; } return -EOPNOTSUPP; } /* Translations due to the fact that OSF's time_t is an int. 
Which affects all sorts of things, like timeval and itimerval. */ extern struct timezone sys_tz; struct timeval32 { int tv_sec, tv_usec; }; struct itimerval32 { struct timeval32 it_interval; struct timeval32 it_value; }; static inline long get_tv32(struct timeval *o, struct timeval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec))); } static inline long put_tv32(struct timeval32 __user *o, struct timeval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec))); } static inline long get_it32(struct itimerval *o, struct itimerval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); } static inline long put_it32(struct itimerval32 __user *o, struct itimerval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | __put_user(i->it_value.tv_usec, &o->it_value.tv_usec))); } static inline void jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value) { value->tv_usec = (jiffies % HZ) * (1000000L / HZ); value->tv_sec = jiffies / HZ; } SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { if (tv) { struct timeval ktv; do_gettimeofday(&ktv); if (put_tv32(tv, &ktv)) return -EFAULT; } if (tz) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { struct timespec kts; struct timezone ktz; if (tv) { if (get_tv32((struct timeval 
*)&kts, tv)) return -EFAULT; kts.tv_nsec *= 1000; } if (tz) { if (copy_from_user(&ktz, tz, sizeof(*tz))) return -EFAULT; } return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); } SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it) { struct itimerval kit; int error; error = do_getitimer(which, &kit); if (!error && put_it32(it, &kit)) error = -EFAULT; return error; } SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in, struct itimerval32 __user *, out) { struct itimerval kin, kout; int error; if (in) { if (get_it32(&kin, in)) return -EFAULT; } else memset(&kin, 0, sizeof(kin)); error = do_setitimer(which, &kin, out ? &kout : NULL); if (error || !out) return error; if (put_it32(out, &kout)) return -EFAULT; return 0; } SYSCALL_DEFINE2(osf_utimes, const char __user *, filename, struct timeval32 __user *, tvs) { struct timespec tv[2]; if (tvs) { struct timeval ktvs[2]; if (get_tv32(&ktvs[0], &tvs[0]) || get_tv32(&ktvs[1], &tvs[1])) return -EFAULT; if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 || ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000) return -EINVAL; tv[0].tv_sec = ktvs[0].tv_sec; tv[0].tv_nsec = 1000 * ktvs[0].tv_usec; tv[1].tv_sec = ktvs[1].tv_sec; tv[1].tv_nsec = 1000 * ktvs[1].tv_usec; } return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0); } SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct timeval32 __user *, tvp) { struct timespec end_time, *to = NULL; if (tvp) { time_t sec, usec; to = &end_time; if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) || __get_user(sec, &tvp->tv_sec) || __get_user(usec, &tvp->tv_usec)) { return -EFAULT; } if (sec < 0 || usec < 0) return -EINVAL; if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC)) return -EINVAL; } /* OSF does not copy back the remaining time. 
*/ return core_sys_select(n, inp, outp, exp, to); } struct rusage32 { struct timeval32 ru_utime; /* user time used */ struct timeval32 ru_stime; /* system time used */ long ru_maxrss; /* maximum resident set size */ long ru_ixrss; /* integral shared memory size */ long ru_idrss; /* integral unshared data size */ long ru_isrss; /* integral unshared stack size */ long ru_minflt; /* page reclaims */ long ru_majflt; /* page faults */ long ru_nswap; /* swaps */ long ru_inblock; /* block input operations */ long ru_oublock; /* block output operations */ long ru_msgsnd; /* messages sent */ long ru_msgrcv; /* messages received */ long ru_nsignals; /* signals received */ long ru_nvcsw; /* voluntary context switches */ long ru_nivcsw; /* involuntary " */ }; SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) { struct rusage32 r; cputime_t utime, stime; unsigned long utime_jiffies, stime_jiffies; if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) return -EINVAL; memset(&r, 0, sizeof(r)); switch (who) { case RUSAGE_SELF: task_cputime(current, &utime, &stime); utime_jiffies = cputime_to_jiffies(utime); stime_jiffies = cputime_to_jiffies(stime); jiffies_to_timeval32(utime_jiffies, &r.ru_utime); jiffies_to_timeval32(stime_jiffies, &r.ru_stime); r.ru_minflt = current->min_flt; r.ru_majflt = current->maj_flt; break; case RUSAGE_CHILDREN: utime_jiffies = cputime_to_jiffies(current->signal->cutime); stime_jiffies = cputime_to_jiffies(current->signal->cstime); jiffies_to_timeval32(utime_jiffies, &r.ru_utime); jiffies_to_timeval32(stime_jiffies, &r.ru_stime); r.ru_minflt = current->signal->cmin_flt; r.ru_majflt = current->signal->cmaj_flt; break; } return copy_to_user(ru, &r, sizeof(r)) ? 
-EFAULT : 0; } SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, struct rusage32 __user *, ur) { struct rusage r; long ret, err; unsigned int status = 0; mm_segment_t old_fs; if (!ur) return sys_wait4(pid, ustatus, options, NULL); old_fs = get_fs(); set_fs (KERNEL_DS); ret = sys_wait4(pid, (unsigned int __user *) &status, options, (struct rusage __user *) &r); set_fs (old_fs); if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) return -EFAULT; err = 0; err |= put_user(status, ustatus); err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec); err |= __put_user(r.ru_maxrss, &ur->ru_maxrss); err |= __put_user(r.ru_ixrss, &ur->ru_ixrss); err |= __put_user(r.ru_idrss, &ur->ru_idrss); err |= __put_user(r.ru_isrss, &ur->ru_isrss); err |= __put_user(r.ru_minflt, &ur->ru_minflt); err |= __put_user(r.ru_majflt, &ur->ru_majflt); err |= __put_user(r.ru_nswap, &ur->ru_nswap); err |= __put_user(r.ru_inblock, &ur->ru_inblock); err |= __put_user(r.ru_oublock, &ur->ru_oublock); err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd); err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv); err |= __put_user(r.ru_nsignals, &ur->ru_nsignals); err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw); err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw); return err ? err : ret; } /* * I don't know what the parameters are: the first one * seems to be a timeval pointer, and I suspect the second * one is the time remaining.. Ho humm.. No documentation. 
*/ SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep, struct timeval32 __user *, remain) { struct timeval tmp; unsigned long ticks; if (get_tv32(&tmp, sleep)) goto fault; ticks = timeval_to_jiffies(&tmp); ticks = schedule_timeout_interruptible(ticks); if (remain) { jiffies_to_timeval(ticks, &tmp); if (put_tv32(remain, &tmp)) goto fault; } return 0; fault: return -EFAULT; } struct timex32 { unsigned int modes; /* mode selector */ long offset; /* time offset (usec) */ long freq; /* frequency offset (scaled ppm) */ long maxerror; /* maximum error (usec) */ long esterror; /* estimated error (usec) */ int status; /* clock command/status */ long constant; /* pll time constant */ long precision; /* clock precision (usec) (read only) */ long tolerance; /* clock frequency tolerance (ppm) * (read only) */ struct timeval32 time; /* (read only) */ long tick; /* (modified) usecs between clock ticks */ long ppsfreq; /* pps frequency (scaled ppm) (ro) */ long jitter; /* pps jitter (us) (ro) */ int shift; /* interval duration (s) (shift) (ro) */ long stabil; /* pps stability (scaled ppm) (ro) */ long jitcnt; /* jitter limit exceeded (ro) */ long calcnt; /* calibration intervals (ro) */ long errcnt; /* calibration errors (ro) */ long stbcnt; /* stability limit exceeded (ro) */ int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; }; SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) { struct timex txc; int ret; /* copy relevant bits of struct timex. 
*/ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - offsetof(struct timex32, time))) return -EFAULT; ret = do_adjtimex(&txc); if (ret < 0) return ret; /* copy back to timex32 */ if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) || (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) - offsetof(struct timex32, tick))) || (put_tv32(&txc_p->time, &txc.time))) return -EFAULT; return ret; } /* Get an address range which is currently unmapped. Similar to the generic version except that we know how to honor ADDR_LIMIT_32BIT. */ static unsigned long arch_get_unmapped_area_1(unsigned long addr, unsigned long len, unsigned long limit) { struct vm_unmapped_area_info info; info.flags = 0; info.length = len; info.low_limit = addr; info.high_limit = limit; info.align_mask = 0; info.align_offset = 0; return vm_unmapped_area(&info); } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long limit; /* "32 bit" actually means 31 bit, since pointers sign extend. */ if (current->personality & ADDR_LIMIT_32BIT) limit = 0x80000000; else limit = TASK_SIZE; if (len > limit) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* First, see if the given suggestion fits. The OSF/1 loader (/sbin/loader) relies on us returning an address larger than the requested if one exists, which is a terribly broken way to program. That said, I can see the use in being able to suggest not merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; } /* Next, try allocating at TASK_UNMAPPED_BASE. 
*/ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; /* Finally, try allocating in low memory. */ addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); return addr; } #ifdef CONFIG_OSF4_COMPAT /* Clear top 32 bits of iov_len in the user's buffer for compatibility with old versions of OSF/1 where iov_len was defined as int. */ static int osf_fix_iov_len(const struct iovec __user *iov, unsigned long count) { unsigned long i; for (i = 0 ; i < count ; i++) { int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1; if (put_user(0, iov_len_high)) return -EFAULT; } return 0; } SYSCALL_DEFINE3(osf_readv, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_readv(fd, vector, count); } SYSCALL_DEFINE3(osf_writev, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_writev(fd, vector, count); } #endif SYSCALL_DEFINE2(osf_getpriority, int, which, int, who) { int prio = sys_getpriority(which, who); if (prio >= 0) { /* Return value is the unbiased priority, i.e. 20 - prio. This does result in negative return values, so signal no error */ force_successful_syscall_return(); prio = 20 - prio; } return prio; } SYSCALL_DEFINE0(getxuid) { current_pt_regs()->r20 = sys_geteuid(); return sys_getuid(); } SYSCALL_DEFINE0(getxgid) { current_pt_regs()->r20 = sys_getegid(); return sys_getgid(); } SYSCALL_DEFINE0(getxpid) { current_pt_regs()->r20 = sys_getppid(); return sys_getpid(); } SYSCALL_DEFINE0(alpha_pipe) { int fd[2]; int res = do_pipe_flags(fd, 0); if (!res) { /* The return values are in $0 and $20. 
*/ current_pt_regs()->r20 = fd[1]; res = fd[0]; } return res; } SYSCALL_DEFINE1(sethae, unsigned long, val) { current_pt_regs()->hae = val; return 0; }
gpl-2.0
rabeeh/linux-linaro-stable-mx6-unmaintained-will-be-deleted
drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
1202
10309
/* * Copyright (C) 2012 Texas Instruments * Author: Rob Clark <robdclark@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/i2c.h> #include <linux/of_i2c.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/consumer.h> #include "tilcdc_drv.h" struct tfp410_module { struct tilcdc_module base; struct i2c_adapter *i2c; int gpio; }; #define to_tfp410_module(x) container_of(x, struct tfp410_module, base) static const struct tilcdc_panel_info dvi_info = { .ac_bias = 255, .ac_bias_intrpt = 0, .dma_burst_sz = 16, .bpp = 16, .fdd = 0x80, .tft_alt_mode = 0, .sync_edge = 0, .sync_ctrl = 1, .raster_order = 0, }; /* * Encoder: */ struct tfp410_encoder { struct drm_encoder base; struct tfp410_module *mod; int dpms; }; #define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base) static void tfp410_encoder_destroy(struct drm_encoder *encoder) { struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder); drm_encoder_cleanup(encoder); kfree(tfp410_encoder); } static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode) { struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder); if (tfp410_encoder->dpms == mode) return; if (mode == DRM_MODE_DPMS_ON) { DBG("Power on"); gpio_direction_output(tfp410_encoder->mod->gpio, 1); } else { DBG("Power off"); gpio_direction_output(tfp410_encoder->mod->gpio, 0); } tfp410_encoder->dpms = mode; } static bool 
tfp410_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* nothing needed */ return true; } static void tfp410_encoder_prepare(struct drm_encoder *encoder) { tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); tilcdc_crtc_set_panel_info(encoder->crtc, &dvi_info); } static void tfp410_encoder_commit(struct drm_encoder *encoder) { tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_ON); } static void tfp410_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* nothing needed */ } static const struct drm_encoder_funcs tfp410_encoder_funcs = { .destroy = tfp410_encoder_destroy, }; static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = { .dpms = tfp410_encoder_dpms, .mode_fixup = tfp410_encoder_mode_fixup, .prepare = tfp410_encoder_prepare, .commit = tfp410_encoder_commit, .mode_set = tfp410_encoder_mode_set, }; static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev, struct tfp410_module *mod) { struct tfp410_encoder *tfp410_encoder; struct drm_encoder *encoder; int ret; tfp410_encoder = kzalloc(sizeof(*tfp410_encoder), GFP_KERNEL); if (!tfp410_encoder) { dev_err(dev->dev, "allocation failed\n"); return NULL; } tfp410_encoder->dpms = DRM_MODE_DPMS_OFF; tfp410_encoder->mod = mod; encoder = &tfp410_encoder->base; encoder->possible_crtcs = 1; ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs, DRM_MODE_ENCODER_TMDS); if (ret < 0) goto fail; drm_encoder_helper_add(encoder, &tfp410_encoder_helper_funcs); return encoder; fail: tfp410_encoder_destroy(encoder); return NULL; } /* * Connector: */ struct tfp410_connector { struct drm_connector base; struct drm_encoder *encoder; /* our connected encoder */ struct tfp410_module *mod; }; #define to_tfp410_connector(x) container_of(x, struct tfp410_connector, base) static void tfp410_connector_destroy(struct drm_connector *connector) { struct tfp410_connector 
*tfp410_connector = to_tfp410_connector(connector); drm_connector_cleanup(connector); kfree(tfp410_connector); } static enum drm_connector_status tfp410_connector_detect( struct drm_connector *connector, bool force) { struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector); if (drm_probe_ddc(tfp410_connector->mod->i2c)) return connector_status_connected; return connector_status_unknown; } static int tfp410_connector_get_modes(struct drm_connector *connector) { struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector); struct edid *edid; int ret = 0; edid = drm_get_edid(connector, tfp410_connector->mod->i2c); drm_mode_connector_update_edid_property(connector, edid); if (edid) { ret = drm_add_edid_modes(connector, edid); kfree(edid); } return ret; } static int tfp410_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct tilcdc_drm_private *priv = connector->dev->dev_private; /* our only constraints are what the crtc can generate: */ return tilcdc_crtc_mode_valid(priv->crtc, mode); } static struct drm_encoder *tfp410_connector_best_encoder( struct drm_connector *connector) { struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector); return tfp410_connector->encoder; } static const struct drm_connector_funcs tfp410_connector_funcs = { .destroy = tfp410_connector_destroy, .dpms = drm_helper_connector_dpms, .detect = tfp410_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, }; static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = { .get_modes = tfp410_connector_get_modes, .mode_valid = tfp410_connector_mode_valid, .best_encoder = tfp410_connector_best_encoder, }; static struct drm_connector *tfp410_connector_create(struct drm_device *dev, struct tfp410_module *mod, struct drm_encoder *encoder) { struct tfp410_connector *tfp410_connector; struct drm_connector *connector; int ret; tfp410_connector = kzalloc(sizeof(*tfp410_connector), 
GFP_KERNEL); if (!tfp410_connector) { dev_err(dev->dev, "allocation failed\n"); return NULL; } tfp410_connector->encoder = encoder; tfp410_connector->mod = mod; connector = &tfp410_connector->base; drm_connector_init(dev, connector, &tfp410_connector_funcs, DRM_MODE_CONNECTOR_DVID); drm_connector_helper_add(connector, &tfp410_connector_helper_funcs); connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; connector->interlace_allowed = 0; connector->doublescan_allowed = 0; ret = drm_mode_connector_attach_encoder(connector, encoder); if (ret) goto fail; drm_sysfs_connector_add(connector); return connector; fail: tfp410_connector_destroy(connector); return NULL; } /* * Module: */ static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev) { struct tfp410_module *tfp410_mod = to_tfp410_module(mod); struct tilcdc_drm_private *priv = dev->dev_private; struct drm_encoder *encoder; struct drm_connector *connector; encoder = tfp410_encoder_create(dev, tfp410_mod); if (!encoder) return -ENOMEM; connector = tfp410_connector_create(dev, tfp410_mod, encoder); if (!connector) return -ENOMEM; priv->encoders[priv->num_encoders++] = encoder; priv->connectors[priv->num_connectors++] = connector; return 0; } static void tfp410_destroy(struct tilcdc_module *mod) { struct tfp410_module *tfp410_mod = to_tfp410_module(mod); if (tfp410_mod->i2c) i2c_put_adapter(tfp410_mod->i2c); if (!IS_ERR_VALUE(tfp410_mod->gpio)) gpio_free(tfp410_mod->gpio); tilcdc_module_cleanup(mod); kfree(tfp410_mod); } static const struct tilcdc_module_ops tfp410_module_ops = { .modeset_init = tfp410_modeset_init, .destroy = tfp410_destroy, }; /* * Device: */ static struct of_device_id tfp410_of_match[]; static int tfp410_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct device_node *i2c_node; struct tfp410_module *tfp410_mod; struct tilcdc_module *mod; struct pinctrl *pinctrl; uint32_t i2c_phandle; int ret = -EINVAL; /* bail out 
early if no DT data: */ if (!node) { dev_err(&pdev->dev, "device-tree data is missing\n"); return -ENXIO; } tfp410_mod = kzalloc(sizeof(*tfp410_mod), GFP_KERNEL); if (!tfp410_mod) return -ENOMEM; mod = &tfp410_mod->base; tilcdc_module_init(mod, "tfp410", &tfp410_module_ops); pinctrl = devm_pinctrl_get_select_default(&pdev->dev); if (IS_ERR(pinctrl)) dev_warn(&pdev->dev, "pins are not configured\n"); if (of_property_read_u32(node, "i2c", &i2c_phandle)) { dev_err(&pdev->dev, "could not get i2c bus phandle\n"); goto fail; } i2c_node = of_find_node_by_phandle(i2c_phandle); if (!i2c_node) { dev_err(&pdev->dev, "could not get i2c bus node\n"); goto fail; } tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node); if (!tfp410_mod->i2c) { dev_err(&pdev->dev, "could not get i2c\n"); goto fail; } of_node_put(i2c_node); tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio", 0, NULL); if (IS_ERR_VALUE(tfp410_mod->gpio)) { dev_warn(&pdev->dev, "No power down GPIO\n"); } else { ret = gpio_request(tfp410_mod->gpio, "DVI_PDn"); if (ret) { dev_err(&pdev->dev, "could not get DVI_PDn gpio\n"); goto fail; } } return 0; fail: tfp410_destroy(mod); return ret; } static int tfp410_remove(struct platform_device *pdev) { return 0; } static struct of_device_id tfp410_of_match[] = { { .compatible = "ti,tilcdc,tfp410", }, { }, }; struct platform_driver tfp410_driver = { .probe = tfp410_probe, .remove = tfp410_remove, .driver = { .owner = THIS_MODULE, .name = "tfp410", .of_match_table = tfp410_of_match, }, }; int __init tilcdc_tfp410_init(void) { return platform_driver_register(&tfp410_driver); } void __exit tilcdc_tfp410_fini(void) { platform_driver_unregister(&tfp410_driver); }
gpl-2.0
psyke83/ace2europa_kernel
arch/sh/drivers/pci/fixups-se7751.c
1202
4112
#include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/io.h> #include "pci-sh4.h" int __init pcibios_map_platform_irq(u8 slot, u8 pin) { switch (slot) { case 0: return 13; case 1: return 13; /* AMD Ethernet controller */ case 2: return -1; case 3: return -1; case 4: return -1; default: printk("PCI: Bad IRQ mapping request for slot %d\n", slot); return -1; } } #define PCIMCR_MRSET_OFF 0xBFFFFFFF #define PCIMCR_RFSH_OFF 0xFFFFFFFB /* * Only long word accesses of the PCIC's internal local registers and the * configuration registers from the CPU is supported. */ #define PCIC_WRITE(x,v) writel((v), PCI_REG(x)) #define PCIC_READ(x) readl(PCI_REG(x)) /* * Description: This function sets up and initializes the pcic, sets * up the BARS, maps the DRAM into the address space etc, etc. */ int pci_fixup_pcic(struct pci_channel *chan) { unsigned long bcr1, wcr1, wcr2, wcr3, mcr; unsigned short bcr2; /* * Initialize the slave bus controller on the pcic. The values used * here should not be hardcoded, but they should be taken from the bsc * on the processor, to make this function as generic as possible. * (i.e. Another sbc may usr different SDRAM timing settings -- in order * for the pcic to work, its settings need to be exactly the same.) 
*/ bcr1 = (*(volatile unsigned long*)(SH7751_BCR1)); bcr2 = (*(volatile unsigned short*)(SH7751_BCR2)); wcr1 = (*(volatile unsigned long*)(SH7751_WCR1)); wcr2 = (*(volatile unsigned long*)(SH7751_WCR2)); wcr3 = (*(volatile unsigned long*)(SH7751_WCR3)); mcr = (*(volatile unsigned long*)(SH7751_MCR)); bcr1 = bcr1 | 0x00080000; /* Enable Bit 19, BREQEN */ (*(volatile unsigned long*)(SH7751_BCR1)) = bcr1; bcr1 = bcr1 | 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */ PCIC_WRITE(SH7751_PCIBCR1, bcr1); /* PCIC BCR1 */ PCIC_WRITE(SH7751_PCIBCR2, bcr2); /* PCIC BCR2 */ PCIC_WRITE(SH7751_PCIWCR1, wcr1); /* PCIC WCR1 */ PCIC_WRITE(SH7751_PCIWCR2, wcr2); /* PCIC WCR2 */ PCIC_WRITE(SH7751_PCIWCR3, wcr3); /* PCIC WCR3 */ mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF; PCIC_WRITE(SH7751_PCIMCR, mcr); /* PCIC MCR */ /* Enable all interrupts, so we know what to fix */ PCIC_WRITE(SH7751_PCIINTM, 0x0000c3ff); PCIC_WRITE(SH7751_PCIAINTM, 0x0000380f); /* Set up standard PCI config registers */ PCIC_WRITE(SH7751_PCICONF1, 0xF39000C7); /* Bus Master, Mem & I/O access */ PCIC_WRITE(SH7751_PCICONF2, 0x00000000); /* PCI Class code & Revision ID */ PCIC_WRITE(SH7751_PCICONF4, 0xab000001); /* PCI I/O address (local regs) */ PCIC_WRITE(SH7751_PCICONF5, 0x0c000000); /* PCI MEM address (local RAM) */ PCIC_WRITE(SH7751_PCICONF6, 0xd0000000); /* PCI MEM address (unused) */ PCIC_WRITE(SH7751_PCICONF11, 0x35051054); /* PCI Subsystem ID & Vendor ID */ PCIC_WRITE(SH7751_PCILSR0, 0x03f00000); /* MEM (full 64M exposed) */ PCIC_WRITE(SH7751_PCILSR1, 0x00000000); /* MEM (unused) */ PCIC_WRITE(SH7751_PCILAR0, 0x0c000000); /* MEM (direct map from PCI) */ PCIC_WRITE(SH7751_PCILAR1, 0x00000000); /* MEM (unused) */ /* Now turn it on... */ PCIC_WRITE(SH7751_PCICR, 0xa5000001); /* * Set PCIMBR and PCIIOBR here, assuming a single window * (16M MEM, 256K IO) is enough. If a larger space is * needed, the readx/writex and inx/outx functions will * have to do more (e.g. setting registers for each call). 
*/ /* * Set the MBR so PCI address is one-to-one with window, * meaning all calls go straight through... use BUG_ON to * catch erroneous assumption. */ BUG_ON(chan->resources[1].start != SH7751_PCI_MEMORY_BASE); PCIC_WRITE(SH7751_PCIMBR, chan->resources[1].start); /* Set IOBR for window containing area specified in pci.h */ PCIC_WRITE(SH7751_PCIIOBR, (chan->resources[0].start & SH7751_PCIIOBR_MASK)); /* All done, may as well say so... */ printk("SH7751 PCI: Finished initialization of the PCI controller\n"); return 1; }
gpl-2.0
Michael-Pizzileo/sunxi-2.6.36
drivers/scsi/bfa/bfa_iocfc_q.c
1458
1354
/* * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <bfa.h> #include "bfa_intr_priv.h" BFA_TRC_FILE(HAL, IOCFC_Q); void bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba, u32 reqq_sci, u32 rspq_spi, bfa_cb_iocfc_t cbfn, void *cbarg) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_updateq_req_s updateq_req; iocfc->updateq_cbfn = cbfn; iocfc->updateq_cbarg = cbarg; bfi_h2i_set(updateq_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_UPDATEQ_REQ, bfa_lpuid(bfa)); updateq_req.reqq_ba = bfa_os_htonl(reqq_ba); updateq_req.rspq_ba = bfa_os_htonl(rspq_ba); updateq_req.reqq_sci = bfa_os_htonl(reqq_sci); updateq_req.rspq_spi = bfa_os_htonl(rspq_spi); bfa_ioc_mbox_send(&bfa->ioc, &updateq_req, sizeof(struct bfi_iocfc_updateq_req_s)); }
gpl-2.0
whitemolecule/note8-molecule-kernel
drivers/net/wireless/iwlegacy/iwl-3945.c
1458
82225
/****************************************************************************** * * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/wireless.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <asm/unaligned.h> #include <net/mac80211.h> #include "iwl-fh.h" #include "iwl-3945-fh.h" #include "iwl-commands.h" #include "iwl-sta.h" #include "iwl-3945.h" #include "iwl-eeprom.h" #include "iwl-core.h" #include "iwl-helpers.h" #include "iwl-led.h" #include "iwl-3945-led.h" #include "iwl-3945-debugfs.h" #define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ IWL_RATE_##r##M_IEEE, \ IWL_RATE_##ip##M_INDEX, \ IWL_RATE_##in##M_INDEX, \ IWL_RATE_##rp##M_INDEX, \ 
IWL_RATE_##rn##M_INDEX, \ IWL_RATE_##pp##M_INDEX, \ IWL_RATE_##np##M_INDEX, \ IWL_RATE_##r##M_INDEX_TABLE, \ IWL_RATE_##ip##M_INDEX_TABLE } /* * Parameter order: * rate, prev rate, next rate, prev tgg rate, next tgg rate * * If there isn't a valid next or previous rate then INV is used which * maps to IWL_RATE_INVALID * */ const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = { IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */ IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */ IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */ IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */ IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */ IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */ IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */ IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */ IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */ IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */ IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */ IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */ }; static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index) { u8 rate = iwl3945_rates[rate_index].prev_ieee; if (rate == IWL_RATE_INVALID) rate = rate_index; return rate; } /* 1 = enable the iwl3945_disable_events() function */ #define IWL_EVT_DISABLE (0) #define IWL_EVT_DISABLE_SIZE (1532/32) /** * iwl3945_disable_events - Disable selected events in uCode event log * * Disable an event by writing "1"s into "disable" * bitmap in SRAM. Bit position corresponds to Event # (id/type). * Default values of 0 enable uCode events to be logged. * Use for only special debugging. This function is just a placeholder as-is, * you'll need to provide the special bits! ... * ... and set IWL_EVT_DISABLE to 1. 
*/ void iwl3945_disable_events(struct iwl_priv *priv) { int i; u32 base; /* SRAM address of event log header */ u32 disable_ptr; /* SRAM address of event-disable bitmap array */ u32 array_size; /* # of u32 entries in array */ static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = { 0x00000000, /* 31 - 0 Event id numbers */ 0x00000000, /* 63 - 32 */ 0x00000000, /* 95 - 64 */ 0x00000000, /* 127 - 96 */ 0x00000000, /* 159 - 128 */ 0x00000000, /* 191 - 160 */ 0x00000000, /* 223 - 192 */ 0x00000000, /* 255 - 224 */ 0x00000000, /* 287 - 256 */ 0x00000000, /* 319 - 288 */ 0x00000000, /* 351 - 320 */ 0x00000000, /* 383 - 352 */ 0x00000000, /* 415 - 384 */ 0x00000000, /* 447 - 416 */ 0x00000000, /* 479 - 448 */ 0x00000000, /* 511 - 480 */ 0x00000000, /* 543 - 512 */ 0x00000000, /* 575 - 544 */ 0x00000000, /* 607 - 576 */ 0x00000000, /* 639 - 608 */ 0x00000000, /* 671 - 640 */ 0x00000000, /* 703 - 672 */ 0x00000000, /* 735 - 704 */ 0x00000000, /* 767 - 736 */ 0x00000000, /* 799 - 768 */ 0x00000000, /* 831 - 800 */ 0x00000000, /* 863 - 832 */ 0x00000000, /* 895 - 864 */ 0x00000000, /* 927 - 896 */ 0x00000000, /* 959 - 928 */ 0x00000000, /* 991 - 960 */ 0x00000000, /* 1023 - 992 */ 0x00000000, /* 1055 - 1024 */ 0x00000000, /* 1087 - 1056 */ 0x00000000, /* 1119 - 1088 */ 0x00000000, /* 1151 - 1120 */ 0x00000000, /* 1183 - 1152 */ 0x00000000, /* 1215 - 1184 */ 0x00000000, /* 1247 - 1216 */ 0x00000000, /* 1279 - 1248 */ 0x00000000, /* 1311 - 1280 */ 0x00000000, /* 1343 - 1312 */ 0x00000000, /* 1375 - 1344 */ 0x00000000, /* 1407 - 1376 */ 0x00000000, /* 1439 - 1408 */ 0x00000000, /* 1471 - 1440 */ 0x00000000, /* 1503 - 1472 */ }; base = le32_to_cpu(priv->card_alive.log_event_table_ptr); if (!iwl3945_hw_valid_rtc_data_addr(base)) { IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); return; } disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32))); array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32))); if (IWL_EVT_DISABLE && (array_size == 
IWL_EVT_DISABLE_SIZE)) { IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n", disable_ptr); for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) iwl_legacy_write_targ_mem(priv, disable_ptr + (i * sizeof(u32)), evt_disable[i]); } else { IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n"); IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n"); IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n", disable_ptr, array_size); } } static int iwl3945_hwrate_to_plcp_idx(u8 plcp) { int idx; for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++) if (iwl3945_rates[idx].plcp == plcp) return idx; return -1; } #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG #define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x static const char *iwl3945_get_tx_fail_reason(u32 status) { switch (status & TX_STATUS_MSK) { case TX_3945_STATUS_SUCCESS: return "SUCCESS"; TX_STATUS_ENTRY(SHORT_LIMIT); TX_STATUS_ENTRY(LONG_LIMIT); TX_STATUS_ENTRY(FIFO_UNDERRUN); TX_STATUS_ENTRY(MGMNT_ABORT); TX_STATUS_ENTRY(NEXT_FRAG); TX_STATUS_ENTRY(LIFE_EXPIRE); TX_STATUS_ENTRY(DEST_PS); TX_STATUS_ENTRY(ABORTED); TX_STATUS_ENTRY(BT_RETRY); TX_STATUS_ENTRY(STA_INVALID); TX_STATUS_ENTRY(FRAG_DROPPED); TX_STATUS_ENTRY(TID_DISABLE); TX_STATUS_ENTRY(FRAME_FLUSHED); TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); TX_STATUS_ENTRY(TX_LOCKED); TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); } return "UNKNOWN"; } #else static inline const char *iwl3945_get_tx_fail_reason(u32 status) { return ""; } #endif /* * get ieee prev rate from rate scale table. 
 * for A and B mode we need to override the previous
 * value
 */
int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
{
	int next_rate = iwl3945_get_prev_ieee_rate(rate);

	switch (priv->band) {
	case IEEE80211_BAND_5GHZ:
		/*
		 * 5 GHz has no CCK rates: stepping down from 12M goes to
		 * 9M (skipping 11M CCK), and 6M is already the lowest
		 * rate so it stays where it is.
		 */
		if (rate == IWL_RATE_12M_INDEX)
			next_rate = IWL_RATE_9M_INDEX;
		else if (rate == IWL_RATE_6M_INDEX)
			next_rate = IWL_RATE_6M_INDEX;
		break;
	case IEEE80211_BAND_2GHZ:
		/*
		 * If the associated peer advertises no OFDM rates, stay
		 * inside the CCK set: 11M falls back to 5.5M.
		 */
		if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
		    iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
			if (rate == IWL_RATE_11M_INDEX)
				next_rate = IWL_RATE_5M_INDEX;
		}
		break;
	default:
		break;
	}

	return next_rate;
}

/**
 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
				     int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;

	/* The command queue is never reclaimed through this path. */
	BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);

	/* Advance read_ptr to one past 'index', returning each completed
	 * skb to mac80211 and releasing its TFD. */
	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
		tx_info->skb = NULL;
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}

	/* Enough room freed up? Let the stack feed us again. */
	if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
	    (txq_id != IWL39_CMD_QUEUE_NUM) &&
	    priv->mac80211_registered)
		iwl_legacy_wake_queue(priv, txq);
}

/**
 * iwl3945_rx_reply_tx - Handle Tx response
 */
static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwl3945_tx_resp *tx_resp = (void
					   *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->status);
	int rate_idx;
	int fail;

	/* Reject indexes from uCode that are out of range or already
	 * reclaimed. */
	if ((index >= txq->q.n_bd) ||
	    (iwl_legacy_queue_used(&txq->q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
				"is out of range [0-%d] %d %d\n", txq_id,
				index, txq->q.n_bd, txq->q.write_ptr,
				txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;
	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
	ieee80211_tx_info_clear_status(info);

	/* Fill the MRR chain with some info about on-chip retransmissions */
	rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
	/* NOTE(review): iwl3945_hwrate_to_plcp_idx() may return -1 for an
	 * unknown PLCP value; that negative index is passed to mac80211
	 * as-is -- confirm callers tolerate it. */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx -= IWL_FIRST_OFDM_RATE;

	fail = tx_resp->failure_frame;

	info->status.rates[0].idx = rate_idx;
	info->status.rates[0].count = fail + 1; /* add final attempt */

	/* tx_status->rts_retry_count = tx_resp->failure_rts; */
	info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
				IEEE80211_TX_STAT_ACK : 0;

	IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
			txq_id, iwl3945_get_tx_fail_reason(status), status,
			tx_resp->rate, tx_resp->failure_frame);

	IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
	iwl3945_tx_queue_reclaim(priv, txq_id, index);

	if (status & TX_ABORT_REQUIRED_MSK)
		IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
}

/*****************************************************************************
 *
 * Intel PRO/Wireless 3945ABG/BG Network Connection
 *
 * RX handler implementations
 *
 *****************************************************************************/
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
/* Fold a new statistics notification into the running accumulators:
 * add each positive delta vs. the previous snapshot into accum_stats,
 * and track the largest delta seen per counter. */
static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
					    __le32 *stats)
{
	int i;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;

	prev_stats = (__le32 *)&priv->_3945.statistics;
	accum_stats = (u32 *)&priv->_3945.accum_statistics;
	delta = (u32 *)&priv->_3945.delta_statistics;
	max_delta = (u32 *)&priv->_3945.max_delta;

	/* skip the leading flags word, then walk the struct u32 by u32 */
	for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics); i
+= sizeof(__le32), stats++, prev_stats++, delta++, max_delta++, accum_stats++) { if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { *delta = (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats)); *accum_stats += *delta; if (*delta > *max_delta) *max_delta = *delta; } } /* reset accumulative statistics for "no-counter" type statistics */ priv->_3945.accum_statistics.general.temperature = priv->_3945.statistics.general.temperature; priv->_3945.accum_statistics.general.ttl_timestamp = priv->_3945.statistics.general.ttl_timestamp; } #endif void iwl3945_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", (int)sizeof(struct iwl3945_notif_statistics), le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); #ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw); #endif iwl_legacy_recover_from_statistics(priv, pkt); memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics)); } void iwl3945_reply_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); __le32 *flag = (__le32 *)&pkt->u.raw; if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) { #ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS memset(&priv->_3945.accum_statistics, 0, sizeof(struct iwl3945_notif_statistics)); memset(&priv->_3945.delta_statistics, 0, sizeof(struct iwl3945_notif_statistics)); memset(&priv->_3945.max_delta, 0, sizeof(struct iwl3945_notif_statistics)); #endif IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); } iwl3945_hw_rx_statistics(priv, rxb); } /****************************************************************************** * * Misc. internal state and helper functions * ******************************************************************************/ /* This is necessary only for a number of statistics, see the caller. 
 */
static int iwl3945_is_network_packet(struct iwl_priv *priv,
					struct ieee80211_hdr *header)
{
	/* Filter incoming packets to determine if they are targeted toward
	 * this network, discarding packets coming from ourselves */
	switch (priv->iw_mode) {
	case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source    | BSSID */
		/* packets to our IBSS update information */
		return !compare_ether_addr(header->addr3, priv->bssid);
	case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
		/* when associated as a station, our BSSID is addr2 */
		return !compare_ether_addr(header->addr2, priv->bssid);
	default:
		/* unknown interface mode: accept everything */
		return 1;
	}
}

/* Hand one received frame (payload attached as a page fragment, no copy)
 * up to mac80211 together with its ieee80211_rx_status. */
static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb,
				   struct ieee80211_rx_status *stats)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u16 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* Sanity check: the claimed frame length must fit within the
	 * receive buffer page(s).
	 * NOTE(review): the original "stop the watchdog" comment here did
	 * not match the code; this is purely a corruption/length check. */
	if (unlikely(len + IWL39_RX_FRAME_SIZE >
		     PAGE_SIZE << priv->hw_params.rx_page_order)) {
		IWL_DEBUG_DROP(priv, "Corruption detected!\n");
		return;
	}

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
			"Dropping packet while interface is not open.\n");
		return;
	}

	/* Small skb head only; the frame payload is attached below as a
	 * page fragment, avoiding a copy. */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	if (!iwl3945_mod_params.sw_crypto)
		iwl_legacy_set_decrypted_flag(priv,
				(struct ieee80211_hdr *)rxb_addr(rxb),
				le32_to_cpu(rx_end->status), stats);

	skb_add_rx_frag(skb, 0, rxb->page,
			(void *)rx_hdr->payload - (void *)pkt, len);

	iwl_legacy_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	/* page ownership moved to the skb; drop our reference bookkeeping */
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}

#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)

static void
iwl3945_rx_reply_rx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct ieee80211_hdr *header; struct ieee80211_rx_status rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg); u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff); u8 network_packet; rx_status.flag = 0; rx_status.mactime = le64_to_cpu(rx_end->timestamp); rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel), rx_status.band); rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate); if (rx_status.band == IEEE80211_BAND_5GHZ) rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; /* set the preamble flag if appropriate */ if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) rx_status.flag |= RX_FLAG_SHORTPRE; if ((unlikely(rx_stats->phy_count > 20))) { IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", rx_stats->phy_count); return; } if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status); return; } /* Convert 3945's rssi indicator to dBm */ rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET; IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal, rx_stats_sig_avg, rx_stats_noise_diff); header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); network_packet = iwl3945_is_network_packet(priv, header); IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n", network_packet ? 
'*' : ' ', le16_to_cpu(rx_hdr->channel), rx_status.signal, rx_status.signal, rx_status.rate_idx); iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header); if (network_packet) { priv->_3945.last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp); priv->_3945.last_rx_rssi = rx_status.signal; } iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status); } int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq, dma_addr_t addr, u16 len, u8 reset, u8 pad) { int count; struct iwl_queue *q; struct iwl3945_tfd *tfd, *tfd_tmp; q = &txq->q; tfd_tmp = (struct iwl3945_tfd *)txq->tfds; tfd = &tfd_tmp[q->write_ptr]; if (reset) memset(tfd, 0, sizeof(*tfd)); count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); if ((count >= NUM_TFD_CHUNKS) || (count < 0)) { IWL_ERR(priv, "Error can not send more than %d chunks\n", NUM_TFD_CHUNKS); return -EINVAL; } tfd->tbs[count].addr = cpu_to_le32(addr); tfd->tbs[count].len = cpu_to_le32(len); count++; tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad)); return 0; } /** * iwl3945_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr] * * Does NOT advance any indexes */ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) { struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds; int index = txq->q.read_ptr; struct iwl3945_tfd *tfd = &tfd_tmp[index]; struct pci_dev *dev = priv->pci_dev; int i; int counter; /* sanity check */ counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); if (counter > NUM_TFD_CHUNKS) { IWL_ERR(priv, "Too many chunks: %i\n", counter); /* @todo issue fatal error, it is quite serious situation */ return; } /* Unmap tx_cmd */ if (counter) pci_unmap_single(dev, dma_unmap_addr(&txq->meta[index], mapping), dma_unmap_len(&txq->meta[index], len), PCI_DMA_TODEVICE); /* unmap chunks if any */ for (i = 1; i < counter; i++) pci_unmap_single(dev, 
le32_to_cpu(tfd->tbs[i].addr), le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE); /* free SKB */ if (txq->txb) { struct sk_buff *skb; skb = txq->txb[txq->q.read_ptr].skb; /* can be called from irqs-disabled context */ if (skb) { dev_kfree_skb_any(skb); txq->txb[txq->q.read_ptr].skb = NULL; } } } /** * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD: * */ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, struct iwl_device_cmd *cmd, struct ieee80211_tx_info *info, struct ieee80211_hdr *hdr, int sta_id, int tx_id) { u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value; u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945); u16 rate_mask; int rate; u8 rts_retry_limit; u8 data_retry_limit; __le32 tx_flags; __le16 fc = hdr->frame_control; struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; rate = iwl3945_rates[rate_index].plcp; tx_flags = tx_cmd->tx_flags; /* We need to figure out how to get the sta->supp_rates while * in this running context */ rate_mask = IWL_RATES_MASK_3945; /* Set retry limit on DATA packets and Probe Responses*/ if (ieee80211_is_probe_resp(fc)) data_retry_limit = 3; else data_retry_limit = IWL_DEFAULT_TX_RETRY; tx_cmd->data_retry_limit = data_retry_limit; if (tx_id >= IWL39_CMD_QUEUE_NUM) rts_retry_limit = 3; else rts_retry_limit = 7; if (data_retry_limit < rts_retry_limit) rts_retry_limit = data_retry_limit; tx_cmd->rts_retry_limit = rts_retry_limit; tx_cmd->rate = rate; tx_cmd->tx_flags = tx_flags; /* OFDM */ tx_cmd->supp_rates[0] = ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF; /* CCK */ tx_cmd->supp_rates[1] = (rate_mask & 0xF); IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]); } static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate) { unsigned long flags_spin; struct iwl_station_entry *station; if (sta_id 
== IWL_INVALID_STATION) return IWL_INVALID_STATION; spin_lock_irqsave(&priv->sta_lock, flags_spin); station = &priv->stations[sta_id]; station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; station->sta.rate_n_flags = cpu_to_le16(tx_rate); station->sta.mode = STA_CONTROL_MODIFY_MSK; iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC); spin_unlock_irqrestore(&priv->sta_lock, flags_spin); IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", sta_id, tx_rate); return sta_id; } static void iwl3945_set_pwr_vmain(struct iwl_priv *priv) { /* * (for documentation purposes) * to set power to V_AUX, do if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) { iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VAUX, ~APMG_PS_CTRL_MSK_PWR_SRC); iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VAUX_PWR_SRC, CSR_GPIO_IN_BIT_AUX_POWER, 5000); } */ iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~APMG_PS_CTRL_MSK_PWR_SRC); iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */ } static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) { iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0); iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) | FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) | FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); /* fake read to flush all prev I/O */ iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL); return 0; } static int iwl3945_tx_reset(struct iwl_priv *priv) { /* bypass mode */ 
iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2); /* RA 0 is active */ iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); /* all 6 fifo are active */ iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002); iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE, priv->_3945.shared_phys); iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG, FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON | FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON | FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH | FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH); return 0; } /** * iwl3945_txq_ctx_reset - Reset TX queue context * * Destroys all DMA structures and initialize them again */ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) { int rc; int txq_id, slots_num; iwl3945_hw_txq_ctx_free(priv); /* allocate tx queue structure */ rc = iwl_legacy_alloc_txq_mem(priv); if (rc) return rc; /* Tx CMD queue */ rc = iwl3945_tx_reset(priv); if (rc) goto error; /* Tx queue(s) */ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id], slots_num, txq_id); if (rc) { IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); goto error; } } return rc; error: iwl3945_hw_txq_ctx_free(priv); return rc; } /* * Start up 3945's basic functionality after it has been reset * (e.g. 
after platform boot, or shutdown via iwl_legacy_apm_stop()) * NOTE: This does not load uCode nor start the embedded processor */ static int iwl3945_apm_init(struct iwl_priv *priv) { int ret = iwl_legacy_apm_init(priv); /* Clear APMG (NIC's internal power management) interrupts */ iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0); iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); /* Reset radio chip */ iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); udelay(5); iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); return ret; } static void iwl3945_nic_config(struct iwl_priv *priv) { struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; unsigned long flags; u8 rev_id = priv->pci_dev->revision; spin_lock_irqsave(&priv->lock, flags); /* Determine HW type */ IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); if (rev_id & PCI_CFG_REV_ID_BIT_RTP) IWL_DEBUG_INFO(priv, "RTP type\n"); else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); } else { IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n"); iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); } if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n"); iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); } else IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n"); if ((eeprom->board_revision & 0xF0) == 0xD0) { IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", eeprom->board_revision); iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); } else { IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", eeprom->board_revision); iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); } if (eeprom->almgor_m_version <= 1) { iwl_legacy_set_bit(priv, 
CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n", eeprom->almgor_m_version); } else { IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n", eeprom->almgor_m_version); iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); } spin_unlock_irqrestore(&priv->lock, flags); if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n"); if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n"); } int iwl3945_hw_nic_init(struct iwl_priv *priv) { int rc; unsigned long flags; struct iwl_rx_queue *rxq = &priv->rxq; spin_lock_irqsave(&priv->lock, flags); priv->cfg->ops->lib->apm_ops.init(priv); spin_unlock_irqrestore(&priv->lock, flags); iwl3945_set_pwr_vmain(priv); priv->cfg->ops->lib->apm_ops.config(priv); /* Allocate the RX queue, or reset if it is already allocated */ if (!rxq->bd) { rc = iwl_legacy_rx_queue_alloc(priv); if (rc) { IWL_ERR(priv, "Unable to initialize Rx queue\n"); return -ENOMEM; } } else iwl3945_rx_queue_reset(priv, rxq); iwl3945_rx_replenish(priv); iwl3945_rx_init(priv, rxq); /* Look at using this instead: rxq->need_update = 1; iwl_legacy_rx_queue_update_write_ptr(priv, rxq); */ iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7); rc = iwl3945_txq_ctx_reset(priv); if (rc) return rc; set_bit(STATUS_INIT, &priv->status); return 0; } /** * iwl3945_hw_txq_ctx_free - Free TXQ Context * * Destroy all TX DMA queues and structures */ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv) { int txq_id; /* Tx queues */ if (priv->txq) for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) if (txq_id == IWL39_CMD_QUEUE_NUM) iwl_legacy_cmd_queue_free(priv); else iwl_legacy_tx_queue_free(priv, txq_id); /* free tx queue structure */ iwl_legacy_txq_mem(priv); } void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) { int txq_id; 
	/* stop SCD */
	iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
	iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);

	/* reset TFD queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
				FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
				1000);
	}

	iwl3945_hw_txq_ctx_free(priv);
}

/**
 * iwl3945_hw_reg_adjust_power_by_temp
 * return index delta into power gain settings table
 */
static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading,
						int old_reading)
{
	/* -11 gain-table index steps per 100 units of temperature
	 * increase (raw driver temperature scale). */
	return (new_reading - old_reading) * (-11) / 100;
}

/**
 * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
 */
static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
{
	/* Raw (driver-internal) scale; sane readings fall in -260..+25,
	 * see iwl3945_hw_reg_txpower_get_temperature() below. */
	return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
}

/* Read the raw on-die temperature value exposed via CSR_UCODE_DRV_GP2. */
int iwl3945_hw_get_temperature(struct iwl_priv *priv)
{
	return iwl_read32(priv, CSR_UCODE_DRV_GP2);
}

/**
 * iwl3945_hw_reg_txpower_get_temperature
 * get the current temperature by reading from NIC
 */
static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
{
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	int temperature;

	temperature = iwl3945_hw_get_temperature(priv);

	/* driver's okay range is -260 to +25.
	 *   human readable okay range is 0 to +285 */
	IWL_DEBUG_INFO(priv, "Temperature: %d\n",
			temperature + IWL_TEMP_CONVERT);

	/* handle insane temp reading */
	if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
		IWL_ERR(priv, "Error bad temperature value %d\n", temperature);

		/* if really really hot(?),
		 *   substitute the 3rd band/group's temp measured at factory */
		if (priv->last_temperature > 100)
			temperature = eeprom->groups[2].temperature;
		else /* else use most recent "sane" value from driver */
			temperature = priv->last_temperature;
	}

	return temperature; /* raw, not "human readable" */
}

/* Adjust Txpower only if temperature variance is greater than threshold.
 *
 * Both are lower than older versions' 9 degrees */
#define IWL_TEMPERATURE_LIMIT_TIMER 6

/**
 * iwl3945_is_temp_calib_needed - determines if new calibration is needed
 *
 * records new temperature in tx_mgr->temperature.
 * replaces tx_mgr->last_temperature *only* if calib needed
 *    (assumes caller will actually do the calibration!).
 */
static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
{
	int temp_diff;

	priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
	temp_diff = priv->temperature - priv->last_temperature;

	/* get absolute value */
	if (temp_diff < 0) {
		IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n",
				temp_diff);
		temp_diff = -temp_diff;
	} else if (temp_diff == 0)
		IWL_DEBUG_POWER(priv, "Same temp,\n");
	else
		IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n",
				temp_diff);

	/* if we don't need calibration, *don't* update last_temperature */
	if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
		IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
		return 0;
	}

	IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");

	/* assume that caller will actually do calib ...
	 *   update the "last temperature" value */
	priv->last_temperature = priv->temperature;
	return 1;
}

/* number of entries per band in the gain table that follows */
#define IWL_MAX_GAIN_ENTRIES 78
/* NOTE(review): presumably power (half-dB) and index offsets between CCK
 * and OFDM rates at the same nominal setting -- confirm against the code
 * that consumes these constants. */
#define IWL_CCK_FROM_OFDM_POWER_DIFF -5
#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)

/* radio and DSP power table, each step is 1/2 dB.
 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain.
*/ static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = { { {251, 127}, /* 2.4 GHz, highest power */ {251, 127}, {251, 127}, {251, 127}, {251, 125}, {251, 110}, {251, 105}, {251, 98}, {187, 125}, {187, 115}, {187, 108}, {187, 99}, {243, 119}, {243, 111}, {243, 105}, {243, 97}, {243, 92}, {211, 106}, {211, 100}, {179, 120}, {179, 113}, {179, 107}, {147, 125}, {147, 119}, {147, 112}, {147, 106}, {147, 101}, {147, 97}, {147, 91}, {115, 107}, {235, 121}, {235, 115}, {235, 109}, {203, 127}, {203, 121}, {203, 115}, {203, 108}, {203, 102}, {203, 96}, {203, 92}, {171, 110}, {171, 104}, {171, 98}, {139, 116}, {227, 125}, {227, 119}, {227, 113}, {227, 107}, {227, 101}, {227, 96}, {195, 113}, {195, 106}, {195, 102}, {195, 95}, {163, 113}, {163, 106}, {163, 102}, {163, 95}, {131, 113}, {131, 106}, {131, 102}, {131, 95}, {99, 113}, {99, 106}, {99, 102}, {99, 95}, {67, 113}, {67, 106}, {67, 102}, {67, 95}, {35, 113}, {35, 106}, {35, 102}, {35, 95}, {3, 113}, {3, 106}, {3, 102}, {3, 95} }, /* 2.4 GHz, lowest power */ { {251, 127}, /* 5.x GHz, highest power */ {251, 120}, {251, 114}, {219, 119}, {219, 101}, {187, 113}, {187, 102}, {155, 114}, {155, 103}, {123, 117}, {123, 107}, {123, 99}, {123, 92}, {91, 108}, {59, 125}, {59, 118}, {59, 109}, {59, 102}, {59, 96}, {59, 90}, {27, 104}, {27, 98}, {27, 92}, {115, 118}, {115, 111}, {115, 104}, {83, 126}, {83, 121}, {83, 113}, {83, 105}, {83, 99}, {51, 118}, {51, 111}, {51, 104}, {51, 98}, {19, 116}, {19, 109}, {19, 102}, {19, 98}, {19, 93}, {171, 113}, {171, 107}, {171, 99}, {139, 120}, {139, 113}, {139, 107}, {139, 99}, {107, 120}, {107, 113}, {107, 107}, {107, 99}, {75, 120}, {75, 113}, {75, 107}, {75, 99}, {43, 120}, {43, 113}, {43, 107}, {43, 99}, {11, 120}, {11, 113}, {11, 107}, {11, 99}, {131, 107}, {131, 99}, {99, 120}, {99, 113}, {99, 107}, {99, 99}, {67, 120}, {67, 113}, {67, 107}, {67, 99}, {35, 120}, {35, 113}, {35, 107}, {35, 99}, {3, 120} } /* 5.x GHz, lowest power */ }; static inline u8 
iwl3945_hw_reg_fix_power_index(int index)
{
	/* Clamp a (possibly negative) power index into the valid range
	 * [0, IWL_MAX_GAIN_ENTRIES - 1] of power_gain_table. */
	if (index < 0)
		return 0;
	if (index >= IWL_MAX_GAIN_ENTRIES)
		return IWL_MAX_GAIN_ENTRIES - 1;
	return (u8) index;
}

/* Kick off thermal recalibration check every 60 seconds */
#define REG_RECALIB_PERIOD (60)

/**
 * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
 *
 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
 * or 6 Mbit (OFDM) rates.
 */
static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv,
			       u32 scan_tbl_index,
			       s32 rate_index, const s8 *clip_pwrs,
			       struct iwl_channel_info *ch_info,
			       int band_index)
{
	struct iwl3945_scan_power_info *scan_power_info;
	s8 power;
	u8 power_index;

	scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];

	/* use this channel group's 6Mbit clipping/saturation pwr,
	 *   but cap at regulatory scan power restriction (set during init
	 *   based on eeprom channel data) for this channel.  */
	power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);

	power = min(power, priv->tx_power_user_lmt);
	scan_power_info->requested_power = power;

	/* find difference between new scan *power* and current "normal"
	 *   Tx *power* for 6Mb.  Use this difference (x2) to adjust the
	 *   current "normal" temperature-compensated Tx power *index* for
	 *   this rate (1Mb or 6Mb) to yield new temp-compensated scan power
	 *   *index*. */
	power_index = ch_info->power_info[rate_index].power_table_index
	    - (power - ch_info->power_info
	       [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;

	/* store reference index that we use when adjusting *all* scan
	 *   powers.  So we can accommodate user (all channel) or spectrum
	 *   management (single channel) power changes "between" temperature
	 *   feedback compensation procedures.
	 * don't force fit this reference index into gain table; it may be a
	 *   negative number.  This will help avoid errors when we're at
	 *   the lower bounds (highest gains, for warmest temperatures)
	 *   of the table. */

	/* don't exceed table bounds for "real" setting */
	power_index = iwl3945_hw_reg_fix_power_index(power_index);

	scan_power_info->power_table_index = power_index;
	scan_power_info->tpc.tx_gain =
	    power_gain_table[band_index][power_index].tx_gain;
	scan_power_info->tpc.dsp_atten =
	    power_gain_table[band_index][power_index].dsp_atten;
}

/**
 * iwl3945_send_tx_power - fill in Tx Power command with gain settings
 *
 * Configures power settings for all rates for the current channel,
 * using values from channel info struct, and send to NIC
 */
static int iwl3945_send_tx_power(struct iwl_priv *priv)
{
	int rate_idx, i;
	const struct iwl_channel_info *ch_info = NULL;
	struct iwl3945_txpowertable_cmd txpower = {
		.channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
	};
	u16 chan;

	/* Refuse to touch Tx power while the scan engine owns the radio */
	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
		      "TX Power requested while scanning!\n"))
		return -EAGAIN;

	chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);

	txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ?
 *
 * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
 *	 properly fill out the scan powers, and actual h/w gain settings,
 *	 and send changes to NIC
 */
static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
			     struct iwl_channel_info *ch_info)
{
	struct iwl3945_channel_power_info *power_info;
	int power_changed = 0;
	int i;
	const s8 *clip_pwrs;
	int power;

	/* Get this chnlgrp's rate-to-max/clip-powers table */
	clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;

	/* Get this channel's rate-to-current-power settings table */
	power_info = ch_info->power_info;

	/* update OFDM Txpower settings */
	for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
	     i++, ++power_info) {
		int delta_idx;

		/* limit new power to be no more than h/w capability */
		power = min(ch_info->curr_txpow, clip_pwrs[i]);
		if (power == power_info->requested_power)
			continue;

		/* find difference between old and new requested powers,
		 *    update base (non-temp-compensated) power index */
		delta_idx = (power - power_info->requested_power) * 2;
		power_info->base_power_index -= delta_idx;

		/* save new requested power value */
		power_info->requested_power = power;

		power_changed = 1;
	}

	/* update CCK Txpower settings, based on OFDM 12M setting ...
	 *    ... all CCK power settings for a given channel are the *same*. */
	if (power_changed) {
		power =
		    ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
		    requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;

		/* do all CCK rates' iwl3945_channel_power_info structures */
		for (i = IWL_RATE_1M_INDEX_TABLE;
		     i <= IWL_RATE_11M_INDEX_TABLE; i++) {
			power_info->requested_power = power;
			power_info->base_power_index =
			    ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
			    base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
			++power_info;
		}
	}

	/* always succeeds; kept as int for call-site symmetry */
	return 0;
}

/**
 * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
 *
 * NOTE: Returned power limit may be less (but not more) than requested,
 *	 based strictly on regulatory (eeprom and spectrum mgt) limitations
 *	 (no consideration for h/w clipping limitations).
 */
static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
{
	s8 max_power;

#if 0
	/* if we're using TGd limits, use lower of TGd or EEPROM */
	if (ch_info->tgd_data.max_power != 0)
		max_power = min(ch_info->tgd_data.max_power,
				ch_info->eeprom.max_power_avg);

	/* else just use EEPROM limits */
	else
#endif
	max_power = ch_info->eeprom.max_power_avg;

	return min(max_power, ch_info->max_power_avg);
}

/**
 * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
 *
 * Compensate txpower settings of *all* channels for temperature.
 * This only accounts for the difference between current temperature
 *   and the factory calibration temperatures, and bases the new settings
 *   on the channel's base_power_index.
 *
 * If RxOn is "associated", this sends the new Txpower to NIC!
 */
static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch_info = NULL;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	int delta_index;
	const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
	u8 a_band;
	u8 rate_index;
	u8 scan_tbl_index;
	u8 i;
	int ref_temp;
	int temperature = priv->temperature;

	if (priv->disable_tx_power_cal ||
	    test_bit(STATUS_SCANNING, &priv->status)) {
		/* do not perform tx power calibration */
		return 0;
	}

	/* set up new Tx power info for each and every channel, 2.4 and 5.x */
	for (i = 0; i < priv->channel_count; i++) {
		ch_info = &priv->channel_info[i];
		a_band = iwl_legacy_is_channel_a_band(ch_info);

		/* Get this chnlgrp's factory calibration temperature */
		ref_temp = (s16)eeprom->groups[ch_info->group_index].
		    temperature;

		/* get power index adjustment based on current and factory
		 * temps */
		delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
							ref_temp);

		/* set tx power value for all rates, OFDM and CCK */
		for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
		     rate_index++) {
			int power_idx =
			    ch_info->power_info[rate_index].base_power_index;

			/* temperature compensate */
			power_idx += delta_index;

			/* stay within table range */
			power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
			ch_info->power_info[rate_index].
			    power_table_index = (u8) power_idx;
			ch_info->power_info[rate_index].tpc =
			    power_gain_table[a_band][power_idx];
		}

		/* Get this chnlgrp's rate-to-max/clip-powers table */
		clip_pwrs =
		    priv->_3945.clip_groups[ch_info->group_index].clip_powers;

		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
		for (scan_tbl_index = 0;
		     scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
			s32 actual_index = (scan_tbl_index == 0) ?
			    IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
			iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
					   actual_index, clip_pwrs,
					   ch_info, a_band);
		}
	}

	/* send Txpower command for current channel to ucode */
	return priv->cfg->ops->lib->send_tx_power(priv);
}

/*
 * Apply a new user Tx power limit: clamp every channel to the lower of
 * the user limit and its regulatory limit, then re-run temperature
 * compensation (which pushes the result to the NIC if associated).
 */
int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
{
	struct iwl_channel_info *ch_info;
	s8 max_power;
	u8 a_band;
	u8 i;

	if (priv->tx_power_user_lmt == power) {
		IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
				"limit: %ddBm.\n", power);
		return 0;
	}

	IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
	priv->tx_power_user_lmt = power;

	/* set up new Tx powers for each and every channel, 2.4 and 5.x */

	for (i = 0; i < priv->channel_count; i++) {
		ch_info = &priv->channel_info[i];
		a_band = iwl_legacy_is_channel_a_band(ch_info);

		/* find minimum power of all user and regulatory constraints
		 *    (does not consider h/w clipping limitations) */
		max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
		max_power = min(power, max_power);
		if (max_power != ch_info->curr_txpow) {
			ch_info->curr_txpow = max_power;

			/* this considers the h/w clipping limitations */
			iwl3945_hw_reg_set_new_power(priv, ch_info);
		}
	}

	/* update txpower settings for all channels,
	 *   send to NIC if associated. */
	iwl3945_is_temp_calib_needed(priv);
	iwl3945_hw_reg_comp_txpower_temp(priv);

	return 0;
}

/*
 * Send REPLY_RXON_ASSOC with the staging filter/rate fields; skipped when
 * staging already matches the active configuration.
 */
static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	int rc = 0;
	struct iwl_rx_packet *pkt;
	struct iwl3945_rxon_assoc_cmd rxon_assoc;
	struct iwl_host_cmd cmd = {
		.id = REPLY_RXON_ASSOC,
		.len = sizeof(rxon_assoc),
		.flags = CMD_WANT_SKB,
		.data = &rxon_assoc,
	};
	const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
	const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;

	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = ctx->staging.flags;
	rxon_assoc.filter_flags = ctx->staging.filter_flags;
	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
	rxon_assoc.reserved = 0;

	rc = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (rc)
		return rc;

	pkt = (struct iwl_rx_packet *)cmd.reply_page;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
		rc = -EIO;
	}

	iwl_legacy_free_pages(priv, cmd.reply_page);

	return rc;
}

/**
 * iwl3945_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data. This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 */
int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
	struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
	int rc = 0;
	bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	if (!iwl_legacy_is_alive(priv))
		return -1;

	/* always get timestamp with Rx frame */
	staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;

	/* select antenna */
	staging_rxon->flags &=
	    ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
	staging_rxon->flags |= iwl3945_get_antenna_flags(priv);

	rc = iwl_legacy_check_rxon_cmd(priv, ctx);
	if (rc) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl_legacy_full_rxon_required(priv,
			&priv->contexts[IWL_RXON_CTX_BSS])) {
		rc = iwl_legacy_send_rxon_assoc(priv,
			 &priv->contexts[IWL_RXON_CTX_BSS]);
		if (rc) {
			IWL_ERR(priv, "Error setting RXON_ASSOC "
				  "configuration (%d).\n", rc);
			return rc;
		}

		memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		/*
		 * reserved4 and 5 could have been filled by the iwlcore code.
		 * Let's clear them before pushing to the 3945.
		 */
		active_rxon->reserved4 = 0;
		active_rxon->reserved5 = 0;
		rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
				      sizeof(struct iwl3945_rxon_cmd),
				      &priv->contexts[IWL_RXON_CTX_BSS].active);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (rc) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
				  "configuration (%d).\n", rc);
			return rc;
		}
		iwl_legacy_clear_ucode_stations(priv,
					 &priv->contexts[IWL_RXON_CTX_BSS]);
		iwl_legacy_restore_stations(priv,
					 &priv->contexts[IWL_RXON_CTX_BSS]);
	}

	IWL_DEBUG_INFO(priv, "Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(staging_rxon->channel),
		       staging_rxon->bssid_addr);

	/*
	 * reserved4 and 5 could have been filled by the iwlcore code.
	 * Let's clear them before pushing to the 3945.
	 */
	staging_rxon->reserved4 = 0;
	staging_rxon->reserved5 = 0;

	iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);

	/* Apply the new configuration */
	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl3945_rxon_cmd),
			      staging_rxon);
	if (rc) {
		IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
		return rc;
	}

	memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));

	if (!new_assoc) {
		iwl_legacy_clear_ucode_stations(priv,
					 &priv->contexts[IWL_RXON_CTX_BSS]);
		iwl_legacy_restore_stations(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);
	}

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
	if (rc) {
		IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
		return rc;
	}

	/* Init the hardware's rate fallback order based on the band */
	rc = iwl3945_init_hw_rate_table(priv);
	if (rc) {
		IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
		return -EIO;
	}

	return 0;
}

/**
 * iwl3945_reg_txpower_periodic - called when time to check our temperature.
 *
 * -- reset periodic timer
 * -- see if temp has changed enough to warrant re-calibration ... if so:
 *     -- correct coeffs for temp (can reset temp timer)
 *     -- save this temp as "last",
 *     -- send new set of gain settings to NIC
 * NOTE:  This should continue working, even when we're not associated,
 *   so we can keep our internal table of scan powers current.
 */
void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
{
	/* This will kick in the "brute force"
	 * iwl3945_hw_reg_comp_txpower_temp() below */
	if (!iwl3945_is_temp_calib_needed(priv))
		goto reschedule;

	/* Set up a new set of temp-adjusted TxPowers, send to NIC.
	 * This is based *only* on current temperature,
	 * ignoring any previous power measurements */
	iwl3945_hw_reg_comp_txpower_temp(priv);

reschedule:
	/* always re-arm, so the check runs every REG_RECALIB_PERIOD secs */
	queue_delayed_work(priv->workqueue, &priv->_3945.thermal_periodic,
			   REG_RECALIB_PERIOD * HZ);
}

/* Workqueue wrapper: runs the periodic thermal check under priv->mutex,
 * skipping it when the driver is tearing down or Tx queues are gone. */
static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
					     _3945.thermal_periodic.work);

	mutex_lock(&priv->mutex);
	if (test_bit(STATUS_EXIT_PENDING, &priv->status) || priv->txq == NULL)
		goto out;

	iwl3945_reg_txpower_periodic(priv);
out:
	mutex_unlock(&priv->mutex);
}

/**
 * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
 *				     for the channel.
 *
 * This function is used when initializing channel-info structs.
 *
 * NOTE: These channel groups do *NOT* match the bands above!
 *	 These channel groups are based on factory-tested channels;
 *	 on A-band, EEPROM's "group frequency" entries represent the top
 *	 channel in each group 1-4.  Group 5 All B/G channels are in group 0.
 */
static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
				       const struct iwl_channel_info *ch_info)
{
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
	u8 group;
	u16 group_index = 0;	/* based on factory calib frequencies */
	u8 grp_channel;

	/* Find the group index for the channel ... don't use index 1(?) */
	if (iwl_legacy_is_channel_a_band(ch_info)) {
		for (group = 1; group < 5; group++) {
			grp_channel = ch_grp[group].group_channel;
			if (ch_info->channel <= grp_channel) {
				group_index = group;
				break;
			}
		}
		/* group 4 has a few channels *above* its factory cal freq */
		if (group == 5)
			group_index = 4;
	} else
		group_index = 0;	/* 2.4 GHz, group 0 */

	IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
			group_index);
	return group_index;
}

/**
 * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
 *
 * Interpolate to get nominal (i.e. at factory calibration temperature) index
 *   into radio/DSP gain settings table for requested power.
 *
 * Works in half-dBm units (power = 2 * requested_power) against the 5
 * factory-measured samples of the channel group; the interpolation is
 * fixed-point with 19 fractional bits, rounded via the (1 << 18) term.
 */
static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
				       s8 requested_power,
				       s32 setting_index, s32 *new_index)
{
	const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	s32 index0, index1;
	s32 power = 2 * requested_power;
	s32 i;
	const struct iwl3945_eeprom_txpower_sample *samples;
	s32 gains0, gains1;
	s32 res;
	s32 denominator;

	chnl_grp = &eeprom->groups[setting_index];
	samples = chnl_grp->samples;
	/* exact match against a factory sample needs no interpolation */
	for (i = 0; i < 5; i++) {
		if (power == samples[i].power) {
			*new_index = samples[i].gain_index;
			return 0;
		}
	}

	/* pick the bracketing pair of samples for interpolation */
	if (power > samples[1].power) {
		index0 = 0;
		index1 = 1;
	} else if (power > samples[2].power) {
		index0 = 1;
		index1 = 2;
	} else if (power > samples[3].power) {
		index0 = 2;
		index1 = 3;
	} else {
		index0 = 3;
		index1 = 4;
	}

	denominator = (s32) samples[index1].power - (s32) samples[index0].power;
	if (denominator == 0)
		return -EINVAL;
	gains0 = (s32) samples[index0].gain_index * (1 << 19);
	gains1 = (s32) samples[index1].gain_index * (1 << 19);
	res = gains0 + (gains1 - gains0) *
	    ((s32) power - (s32) samples[index0].power) / denominator +
	    (1 << 18);
	*new_index = res >> 19;
	return 0;
}

/* Fill priv->_3945.clip_groups[] with per-rate max ("clip") power levels
 * derived from each channel group's factory saturation power. */
static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
{
	u32 i;
	s32 rate_index;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	const struct iwl3945_eeprom_txpower_group *group;

	IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");

	for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
		s8 *clip_pwrs;	/* table of power levels for each rate */
		s8 satur_pwr;	/* saturation power for each chnl group */
		group = &eeprom->groups[i];

		/* sanity check on factory saturation power value */
		if (group->saturation_power < 40) {
			IWL_WARN(priv, "Error: saturation power is %d, "
				 "less than minimum expected 40\n",
				 group->saturation_power);
			return;
		}

		/*
		 * Derive requested power levels for each rate, based on
		 *   hardware capabilities (saturation power for band).
		 * Basic value is 3dB down from saturation, with further
		 *   power reductions for highest 3 data rates.  These
		 *   backoffs provide headroom for high rate modulation
		 *   power peaks, without too much distortion (clipping).
		 */
		/* we'll fill in this array with h/w max power levels */
		clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;

		/* divide factory saturation power by 2 to find -3dB level */
		satur_pwr = (s8) (group->saturation_power >> 1);

		/* fill in channel group's nominal powers for each rate */
		for (rate_index = 0;
		     rate_index < IWL_RATE_COUNT_3945;
		     rate_index++, clip_pwrs++) {
			switch (rate_index) {
			case IWL_RATE_36M_INDEX_TABLE:
				if (i == 0)	/* B/G */
					*clip_pwrs = satur_pwr;
				else	/* A */
					*clip_pwrs = satur_pwr - 5;
				break;
			case IWL_RATE_48M_INDEX_TABLE:
				if (i == 0)
					*clip_pwrs = satur_pwr - 7;
				else
					*clip_pwrs = satur_pwr - 10;
				break;
			case IWL_RATE_54M_INDEX_TABLE:
				if (i == 0)
					*clip_pwrs = satur_pwr - 9;
				else
					*clip_pwrs = satur_pwr - 12;
				break;
			default:
				*clip_pwrs = satur_pwr;
				break;
			}
		}
	}
}

/**
 * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
 *
 * Second pass (during init) to set up priv->channel_info
 *
 * Set up Tx-power settings in our channel info database for each VALID
 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
 * and current temperature.
 *
 * Since this is based on current temperature (at init time), these values may
 * not be valid for very long, but it gives us a starting/default point,
 * and allows us to active (i.e. using Tx) scan.
 *
 * This does *not* write values to NIC, just sets up our internal table.
 */
int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch_info = NULL;
	struct iwl3945_channel_power_info *pwr_info;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	int delta_index;
	u8 rate_index;
	u8 scan_tbl_index;
	const s8 *clip_pwrs;	/* array of power levels for each rate */
	u8 gain, dsp_atten;
	s8 power;
	u8 pwr_index, base_pwr_index, a_band;
	u8 i;
	int temperature;

	/* save temperature reference,
	 *   so we can determine next time to calibrate */
	temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
	priv->last_temperature = temperature;

	iwl3945_hw_reg_init_channel_groups(priv);

	/* initialize Tx power info for each and every channel, 2.4 and 5.x */
	for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
	     i++, ch_info++) {
		a_band = iwl_legacy_is_channel_a_band(ch_info);
		if (!iwl_legacy_is_channel_valid(ch_info))
			continue;

		/* find this channel's channel group (*not* "band") index */
		ch_info->group_index =
			iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);

		/* Get this chnlgrp's rate->max/clip-powers table */
		clip_pwrs =
		   priv->_3945.clip_groups[ch_info->group_index].clip_powers;

		/* calculate power index *adjustment* value according to
		 *  diff between current temperature and factory temperature */
		delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
				eeprom->groups[ch_info->group_index].
				temperature);

		IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
				ch_info->channel, delta_index, temperature +
				IWL_TEMP_CONVERT);

		/* set tx power value for all OFDM rates */
		for (rate_index = 0; rate_index < IWL_OFDM_RATES;
		     rate_index++) {
			s32 uninitialized_var(power_idx);
			int rc;

			/* use channel group's clip-power table,
			 *   but don't exceed channel's max power */
			s8 pwr = min(ch_info->max_power_avg,
				     clip_pwrs[rate_index]);

			pwr_info = &ch_info->power_info[rate_index];

			/* get base (i.e. at factory-measured temperature)
			 *    power table index for this rate's power */
			rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
							 ch_info->group_index,
							 &power_idx);
			if (rc) {
				IWL_ERR(priv,
					"Invalid power index\n");
				return rc;
			}
			pwr_info->base_power_index = (u8) power_idx;

			/* temperature compensate */
			power_idx += delta_index;

			/* stay within range of gain table */
			power_idx = iwl3945_hw_reg_fix_power_index(power_idx);

			/* fill 1 OFDM rate's iwl3945_channel_power_info
			 * struct */
			pwr_info->requested_power = pwr;
			pwr_info->power_table_index = (u8) power_idx;
			pwr_info->tpc.tx_gain =
			    power_gain_table[a_band][power_idx].tx_gain;
			pwr_info->tpc.dsp_atten =
			    power_gain_table[a_band][power_idx].dsp_atten;
		}

		/* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
		pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
		power = pwr_info->requested_power +
			IWL_CCK_FROM_OFDM_POWER_DIFF;
		pwr_index = pwr_info->power_table_index +
			IWL_CCK_FROM_OFDM_INDEX_DIFF;
		base_pwr_index = pwr_info->base_power_index +
			IWL_CCK_FROM_OFDM_INDEX_DIFF;

		/* stay within table range */
		pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
		gain = power_gain_table[a_band][pwr_index].tx_gain;
		dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;

		/* fill each CCK rate's iwl3945_channel_power_info structure
		 * NOTE:  All CCK-rate Txpwrs are the same for a given chnl!
		 * NOTE:  CCK rates start at end of OFDM rates! */
		for (rate_index = 0;
		     rate_index < IWL_CCK_RATES; rate_index++) {
			pwr_info = &ch_info->
				power_info[rate_index+IWL_OFDM_RATES];
			pwr_info->requested_power = power;
			pwr_info->power_table_index = pwr_index;
			pwr_info->base_power_index = base_pwr_index;
			pwr_info->tpc.tx_gain = gain;
			pwr_info->tpc.dsp_atten = dsp_atten;
		}

		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
		for (scan_tbl_index = 0;
		     scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
			s32 actual_index = (scan_tbl_index == 0) ?
				IWL_RATE_1M_INDEX_TABLE :
				IWL_RATE_6M_INDEX_TABLE;
			iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
				actual_index, clip_pwrs, ch_info, a_band);
		}
	}

	return 0;
}

/* Disable Rx DMA channel 0 and wait (up to 1000 us) for it to go idle.
 * NOTE(review): the poll result rc is only logged; the function always
 * returns 0 even on timeout -- confirm callers don't rely on an error. */
int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
{
	int rc;

	iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
	rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
			FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
	if (rc < 0)
		IWL_ERR(priv, "Can't stop Rx DMA.\n");

	return 0;
}

/* Point the flow handler at this Tx queue's circular buffer and enable
 * the DMA channel for it. */
int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	struct iwl3945_shared *shared_data = priv->_3945.shared_virt;

	shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);

	iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
	iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);

	iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
		FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
		FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
		FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
		FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
		FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);

	/* fake read to flush all prev.
writes */
	iwl_read32(priv, FH39_TSSR_CBB_BASE);

	return 0;
}

/*
 * HCMD utils
 */

/* Return the on-the-wire payload size for a host command: RXON and
 * POWER_TABLE use 3945-specific structs; everything else passes through. */
static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
{
	switch (cmd_id) {
	case REPLY_RXON:
		return sizeof(struct iwl3945_rxon_cmd);
	case POWER_TABLE_CMD:
		return sizeof(struct iwl3945_powertable_cmd);
	default:
		return len;
	}
}

/* Translate the generic legacy ADD_STA command into the 3945 layout;
 * returns the number of bytes to send.  tid_disable_tx is forced to 0. */
static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
								u8 *data)
{
	struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
	addsta->mode = cmd->mode;
	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
	memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
	addsta->station_flags = cmd->station_flags;
	addsta->station_flags_msk = cmd->station_flags_msk;
	addsta->tid_disable_tx = cpu_to_le16(0);
	addsta->rate_n_flags = cmd->rate_n_flags;
	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;

	return (u16)sizeof(struct iwl3945_addsta_cmd);
}

/* Add a station entry for @addr in the BSS context and mark it as a
 * driver-local station; the assigned id is returned via @sta_id_r. */
static int iwl3945_add_bssid_station(struct iwl_priv *priv,
			const u8 *addr, u8 *sta_id_r)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int ret;
	u8 sta_id;
	unsigned long flags;

	if (sta_id_r)
		*sta_id_r = IWL_INVALID_STATION;

	ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].used |= IWL_STA_LOCAL;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return 0;
}

/* Add (or remove) the IBSS BSSID station for @vif; on add, also seed its
 * sync rate (6M for 5 GHz, 1M for 2.4 GHz) and init rate scaling. */
static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
				       struct ieee80211_vif *vif, bool add)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	int ret;

	if (add) {
		ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
						&vif_priv->ibss_bssid_sta_id);
		if (ret)
			return ret;

		iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
				 (priv->band == IEEE80211_BAND_5GHZ) ?
				 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
		iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);

		return 0;
	}

	return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
					vif->bss_conf.bssid);
}

/**
 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
 */
int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
{
	int rc, i, index, prev_index;
	struct iwl3945_rate_scaling_cmd rate_cmd = {
		.reserved = {0, 0, 0},
	};
	struct iwl3945_rate_scaling_info *table = rate_cmd.table;

	/* default fallback: each rate falls back to the previous IEEE rate */
	for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
		index = iwl3945_rates[i].table_rs_index;

		table[index].rate_n_flags =
			iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
		table[index].try_cnt = priv->retry_rate;
		prev_index = iwl3945_get_prev_ieee_rate(i);
		table[index].next_rate_index =
				iwl3945_rates[prev_index].table_rs_index;
	}

	switch (priv->band) {
	case IEEE80211_BAND_5GHZ:
		IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
		/* If one of the following CCK rates is used,
		 * have it fall back to the 6M OFDM rate */
		for (i = IWL_RATE_1M_INDEX_TABLE;
			i <= IWL_RATE_11M_INDEX_TABLE; i++)
			table[i].next_rate_index =
			  iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;

		/* Don't fall back to CCK rates */
		table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
						IWL_RATE_9M_INDEX_TABLE;

		/* Don't drop out of OFDM rates */
		table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
			iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
		break;

	case IEEE80211_BAND_2GHZ:
		IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
		/* If an OFDM rate is used, have it fall back to the
		 * 1M CCK rates */

		if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
		    iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {

			index = IWL_FIRST_CCK_RATE;
			for (i = IWL_RATE_6M_INDEX_TABLE;
			     i <= IWL_RATE_54M_INDEX_TABLE; i++)
				table[i].next_rate_index =
					iwl3945_rates[index].table_rs_index;

			index = IWL_RATE_11M_INDEX_TABLE;
			/* CCK shouldn't fall back to OFDM... */
			table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
		}
		break;

	default:
		WARN_ON(1);
		break;
	}

	/* Update the rate scaling for control frame Tx */
	rate_cmd.table_id = 0;
	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
			      &rate_cmd);
	if (rc)
		return rc;

	/* Update the rate scaling for data frame Tx */
	rate_cmd.table_id = 1;
	return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
				&rate_cmd);
}

/* Called when initializing driver */
int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
{
	memset((void *)&priv->hw_params, 0,
	       sizeof(struct iwl_hw_params));

	/* DMA-coherent area shared with the device (Tx base pointers etc.) */
	priv->_3945.shared_virt =
		dma_alloc_coherent(&priv->pci_dev->dev,
				   sizeof(struct iwl3945_shared),
				   &priv->_3945.shared_phys, GFP_KERNEL);
	if (!priv->_3945.shared_virt) {
		IWL_ERR(priv, "failed to allocate pci memory\n");
		return -ENOMEM;
	}

	/* Assign number of Usable TX queues */
	priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;

	priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
	priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	priv->hw_params.max_stations = IWL3945_STATION_COUNT;
	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;

	priv->sta_key_max_num = STA_KEY_MAX_NUM;

	priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
	priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
	priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;

	return 0;
}

/* Build a TX beacon command in @frame at @rate; returns total cmd size
 * (header plus the beacon frame filled in by iwl3945_fill_beacon_frame). */
unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
			  struct iwl3945_frame *frame, u8 rate)
{
	struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
	unsigned int frame_size;

	tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	tx_beacon_cmd->tx.sta_id =
		priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	frame_size = iwl3945_fill_beacon_frame(priv,
				tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));

	BUG_ON(frame_size > MAX_MPDU_SIZE);
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);

	tx_beacon_cmd->tx.rate = rate;
	tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
				      TX_CMD_FLG_TSF_MSK);

	/* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
	tx_beacon_cmd->tx.supp_rates[0] =
		(IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;

	tx_beacon_cmd->tx.supp_rates[1] =
		(IWL_CCK_BASIC_RATES_MASK & 0xF);

	return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
}

void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
	priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
}

void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
			  iwl3945_bg_reg_txpower_periodic);
}

void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_delayed_work(&priv->_3945.thermal_periodic);
}

/* check contents of special bootstrap uCode SRAM */
static int iwl3945_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO(priv, "Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this first read's result is overwritten inside the
	 * loop below; presumably done for the read's side effect -- confirm */
	val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_legacy_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");

	return 0;
}

/******************************************************************************
 *
 * EEPROM related functions
 *
 ******************************************************************************/

/*
 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
 * 
embedded controller) as EEPROM reader; each read is a series of pulses * to/from the EEPROM chip, not a single event, so even reads could conflict * if they weren't arbitrated by some ownership mechanism. Here, the driver * simply claims ownership, which should be safe when this function is called * (i.e. before loading uCode!). */ static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv) { _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); return 0; } static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv) { return; } /** * iwl3945_load_bsm - Load bootstrap instructions * * BSM operation: * * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program * in special SRAM that does not power down during RFKILL. When powering back * up after power-saving sleeps (or during initial uCode load), the BSM loads * the bootstrap program into the on-board processor, and starts it. * * The bootstrap program loads (via DMA) instructions and data for a new * program from host DRAM locations indicated by the host driver in the * BSM_DRAM_* registers. Once the new program is loaded, it starts * automatically. * * When initializing the NIC, the host driver points the BSM to the * "initialize" uCode image. This uCode sets up some internal data, then * notifies host via "initialize alive" that it is complete. * * The host then replaces the BSM_DRAM_* pointer values to point to the * normal runtime uCode instructions and a backup uCode data cache buffer * (filled initially with starting data values for the on-board processor), * then triggers the "initialize" uCode to load and launch the runtime uCode, * which begins normal operation. * * When doing a power-save shutdown, runtime uCode saves data SRAM into * the backup data cache in DRAM before SRAM is powered down. * * When powering back up, the BSM loads the bootstrap program. 
This reloads * the runtime uCode instructions and the backup data cache into SRAM, * and re-launches the runtime uCode from where it left off. */ static int iwl3945_load_bsm(struct iwl_priv *priv) { __le32 *image = priv->ucode_boot.v_addr; u32 len = priv->ucode_boot.len; dma_addr_t pinst; dma_addr_t pdata; u32 inst_len; u32 data_len; int rc; int i; u32 done; u32 reg_offset; IWL_DEBUG_INFO(priv, "Begin load bsm\n"); /* make sure bootstrap program is no larger than BSM's SRAM size */ if (len > IWL39_MAX_BSM_SIZE) return -EINVAL; /* Tell bootstrap uCode where to find the "Initialize" uCode * in host DRAM ... host DRAM physical address bits 31:0 for 3945. * NOTE: iwl3945_initialize_alive_start() will replace these values, * after the "initialize" uCode has run, to point to * runtime/protocol instructions and backup data cache. */ pinst = priv->ucode_init.p_addr; pdata = priv->ucode_init_data.p_addr; inst_len = priv->ucode_init.len; data_len = priv->ucode_init_data.len; iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); /* Fill BSM memory with bootstrap instructions */ for (reg_offset = BSM_SRAM_LOWER_BOUND; reg_offset < BSM_SRAM_LOWER_BOUND + len; reg_offset += sizeof(u32), image++) _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image)); rc = iwl3945_verify_bsm(priv); if (rc) return rc; /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, IWL39_RTC_INST_LOWER_BOUND); iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); /* Load bootstrap code into instruction SRAM now, * to prepare to load "initialize" uCode */ iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); /* Wait for load of bootstrap uCode to finish */ 
for (i = 0; i < 100; i++) { done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG); if (!(done & BSM_WR_CTRL_REG_BIT_START)) break; udelay(10); } if (i < 100) IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i); else { IWL_ERR(priv, "BSM write did not complete!\n"); return -EIO; } /* Enable future boot loads whenever power management unit triggers it * (e.g. when powering back up after power-save shutdown) */ iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); return 0; } static struct iwl_hcmd_ops iwl3945_hcmd = { .rxon_assoc = iwl3945_send_rxon_assoc, .commit_rxon = iwl3945_commit_rxon, }; static struct iwl_lib_ops iwl3945_lib = { .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd, .txq_free_tfd = iwl3945_hw_txq_free_tfd, .txq_init = iwl3945_hw_tx_queue_init, .load_ucode = iwl3945_load_bsm, .dump_nic_event_log = iwl3945_dump_nic_event_log, .dump_nic_error_log = iwl3945_dump_nic_error_log, .apm_ops = { .init = iwl3945_apm_init, .config = iwl3945_nic_config, }, .eeprom_ops = { .regulatory_bands = { EEPROM_REGULATORY_BAND_1_CHANNELS, EEPROM_REGULATORY_BAND_2_CHANNELS, EEPROM_REGULATORY_BAND_3_CHANNELS, EEPROM_REGULATORY_BAND_4_CHANNELS, EEPROM_REGULATORY_BAND_5_CHANNELS, EEPROM_REGULATORY_BAND_NO_HT40, EEPROM_REGULATORY_BAND_NO_HT40, }, .acquire_semaphore = iwl3945_eeprom_acquire_semaphore, .release_semaphore = iwl3945_eeprom_release_semaphore, }, .send_tx_power = iwl3945_send_tx_power, .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, .debugfs_ops = { .rx_stats_read = iwl3945_ucode_rx_stats_read, .tx_stats_read = iwl3945_ucode_tx_stats_read, .general_stats_read = iwl3945_ucode_general_stats_read, }, }; static const struct iwl_legacy_ops iwl3945_legacy_ops = { .post_associate = iwl3945_post_associate, .config_ap = iwl3945_config_ap, .manage_ibss_station = iwl3945_manage_ibss_station, }; static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { .get_hcmd_size = iwl3945_get_hcmd_size, .build_addsta_hcmd = 
iwl3945_build_addsta_hcmd, .request_scan = iwl3945_request_scan, .post_scan = iwl3945_post_scan, }; static const struct iwl_ops iwl3945_ops = { .lib = &iwl3945_lib, .hcmd = &iwl3945_hcmd, .utils = &iwl3945_hcmd_utils, .led = &iwl3945_led_ops, .legacy = &iwl3945_legacy_ops, .ieee80211_ops = &iwl3945_hw_ops, }; static struct iwl_base_params iwl3945_base_params = { .eeprom_size = IWL3945_EEPROM_IMG_SIZE, .num_of_queues = IWL39_NUM_QUEUES, .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, .set_l0s = false, .use_bsm = true, .led_compensation = 64, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, .wd_timeout = IWL_DEF_WD_TIMEOUT, .max_event_log_size = 512, }; static struct iwl_cfg iwl3945_bg_cfg = { .name = "3945BG", .fw_name_pre = IWL3945_FW_PRE, .ucode_api_max = IWL3945_UCODE_API_MAX, .ucode_api_min = IWL3945_UCODE_API_MIN, .sku = IWL_SKU_G, .eeprom_ver = EEPROM_3945_EEPROM_VERSION, .ops = &iwl3945_ops, .mod_params = &iwl3945_mod_params, .base_params = &iwl3945_base_params, .led_mode = IWL_LED_BLINK, }; static struct iwl_cfg iwl3945_abg_cfg = { .name = "3945ABG", .fw_name_pre = IWL3945_FW_PRE, .ucode_api_max = IWL3945_UCODE_API_MAX, .ucode_api_min = IWL3945_UCODE_API_MIN, .sku = IWL_SKU_A|IWL_SKU_G, .eeprom_ver = EEPROM_3945_EEPROM_VERSION, .ops = &iwl3945_ops, .mod_params = &iwl3945_mod_params, .base_params = &iwl3945_base_params, .led_mode = IWL_LED_BLINK, }; DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)}, {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)}, {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)}, {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)}, {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)}, {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)}, {0} }; MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
gpl-2.0
damageless/linux-kernel-ican-tab
arch/m32r/platforms/usrv/setup.c
1458
6736
/* * linux/arch/m32r/platforms/usrv/setup.c * * Setup routines for MITSUBISHI uServer * * Copyright (c) 2001, 2002, 2003 Hiroyuki Kondo, Hirokazu Takata, * Hitoshi Yamamoto */ #include <linux/irq.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/system.h> #include <asm/m32r.h> #include <asm/io.h> #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) icu_data_t icu_data[M32700UT_NUM_CPU_IRQ]; static void disable_mappi_irq(unsigned int irq) { unsigned long port, data; port = irq2port(irq); data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7; outl(data, port); } static void enable_mappi_irq(unsigned int irq) { unsigned long port, data; port = irq2port(irq); data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6; outl(data, port); } static void mask_and_ack_mappi(unsigned int irq) { disable_mappi_irq(irq); } static void end_mappi_irq(unsigned int irq) { enable_mappi_irq(irq); } static unsigned int startup_mappi_irq(unsigned int irq) { enable_mappi_irq(irq); return 0; } static void shutdown_mappi_irq(unsigned int irq) { unsigned long port; port = irq2port(irq); outl(M32R_ICUCR_ILEVEL7, port); } static struct irq_chip mappi_irq_type = { .typename = "M32700-IRQ", .startup = startup_mappi_irq, .shutdown = shutdown_mappi_irq, .enable = enable_mappi_irq, .disable = disable_mappi_irq, .ack = mask_and_ack_mappi, .end = end_mappi_irq }; /* * Interrupt Control Unit of PLD on M32700UT (Level 2) */ #define irq2pldirq(x) ((x) - M32700UT_PLD_IRQ_BASE) #define pldirq2port(x) (unsigned long)((int)PLD_ICUCR1 + \ (((x) - 1) * sizeof(unsigned short))) typedef struct { unsigned short icucr; /* ICU Control Register */ } pld_icu_data_t; static pld_icu_data_t pld_icu_data[M32700UT_NUM_PLD_IRQ]; static void disable_m32700ut_pld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2pldirq(irq); port = pldirq2port(pldirq); data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7; outw(data, port); } static void 
enable_m32700ut_pld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2pldirq(irq); port = pldirq2port(pldirq); data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6; outw(data, port); } static void mask_and_ack_m32700ut_pld(unsigned int irq) { disable_m32700ut_pld_irq(irq); } static void end_m32700ut_pld_irq(unsigned int irq) { enable_m32700ut_pld_irq(irq); end_mappi_irq(M32R_IRQ_INT1); } static unsigned int startup_m32700ut_pld_irq(unsigned int irq) { enable_m32700ut_pld_irq(irq); return 0; } static void shutdown_m32700ut_pld_irq(unsigned int irq) { unsigned long port; unsigned int pldirq; pldirq = irq2pldirq(irq); port = pldirq2port(pldirq); outw(PLD_ICUCR_ILEVEL7, port); } static struct irq_chip m32700ut_pld_irq_type = { .typename = "USRV-PLD-IRQ", .startup = startup_m32700ut_pld_irq, .shutdown = shutdown_m32700ut_pld_irq, .enable = enable_m32700ut_pld_irq, .disable = disable_m32700ut_pld_irq, .ack = mask_and_ack_m32700ut_pld, .end = end_m32700ut_pld_irq }; void __init init_IRQ(void) { static int once = 0; int i; if (once) return; else once++; /* MFT2 : system timer */ irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED; irq_desc[M32R_IRQ_MFT2].chip = &mappi_irq_type; irq_desc[M32R_IRQ_MFT2].action = 0; irq_desc[M32R_IRQ_MFT2].depth = 1; icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN; disable_mappi_irq(M32R_IRQ_MFT2); #if defined(CONFIG_SERIAL_M32R_SIO) /* SIO0_R : uart receive data */ irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED; irq_desc[M32R_IRQ_SIO0_R].chip = &mappi_irq_type; irq_desc[M32R_IRQ_SIO0_R].action = 0; irq_desc[M32R_IRQ_SIO0_R].depth = 1; icu_data[M32R_IRQ_SIO0_R].icucr = 0; disable_mappi_irq(M32R_IRQ_SIO0_R); /* SIO0_S : uart send data */ irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED; irq_desc[M32R_IRQ_SIO0_S].chip = &mappi_irq_type; irq_desc[M32R_IRQ_SIO0_S].action = 0; irq_desc[M32R_IRQ_SIO0_S].depth = 1; icu_data[M32R_IRQ_SIO0_S].icucr = 0; disable_mappi_irq(M32R_IRQ_SIO0_S); /* SIO1_R : uart 
receive data */ irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED; irq_desc[M32R_IRQ_SIO1_R].chip = &mappi_irq_type; irq_desc[M32R_IRQ_SIO1_R].action = 0; irq_desc[M32R_IRQ_SIO1_R].depth = 1; icu_data[M32R_IRQ_SIO1_R].icucr = 0; disable_mappi_irq(M32R_IRQ_SIO1_R); /* SIO1_S : uart send data */ irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED; irq_desc[M32R_IRQ_SIO1_S].chip = &mappi_irq_type; irq_desc[M32R_IRQ_SIO1_S].action = 0; irq_desc[M32R_IRQ_SIO1_S].depth = 1; icu_data[M32R_IRQ_SIO1_S].icucr = 0; disable_mappi_irq(M32R_IRQ_SIO1_S); #endif /* CONFIG_SERIAL_M32R_SIO */ /* INT#67-#71: CFC#0 IREQ on PLD */ for (i = 0 ; i < CONFIG_M32R_CFC_NUM ; i++ ) { irq_desc[PLD_IRQ_CF0 + i].status = IRQ_DISABLED; irq_desc[PLD_IRQ_CF0 + i].chip = &m32700ut_pld_irq_type; irq_desc[PLD_IRQ_CF0 + i].action = 0; irq_desc[PLD_IRQ_CF0 + i].depth = 1; /* disable nested irq */ pld_icu_data[irq2pldirq(PLD_IRQ_CF0 + i)].icucr = PLD_ICUCR_ISMOD01; /* 'L' level sense */ disable_m32700ut_pld_irq(PLD_IRQ_CF0 + i); } #if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE) /* INT#76: 16552D#0 IREQ on PLD */ irq_desc[PLD_IRQ_UART0].status = IRQ_DISABLED; irq_desc[PLD_IRQ_UART0].chip = &m32700ut_pld_irq_type; irq_desc[PLD_IRQ_UART0].action = 0; irq_desc[PLD_IRQ_UART0].depth = 1; /* disable nested irq */ pld_icu_data[irq2pldirq(PLD_IRQ_UART0)].icucr = PLD_ICUCR_ISMOD03; /* 'H' level sense */ disable_m32700ut_pld_irq(PLD_IRQ_UART0); /* INT#77: 16552D#1 IREQ on PLD */ irq_desc[PLD_IRQ_UART1].status = IRQ_DISABLED; irq_desc[PLD_IRQ_UART1].chip = &m32700ut_pld_irq_type; irq_desc[PLD_IRQ_UART1].action = 0; irq_desc[PLD_IRQ_UART1].depth = 1; /* disable nested irq */ pld_icu_data[irq2pldirq(PLD_IRQ_UART1)].icucr = PLD_ICUCR_ISMOD03; /* 'H' level sense */ disable_m32700ut_pld_irq(PLD_IRQ_UART1); #endif /* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */ #if defined(CONFIG_IDC_AK4524) || defined(CONFIG_IDC_AK4524_MODULE) /* INT#80: AK4524 IREQ on PLD */ irq_desc[PLD_IRQ_SNDINT].status = 
IRQ_DISABLED; irq_desc[PLD_IRQ_SNDINT].chip = &m32700ut_pld_irq_type; irq_desc[PLD_IRQ_SNDINT].action = 0; irq_desc[PLD_IRQ_SNDINT].depth = 1; /* disable nested irq */ pld_icu_data[irq2pldirq(PLD_IRQ_SNDINT)].icucr = PLD_ICUCR_ISMOD01; /* 'L' level sense */ disable_m32700ut_pld_irq(PLD_IRQ_SNDINT); #endif /* CONFIG_IDC_AK4524 || CONFIG_IDC_AK4524_MODULE */ /* * INT1# is used for UART, MMC, CF Controller in FPGA. * We enable it here. */ icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD11; enable_mappi_irq(M32R_IRQ_INT1); }
gpl-2.0
ptmr3/GalaxyNote2_Kernel2
drivers/i2c/busses/i2c-nforce2.c
1714
13314
/* SMBus driver for nVidia nForce2 MCP Added nForce3 Pro 150 Thomas Leibold <thomas@plx.com>, Ported to 2.5 Patrick Dreker <patrick@dreker.de>, Copyright (c) 2003 Hans-Frieder Vogt <hfvogt@arcor.de>, Based on SMBus 2.0 driver for AMD-8111 IO-Hub Copyright (c) 2002 Vojtech Pavlik This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* SUPPORTED DEVICES PCI ID nForce2 MCP 0064 nForce2 Ultra 400 MCP 0084 nForce3 Pro150 MCP 00D4 nForce3 250Gb MCP 00E4 nForce4 MCP 0052 nForce4 MCP-04 0034 nForce MCP51 0264 nForce MCP55 0368 nForce MCP61 03EB nForce MCP65 0446 nForce MCP67 0542 nForce MCP73 07D8 nForce MCP78S 0752 nForce MCP79 0AA2 This driver supports the 2 SMBuses that are included in the MCP of the nForce2/3/4/5xx chipsets. 
*/ /* Note: we assume there can only be one nForce2, with two SMBus interfaces */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/acpi.h> #include <linux/slab.h> #include <linux/io.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@gmx.net>"); MODULE_DESCRIPTION("nForce2/3/4/5xx SMBus driver"); struct nforce2_smbus { struct i2c_adapter adapter; int base; int size; int blockops; int can_abort; }; /* * nVidia nForce2 SMBus control register definitions * (Newer incarnations use standard BARs 4 and 5 instead) */ #define NFORCE_PCI_SMB1 0x50 #define NFORCE_PCI_SMB2 0x54 /* * ACPI 2.0 chapter 13 SMBus 2.0 EC register model */ #define NVIDIA_SMB_PRTCL (smbus->base + 0x00) /* protocol, PEC */ #define NVIDIA_SMB_STS (smbus->base + 0x01) /* status */ #define NVIDIA_SMB_ADDR (smbus->base + 0x02) /* address */ #define NVIDIA_SMB_CMD (smbus->base + 0x03) /* command */ #define NVIDIA_SMB_DATA (smbus->base + 0x04) /* 32 data registers */ #define NVIDIA_SMB_BCNT (smbus->base + 0x24) /* number of data bytes */ #define NVIDIA_SMB_STATUS_ABRT (smbus->base + 0x3c) /* register used to check the status of the abort command */ #define NVIDIA_SMB_CTRL (smbus->base + 0x3e) /* control register */ #define NVIDIA_SMB_STATUS_ABRT_STS 0x01 /* Bit to notify that abort succeeded */ #define NVIDIA_SMB_CTRL_ABORT 0x20 #define NVIDIA_SMB_STS_DONE 0x80 #define NVIDIA_SMB_STS_ALRM 0x40 #define NVIDIA_SMB_STS_RES 0x20 #define NVIDIA_SMB_STS_STATUS 0x1f #define NVIDIA_SMB_PRTCL_WRITE 0x00 #define NVIDIA_SMB_PRTCL_READ 0x01 #define NVIDIA_SMB_PRTCL_QUICK 0x02 #define NVIDIA_SMB_PRTCL_BYTE 0x04 #define NVIDIA_SMB_PRTCL_BYTE_DATA 0x06 #define NVIDIA_SMB_PRTCL_WORD_DATA 0x08 #define NVIDIA_SMB_PRTCL_BLOCK_DATA 0x0a #define NVIDIA_SMB_PRTCL_PEC 0x80 /* Misc definitions */ #define MAX_TIMEOUT 100 /* We 
disable the second SMBus channel on these boards */ static struct dmi_system_id __devinitdata nforce2_dmi_blacklist2[] = { { .ident = "DFI Lanparty NF4 Expert", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "DFI Corp,LTD"), DMI_MATCH(DMI_BOARD_NAME, "LP UT NF4 Expert"), }, }, { } }; static struct pci_driver nforce2_driver; /* For multiplexing support, we need a global reference to the 1st SMBus channel */ #if defined CONFIG_I2C_NFORCE2_S4985 || defined CONFIG_I2C_NFORCE2_S4985_MODULE struct i2c_adapter *nforce2_smbus; EXPORT_SYMBOL_GPL(nforce2_smbus); static void nforce2_set_reference(struct i2c_adapter *adap) { nforce2_smbus = adap; } #else static inline void nforce2_set_reference(struct i2c_adapter *adap) { } #endif static void nforce2_abort(struct i2c_adapter *adap) { struct nforce2_smbus *smbus = adap->algo_data; int timeout = 0; unsigned char temp; dev_dbg(&adap->dev, "Aborting current transaction\n"); outb_p(NVIDIA_SMB_CTRL_ABORT, NVIDIA_SMB_CTRL); do { msleep(1); temp = inb_p(NVIDIA_SMB_STATUS_ABRT); } while (!(temp & NVIDIA_SMB_STATUS_ABRT_STS) && (timeout++ < MAX_TIMEOUT)); if (!(temp & NVIDIA_SMB_STATUS_ABRT_STS)) dev_err(&adap->dev, "Can't reset the smbus\n"); outb_p(NVIDIA_SMB_STATUS_ABRT_STS, NVIDIA_SMB_STATUS_ABRT); } static int nforce2_check_status(struct i2c_adapter *adap) { struct nforce2_smbus *smbus = adap->algo_data; int timeout = 0; unsigned char temp; do { msleep(1); temp = inb_p(NVIDIA_SMB_STS); } while ((!temp) && (timeout++ < MAX_TIMEOUT)); if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "SMBus Timeout!\n"); if (smbus->can_abort) nforce2_abort(adap); return -ETIMEDOUT; } if (!(temp & NVIDIA_SMB_STS_DONE) || (temp & NVIDIA_SMB_STS_STATUS)) { dev_dbg(&adap->dev, "Transaction failed (0x%02x)!\n", temp); return -EIO; } return 0; } /* Return negative errno on error */ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { struct nforce2_smbus *smbus = 
adap->algo_data; unsigned char protocol, pec; u8 len; int i, status; protocol = (read_write == I2C_SMBUS_READ) ? NVIDIA_SMB_PRTCL_READ : NVIDIA_SMB_PRTCL_WRITE; pec = (flags & I2C_CLIENT_PEC) ? NVIDIA_SMB_PRTCL_PEC : 0; switch (size) { case I2C_SMBUS_QUICK: protocol |= NVIDIA_SMB_PRTCL_QUICK; read_write = I2C_SMBUS_WRITE; break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_WRITE) outb_p(command, NVIDIA_SMB_CMD); protocol |= NVIDIA_SMB_PRTCL_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb_p(command, NVIDIA_SMB_CMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, NVIDIA_SMB_DATA); protocol |= NVIDIA_SMB_PRTCL_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb_p(command, NVIDIA_SMB_CMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word, NVIDIA_SMB_DATA); outb_p(data->word >> 8, NVIDIA_SMB_DATA+1); } protocol |= NVIDIA_SMB_PRTCL_WORD_DATA | pec; break; case I2C_SMBUS_BLOCK_DATA: outb_p(command, NVIDIA_SMB_CMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if ((len == 0) || (len > I2C_SMBUS_BLOCK_MAX)) { dev_err(&adap->dev, "Transaction failed " "(requested block size: %d)\n", len); return -EINVAL; } outb_p(len, NVIDIA_SMB_BCNT); for (i = 0; i < I2C_SMBUS_BLOCK_MAX; i++) outb_p(data->block[i + 1], NVIDIA_SMB_DATA+i); } protocol |= NVIDIA_SMB_PRTCL_BLOCK_DATA | pec; break; default: dev_err(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } outb_p((addr & 0x7f) << 1, NVIDIA_SMB_ADDR); outb_p(protocol, NVIDIA_SMB_PRTCL); status = nforce2_check_status(adap); if (status) return status; if (read_write == I2C_SMBUS_WRITE) return 0; switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: data->byte = inb_p(NVIDIA_SMB_DATA); break; case I2C_SMBUS_WORD_DATA: data->word = inb_p(NVIDIA_SMB_DATA) | (inb_p(NVIDIA_SMB_DATA+1) << 8); break; case I2C_SMBUS_BLOCK_DATA: len = inb_p(NVIDIA_SMB_BCNT); if ((len <= 0) || (len > I2C_SMBUS_BLOCK_MAX)) { dev_err(&adap->dev, "Transaction failed " "(received block size: 0x%02x)\n", len); return 
-EPROTO; } for (i = 0; i < len; i++) data->block[i+1] = inb_p(NVIDIA_SMB_DATA + i); data->block[0] = len; break; } return 0; } static u32 nforce2_func(struct i2c_adapter *adapter) { /* other functionality might be possible, but is not tested */ return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PEC | (((struct nforce2_smbus*)adapter->algo_data)->blockops ? I2C_FUNC_SMBUS_BLOCK_DATA : 0); } static struct i2c_algorithm smbus_algorithm = { .smbus_xfer = nforce2_access, .functionality = nforce2_func, }; static const struct pci_device_id nforce2_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS) }, { 0 } }; MODULE_DEVICE_TABLE (pci, nforce2_ids); static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar, int alt_reg, struct nforce2_smbus *smbus, const char *name) { int error; smbus->base = pci_resource_start(dev, bar); if (smbus->base) { smbus->size = 
pci_resource_len(dev, bar); } else { /* Older incarnations of the device used non-standard BARs */ u16 iobase; if (pci_read_config_word(dev, alt_reg, &iobase) != PCIBIOS_SUCCESSFUL) { dev_err(&dev->dev, "Error reading PCI config for %s\n", name); return -EIO; } smbus->base = iobase & PCI_BASE_ADDRESS_IO_MASK; smbus->size = 64; } error = acpi_check_region(smbus->base, smbus->size, nforce2_driver.name); if (error) return error; if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) { dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n", smbus->base, smbus->base+smbus->size-1, name); return -EBUSY; } smbus->adapter.owner = THIS_MODULE; smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; smbus->adapter.algo = &smbus_algorithm; smbus->adapter.algo_data = smbus; smbus->adapter.dev.parent = &dev->dev; snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), "SMBus nForce2 adapter at %04x", smbus->base); error = i2c_add_adapter(&smbus->adapter); if (error) { dev_err(&smbus->adapter.dev, "Failed to register adapter.\n"); release_region(smbus->base, smbus->size); return error; } dev_info(&smbus->adapter.dev, "nForce2 SMBus adapter at %#x\n", smbus->base); return 0; } static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct nforce2_smbus *smbuses; int res1, res2; /* we support 2 SMBus adapters */ if (!(smbuses = kzalloc(2*sizeof(struct nforce2_smbus), GFP_KERNEL))) return -ENOMEM; pci_set_drvdata(dev, smbuses); switch(dev->device) { case PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS: case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS: case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS: smbuses[0].blockops = 1; smbuses[1].blockops = 1; smbuses[0].can_abort = 1; smbuses[1].can_abort = 1; } /* SMBus adapter 1 */ res1 = nforce2_probe_smb(dev, 4, NFORCE_PCI_SMB1, &smbuses[0], "SMB1"); if (res1 < 0) smbuses[0].base = 0; /* to have a check value */ /* SMBus adapter 2 */ if (dmi_check_system(nforce2_dmi_blacklist2)) { 
dev_err(&dev->dev, "Disabling SMB2 for safety reasons.\n"); res2 = -EPERM; smbuses[1].base = 0; } else { res2 = nforce2_probe_smb(dev, 5, NFORCE_PCI_SMB2, &smbuses[1], "SMB2"); if (res2 < 0) smbuses[1].base = 0; /* to have a check value */ } if ((res1 < 0) && (res2 < 0)) { /* we did not find even one of the SMBuses, so we give up */ kfree(smbuses); return -ENODEV; } nforce2_set_reference(&smbuses[0].adapter); return 0; } static void __devexit nforce2_remove(struct pci_dev *dev) { struct nforce2_smbus *smbuses = pci_get_drvdata(dev); nforce2_set_reference(NULL); if (smbuses[0].base) { i2c_del_adapter(&smbuses[0].adapter); release_region(smbuses[0].base, smbuses[0].size); } if (smbuses[1].base) { i2c_del_adapter(&smbuses[1].adapter); release_region(smbuses[1].base, smbuses[1].size); } kfree(smbuses); } static struct pci_driver nforce2_driver = { .name = "nForce2_smbus", .id_table = nforce2_ids, .probe = nforce2_probe, .remove = __devexit_p(nforce2_remove), }; static int __init nforce2_init(void) { return pci_register_driver(&nforce2_driver); } static void __exit nforce2_exit(void) { pci_unregister_driver(&nforce2_driver); } module_init(nforce2_init); module_exit(nforce2_exit);
gpl-2.0
Hacker432-Y550/android_kernel_huawei_msm8916
drivers/gpu/drm/drm_stub.c
1970
12304
/** * \file drm_stub.h * Stub support * * \author Rickard E. (Rik) Faith <faith@valinux.com> */ /* * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org * * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <drm/drmP.h> #include <drm/drm_core.h> unsigned int drm_debug = 0; /* 1 to enable debug output */ EXPORT_SYMBOL(drm_debug); unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ EXPORT_SYMBOL(drm_vblank_offdelay); unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ EXPORT_SYMBOL(drm_timestamp_precision); /* * Default to use monotonic timestamps for wait-for-vblank and page-flip * complete events. 
*/ unsigned int drm_timestamp_monotonic = 1; MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output"); MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); module_param_named(debug, drm_debug, int, 0600); module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); struct idr drm_minors_idr; struct class *drm_class; struct proc_dir_entry *drm_proc_root; struct dentry *drm_debugfs_root; int drm_err(const char *func, const char *format, ...) { struct va_format vaf; va_list args; int r; va_start(args, format); vaf.fmt = format; vaf.va = &args; r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf); va_end(args); return r; } EXPORT_SYMBOL(drm_err); void drm_ut_debug_printk(unsigned int request_level, const char *prefix, const char *function_name, const char *format, ...) { va_list args; if (drm_debug & request_level) { if (function_name) printk(KERN_DEBUG "[%s:%s], ", prefix, function_name); va_start(args, format); vprintk(format, args); va_end(args); } } EXPORT_SYMBOL(drm_ut_debug_printk); static int drm_minor_get_id(struct drm_device *dev, int type) { int ret; int base = 0, limit = 63; if (type == DRM_MINOR_CONTROL) { base += 64; limit = base + 127; } else if (type == DRM_MINOR_RENDER) { base += 128; limit = base + 255; } mutex_lock(&dev->struct_mutex); ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL); mutex_unlock(&dev->struct_mutex); return ret == -ENOSPC ? 
-EINVAL : ret; } struct drm_master *drm_master_create(struct drm_minor *minor) { struct drm_master *master; master = kzalloc(sizeof(*master), GFP_KERNEL); if (!master) return NULL; kref_init(&master->refcount); spin_lock_init(&master->lock.spinlock); init_waitqueue_head(&master->lock.lock_queue); drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER); INIT_LIST_HEAD(&master->magicfree); master->minor = minor; list_add_tail(&master->head, &minor->master_list); return master; } struct drm_master *drm_master_get(struct drm_master *master) { kref_get(&master->refcount); return master; } EXPORT_SYMBOL(drm_master_get); static void drm_master_destroy(struct kref *kref) { struct drm_master *master = container_of(kref, struct drm_master, refcount); struct drm_magic_entry *pt, *next; struct drm_device *dev = master->minor->dev; struct drm_map_list *r_list, *list_temp; list_del(&master->head); if (dev->driver->master_destroy) dev->driver->master_destroy(dev, master); list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { if (r_list->master == master) { drm_rmmap_locked(dev, r_list->map); r_list = NULL; } } if (master->unique) { kfree(master->unique); master->unique = NULL; master->unique_len = 0; } kfree(dev->devname); dev->devname = NULL; list_for_each_entry_safe(pt, next, &master->magicfree, head) { list_del(&pt->head); drm_ht_remove_item(&master->magiclist, &pt->hash_item); kfree(pt); } drm_ht_remove(&master->magiclist); kfree(master); } void drm_master_put(struct drm_master **master) { kref_put(&(*master)->refcount, drm_master_destroy); *master = NULL; } EXPORT_SYMBOL(drm_master_put); int drm_setmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; if (file_priv->is_master) return 0; if (file_priv->minor->master && file_priv->minor->master != file_priv->master) return -EINVAL; if (!file_priv->master) return -EINVAL; if (file_priv->minor->master) return -EINVAL; mutex_lock(&dev->struct_mutex); file_priv->minor->master = 
drm_master_get(file_priv->master); file_priv->is_master = 1; if (dev->driver->master_set) { ret = dev->driver->master_set(dev, file_priv, false); if (unlikely(ret != 0)) { file_priv->is_master = 0; drm_master_put(&file_priv->minor->master); } } mutex_unlock(&dev->struct_mutex); return 0; } int drm_dropmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { if (!file_priv->is_master) return -EINVAL; if (!file_priv->minor->master) return -EINVAL; mutex_lock(&dev->struct_mutex); if (dev->driver->master_drop) dev->driver->master_drop(dev, file_priv, false); drm_master_put(&file_priv->minor->master); file_priv->is_master = 0; mutex_unlock(&dev->struct_mutex); return 0; } int drm_fill_in_dev(struct drm_device *dev, const struct pci_device_id *ent, struct drm_driver *driver) { int retcode; INIT_LIST_HEAD(&dev->filelist); INIT_LIST_HEAD(&dev->ctxlist); INIT_LIST_HEAD(&dev->vmalist); INIT_LIST_HEAD(&dev->maplist); INIT_LIST_HEAD(&dev->vblank_event_list); spin_lock_init(&dev->count_lock); spin_lock_init(&dev->event_lock); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); if (drm_ht_create(&dev->map_hash, 12)) { return -ENOMEM; } /* the DRM has 6 basic counters */ dev->counters = 6; dev->types[0] = _DRM_STAT_LOCK; dev->types[1] = _DRM_STAT_OPENS; dev->types[2] = _DRM_STAT_CLOSES; dev->types[3] = _DRM_STAT_IOCTLS; dev->types[4] = _DRM_STAT_LOCKS; dev->types[5] = _DRM_STAT_UNLOCKS; dev->driver = driver; if (dev->driver->bus->agp_init) { retcode = dev->driver->bus->agp_init(dev); if (retcode) goto error_out_unreg; } retcode = drm_ctxbitmap_init(dev); if (retcode) { DRM_ERROR("Cannot allocate memory for context bitmap.\n"); goto error_out_unreg; } if (driver->driver_features & DRIVER_GEM) { retcode = drm_gem_init(dev); if (retcode) { DRM_ERROR("Cannot initialize graphics execution " "manager (GEM)\n"); goto error_out_unreg; } } return 0; error_out_unreg: drm_lastclose(dev); return retcode; } EXPORT_SYMBOL(drm_fill_in_dev); /** * Get a 
secondary minor number. * * \param dev device data structure * \param sec-minor structure to hold the assigned minor * \return negative number on failure. * * Search an empty entry and initialize it to the given parameters, and * create the proc init entry via proc_init(). This routines assigns * minor numbers to secondary heads of multi-headed cards */ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) { struct drm_minor *new_minor; int ret; int minor_id; DRM_DEBUG("\n"); minor_id = drm_minor_get_id(dev, type); if (minor_id < 0) return minor_id; new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL); if (!new_minor) { ret = -ENOMEM; goto err_idr; } new_minor->type = type; new_minor->device = MKDEV(DRM_MAJOR, minor_id); new_minor->dev = dev; new_minor->index = minor_id; INIT_LIST_HEAD(&new_minor->master_list); idr_replace(&drm_minors_idr, new_minor, minor_id); if (type == DRM_MINOR_LEGACY) { ret = drm_proc_init(new_minor, drm_proc_root); if (ret) { DRM_ERROR("DRM: Failed to initialize /proc/dri.\n"); goto err_mem; } } else new_minor->proc_root = NULL; #if defined(CONFIG_DEBUG_FS) ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); if (ret) { DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); goto err_g2; } #endif ret = drm_sysfs_device_add(new_minor); if (ret) { printk(KERN_ERR "DRM: Error sysfs_device_add.\n"); goto err_g2; } *minor = new_minor; DRM_DEBUG("new minor assigned %d\n", minor_id); return 0; err_g2: if (new_minor->type == DRM_MINOR_LEGACY) drm_proc_cleanup(new_minor, drm_proc_root); err_mem: kfree(new_minor); err_idr: idr_remove(&drm_minors_idr, minor_id); *minor = NULL; return ret; } EXPORT_SYMBOL(drm_get_minor); /** * Put a secondary minor number. * * \param sec_minor - structure to be released * \return always zero * * Cleans up the proc resources. Not legal for this to be the * last minor released. 
* */ int drm_put_minor(struct drm_minor **minor_p) { struct drm_minor *minor = *minor_p; DRM_DEBUG("release secondary minor %d\n", minor->index); if (minor->type == DRM_MINOR_LEGACY) drm_proc_cleanup(minor, drm_proc_root); #if defined(CONFIG_DEBUG_FS) drm_debugfs_cleanup(minor); #endif drm_sysfs_device_remove(minor); idr_remove(&drm_minors_idr, minor->index); kfree(minor); *minor_p = NULL; return 0; } EXPORT_SYMBOL(drm_put_minor); static void drm_unplug_minor(struct drm_minor *minor) { drm_sysfs_device_remove(minor); } /** * Called via drm_exit() at module unload time or when pci device is * unplugged. * * Cleans up all DRM device, calling drm_lastclose(). * */ void drm_put_dev(struct drm_device *dev) { struct drm_driver *driver; struct drm_map_list *r_list, *list_temp; DRM_DEBUG("\n"); if (!dev) { DRM_ERROR("cleanup called no dev\n"); return; } driver = dev->driver; drm_lastclose(dev); if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp && dev->agp->agp_mtrr >= 0) { int retval; retval = mtrr_del(dev->agp->agp_mtrr, dev->agp->agp_info.aper_base, dev->agp->agp_info.aper_size * 1024 * 1024); DRM_DEBUG("mtrr_del=%d\n", retval); } if (dev->driver->unload) dev->driver->unload(dev); if (drm_core_has_AGP(dev) && dev->agp) { kfree(dev->agp); dev->agp = NULL; } drm_vblank_cleanup(dev); list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) drm_rmmap(dev, r_list->map); drm_ht_remove(&dev->map_hash); drm_ctxbitmap_cleanup(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_put_minor(&dev->control); if (driver->driver_features & DRIVER_GEM) drm_gem_destroy(dev); drm_put_minor(&dev->primary); list_del(&dev->driver_item); kfree(dev->devname); kfree(dev); } EXPORT_SYMBOL(drm_put_dev); void drm_unplug_dev(struct drm_device *dev) { /* for a USB device */ if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_unplug_minor(dev->control); drm_unplug_minor(dev->primary); mutex_lock(&drm_global_mutex); drm_device_set_unplugged(dev); if (dev->open_count == 0) { 
		/* no remaining openers: tear the device down right away */
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);
gpl-2.0
airidosas252/android_jellykernel_vee7
net/nfc/nci/core.c
4786
21246
/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * Acknowledgements:
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>

/* Work handlers for the cmd/rx/tx workqueues, defined later in this file. */
static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */

/*
 * nci_req_complete - finish the currently pending synchronous request
 * @ndev:   NCI device the request was issued on
 * @result: result code reported for the request
 *
 * Stores @result and wakes the caller sleeping on req_completion in
 * __nci_request().  A no-op unless a request is actually pending.
 */
void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}

/*
 * nci_req_cancel - abort the currently pending synchronous request
 * @ndev: NCI device the request was issued on
 * @err:  positive errno describing why the request is being cancelled
 *
 * Marks the request NCI_REQ_CANCELED and wakes the waiter; __nci_request()
 * then hands -@err back to its caller.  A no-op if nothing is pending.
 */
static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}

/* Execute request and wait for completion.
*/ static int __nci_request(struct nci_dev *ndev, void (*req)(struct nci_dev *ndev, unsigned long opt), unsigned long opt, __u32 timeout) { int rc = 0; long completion_rc; ndev->req_status = NCI_REQ_PEND; init_completion(&ndev->req_completion); req(ndev, opt); completion_rc = wait_for_completion_interruptible_timeout(&ndev->req_completion, timeout); pr_debug("wait_for_completion return %ld\n", completion_rc); if (completion_rc > 0) { switch (ndev->req_status) { case NCI_REQ_DONE: rc = nci_to_errno(ndev->req_result); break; case NCI_REQ_CANCELED: rc = -ndev->req_result; break; default: rc = -ETIMEDOUT; break; } } else { pr_err("wait_for_completion_interruptible_timeout failed %ld\n", completion_rc); rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc)); } ndev->req_status = ndev->req_result = 0; return rc; } static inline int nci_request(struct nci_dev *ndev, void (*req)(struct nci_dev *ndev, unsigned long opt), unsigned long opt, __u32 timeout) { int rc; if (!test_bit(NCI_UP, &ndev->flags)) return -ENETDOWN; /* Serialize all requests */ mutex_lock(&ndev->req_lock); rc = __nci_request(ndev, req, opt, timeout); mutex_unlock(&ndev->req_lock); return rc; } static void nci_reset_req(struct nci_dev *ndev, unsigned long opt) { struct nci_core_reset_cmd cmd; cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG; nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd); } static void nci_init_req(struct nci_dev *ndev, unsigned long opt) { nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL); } static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt) { struct nci_rf_disc_map_cmd cmd; struct disc_map_config *cfg = cmd.mapping_configs; __u8 *num = &cmd.num_mapping_configs; int i; /* set rf mapping configurations */ *num = 0; /* by default mapping is set to NCI_RF_INTERFACE_FRAME */ for (i = 0; i < ndev->num_supported_rf_interfaces; i++) { if (ndev->supported_rf_interfaces[i] == NCI_RF_INTERFACE_ISO_DEP) { cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP; cfg[*num].mode = 
NCI_DISC_MAP_MODE_POLL | NCI_DISC_MAP_MODE_LISTEN; cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP; (*num)++; } else if (ndev->supported_rf_interfaces[i] == NCI_RF_INTERFACE_NFC_DEP) { cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP; cfg[*num].mode = NCI_DISC_MAP_MODE_POLL | NCI_DISC_MAP_MODE_LISTEN; cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP; (*num)++; } if (*num == NCI_MAX_NUM_MAPPING_CONFIGS) break; } nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD, (1 + ((*num) * sizeof(struct disc_map_config))), &cmd); } static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) { struct nci_rf_disc_cmd cmd; __u32 protocols = opt; cmd.num_disc_configs = 0; if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && (protocols & NFC_PROTO_JEWEL_MASK || protocols & NFC_PROTO_MIFARE_MASK || protocols & NFC_PROTO_ISO14443_MASK || protocols & NFC_PROTO_NFC_DEP_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_A_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && (protocols & NFC_PROTO_ISO14443_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_B_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && (protocols & NFC_PROTO_FELICA_MASK || protocols & NFC_PROTO_NFC_DEP_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_F_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD, (1 + (cmd.num_disc_configs * sizeof(struct disc_config))), &cmd); } struct nci_rf_discover_select_param { __u8 rf_discovery_id; __u8 rf_protocol; }; static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt) { struct nci_rf_discover_select_param *param = (struct nci_rf_discover_select_param *)opt; struct 
nci_rf_discover_select_cmd cmd; cmd.rf_discovery_id = param->rf_discovery_id; cmd.rf_protocol = param->rf_protocol; switch (cmd.rf_protocol) { case NCI_RF_PROTOCOL_ISO_DEP: cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP; break; case NCI_RF_PROTOCOL_NFC_DEP: cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP; break; default: cmd.rf_interface = NCI_RF_INTERFACE_FRAME; break; } nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD, sizeof(struct nci_rf_discover_select_cmd), &cmd); } static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt) { struct nci_rf_deactivate_cmd cmd; cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE; nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD, sizeof(struct nci_rf_deactivate_cmd), &cmd); } static int nci_open_device(struct nci_dev *ndev) { int rc = 0; mutex_lock(&ndev->req_lock); if (test_bit(NCI_UP, &ndev->flags)) { rc = -EALREADY; goto done; } if (ndev->ops->open(ndev)) { rc = -EIO; goto done; } atomic_set(&ndev->cmd_cnt, 1); set_bit(NCI_INIT, &ndev->flags); rc = __nci_request(ndev, nci_reset_req, 0, msecs_to_jiffies(NCI_RESET_TIMEOUT)); if (!rc) { rc = __nci_request(ndev, nci_init_req, 0, msecs_to_jiffies(NCI_INIT_TIMEOUT)); } if (!rc) { rc = __nci_request(ndev, nci_init_complete_req, 0, msecs_to_jiffies(NCI_INIT_TIMEOUT)); } clear_bit(NCI_INIT, &ndev->flags); if (!rc) { set_bit(NCI_UP, &ndev->flags); nci_clear_target_list(ndev); atomic_set(&ndev->state, NCI_IDLE); } else { /* Init failed, cleanup */ skb_queue_purge(&ndev->cmd_q); skb_queue_purge(&ndev->rx_q); skb_queue_purge(&ndev->tx_q); ndev->ops->close(ndev); ndev->flags = 0; } done: mutex_unlock(&ndev->req_lock); return rc; } static int nci_close_device(struct nci_dev *ndev) { nci_req_cancel(ndev, ENODEV); mutex_lock(&ndev->req_lock); if (!test_and_clear_bit(NCI_UP, &ndev->flags)) { del_timer_sync(&ndev->cmd_timer); del_timer_sync(&ndev->data_timer); mutex_unlock(&ndev->req_lock); return 0; } /* Drop RX and TX queues */ skb_queue_purge(&ndev->rx_q); skb_queue_purge(&ndev->tx_q); /* Flush RX 
and TX wq */ flush_workqueue(ndev->rx_wq); flush_workqueue(ndev->tx_wq); /* Reset device */ skb_queue_purge(&ndev->cmd_q); atomic_set(&ndev->cmd_cnt, 1); set_bit(NCI_INIT, &ndev->flags); __nci_request(ndev, nci_reset_req, 0, msecs_to_jiffies(NCI_RESET_TIMEOUT)); clear_bit(NCI_INIT, &ndev->flags); /* Flush cmd wq */ flush_workqueue(ndev->cmd_wq); /* After this point our queues are empty * and no works are scheduled. */ ndev->ops->close(ndev); /* Clear flags */ ndev->flags = 0; mutex_unlock(&ndev->req_lock); return 0; } /* NCI command timer function */ static void nci_cmd_timer(unsigned long arg) { struct nci_dev *ndev = (void *) arg; atomic_set(&ndev->cmd_cnt, 1); queue_work(ndev->cmd_wq, &ndev->cmd_work); } /* NCI data exchange timer function */ static void nci_data_timer(unsigned long arg) { struct nci_dev *ndev = (void *) arg; set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags); queue_work(ndev->rx_wq, &ndev->rx_work); } static int nci_dev_up(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); return nci_open_device(ndev); } static int nci_dev_down(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); return nci_close_device(ndev); } static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; if ((atomic_read(&ndev->state) == NCI_DISCOVERY) || (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) { pr_err("unable to start poll, since poll is already active\n"); return -EBUSY; } if (ndev->target_active_prot) { pr_err("there is an active target\n"); return -EBUSY; } if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) || (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) { pr_debug("target active or w4 select, implicitly deactivate\n"); rc = nci_request(ndev, nci_rf_deactivate_req, 0, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); if (rc) return -EBUSY; } rc = nci_request(ndev, nci_rf_discover_req, protocols, msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); if (!rc) 
ndev->poll_prots = protocols; return rc; } static void nci_stop_poll(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); if ((atomic_read(&ndev->state) != NCI_DISCOVERY) && (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) { pr_err("unable to stop poll, since poll is not active\n"); return; } nci_request(ndev, nci_rf_deactivate_req, 0, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); } static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx, __u32 protocol) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); struct nci_rf_discover_select_param param; struct nfc_target *target = NULL; int i; int rc = 0; pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol); if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) && (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) { pr_err("there is no available target to activate\n"); return -EINVAL; } if (ndev->target_active_prot) { pr_err("there is already an active target\n"); return -EBUSY; } for (i = 0; i < ndev->n_targets; i++) { if (ndev->targets[i].idx == target_idx) { target = &ndev->targets[i]; break; } } if (!target) { pr_err("unable to find the selected target\n"); return -EINVAL; } if (!(target->supported_protocols & (1 << protocol))) { pr_err("target does not support the requested protocol 0x%x\n", protocol); return -EINVAL; } if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) { param.rf_discovery_id = target->idx; if (protocol == NFC_PROTO_JEWEL) param.rf_protocol = NCI_RF_PROTOCOL_T1T; else if (protocol == NFC_PROTO_MIFARE) param.rf_protocol = NCI_RF_PROTOCOL_T2T; else if (protocol == NFC_PROTO_FELICA) param.rf_protocol = NCI_RF_PROTOCOL_T3T; else if (protocol == NFC_PROTO_ISO14443) param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP; else param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP; rc = nci_request(ndev, nci_rf_discover_select_req, (unsigned long)&param, msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT)); } if (!rc) ndev->target_active_prot = protocol; return rc; } static void 
nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); pr_debug("target_idx %d\n", target_idx); if (!ndev->target_active_prot) { pr_err("unable to deactivate target, no active target\n"); return; } ndev->target_active_prot = 0; if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) { nci_request(ndev, nci_rf_deactivate_req, 0, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); } } static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; pr_debug("target_idx %d, len %d\n", target_idx, skb->len); if (!ndev->target_active_prot) { pr_err("unable to exchange data, no active target\n"); return -EINVAL; } if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags)) return -EBUSY; /* store cb and context to be used on receiving data */ ndev->data_exchange_cb = cb; ndev->data_exchange_cb_context = cb_context; rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb); if (rc) clear_bit(NCI_DATA_EXCHANGE, &ndev->flags); return rc; } static struct nfc_ops nci_nfc_ops = { .dev_up = nci_dev_up, .dev_down = nci_dev_down, .start_poll = nci_start_poll, .stop_poll = nci_stop_poll, .activate_target = nci_activate_target, .deactivate_target = nci_deactivate_target, .data_exchange = nci_data_exchange, }; /* ---- Interface to NCI drivers ---- */ /** * nci_allocate_device - allocate a new nci device * * @ops: device operations * @supported_protocols: NFC protocols supported by the device */ struct nci_dev *nci_allocate_device(struct nci_ops *ops, __u32 supported_protocols, int tx_headroom, int tx_tailroom) { struct nci_dev *ndev; pr_debug("supported_protocols 0x%x\n", supported_protocols); if (!ops->open || !ops->close || !ops->send) return NULL; if (!supported_protocols) return NULL; ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL); if (!ndev) return NULL; ndev->ops = ops; ndev->tx_headroom = tx_headroom; 
ndev->tx_tailroom = tx_tailroom; ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops, supported_protocols, tx_headroom + NCI_DATA_HDR_SIZE, tx_tailroom); if (!ndev->nfc_dev) goto free_exit; nfc_set_drvdata(ndev->nfc_dev, ndev); return ndev; free_exit: kfree(ndev); return NULL; } EXPORT_SYMBOL(nci_allocate_device); /** * nci_free_device - deallocate nci device * * @ndev: The nci device to deallocate */ void nci_free_device(struct nci_dev *ndev) { nfc_free_device(ndev->nfc_dev); kfree(ndev); } EXPORT_SYMBOL(nci_free_device); /** * nci_register_device - register a nci device in the nfc subsystem * * @dev: The nci device to register */ int nci_register_device(struct nci_dev *ndev) { int rc; struct device *dev = &ndev->nfc_dev->dev; char name[32]; rc = nfc_register_device(ndev->nfc_dev); if (rc) goto exit; ndev->flags = 0; INIT_WORK(&ndev->cmd_work, nci_cmd_work); snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev)); ndev->cmd_wq = create_singlethread_workqueue(name); if (!ndev->cmd_wq) { rc = -ENOMEM; goto unreg_exit; } INIT_WORK(&ndev->rx_work, nci_rx_work); snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev)); ndev->rx_wq = create_singlethread_workqueue(name); if (!ndev->rx_wq) { rc = -ENOMEM; goto destroy_cmd_wq_exit; } INIT_WORK(&ndev->tx_work, nci_tx_work); snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev)); ndev->tx_wq = create_singlethread_workqueue(name); if (!ndev->tx_wq) { rc = -ENOMEM; goto destroy_rx_wq_exit; } skb_queue_head_init(&ndev->cmd_q); skb_queue_head_init(&ndev->rx_q); skb_queue_head_init(&ndev->tx_q); setup_timer(&ndev->cmd_timer, nci_cmd_timer, (unsigned long) ndev); setup_timer(&ndev->data_timer, nci_data_timer, (unsigned long) ndev); mutex_init(&ndev->req_lock); goto exit; destroy_rx_wq_exit: destroy_workqueue(ndev->rx_wq); destroy_cmd_wq_exit: destroy_workqueue(ndev->cmd_wq); unreg_exit: nfc_unregister_device(ndev->nfc_dev); exit: return rc; } EXPORT_SYMBOL(nci_register_device); /** * nci_unregister_device - unregister a 
nci device in the nfc subsystem * * @dev: The nci device to unregister */ void nci_unregister_device(struct nci_dev *ndev) { nci_close_device(ndev); destroy_workqueue(ndev->cmd_wq); destroy_workqueue(ndev->rx_wq); destroy_workqueue(ndev->tx_wq); nfc_unregister_device(ndev->nfc_dev); } EXPORT_SYMBOL(nci_unregister_device); /** * nci_recv_frame - receive frame from NCI drivers * * @skb: The sk_buff to receive */ int nci_recv_frame(struct sk_buff *skb) { struct nci_dev *ndev = (struct nci_dev *) skb->dev; pr_debug("len %d\n", skb->len); if (!ndev || (!test_bit(NCI_UP, &ndev->flags) && !test_bit(NCI_INIT, &ndev->flags))) { kfree_skb(skb); return -ENXIO; } /* Queue frame for rx worker thread */ skb_queue_tail(&ndev->rx_q, skb); queue_work(ndev->rx_wq, &ndev->rx_work); return 0; } EXPORT_SYMBOL(nci_recv_frame); static int nci_send_frame(struct sk_buff *skb) { struct nci_dev *ndev = (struct nci_dev *) skb->dev; pr_debug("len %d\n", skb->len); if (!ndev) { kfree_skb(skb); return -ENODEV; } /* Get rid of skb owner, prior to sending to the driver. 
*/ skb_orphan(skb); return ndev->ops->send(skb); } /* Send NCI command */ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload) { struct nci_ctrl_hdr *hdr; struct sk_buff *skb; pr_debug("opcode 0x%x, plen %d\n", opcode, plen); skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL); if (!skb) { pr_err("no memory for command\n"); return -ENOMEM; } hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE); hdr->gid = nci_opcode_gid(opcode); hdr->oid = nci_opcode_oid(opcode); hdr->plen = plen; nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT); nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST); if (plen) memcpy(skb_put(skb, plen), payload, plen); skb->dev = (void *) ndev; skb_queue_tail(&ndev->cmd_q, skb); queue_work(ndev->cmd_wq, &ndev->cmd_work); return 0; } /* ---- NCI TX Data worker thread ---- */ static void nci_tx_work(struct work_struct *work) { struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work); struct sk_buff *skb; pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt)); /* Send queued tx data */ while (atomic_read(&ndev->credits_cnt)) { skb = skb_dequeue(&ndev->tx_q); if (!skb) return; /* Check if data flow control is used */ if (atomic_read(&ndev->credits_cnt) != NCI_DATA_FLOW_CONTROL_NOT_USED) atomic_dec(&ndev->credits_cnt); pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n", nci_pbf(skb->data), nci_conn_id(skb->data), nci_plen(skb->data)); nci_send_frame(skb); mod_timer(&ndev->data_timer, jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT)); } } /* ----- NCI RX worker thread (data & control) ----- */ static void nci_rx_work(struct work_struct *work) { struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work); struct sk_buff *skb; while ((skb = skb_dequeue(&ndev->rx_q))) { /* Process frame */ switch (nci_mt(skb->data)) { case NCI_MT_RSP_PKT: nci_rsp_packet(ndev, skb); break; case NCI_MT_NTF_PKT: nci_ntf_packet(ndev, skb); break; case NCI_MT_DATA_PKT: nci_rx_data_packet(ndev, skb); break; default: 
			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}

	/* check if a data exchange timeout has occurred */
	if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
		/* complete the data exchange transaction, if exists */
		if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
			nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);

		clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	}
}

/* ----- NCI TX CMD worker thread ----- */

/*
 * nci_cmd_work - push one queued NCI command frame to the driver
 * @work: the cmd_work member embedded in struct nci_dev
 *
 * Runs on the device's cmd workqueue.  Sends at most one command per
 * invocation, gated by cmd_cnt (command-credit flow control), and arms
 * cmd_timer so a missing response is detected after NCI_CMD_TIMEOUT.
 */
static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		/* consume one command credit for this frame */
		atomic_dec(&ndev->cmd_cnt);

		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_opcode_gid(nci_opcode(skb->data)),
			 nci_opcode_oid(nci_opcode(skb->data)),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->cmd_timer,
			  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}
gpl-2.0
DesolationStaging/kernel_motorola_msm8226
arch/arm/mach-sa1100/leds-lart.c
4786
2001
/*
 * linux/arch/arm/mach-sa1100/leds-lart.c
 *
 * (C) Erik Mouw (J.A.K.Mouw@its.tudelft.nl), April 21, 2000
 *
 * LART uses the LED as follows:
 *   - GPIO23 is the LED, on if system is not idle
 *
 * You can use both CONFIG_LEDS_CPU and CONFIG_LEDS_TIMER at the same
 * time, but in that case the timer events will still dictate the
 * pace of the LED.
 */
#include <linux/init.h>

#include <mach/hardware.h>
#include <asm/leds.h>
#include "leds.h"

#define LED_STATE_ENABLED	1
#define LED_STATE_CLAIMED	2

static unsigned int led_state;
static unsigned int hw_led_state;

#define LED_23		GPIO_GPIO23
#define LED_MASK	(LED_23)

/*
 * lart_leds_event - update the single LART LED for an LED event
 * @evt: the event being reported
 *
 * Keeps the desired LED bit in hw_led_state and, whenever the LED
 * machinery is enabled, pushes it out through the GPIO set/clear
 * registers.  The whole update runs with interrupts disabled so the
 * software state and the hardware stay consistent.
 */
void lart_leds_event(led_event_t evt)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);

	switch (evt) {
	case led_start:
		/* pin 23 is an output pin */
		GPDR |= LED_23;
		hw_led_state = LED_MASK;
		led_state = LED_STATE_ENABLED;
		break;

	case led_stop:
		led_state &= ~LED_STATE_ENABLED;
		break;

	case led_claim:
		led_state |= LED_STATE_CLAIMED;
		hw_led_state = LED_MASK;
		break;

	case led_release:
		led_state &= ~LED_STATE_CLAIMED;
		hw_led_state = LED_MASK;
		break;

	case led_red_on:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state &= ~LED_23;
		break;

	case led_red_off:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state |= LED_23;
		break;

#ifdef CONFIG_LEDS_TIMER
	case led_timer:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state ^= LED_23;
		break;
#endif

#ifdef CONFIG_LEDS_CPU
	case led_idle_start:
		/*
		 * The LART people like the LED to be off when the
		 * system is idle...
		 */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state &= ~LED_23;
		break;

	case led_idle_end:
		/* ... and on if the system is not idle */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state |= LED_23;
		break;
#endif

	default:
		break;
	}

	/* Now set the GPIO state, or nothing will happen at all */
	if (led_state & LED_STATE_ENABLED) {
		GPSR = hw_led_state;
		GPCR = hw_led_state ^ LED_MASK;
	}

	local_irq_restore(irq_flags);
}
gpl-2.0
hollycroxton/android_kernel_htc_m7
drivers/net/ethernet/intel/e1000e/ich8lan.c
4786
115098
/******************************************************************************* Intel PRO/1000 Linux driver Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
 Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 * 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 */

#include "e1000.h"

/* ICH GbE SPI flash register offsets (relative to hw->flash_address) */
#define ICH_FLASH_GFPREG		0x0000
#define ICH_FLASH_HSFSTS		0x0004
#define ICH_FLASH_HSFCTL		0x0006
#define ICH_FLASH_FADDR			0x0008
#define ICH_FLASH_FDATA0		0x0010
#define ICH_FLASH_PR0			0x0074

/* Flash cycle timeouts, in microseconds (erase is 3 seconds) */
#define ICH_FLASH_READ_COMMAND_TIMEOUT	500
#define ICH_FLASH_WRITE_COMMAND_TIMEOUT	500
#define ICH_FLASH_ERASE_COMMAND_TIMEOUT	3000000
#define ICH_FLASH_LINEAR_ADDR_MASK	0x00FFFFFF
#define ICH_FLASH_CYCLE_REPEAT_COUNT	10

/* Flash cycle types programmed into HSFCTL.flcycle */
#define ICH_CYCLE_READ			0
#define ICH_CYCLE_WRITE			2
#define ICH_CYCLE_ERASE			3

#define FLASH_GFPREG_BASE_MASK		0x1FFF
#define FLASH_SECTOR_ADDR_SHIFT		12

#define ICH_FLASH_SEG_SIZE_256		256
#define ICH_FLASH_SEG_SIZE_4K		4096
#define ICH_FLASH_SEG_SIZE_8K		8192
#define ICH_FLASH_SEG_SIZE_64K		65536

#define E1000_ICH_FWSM_RSPCIPHY	0x00000040 /* Reset PHY on PCI Reset */
/* FW established a valid mode */
#define E1000_ICH_FWSM_FW_VALID	0x00008000
#define E1000_ICH_MNG_IAMT_MODE	0x2

#define ID_LED_DEFAULT_ICH8LAN	((ID_LED_DEF1_DEF2 << 12) | \
				 (ID_LED_DEF1_OFF2 << 8) | \
				 (ID_LED_DEF1_ON2 << 4) | \
				 (ID_LED_DEF1_DEF2))

#define E1000_ICH_NVM_SIG_WORD		0x13
#define E1000_ICH_NVM_SIG_MASK		0xC000
#define E1000_ICH_NVM_VALID_SIG_MASK	0xC0
#define E1000_ICH_NVM_SIG_VALUE		0x80

#define E1000_ICH8_LAN_INIT_TIMEOUT	1500

#define E1000_FEXTNVM_SW_CONFIG		1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M	(1 << 27) /* Bit redefined for ICH8M :/ */

#define E1000_FEXTNVM4_BEACON_DURATION_MASK	0x7
#define E1000_FEXTNVM4_BEACON_DURATION_8USEC	0x7
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC	0x3

#define PCIE_ICH8_SNOOP_ALL	PCIE_NO_SNOOP_ALL

#define E1000_ICH_RAR_ENTRIES	7

/* PHY register addressing: page number in the high bits, register in the low */
#define PHY_PAGE_SHIFT 5
#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
			    ((reg) & MAX_PHY_REG_ADDRESS))
#define IGP3_KMRN_DIAG	PHY_REG(770, 19) /* KMRN Diagnostic */
#define IGP3_VR_CTRL	PHY_REG(776, 18) /* Voltage Regulator Control */

#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS		0x0002
#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK	0x0300
#define IGP3_VR_CTRL_MODE_SHUTDOWN		0x0200

#define HV_LED_CONFIG		PHY_REG(768, 30) /* LED Configuration */

#define SW_FLAG_TIMEOUT		1000 /* SW Semaphore flag timeout in milliseconds */

/* SMBus Address Phy Register */
#define HV_SMB_ADDR		PHY_REG(768, 26)
#define HV_SMB_ADDR_MASK	0x007F
#define HV_SMB_ADDR_PEC_EN	0x0200
#define HV_SMB_ADDR_VALID	0x0080

/* PHY Power Management Control */
#define HV_PM_CTRL		PHY_REG(770, 17)

/* PHY Low Power Idle Control */
#define I82579_LPI_CTRL				PHY_REG(772, 20)
#define I82579_LPI_CTRL_ENABLE_MASK		0x6000
#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT	0x80

/* EMI Registers */
#define I82579_EMI_ADDR		0x10
#define I82579_EMI_DATA		0x11
#define I82579_LPI_UPDATE_TIMER	0x4805 /* in 40ns units + 40 ns base value */
#define I82579_MSE_THRESHOLD	0x084F /* Mean Square Error Threshold */
#define I82579_MSE_LINK_DOWN	0x2411 /* MSE count before dropping link */

/* Strapping Option Register - RO */
#define E1000_STRAP			0x0000C
#define E1000_STRAP_SMBUS_ADDRESS_MASK	0x00FE0000
#define E1000_STRAP_SMBUS_ADDRESS_SHIFT	17

/* OEM Bits Phy Register */
#define HV_OEM_BITS		PHY_REG(768, 25)
#define HV_OEM_BITS_LPLU	0x0004 /* Low Power Link Up */
#define HV_OEM_BITS_GBE_DIS	0x0040 /* Gigabit Disable */
#define HV_OEM_BITS_RESTART_AN	0x0400 /* Restart Auto-negotiation */

#define E1000_NVM_K1_CONFIG	0x1B /* NVM K1 Config Word */
#define E1000_NVM_K1_ENABLE	0x1  /* NVM Enable K1 bit */

/* KMRN Mode Control */
#define HV_KMRN_MODE_CTRL	PHY_REG(769, 16)
#define HV_KMRN_MDIO_SLOW	0x0400

/* KMRN FIFO Control and Status */
#define HV_KMRN_FIFO_CTRLSTA			PHY_REG(770, 16)
#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK	0x7000
#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT	12

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1;		/* bit 0 Flash Cycle Done */
		u16 flcerr:1;		/* bit 1 Flash Cycle Error */
		u16 dael:1;		/* bit 2 Direct Access error Log */
		u16 berasesz:2;		/* bit 4:3 Sector Erase Size */
		u16 flcinprog:1;	/* bit 5 flash cycle in Progress */
		u16 reserved1:2;	/* bit 7:6 Reserved */
		u16 reserved2:6;	/* bit 13:8 Reserved */
		u16 fldesvalid:1;	/* bit 14 Flash Descriptor Valid */
		u16 flockdn:1;		/* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;		/* 0 Flash Cycle Go */
		u16 flcycle:2;		/* 2:1 Flash Cycle */
		u16 reserved:5;		/* 7:3 Reserved */
		u16 fldbcount:2;	/* 9:8 Flash Data Byte Count */
		u16 flockdn:6;		/* 15:10 Flash Config Lock-Down */
	} hsf_ctrl;
	u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8;	/* 0:7 GbE region Read Access */
		u32 grwa:8;	/* 8:15 GbE region Write Access */
		u32 gmrag:8;	/* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8;	/* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	/* NOTE(review): bitfields total 32 bits but regval is u16 — only the
	 * low half is addressable through regval; confirm against users. */
	u16 regval;
};

/* ICH Flash Protected Region */
union ich8_flash_protected_range {
	struct ich8_pr {
		u32 base:13;		/* 0:12 Protected Range Base */
		u32 reserved1:2;	/* 13:14 Reserved */
		u32 rpe:1;		/* 15 Read Protection Enable */
		u32 limit:13;		/* 16:28 Protected Range Limit */
		u32 reserved2:2;	/* 29:30 Reserved */
		u32 wpe:1;		/* 31 Write Protection Enable */
	} range;
	u32 regval;
};

/* Forward declarations for functions defined later in this file */
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte);
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 *data);
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
					 u16 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data);
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);

/* MMIO accessors for the flash register BAR (hw->flash_address) */
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
	return readw(hw->flash_address + reg);
}

static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
{
	return readl(hw->flash_address + reg);
}

static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
{
	writew(val, hw->flash_address + reg);
}

static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	writel(val, hw->flash_address + reg);
}

/* Shorthand forms assume a local 'hw' variable, matching er32()/ew32() */
#define er16flash(reg)		__er16flash(hw, (reg))
#define er32flash(reg)		__er32flash(hw, (reg))
#define ew16flash(reg, val)	__ew16flash(hw, (reg), (val))
#define ew32flash(reg, val)	__ew32flash(hw, (reg), (val))

/*
 * Pulse the LANPHYPC Value bit: assert the override with the value bit
 * cleared, flush and hold for 10us, then release the override.  Used to
 * force the MAC-PHY interconnect out of SMBus mode.
 */
static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;

	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
	ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
	ew32(CTRL, ctrl);
	e1e_flush();
	udelay(10);
	ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	ew32(CTRL, ctrl);
}

/**
 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;

	phy->addr = 1;
	phy->reset_delay_us = 100;

	/* PCH/PCH2 parts use the HV (82577/82578/82579) register accessors */
	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	if (!hw->phy.ops.check_reset_block(hw)) {
		u32 fwsm = er32(FWSM);

		/*
		 * The MAC-PHY interconnect may still be in SMBus mode after
		 * Sx->S0.  If resetting the PHY is not blocked, toggle the
		 * LANPHYPC Value bit to force the interconnect to PCIe mode.
		 */
		e1000_toggle_lanphypc_value_ich8lan(hw);
		msleep(50);

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(fwsm & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, true);

		/*
		 * Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000e_phy_hw_reset_generic(hw);
		if (ret_val)
			return ret_val;

		/* Ungate automatic PHY configuration on non-managed 82579 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
			usleep_range(10000, 20000);
			e1000_gate_hw_phy_config_ich8lan(hw, false);
		}
	}

	phy->id = e1000_phy_unknown;
	switch (hw->mac.type) {
	default:
		ret_val = e1000e_get_phy_id(hw);
		if (ret_val)
			return ret_val;
		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
			break;
		/* fall-through */
	case e1000_pch2lan:
		/*
		 * In case the PHY needs to be in mdio slow mode,
		 * set slow mode and try to get the PHY id again.
		 */
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_get_phy_id(hw);
		if (ret_val)
			return ret_val;
		break;
	}
	phy->type = e1000e_get_phy_type_from_id(phy->id);

	/* Hook up type-specific ops now that the PHY has been identified */
	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
		    e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000e_phy_sw_reset;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
		phy->ops.get_info = e1000e_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}

/**
 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
**/ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 i = 0; phy->addr = 1; phy->reset_delay_us = 100; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; /* * We may need to do this twice - once for IGP and if that fails, * we'll set BM func pointers and try again */ ret_val = e1000e_determine_phy_address(hw); if (ret_val) { phy->ops.write_reg = e1000e_write_phy_reg_bm; phy->ops.read_reg = e1000e_read_phy_reg_bm; ret_val = e1000e_determine_phy_address(hw); if (ret_val) { e_dbg("Cannot determine PHY addr. Erroring out\n"); return ret_val; } } phy->id = 0; while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && (i++ < 100)) { usleep_range(1000, 2000); ret_val = e1000e_get_phy_id(hw); if (ret_val) return ret_val; } /* Verify phy id */ switch (phy->id) { case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; phy->ops.get_info = e1000e_get_phy_info_igp; phy->ops.check_polarity = e1000_check_polarity_igp; phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; break; case IFE_E_PHY_ID: case IFE_PLUS_E_PHY_ID: case IFE_C_E_PHY_ID: phy->type = e1000_phy_ife; phy->autoneg_mask = E1000_ALL_NOT_GIG; phy->ops.get_info = e1000_get_phy_info_ife; phy->ops.check_polarity = e1000_check_polarity_ife; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; break; case BME1000_E_PHY_ID: phy->type = e1000_phy_bm; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->ops.read_reg = e1000e_read_phy_reg_bm; phy->ops.write_reg = e1000e_write_phy_reg_bm; phy->ops.commit = e1000e_phy_sw_reset; phy->ops.get_info = e1000e_get_phy_info_m88; phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; break; 
default: return -E1000_ERR_PHY; break; } return 0; } /** * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers * @hw: pointer to the HW structure * * Initialize family-specific NVM parameters and function * pointers. **/ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 gfpreg, sector_base_addr, sector_end_addr; u16 i; /* Can't read flash registers if the register set isn't mapped. */ if (!hw->flash_address) { e_dbg("ERROR: Flash registers not mapped\n"); return -E1000_ERR_CONFIG; } nvm->type = e1000_nvm_flash_sw; gfpreg = er32flash(ICH_FLASH_GFPREG); /* * sector_X_addr is a "sector"-aligned address (4096 bytes) * Add 1 to sector_end_addr since this sector is included in * the overall size. */ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; /* flash_base_addr is byte-aligned */ nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; /* * find total size of the NVM, then cut in half since the total * size represents two separate NVM banks. */ nvm->flash_bank_size = (sector_end_addr - sector_base_addr) << FLASH_SECTOR_ADDR_SHIFT; nvm->flash_bank_size /= 2; /* Adjust to word count */ nvm->flash_bank_size /= sizeof(u16); nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; /* Clear shadow ram */ for (i = 0; i < nvm->word_size; i++) { dev_spec->shadow_ram[i].modified = false; dev_spec->shadow_ram[i].value = 0xFFFF; } return 0; } /** * e1000_init_mac_params_ich8lan - Initialize MAC function pointers * @hw: pointer to the HW structure * * Initialize family-specific MAC parameters and function * pointers. 
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	/* ICH8 has one fewer receive-address register than later parts */
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* FWSM register */
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;

	/* LED operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000e_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000e_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000e_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);

	/* Gate automatic PHY configuration by hardware on managed 82579 */
	if ((mac->type == e1000_pch2lan) &&
	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
		e1000_gate_hw_phy_config_ich8lan(hw, true);

	return 0;
}

/**
 * e1000_set_eee_pchlan - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE based on setting in dev_spec structure.  The bits in
 * the LPI Control register will remain set only if/when link is up.
 **/
static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 phy_reg;

	/* EEE is only relevant on the 82579 PHY */
	if (hw->phy.type != e1000_phy_82579)
		return 0;

	ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
	if (ret_val)
		return ret_val;

	if (hw->dev_spec.ich8lan.eee_disable)
		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
	else
		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;

	return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
}

/**
 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see of the link status of the hardware has changed.  If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;
	u16 phy_reg;

	/*
	 * We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;

	/*
	 * First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			return ret_val;
	}

	if (!link)
		return 0; /* No link detected */

	mac->get_link_status = false;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-thru */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/*
		 * Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/*
	 * Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/* Enable/Disable EEE after link up */
	ret_val = e1000_set_eee_pchlan(hw);
	if (ret_val)
		return ret_val;

	/*
	 * If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/*
	 * Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/*
	 * Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val)
		e_dbg("Error configuring flow control\n");

	return ret_val;
}

/*
 * Entry point used at probe time: initializes MAC, NVM and PHY parameters
 * for this family and applies per-device feature/workaround flags.
 */
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 rc;

	rc = e1000_init_mac_params_ich8lan(hw);
	if (rc)
		return rc;

	rc = e1000_init_nvm_params_ich8lan(hw);
	if (rc)
		return rc;

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		rc = e1000_init_phy_params_ich8lan(hw);
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
		rc = e1000_init_phy_params_pchlan(hw);
		break;
	default:
		break;
	}
	if (rc)
		return rc;

	/*
	 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
	 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
	 */
	if ((adapter->hw.phy.type == e1000_phy_ife) ||
	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
		adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;

		hw->mac.ops.blink_led = NULL;
	}

	if ((adapter->hw.mac.type == e1000_ich8lan) &&
	    (adapter->hw.phy.type != e1000_phy_ife))
		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;

	/* Enable workaround for 82579 w/ ME enabled */
	if ((adapter->hw.mac.type == e1000_pch2lan) &&
	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;

	/* Disable EEE by default until IEEE802.3az spec is finalized */
	if (adapter->flags2 & FLAG2_HAS_EEE)
		adapter->hw.dev_spec.ich8lan.eee_disable = true;

	return 0;
}

/* Serializes all NVM accesses across instances of this driver */
static DEFINE_MUTEX(nvm_mutex);

/**
 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 * @hw: pointer to the HW structure
 *
 * Acquires the mutex for performing NVM operations.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
	mutex_lock(&nvm_mutex);

	return 0;
}

/**
 * e1000_release_nvm_ich8lan - Release NVM mutex
 * @hw: pointer to the HW structure
 *
 * Releases the mutex used while performing NVM operations.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
	mutex_unlock(&nvm_mutex);
}

/**
 * e1000_acquire_swflag_ich8lan - Acquire software control flag
 * @hw: pointer to the HW structure
 *
 * Acquires the software control flag for performing PHY and select
 * MAC CSR accesses.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = 0;

	/* Guard against concurrent access from this driver instance */
	if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
			     &hw->adapter->state)) {
		e_dbg("contention for Phy access\n");
		return -E1000_ERR_PHY;
	}

	/* First wait for any previous SW flag owner to release it */
	while (timeout) {
		extcnf_ctrl = er32(EXTCNF_CTRL);
		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
			break;

		mdelay(1);
		timeout--;
	}

	if (!timeout) {
		e_dbg("SW has already locked the resource.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Then set the flag and wait for hardware/firmware to grant it */
	timeout = SW_FLAG_TIMEOUT;

	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	ew32(EXTCNF_CTRL, extcnf_ctrl);

	while (timeout) {
		extcnf_ctrl = er32(EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;

		mdelay(1);
		timeout--;
	}

	if (!timeout) {
		e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
		      er32(FWSM), extcnf_ctrl);
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		ew32(EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	if (ret_val)
		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);

	return ret_val;
}

/**
 * e1000_release_swflag_ich8lan - Release software control flag
 * @hw: pointer to the HW structure
 *
 * Releases the software control flag for performing PHY and select
 * MAC CSR accesses.
 **/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;

	extcnf_ctrl = er32(EXTCNF_CTRL);

	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		ew32(EXTCNF_CTRL, extcnf_ctrl);
	} else {
		e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
	}

	clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
}

/**
 * e1000_check_mng_mode_ich8lan - Checks management mode
 * @hw: pointer to the HW structure
 *
 * This checks if the adapter has any manageability enabled.
 * This is a function pointer entry point only called by read/write
 * routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;

	fwsm = er32(FWSM);
	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
	       ((fwsm & E1000_FWSM_MODE_MASK) ==
		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}

/**
 * e1000_check_mng_mode_pchlan - Checks management mode
 * @hw: pointer to the HW structure
 *
 * This checks if the adapter has iAMT enabled.
 * This is a function pointer entry point only called by read/write
 * routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
{
	u32 fwsm;

	fwsm = er32(FWSM);
	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}

/**
 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
 * @hw: pointer to the HW structure
 *
 * Checks if firmware is blocking the reset of the PHY.
 * This is a function pointer entry point only called by
 * reset routines.
 **/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;

	fwsm = er32(FWSM);

	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
}

/**
 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
 * @hw: pointer to the HW structure
 *
 * Assumes semaphore already acquired.
 *
 **/
static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
	u16 phy_data;
	u32 strap = er32(STRAP);
	s32 ret_val = 0;

	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;

	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
	if (ret_val)
		return ret_val;

	/* Copy the strapped SMBus address into the PHY and mark it valid */
	phy_data &= ~HV_SMB_ADDR_MASK;
	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;

	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
}

/**
 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 * @hw: pointer to the HW structure
 *
 * SW should configure the LCD from the NVM extended configuration region
 * as a workaround for certain parts.
 **/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = 0;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	/*
	 * Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	data = er32(FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto release;

	/*
	 * Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = er32(EXTCNF_CTRL);
	if (!(hw->mac.type == e1000_pch2lan)) {
		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
			goto release;
	}

	cnf_size = er32(EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto release;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
	     (hw->mac.type == e1000_pchlan)) ||
	    (hw->mac.type == e1000_pch2lan)) {
		/*
		 * HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto release;

		data = er32(LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto release;
	}

	/* Configure LCD from extended configuration region. */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each config entry is a (data, address) word pair in the NVM */
	for (i = 0; i < cnf_size; i++) {
		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
					 &reg_data);
		if (ret_val)
			goto release;

		ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
					 1, &reg_addr);
		if (ret_val)
			goto release;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
						    reg_data);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 * e1000_k1_gig_workaround_hv - K1 Si workaround
 * @hw: pointer to the HW structure
 * @link: link up bool flag
 *
 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 * from a lower speed.  This workaround disables K1 whenever link is at 1Gig
 * If link is down, the function will restore the default K1 setting located
 * in the NVM.
 **/
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = 0;
	u16 status_reg = 0;
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	if (hw->mac.type != e1000_pchlan)
		return 0;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= BM_CS_STATUS_LINK_UP |
				      BM_CS_STATUS_RESOLVED |
				      BM_CS_STATUS_SPEED_MASK;

			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = false;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= HV_M_STATUS_LINK_UP |
				      HV_M_STATUS_AUTONEG_COMPLETE |
				      HV_M_STATUS_SPEED_MASK;

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = false;
		}

		/* Link stall fix for link up */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x4100);
		if (ret_val)
			goto release;
	}

	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 * e1000_configure_k1_ich8lan - Configure K1 power state
 * @hw: pointer to the HW structure
 * @enable: K1 state to configure
 *
 * Configure the K1 power state based on the provided parameter.
 * Assumes semaphore already acquired.
 *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val = 0;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					      &kmrn_reg);
	if (ret_val)
		return ret_val;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       kmrn_reg);
	if (ret_val)
		return ret_val;

	udelay(20);

	/*
	 * Briefly force speed and bypass speed detection so the K1 setting
	 * takes effect, then restore the original CTRL/CTRL_EXT values.
	 */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_reg = er32(CTRL);

	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	ew32(CTRL, reg);

	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	e1e_flush();
	udelay(20);
	ew32(CTRL, ctrl_reg);
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
	udelay(20);

	return 0;
}

/**
 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 * @hw: pointer to the HW structure
 * @d0_state: boolean if entering d0 or d3 device state
 *
 * SW will configure Gbe Disable and LPLU based on the NVM.  The four bits are
 * collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 * in NVM determines whether HW should configure LPLU and Gbe Disable.
**/ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) { s32 ret_val = 0; u32 mac_reg; u16 oem_reg; if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; if (!(hw->mac.type == e1000_pch2lan)) { mac_reg = er32(EXTCNF_CTRL); if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) goto release; } mac_reg = er32(FEXTNVM); if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) goto release; mac_reg = er32(PHY_CTRL); ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); if (ret_val) goto release; oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); if (d0_state) { if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) oem_reg |= HV_OEM_BITS_GBE_DIS; if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) oem_reg |= HV_OEM_BITS_LPLU; } else { if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) oem_reg |= HV_OEM_BITS_GBE_DIS; if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_NOND0A_LPLU)) oem_reg |= HV_OEM_BITS_LPLU; } /* Set Restart auto-neg to activate the bits */ if ((d0_state || (hw->mac.type != e1000_pchlan)) && !hw->phy.ops.check_reset_block(hw)) oem_reg |= HV_OEM_BITS_RESTART_AN; ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode * @hw: pointer to the HW structure **/ static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) { s32 ret_val; u16 data; ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); if (ret_val) return ret_val; data |= HV_KMRN_MDIO_SLOW; ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); return ret_val; } /** * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be * done after every PHY reset. 
**/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 phy_data;

	/* These workarounds apply only to the first-generation PCH MAC */
	if (hw->mac.type != e1000_pchlan)
		return 0;

	/* Set MDIO slow mode before any other MDIO access */
	if (hw->phy.type == e1000_phy_82577) {
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			return ret_val;
	}

	/* Early-revision 82577/82578 PHYs need preamble adjustments */
	if (((hw->phy.type == e1000_phy_82577) &&
	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
		/* Disable generation of early preamble */
		ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
		if (ret_val)
			return ret_val;

		/* Preamble tuning for SSC */
		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
		if (ret_val)
			return ret_val;
	}

	if (hw->phy.type == e1000_phy_82578) {
		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 */
		if (hw->phy.revision < 2) {
			e1000e_phy_sw_reset(hw);
			ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
		}
	}

	/* Select page 0 */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	hw->phy.addr = 1;
	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
	hw->phy.ops.release(hw);
	if (ret_val)
		return ret_val;

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	ret_val = e1000_k1_gig_workaround_hv(hw, true);
	if (ret_val)
		return ret_val;

	/* Workaround for link disconnects on a busy hub in half duplex */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
	if (ret_val)
		goto release;
	/* Clear the low byte of the port general configuration register */
	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
					       phy_data & 0x00FF);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 * @hw: pointer to the HW structure
 *
 * Mirrors all MAC receive address registers (RAL/RAH and SHRAL/SHRAH)
 * into the corresponding BM PHY wakeup registers.
 **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i, phy_reg = 0;
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
		mac_reg = er32(RAL(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
					   (u16)((mac_reg >> 16) & 0xFFFF));

		mac_reg = er32(RAH(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
					   (u16)(mac_reg & 0xFFFF));
		/* Only the Address-Valid bit is carried into BM_RAR_CTRL */
		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
					   (u16)((mac_reg & E1000_RAH_AV)
						 >> 16));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}

/**
 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 * with 82579 PHY
 * @hw: pointer to the HW structure
 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
 **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = 0;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	if (hw->mac.type != e1000_pch2lan)
		return 0;

	/* disable Rx path while enabling/disabling workaround */
	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
	if (ret_val)
		return ret_val;

	if (enable) {
		/*
		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
			u8 mac_addr[ETH_ALEN] = {0};
			u32 addr_high, addr_low;

			addr_high = er32(RAH(i));
			/* Skip entries not marked Address-Valid */
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = er32(RAL(i));
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = er32(FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		ew32(FFLT_DBG, mac_reg);

		mac_reg = er32(RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		ew32(RCTL, mac_reg);

		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
					       &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_HD_CTRL,
					       &data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Enable jumbo frame workaround in the PHY */
		e1e_rphy(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x1A << 2);
		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, HV_PM_CTRL, &data);
		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
		if (ret_val)
			return ret_val;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = er32(FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		ew32(FFLT_DBG, mac_reg);

		mac_reg = er32(RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		ew32(RCTL, mac_reg);

		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
					       &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_HD_CTRL,
					       &data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Write PHY register values back to h/w defaults */
		e1e_rphy(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, HV_PM_CTRL, &data);
		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
		if (ret_val)
			return ret_val;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
}

/**
 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 * done after every PHY reset.
**/ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) { s32 ret_val = 0; if (hw->mac.type != e1000_pch2lan) return 0; /* Set MDIO slow mode before any other MDIO access */ ret_val = e1000_set_mdio_slow_mode_hv(hw); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD); if (ret_val) goto release; /* set MSE higher to enable link to stay up when noise is high */ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034); if (ret_val) goto release; ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN); if (ret_val) goto release; /* drop link after 5 times MSE threshold was reached */ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_k1_gig_workaround_lv - K1 Si workaround * @hw: pointer to the HW structure * * Workaround to set the K1 beacon duration for 82579 parts **/ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) { s32 ret_val = 0; u16 status_reg = 0; u32 mac_reg; u16 phy_reg; if (hw->mac.type != e1000_pch2lan) return 0; /* Set K1 beacon duration based on 1Gbps speed or otherwise */ ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); if (ret_val) return ret_val; if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { mac_reg = er32(FEXTNVM4); mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); if (ret_val) return ret_val; if (status_reg & HV_M_STATUS_SPEED_1000) { mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; } else { mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; } ew32(FEXTNVM4, mac_reg); ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); } return ret_val; } /** * e1000_gate_hw_phy_config_ich8lan - disable PHY 
config via hardware
 * @hw: pointer to the HW structure
 * @gate: boolean set to true to gate, false to ungate
 *
 * Gate/ungate the automatic PHY configuration via hardware; perform
 * the configuration via software instead.
 **/
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
{
	u32 extcnf_ctrl;

	/* Only 82579 (PCH2) supports gating */
	if (hw->mac.type != e1000_pch2lan)
		return;

	extcnf_ctrl = er32(EXTCNF_CTRL);

	if (gate)
		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
	else
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;

	ew32(EXTCNF_CTRL, extcnf_ctrl);
}

/**
 * e1000_lan_init_done_ich8lan - Check for PHY config completion
 * @hw: pointer to the HW structure
 *
 * Check the appropriate indication the MAC has finished configuring the
 * PHY after a software reset.
 **/
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
{
	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;

	/* Wait for basic configuration completes before proceeding */
	do {
		data = er32(STATUS);
		data &= E1000_STATUS_LAN_INIT_DONE;
		udelay(100);
	} while ((!data) && --loop);

	/*
	 * If basic configuration is incomplete before the above loop
	 * count reaches 0, loading the configuration from NVM will
	 * leave the PHY in a bad state possibly resulting in no link.
	 */
	if (loop == 0)
		e_dbg("LAN_INIT_DONE not set, increase timeout\n");

	/* Clear the Init Done bit for the next init event */
	data = er32(STATUS);
	data &= ~E1000_STATUS_LAN_INIT_DONE;
	ew32(STATUS, data);
}

/**
 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
 * @hw: pointer to the HW structure
 *
 * Runs MAC-type specific PHY workarounds, clears the host wakeup bit, and
 * re-applies the LCD/OEM configuration from NVM.
 **/
static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 reg;

	if (hw->phy.ops.check_reset_block(hw))
		return 0;

	/* Allow time for h/w to get to quiescent state after reset */
	usleep_range(10000, 20000);

	/* Perform any necessary post-reset workarounds */
	switch (hw->mac.type) {
	case e1000_pchlan:
		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_pch2lan:
		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	/* Clear the host wakeup bit after lcd reset */
	if (hw->mac.type >= e1000_pchlan) {
		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
		reg &= ~BM_WUC_HOST_WU_BIT;
		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
	}

	/* Configure the LCD with the extended configuration region in NVM */
	ret_val = e1000_sw_lcd_config_ich8lan(hw);
	if (ret_val)
		return ret_val;

	/* Configure the LCD with the OEM bits in NVM */
	ret_val = e1000_oem_bits_config_ich8lan(hw, true);

	if (hw->mac.type == e1000_pch2lan) {
		/* Ungate automatic PHY configuration on non-managed 82579 */
		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
			usleep_range(10000, 20000);
			e1000_gate_hw_phy_config_ich8lan(hw, false);
		}

		/* Set EEE LPI Update Timer to 200usec */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
						       I82579_LPI_UPDATE_TIMER);
		if (!ret_val)
			ret_val = hw->phy.ops.write_reg_locked(hw,
							       I82579_EMI_DATA,
							       0x1387);
		hw->phy.ops.release(hw);
	}

	return ret_val;
}

/**
 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
 * @hw: pointer to the HW structure
 *
 * Resets the PHY
 * This is a function pointer entry point called by drivers
 * or other
shared routines.
 **/
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
		e1000_gate_hw_phy_config_ich8lan(hw, true);

	ret_val = e1000e_phy_hw_reset_generic(hw);
	if (ret_val)
		return ret_val;

	/* Re-apply workarounds and NVM configuration after the reset */
	return e1000_post_phy_reset_ich8lan(hw);
}

/**
 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU state according to the active flag.  For PCH, if OEM write
 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
 * the phy speed. This function will manually set the LPLU bit and restart
 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
 * since it configures the same bit.
 **/
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
{
	s32 ret_val = 0;
	u16 oem_reg;

	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		return ret_val;

	if (active)
		oem_reg |= HV_OEM_BITS_LPLU;
	else
		oem_reg &= ~HV_OEM_BITS_LPLU;

	/* Restart auto-neg to apply the change (unless reset is blocked) */
	if (!hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
}

/**
 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag.  When
 * activating LPLU this function also disables smart speed
 * and vice versa.  LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
**/
static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = 0;
	u16 data;

	if (phy->type == e1000_phy_ife)
		return 0;

	phy_ctrl = er32(PHY_CTRL);

	if (active) {
		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return 0;

		/*
		 * Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		/* NOTE(review): ret_val from this read is overwritten without
		 * being checked before "data" is used; preserved as-is. */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
		if (ret_val)
			return ret_val;
	} else {
		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return 0;

		/*
		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
	}

	return 0;
}

/**
 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D3 state according to the active flag.  When
 * activating LPLU this function also disables smart speed
 * and vice versa.
LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = 0;
	u16 data;

	phy_ctrl = er32(PHY_CTRL);

	if (!active) {
		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return 0;

		/*
		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return 0;

		/*
		 * Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
	}

	return ret_val;
}
/**
 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 * @hw: pointer to the HW structure
 * @bank: pointer to the variable that returns the active bank
 *
 * Reads signature byte from the NVM using the flash access registers.
 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
 **/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* Offset of bank 1 in bytes (flash_bank_size is in 16-bit words) */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	/* Byte address of the high byte of the signature word (0x13) */
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u8 sig_byte = 0;
	s32 ret_val;

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* ICH8/9 can report the valid bank directly via EECD */
		eecd = er32(EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return 0;
		}
		e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return 0;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return 0;
		}

		e_dbg("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}
}

/**
 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the word(s) to read.
 * @words: Size of data to read in words
 * @data: Pointer to the word(s) to read at offset.
 *
 * Reads a word(s) from the NVM using the flash access registers.
**/
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 act_offset;
	s32 ret_val = 0;
	u32 bank = 0;
	u16 i, word;

	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	nvm->ops.acquire(hw);

	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val) {
		e_dbg("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	act_offset = (bank) ? nvm->flash_bank_size : 0;
	act_offset += offset;

	ret_val = 0;
	for (i = 0; i < words; i++) {
		/* Pending (modified) shadow-RAM values take precedence
		 * over what is currently in flash. */
		if (dev_spec->shadow_ram[offset+i].modified) {
			data[i] = dev_spec->shadow_ram[offset+i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw,
								act_offset + i,
								&word);
			if (ret_val)
				break;
			data[i] = word;
		}
	}

	nvm->ops.release(hw);

out:
	if (ret_val)
		e_dbg("NVM read error: %d\n", ret_val);

	return ret_val;
}

/**
 * e1000_flash_cycle_init_ich8lan - Initialize flash
 * @hw: pointer to the HW structure
 *
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (hsfsts.hsf_status.fldesvalid == 0) {
		e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;

	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	/*
	 * Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */
	if (hsfsts.hsf_status.flcinprog == 0) {
		/*
		 * There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = 0;
	} else {
		s32 i;

		/*
		 * Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcinprog == 0) {
				ret_val = 0;
				break;
			}
			udelay(1);
		}
		if (!ret_val) {
			/*
			 * Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		} else {
			e_dbg("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}

/**
 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
 * @hw: pointer to the HW structure
 * @timeout: maximum time to wait for completion
 *
 * This function starts a flash cycle and waits for its completion.
**/
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
	union ich8_hws_flash_ctrl hsflctl;
	union ich8_hws_flash_status hsfsts;
	u32 i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.flcgo = 1;
	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcdone == 1)
			break;
		udelay(1);
	} while (i++ < timeout);

	/* Success requires FDONE set with no flash cycle error */
	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
		return 0;

	return -E1000_ERR_NVM;
}

/**
 * e1000_read_flash_word_ich8lan - Read word from flash
 * @hw: pointer to the HW structure
 * @offset: offset to data location
 * @data: pointer to the location for storing the data
 *
 * Reads the flash word at offset into data.  Offset is converted
 * to bytes before read.
 **/
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
					 u16 *data)
{
	/* Must convert offset into bytes. */
	offset <<= 1;

	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
}

/**
 * e1000_read_flash_byte_ich8lan - Read byte from flash
 * @hw: pointer to the HW structure
 * @offset: The offset of the byte to read.
 * @data: Pointer to a byte to store the value read.
 *
 * Reads a single byte from the NVM using the flash access registers.
 **/
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 *data)
{
	s32 ret_val;
	u16 word = 0;

	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
	if (ret_val)
		return ret_val;

	*data = (u8)word;

	return 0;
}

/**
 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the byte or word to read.
 * @size: Size of data to read, 1=byte 2=word
 * @data: Pointer to the word to store the value read.
 *
 * Reads a byte or word from the NVM using the flash access registers.
**/
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			    hw->nvm.flash_base_addr;

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val)
			break;

		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		ret_val = e1000_flash_cycle_ich8lan(hw,
						    ICH_FLASH_READ_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (!ret_val) {
			flash_data = er32flash(ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr == 1) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (hsfsts.hsf_status.flcdone == 0) {
				e_dbg("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}

/**
 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the word(s) to write.
* @words: Size of data to write in words
 * @data: Pointer to the word(s) to write at offset.
 *
 * Writes a byte or word to the NVM using the flash access registers.
 **/
static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 i;

	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	nvm->ops.acquire(hw);

	/* Writes only touch the shadow RAM; the flash commit happens in
	 * e1000_update_nvm_checksum_ich8lan(). */
	for (i = 0; i < words; i++) {
		dev_spec->shadow_ram[offset+i].modified = true;
		dev_spec->shadow_ram[offset+i].value = data[i];
	}

	nvm->ops.release(hw);

	return 0;
}

/**
 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
 * @hw: pointer to the HW structure
 *
 * The NVM checksum is updated by calling the generic update_nvm_checksum,
 * which writes the checksum to the shadow ram.  The changes in the shadow
 * ram are then committed to the EEPROM by processing each bank at a time
 * checking for the modified bit and writing only the pending changes.
 * After a successful commit, the shadow ram is cleared and is ready for
 * future writes.
 **/
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
	s32 ret_val;
	u16 data;

	ret_val = e1000e_update_nvm_checksum_generic(hw);
	if (ret_val)
		goto out;

	if (nvm->type != e1000_nvm_flash_sw)
		goto out;

	nvm->ops.acquire(hw);

	/*
	 * We're writing to the opposite bank so if we're on bank 1,
	 * write to bank 0 etc.  We also need to erase the segment that
	 * is going to be written
	 */
	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val) {
		e_dbg("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	if (bank == 0) {
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
		if (ret_val)
			goto release;
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
		if (ret_val)
			goto release;
	}

	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
		/*
		 * Determine whether to write the value stored
		 * in the other NVM bank or a modified value stored
		 * in the shadow RAM
		 */
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
								old_bank_offset,
								&data);
			if (ret_val)
				break;
		}

		/*
		 * If the word is 0x13, then make sure the signature bits
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
		 * while the write is still in progress
		 */
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

		udelay(100);
		/* Write the bytes to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

		udelay(100);
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset + 1,
							       (u8)(data >> 8));
		if (ret_val)
			break;
	}

	/*
	 * Don't bother writing the segment valid bits if sector
	 * programming failed.
	 */
	if (ret_val) {
		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
		e_dbg("Flash commit failed.\n");
		goto release;
	}

	/*
	 * Finally validate the new segment by setting bit 15:14
	 * to 10b in word 0x13 , this can be done without an
	 * erase as well since these bits are 11 to start with
	 * and we need to change bit 14 to 0b
	 */
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
	if (ret_val)
		goto release;

	data &= 0xBFFF;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
						       act_offset * 2 + 1,
						       (u8)(data >> 8));
	if (ret_val)
		goto release;

	/*
	 * And invalidate the previously valid segment by setting
	 * its signature word (0x13) high_byte to 0b.  This can be
	 * done without an erase because flash erase sets all bits
	 * to 1's.  We can write 1's to 0's without an erase
	 */
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
	if (ret_val)
		goto release;

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

release:
	nvm->ops.release(hw);

	/*
	 * Reload the EEPROM, or else modifications will not appear
	 * until after the next adapter reset.
	 */
	if (!ret_val) {
		nvm->ops.reload(hw);
		usleep_range(10000, 20000);
	}

out:
	if (ret_val)
		e_dbg("NVM update error: %d\n", ret_val);

	return ret_val;
}

/**
 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
 * calculated, in which case we need to calculate the checksum and set bit 6.
 **/
static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 data;

	/*
	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
	 * needs to be fixed.  This bit is an indication that the NVM
	 * was prepared by OEM software and did not calculate the
	 * checksum...a likely scenario.
	 */
	ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
	if (ret_val)
		return ret_val;

	if ((data & 0x40) == 0) {
		data |= 0x40;
		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_update_nvm_checksum(hw);
		if (ret_val)
			return ret_val;
	}

	return e1000e_validate_nvm_checksum_generic(hw);
}

/**
 * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
 * @hw: pointer to the HW structure
 *
 * To prevent malicious write/erase of the NVM, set it to be read-only
 * so that the hardware ignores all write/erase cycles of the NVM via
 * the flash control registers.  The shadow-ram copy of the NVM will
 * still be updated, however any updates to this copy will not stick
 * across driver reloads.
 **/
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	union ich8_flash_protected_range pr0;
	union ich8_hws_flash_status hsfsts;
	u32 gfpreg;

	nvm->ops.acquire(hw);

	gfpreg = er32flash(ICH_FLASH_GFPREG);

	/* Write-protect GbE Sector of NVM */
	pr0.regval = er32flash(ICH_FLASH_PR0);
	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
	pr0.range.wpe = true;
	ew32flash(ICH_FLASH_PR0, pr0.regval);

	/*
	 * Lock down a subset of GbE Flash Control Registers, e.g.
	 * PR0 to prevent the write-protection from being lifted.
	 * Once FLOCKDN is set, the registers protected by it cannot
	 * be written until FLOCKDN is cleared by a hardware reset.
	 */
	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
	hsfsts.hsf_status.flockdn = true;
	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	nvm->ops.release(hw);
}

/**
 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the byte/word to read.
* @size: Size of data to read, 1=byte 2=word
 * @data: The byte(s) to write to the NVM.
 *
 * Writes one/two bytes to the NVM using the flash access registers.
 **/
static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val;
	u8 count = 0;

	/* Reject anything but a 1- or 2-byte in-range write */
	if (size < 1 || size > 2 || data > size * 0xff ||
	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			    hw->nvm.flash_base_addr;

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val)
			break;

		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		if (size == 1)
			flash_data = (u32)data & 0x00FF;
		else
			flash_data = (u32)data;

		ew32flash(ICH_FLASH_FDATA0, flash_data);

		/*
		 * check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val = e1000_flash_cycle_ich8lan(hw,
						ICH_FLASH_WRITE_COMMAND_TIMEOUT);
		if (!ret_val)
			break;

		/*
		 * If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcerr == 1)
			/* Repeat for some time before giving up. */
			continue;
		if (hsfsts.hsf_status.flcdone == 0) {
			e_dbg("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}

/**
 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
 * @hw: pointer to the HW structure
 * @offset: The index of the byte to read.
 * @data: The byte to write to the NVM.
 *
 * Writes a single byte to the NVM using the flash access registers.
 **/
static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 data)
{
	u16 word = (u16)data;

	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
}

/**
 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
 * @hw: pointer to the HW structure
 * @offset: The offset of the byte to write.
 * @byte: The byte to write to the NVM.
 *
 * Writes a single byte to the NVM using the flash access registers.
 * Goes through a retry algorithm before giving up.
 **/
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte)
{
	s32 ret_val;
	u16 program_retries;

	/* One initial attempt, then up to 100 retries with a 100us pause */
	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
	if (!ret_val)
		return ret_val;

	for (program_retries = 0; program_retries < 100; program_retries++) {
		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
		udelay(100);
		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
		if (!ret_val)
			break;
	}
	if (program_retries == 100)
		return -E1000_ERR_NVM;

	return 0;
}

/**
 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
 * @hw: pointer to the HW structure
 * @bank: 0 for first bank, 1 for second bank, etc.
 *
 * Erases the bank specified.  Each bank is a 4k block.  Banks are 0 based.
 * bank N is 4096 * N + flash_reg_addr.
 **/
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	/* bank size is in 16bit words - adjust to bytes */
	u32 flash_bank_size = nvm->flash_bank_size * 2;
	s32 ret_val;
	s32 count = 0;
	s32 j, iteration, sector_size;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/*
	 * Determine HW Sector size: Read BERASE bits of hw flash status
	 * register
	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
	 *     consecutive sectors.
The start index for the nth Hw sector * can be calculated as = bank * 4096 + n * 256 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. * The start index for the nth Hw sector can be calculated * as = bank * 4096 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 * (ich9 only, otherwise error condition) * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 */ switch (hsfsts.hsf_status.berasesz) { case 0: /* Hw sector size 256 */ sector_size = ICH_FLASH_SEG_SIZE_256; iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; break; case 1: sector_size = ICH_FLASH_SEG_SIZE_4K; iteration = 1; break; case 2: sector_size = ICH_FLASH_SEG_SIZE_8K; iteration = 1; break; case 3: sector_size = ICH_FLASH_SEG_SIZE_64K; iteration = 1; break; default: return -E1000_ERR_NVM; } /* Start with the base address, then add the sector offset. */ flash_linear_addr = hw->nvm.flash_base_addr; flash_linear_addr += (bank) ? flash_bank_size : 0; for (j = 0; j < iteration ; j++) { do { /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) return ret_val; /* * Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* * Write the last 24 bits of an index within the * block into Flash Linear address field in Flash * Address. */ flash_linear_addr += (j * sector_size); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_ERASE_COMMAND_TIMEOUT); if (!ret_val) break; /* * Check if FCERR is set to 1. 
If 1, * clear it and try the whole sequence * a few more times else Done */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr == 1) /* repeat for some time before giving up */ continue; else if (hsfsts.hsf_status.flcdone == 0) return ret_val; } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); } return 0; } /** * e1000_valid_led_default_ich8lan - Set the default LED settings * @hw: pointer to the HW structure * @data: Pointer to the LED settings * * Reads the LED default settings from the NVM to data. If the NVM LED * settings is all 0's or F's, set the LED default to a valid LED default * setting. **/ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) { s32 ret_val; ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) *data = ID_LED_DEFAULT_ICH8LAN; return 0; } /** * e1000_id_led_init_pchlan - store LED configurations * @hw: pointer to the HW structure * * PCH does not control LEDs via the LEDCTL register, rather it uses * the PHY LED configuration register. * * PCH also does not have an "always on" or "always off" mode which * complicates the ID feature. Instead of using the "on" mode to indicate * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()), * use "link_up" mode. The LEDs will still ID on request if there is no * link based on logic in e1000_led_[on|off]_pchlan(). 
**/
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
	u16 data, i, temp, shift;

	/* Get default ID LED modes */
	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* Each of the 4 LEDs gets a 4-bit mode nibble in the NVM word and
	 * a 5-bit field in the PHY LED configuration value. */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
		shift = (i * 5);
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_on << shift);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_on << shift);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return 0;
}

/**
 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
 * @hw: pointer to the HW structure
 *
 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
 * register, so the the bus width is hard coded.
 **/
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;

	ret_val = e1000e_get_bus_info_pcie(hw);

	/*
	 * ICH devices are "PCI Express"-ish.  They have
	 * a configuration space, but do not contain
	 * PCI Express Capability registers, so bus width
	 * must be hardcoded.
	 */
	if (bus->width == e1000_bus_width_unknown)
		bus->width = e1000_bus_width_pcie_x1;

	return ret_val;
}

/**
 * e1000_reset_hw_ich8lan - Reset the hardware
 * @hw: pointer to the HW structure
 *
 * Does a full reset of the hardware which includes a reset of the PHY and
 * MAC.
 **/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 reg;
	u32 ctrl, kab;
	s32 ret_val;

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000e_disable_pcie_master(hw);
	if (ret_val)
		e_dbg("PCI-E Master disable polling has failed.\n");

	e_dbg("Masking off all interrupts\n");
	ew32(IMC, 0xffffffff);

	/*
	 * Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	ew32(RCTL, 0);
	ew32(TCTL, E1000_TCTL_PSP);
	e1e_flush();
	usleep_range(10000, 20000);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		ew32(PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k. */
		ew32(PBS, E1000_PBS_16K);
	}

	if (hw->mac.type == e1000_pchlan) {
		/* Save the NVM K1 bit setting */
		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
		if (ret_val)
			return ret_val;

		if (reg & E1000_NVM_K1_ENABLE)
			dev_spec->nvm_k1_enabled = true;
		else
			dev_spec->nvm_k1_enabled = false;
	}

	ctrl = er32(CTRL);

	if (!hw->phy.ops.check_reset_block(hw)) {
		/*
		 * Full-chip reset requires MAC and PHY reset at the same
		 * time to make sure the interface between MAC and the
		 * external PHY is reset.
*/
		ctrl |= E1000_CTRL_PHY_RST;

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, true);
	}
	ret_val = e1000_acquire_swflag_ich8lan(hw);
	e_dbg("Issuing a global reset to ich8lan\n");
	ew32(CTRL, (ctrl | E1000_CTRL_RST));
	/* cannot issue a flush here because it hangs the hardware */
	msleep(20);

	/* Release the SW/FW semaphore only if we actually acquired it */
	if (!ret_val)
		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);

	if (ctrl & E1000_CTRL_PHY_RST) {
		ret_val = hw->phy.ops.get_cfg_done(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_post_phy_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/*
	 * For PCH, this write will make sure that any noise
	 * will be detected as a CRC error and be dropped rather than show up
	 * as a bad packet to the DMA engine.
	 */
	if (hw->mac.type == e1000_pchlan)
		ew32(CRC_OFFSET, 0x65656565);

	/* Mask all interrupts again and clear any that are pending */
	ew32(IMC, 0xffffffff);
	er32(ICR);

	kab = er32(KABGTXD);
	kab |= E1000_KABGTXD_BGSQLBIAS;
	ew32(KABGTXD, kab);

	return 0;
}

/**
 * e1000_init_hw_ich8lan - Initialize the hardware
 * @hw: pointer to the HW structure
 *
 * Prepares the hardware for transmit and receive by doing the following:
 *  - initialize hardware bits
 *  - initialize LED identification
 *  - setup receive address registers
 *  - setup flow control
 *  - setup transmit descriptors
 *  - clear statistics
 **/
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl_ext, txdctl, snoop;
	s32 ret_val;
	u16 i;

	e1000_initialize_hw_bits_ich8lan(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	if (ret_val)
		e_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */

	/* Setup the receive address.
	 */
	e1000e_init_rx_addrs(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	e_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/*
	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
	 * the ME.  Disable wakeup by clearing the host wakeup bit.
	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
	 */
	if (hw->phy.type == e1000_phy_82578) {
		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
		i &= ~BM_WUC_HOST_WU_BIT;
		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
		ret_val = e1000_phy_hw_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Set the transmit descriptor write-back policy for both queues */
	txdctl = er32(TXDCTL(0));
	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
		 E1000_TXDCTL_FULL_TX_DESC_WB;
	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
	ew32(TXDCTL(0), txdctl);
	txdctl = er32(TXDCTL(1));
	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
		 E1000_TXDCTL_FULL_TX_DESC_WB;
	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
	ew32(TXDCTL(1), txdctl);

	/*
	 * ICH8 has opposite polarity of no_snoop bits.
	 * By default, we should use snoop behavior.
	 */
	if (mac->type == e1000_ich8lan)
		snoop = PCIE_ICH8_SNOOP_ALL;
	else
		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
	e1000e_set_pcie_no_snoop(hw, snoop);

	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
	ew32(CTRL_EXT, ctrl_ext);

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_ich8lan(hw);

	return ret_val;
}

/**
 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
 * @hw: pointer to the HW structure
 *
 * Sets/Clears required hardware bits necessary for correctly setting up the
 * hardware for transmit and receive.
**/ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) { u32 reg; /* Extended Device Control */ reg = er32(CTRL_EXT); reg |= (1 << 22); /* Enable PHY low-power state when MAC is at D3 w/o WoL */ if (hw->mac.type >= e1000_pchlan) reg |= E1000_CTRL_EXT_PHYPDEN; ew32(CTRL_EXT, reg); /* Transmit Descriptor Control 0 */ reg = er32(TXDCTL(0)); reg |= (1 << 22); ew32(TXDCTL(0), reg); /* Transmit Descriptor Control 1 */ reg = er32(TXDCTL(1)); reg |= (1 << 22); ew32(TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); if (hw->mac.type == e1000_ich8lan) reg |= (1 << 28) | (1 << 29); reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); ew32(TARC(0), reg); /* Transmit Arbitration Control 1 */ reg = er32(TARC(1)); if (er32(TCTL) & E1000_TCTL_MULR) reg &= ~(1 << 28); else reg |= (1 << 28); reg |= (1 << 24) | (1 << 26) | (1 << 30); ew32(TARC(1), reg); /* Device Status */ if (hw->mac.type == e1000_ich8lan) { reg = er32(STATUS); reg &= ~(1 << 31); ew32(STATUS, reg); } /* * work-around descriptor data corruption issue during nfs v2 udp * traffic, just disable the nfs filtering capability */ reg = er32(RFCTL); reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); ew32(RFCTL, reg); } /** * e1000_setup_link_ich8lan - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow * control. Calls the appropriate media-specific link configuration * function. Assuming the adapter has a valid link partner, a valid link * should be established. Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) { s32 ret_val; if (hw->phy.ops.check_reset_block(hw)) return 0; /* * ICH parts do not have a word in the NVM to determine * the default flow control setting, so we explicitly * set it to full. 
*/ if (hw->fc.requested_mode == e1000_fc_default) { /* Workaround h/w hang when Tx flow control enabled */ if (hw->mac.type == e1000_pchlan) hw->fc.requested_mode = e1000_fc_rx_pause; else hw->fc.requested_mode = e1000_fc_full; } /* * Save off the requested flow control mode for use later. Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Continue to configure the copper link. */ ret_val = hw->mac.ops.setup_physical_interface(hw); if (ret_val) return ret_val; ew32(FCTTV, hw->fc.pause_time); if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || (hw->phy.type == e1000_phy_82577)) { ew32(FCRTV_PCH, hw->fc.refresh_time); ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), hw->fc.pause_time); if (ret_val) return ret_val; } return e1000e_set_fc_watermarks(hw); } /** * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface * @hw: pointer to the HW structure * * Configures the kumeran interface to the PHY to wait the appropriate time * when polling the PHY, then call the generic setup_copper_link to finish * configuring the copper link. **/ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; u16 reg_data; ctrl = er32(CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ew32(CTRL, ctrl); /* * Set the mac to wait the maximum time between each iteration * and increase the max iterations when polling the phy; * this fixes erroneous timeouts at 10Mbps. 
*/
	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
	if (ret_val)
		return ret_val;
	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
				       &reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= 0x3F;
	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
					reg_data);
	if (ret_val)
		return ret_val;

	/* PHY-type specific copper link setup */
	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000e_copper_link_setup_igp(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000e_copper_link_setup_m88(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_ife:
		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
		if (ret_val)
			return ret_val;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

		switch (hw->phy.mdix) {
		case 1:
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	return e1000e_setup_copper_link(hw);
}

/**
 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
 * @hw: pointer to the HW structure
 * @speed: pointer to store current link speed
 * @duplex: pointer to store the current link duplex
 *
 * Calls the generic get_speed_and_duplex to retrieve the current link
 * information and then calls the Kumeran lock loss workaround for links at
 * gigabit speeds.
 **/
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
					  u16 *duplex)
{
	s32 ret_val;

	ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
	if (ret_val)
		return ret_val;

	/* The workaround only applies to IGP3 copper at 1Gb/s on ICH8 */
	if ((hw->mac.type == e1000_ich8lan) &&
	    (hw->phy.type == e1000_phy_igp_3) &&
	    (*speed == SPEED_1000)) {
		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
	}

	return ret_val;
}

/**
 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 * @hw: pointer to the HW structure
 *
 * Work-around for 82566 Kumeran PCS lock loss:
 * On link status change (i.e. PCI reset, speed change) and link is up and
 * speed is gigabit-
 * 0) if workaround is optionally disabled do nothing
 * 1) wait 1ms for Kumeran link to come up
 * 2) check Kumeran Diagnostic register PCS lock loss bit
 * 3) if not set the link is locked (all is good), otherwise...
 * 4) reset the PHY
 * 5) repeat up to 10 times
 * Note: this is only called for IGP3 copper when speed is 1gb.
 **/
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;
	u16 i, data;
	bool link;

	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
		return 0;

	/*
	 * Make sure link is up before proceeding.  If not just return.
* Attempting this while link is negotiating fouled up link
	 * stability
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (!link)
		return 0;

	for (i = 0; i < 10; i++) {
		/* read once to clear */
		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;
		/* and again to get new status */
		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
			return 0;

		/* Issue PHY reset */
		e1000_phy_hw_reset(hw);
		mdelay(5);
	}
	/* Disable GigE link negotiation */
	phy_ctrl = er32(PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	ew32(PHY_CTRL, phy_ctrl);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000e_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	return -E1000_ERR_PHY;
}

/**
 * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
 * @hw: pointer to the HW structure
 * @state: boolean value used to set the current Kumeran workaround state
 *
 * If ICH8, set the current Kumeran workaround state (enabled - true
 * /disabled - false).
 **/
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
						  bool state)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	if (hw->mac.type != e1000_ich8lan) {
		e_dbg("Workaround applies to ICH8 only.\n");
		return;
	}

	dev_spec->kmrn_lock_loss_workaround_enabled = state;
}

/**
 * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 * @hw: pointer to the HW structure
 *
 * (kernel-doc name fixed: was "e1000_ipg3_..." which matched no function)
 *
 * Workaround for 82566 power-down on D3 entry:
 *  1) disable gigabit link
 *  2) write VR power-down enable
 *  3) read it back
 * Continue if successful, else issue LCD reset and repeat
 **/
void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8 retry = 0;

	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = er32(PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		ew32(PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		e1e_rphy(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		e1e_rphy(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = er32(CTRL);
		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}

/**
 * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 * @hw: pointer to the HW structure
 *
 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
 * LPLU, Gig disable, MDIC PHY reset):
 *  1) Set Kumeran Near-end loopback
 *  2) Clear Kumeran Near-end loopback
 * Should only be called for ICH8[m] devices with any 1G Phy.
**/ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) { s32 ret_val; u16 reg_data; if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife)) return; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, &reg_data); if (ret_val) return; reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); if (ret_val) return; reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); } /** * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx * @hw: pointer to the HW structure * * During S0 to Sx transition, it is possible the link remains at gig * instead of negotiating to a lower speed. Before going to Sx, set * 'Gig Disable' to force link speed negotiation to a lower speed based on * the LPLU setting in the NVM or custom setting. For PCH and newer parts, * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also * needs to be written. **/ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) { u32 phy_ctrl; s32 ret_val; phy_ctrl = er32(PHY_CTRL); phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; ew32(PHY_CTRL, phy_ctrl); if (hw->mac.type == e1000_ich8lan) e1000e_gig_downshift_workaround_ich8lan(hw); if (hw->mac.type >= e1000_pchlan) { e1000_oem_bits_config_ich8lan(hw, false); /* Reset PHY to activate OEM bits on 82577/8 */ if (hw->mac.type == e1000_pchlan) e1000e_phy_hw_reset_generic(hw); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; e1000_write_smbus_addr(hw); hw->phy.ops.release(hw); } } /** * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 * @hw: pointer to the HW structure * * During Sx to S0 transitions on non-managed devices or managed devices * on which PHY resets are not blocked, if the PHY registers cannot be * accessed properly by the s/w toggle the LANPHYPC value to power cycle * the PHY. 
**/ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) { u16 phy_id1, phy_id2; s32 ret_val; if ((hw->mac.type != e1000_pch2lan) || hw->phy.ops.check_reset_block(hw)) return; ret_val = hw->phy.ops.acquire(hw); if (ret_val) { e_dbg("Failed to acquire PHY semaphore in resume\n"); return; } /* Test access to the PHY registers by reading the ID regs */ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); if (ret_val) goto release; ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); if (ret_val) goto release; if (hw->phy.id == ((u32)(phy_id1 << 16) | (u32)(phy_id2 & PHY_REVISION_MASK))) goto release; e1000_toggle_lanphypc_value_ich8lan(hw); hw->phy.ops.release(hw); msleep(50); e1000_phy_hw_reset(hw); msleep(50); return; release: hw->phy.ops.release(hw); } /** * e1000_cleanup_led_ich8lan - Restore the default LED operation * @hw: pointer to the HW structure * * Return the LED back to the default configuration. **/ static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) { if (hw->phy.type == e1000_phy_ife) return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); ew32(LEDCTL, hw->mac.ledctl_default); return 0; } /** * e1000_led_on_ich8lan - Turn LEDs on * @hw: pointer to the HW structure * * Turn on the LEDs. **/ static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) { if (hw->phy.type == e1000_phy_ife) return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); ew32(LEDCTL, hw->mac.ledctl_mode2); return 0; } /** * e1000_led_off_ich8lan - Turn LEDs off * @hw: pointer to the HW structure * * Turn off the LEDs. **/ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) { if (hw->phy.type == e1000_phy_ife) return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); ew32(LEDCTL, hw->mac.ledctl_mode1); return 0; } /** * e1000_setup_led_pchlan - Configures SW controllable LED * @hw: pointer to the HW structure * * This prepares the SW controllable LED for use. 
**/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
}

/**
 * e1000_cleanup_led_pchlan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
}

/**
 * e1000_led_on_pchlan - Turn LEDs on
 * @hw: pointer to the HW structure
 *
 * Turn on the LEDs.
 **/
static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode2;
	u32 i, led;

	/*
	 * If no link, then turn LED on by setting the invert bit
	 * for each LED that's mode is "link_up" in ledctl_mode2.
	 * (Each of the 3 LEDs occupies a 5-bit field in the config word.)
	 */
	if (!(er32(STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return e1e_wphy(hw, HV_LED_CONFIG, data);
}

/**
 * e1000_led_off_pchlan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 **/
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

	/*
	 * If no link, then turn LED off by clearing the invert bit
	 * for each LED that's mode is "link_up" in ledctl_mode1.
	 */
	if (!(er32(STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return e1e_wphy(hw, HV_LED_CONFIG, data);
}

/**
 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 * @hw: pointer to the HW structure
 *
 * Read appropriate register for the config done bit for completion status
 * and configure the PHY through s/w for EEPROM-less parts.
 *
 * NOTE: some silicon which is EEPROM-less will fail trying to read the
 * config done bit, so only an error is logged and continues.  If we were
 * to return with error, EEPROM-less silicon would not be able to be reset
 * or change link.
 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 bank = 0;
	u32 status;

	e1000e_get_cfg_done(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000e_get_auto_rd_done(hw);
		if (ret_val) {
			/*
			 * When auto config read does not complete, do not
			 * return with an error.  This can happen in situations
			 * where there is no eeprom and prevents getting link.
*/
			e_dbg("Auto Read Done did not complete\n");
			ret_val = 0;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = er32(STATUS);
	if (status & E1000_STATUS_PHYRA)
		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
	else
		e_dbg("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000e_phy_init_script_igp3(hw);
		}
	} else {
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			e_dbg("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}

/**
 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);
}

/**
 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 * @hw: pointer to the HW structure
 *
 * Clears hardware counters specific to the silicon family and calls
 * clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	e1000e_clear_hw_cntrs_base(hw);

	/* These MAC counters are clear-on-read; reading discards them */
	er32(ALGNERRC);
	er32(RXERRC);
	er32(TNCRS);
	er32(CEXTERR);
	er32(TSCTC);
	er32(TSCTFC);

	er32(MGTPRC);
	er32(MGTPDC);
	er32(MGTPTC);

	er32(IAC);
	er32(ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}

/* MAC operations shared by all ICH/PCH variants; entries marked "dependent
 * on mac type" are filled in at runtime per device. */
static const struct e1000_mac_operations ich8_mac_ops = {
	/* check_mng_mode dependent on mac type */
	.check_for_link		= e1000_check_for_copper_link_ich8lan,
	/* cleanup_led dependent on mac type */
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
	.get_bus_info		= e1000_get_bus_info_ich8lan,
	.set_lan_id		= e1000_set_lan_id_single_port,
	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
	/* led_on dependent on mac type */
	/* led_off dependent on mac type */
	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
	.reset_hw		= e1000_reset_hw_ich8lan,
	.init_hw		=
e1000_init_hw_ich8lan, .setup_link = e1000_setup_link_ich8lan, .setup_physical_interface= e1000_setup_copper_link_ich8lan, /* id_led_init dependent on mac type */ .config_collision_dist = e1000e_config_collision_dist_generic, }; static const struct e1000_phy_operations ich8_phy_ops = { .acquire = e1000_acquire_swflag_ich8lan, .check_reset_block = e1000_check_reset_block_ich8lan, .commit = NULL, .get_cfg_done = e1000_get_cfg_done_ich8lan, .get_cable_length = e1000e_get_cable_length_igp_2, .read_reg = e1000e_read_phy_reg_igp, .release = e1000_release_swflag_ich8lan, .reset = e1000_phy_hw_reset_ich8lan, .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, .write_reg = e1000e_write_phy_reg_igp, }; static const struct e1000_nvm_operations ich8_nvm_ops = { .acquire = e1000_acquire_nvm_ich8lan, .read = e1000_read_nvm_ich8lan, .release = e1000_release_nvm_ich8lan, .reload = e1000e_reload_nvm_generic, .update = e1000_update_nvm_checksum_ich8lan, .valid_led_default = e1000_valid_led_default_ich8lan, .validate = e1000_validate_nvm_checksum_ich8lan, .write = e1000_write_nvm_ich8lan, }; const struct e1000_info e1000_ich8_info = { .mac = e1000_ich8lan, .flags = FLAG_HAS_WOL | FLAG_IS_ICH | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_APME_IN_WUC, .pba = 8, .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; const struct e1000_info e1000_ich9_info = { .mac = e1000_ich9lan, .flags = FLAG_HAS_JUMBO_FRAMES | FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_APME_IN_WUC, .pba = 18, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; const struct e1000_info e1000_ich10_info = { .mac = e1000_ich10lan, .flags = FLAG_HAS_JUMBO_FRAMES | 
FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_APME_IN_WUC, .pba = 18, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; const struct e1000_info e1000_pch_info = { .mac = e1000_pchlan, .flags = FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_HAS_JUMBO_FRAMES | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS, .pba = 26, .max_hw_frame_size = 4096, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; const struct e1000_info e1000_pch2_info = { .mac = e1000_pch2lan, .flags = FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS | FLAG2_HAS_EEE, .pba = 26, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, };
gpl-2.0
infected-lp/android_kernel_sony_msm8974
net/bridge/br_sysfs_br.c
5042
23348
/* * Sysfs attributes of bridge ports * Linux ethernet bridge * * Authors: * Stephen Hemminger <shemminger@osdl.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/capability.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/if_bridge.h> #include <linux/rtnetlink.h> #include <linux/spinlock.h> #include <linux/times.h> #include "br_private.h" #define to_dev(obj) container_of(obj, struct device, kobj) #define to_bridge(cd) ((struct net_bridge *)netdev_priv(to_net_dev(cd))) /* * Common code for storing bridge parameters. */ static ssize_t store_bridge_parm(struct device *d, const char *buf, size_t len, int (*set)(struct net_bridge *, unsigned long)) { struct net_bridge *br = to_bridge(d); char *endp; unsigned long val; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; val = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EINVAL; err = (*set)(br, val); return err ? 
err : len; } static ssize_t show_forward_delay(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); } static ssize_t store_forward_delay(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, br_set_forward_delay); } static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR, show_forward_delay, store_forward_delay); static ssize_t show_hello_time(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", jiffies_to_clock_t(to_bridge(d)->hello_time)); } static ssize_t store_hello_time(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, br_set_hello_time); } static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time, store_hello_time); static ssize_t show_max_age(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", jiffies_to_clock_t(to_bridge(d)->max_age)); } static ssize_t store_max_age(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, br_set_max_age); } static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age); static ssize_t show_ageing_time(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time)); } static int set_ageing_time(struct net_bridge *br, unsigned long val) { br->ageing_time = clock_t_to_jiffies(val); return 0; } static ssize_t store_ageing_time(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_ageing_time); } static DEVICE_ATTR(ageing_time, S_IRUGO | S_IWUSR, show_ageing_time, store_ageing_time); static ssize_t show_stp_state(struct device *d, struct device_attribute *attr, char 
*buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%d\n", br->stp_enabled); } static ssize_t store_stp_state(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { struct net_bridge *br = to_bridge(d); char *endp; unsigned long val; if (!capable(CAP_NET_ADMIN)) return -EPERM; val = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EINVAL; if (!rtnl_trylock()) return restart_syscall(); br_stp_set_enabled(br, val); rtnl_unlock(); return len; } static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state, store_stp_state); static ssize_t show_group_fwd_mask(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%#x\n", br->group_fwd_mask); } static ssize_t store_group_fwd_mask(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { struct net_bridge *br = to_bridge(d); char *endp; unsigned long val; if (!capable(CAP_NET_ADMIN)) return -EPERM; val = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EINVAL; if (val & BR_GROUPFWD_RESTRICTED) return -EINVAL; br->group_fwd_mask = val; return len; } static DEVICE_ATTR(group_fwd_mask, S_IRUGO | S_IWUSR, show_group_fwd_mask, store_group_fwd_mask); static ssize_t show_priority(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%d\n", (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]); } static int set_priority(struct net_bridge *br, unsigned long val) { br_stp_set_bridge_priority(br, (u16) val); return 0; } static ssize_t store_priority(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_priority); } static DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, show_priority, store_priority); static ssize_t show_root_id(struct device *d, struct device_attribute *attr, char *buf) { return br_show_bridge_id(buf, &to_bridge(d)->designated_root); } static 
DEVICE_ATTR(root_id, S_IRUGO, show_root_id, NULL); static ssize_t show_bridge_id(struct device *d, struct device_attribute *attr, char *buf) { return br_show_bridge_id(buf, &to_bridge(d)->bridge_id); } static DEVICE_ATTR(bridge_id, S_IRUGO, show_bridge_id, NULL); static ssize_t show_root_port(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", to_bridge(d)->root_port); } static DEVICE_ATTR(root_port, S_IRUGO, show_root_port, NULL); static ssize_t show_root_path_cost(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", to_bridge(d)->root_path_cost); } static DEVICE_ATTR(root_path_cost, S_IRUGO, show_root_path_cost, NULL); static ssize_t show_topology_change(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", to_bridge(d)->topology_change); } static DEVICE_ATTR(topology_change, S_IRUGO, show_topology_change, NULL); static ssize_t show_topology_change_detected(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%d\n", br->topology_change_detected); } static DEVICE_ATTR(topology_change_detected, S_IRUGO, show_topology_change_detected, NULL); static ssize_t show_hello_timer(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%ld\n", br_timer_value(&br->hello_timer)); } static DEVICE_ATTR(hello_timer, S_IRUGO, show_hello_timer, NULL); static ssize_t show_tcn_timer(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%ld\n", br_timer_value(&br->tcn_timer)); } static DEVICE_ATTR(tcn_timer, S_IRUGO, show_tcn_timer, NULL); static ssize_t show_topology_change_timer(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%ld\n", br_timer_value(&br->topology_change_timer)); } static 
DEVICE_ATTR(topology_change_timer, S_IRUGO, show_topology_change_timer, NULL); static ssize_t show_gc_timer(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer)); } static DEVICE_ATTR(gc_timer, S_IRUGO, show_gc_timer, NULL); static ssize_t show_group_addr(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%x:%x:%x:%x:%x:%x\n", br->group_addr[0], br->group_addr[1], br->group_addr[2], br->group_addr[3], br->group_addr[4], br->group_addr[5]); } static ssize_t store_group_addr(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { struct net_bridge *br = to_bridge(d); unsigned new_addr[6]; int i; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (sscanf(buf, "%x:%x:%x:%x:%x:%x", &new_addr[0], &new_addr[1], &new_addr[2], &new_addr[3], &new_addr[4], &new_addr[5]) != 6) return -EINVAL; /* Must be 01:80:c2:00:00:0X */ for (i = 0; i < 5; i++) if (new_addr[i] != br_group_address[i]) return -EINVAL; if (new_addr[5] & ~0xf) return -EINVAL; if (new_addr[5] == 1 || /* 802.3x Pause address */ new_addr[5] == 2 || /* 802.3ad Slow protocols */ new_addr[5] == 3) /* 802.1X PAE address */ return -EINVAL; spin_lock_bh(&br->lock); for (i = 0; i < 6; i++) br->group_addr[i] = new_addr[i]; spin_unlock_bh(&br->lock); return len; } static DEVICE_ATTR(group_addr, S_IRUGO | S_IWUSR, show_group_addr, store_group_addr); static ssize_t store_flush(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { struct net_bridge *br = to_bridge(d); if (!capable(CAP_NET_ADMIN)) return -EPERM; br_fdb_flush(br); return len; } static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush); #ifdef CONFIG_BRIDGE_IGMP_SNOOPING static ssize_t show_multicast_router(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%d\n", 
br->multicast_router); } static ssize_t store_multicast_router(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, br_multicast_set_router); } static DEVICE_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router, store_multicast_router); static ssize_t show_multicast_snooping(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%d\n", !br->multicast_disabled); } static ssize_t store_multicast_snooping(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, br_multicast_toggle); } static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR, show_multicast_snooping, store_multicast_snooping); static ssize_t show_hash_elasticity(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", br->hash_elasticity); } static int set_elasticity(struct net_bridge *br, unsigned long val) { br->hash_elasticity = val; return 0; } static ssize_t store_hash_elasticity(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_elasticity); } static DEVICE_ATTR(hash_elasticity, S_IRUGO | S_IWUSR, show_hash_elasticity, store_hash_elasticity); static ssize_t show_hash_max(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", br->hash_max); } static ssize_t store_hash_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, br_multicast_set_hash_max); } static DEVICE_ATTR(hash_max, S_IRUGO | S_IWUSR, show_hash_max, store_hash_max); static ssize_t show_multicast_last_member_count(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", 
br->multicast_last_member_count); } static int set_last_member_count(struct net_bridge *br, unsigned long val) { br->multicast_last_member_count = val; return 0; } static ssize_t store_multicast_last_member_count(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_last_member_count); } static DEVICE_ATTR(multicast_last_member_count, S_IRUGO | S_IWUSR, show_multicast_last_member_count, store_multicast_last_member_count); static ssize_t show_multicast_startup_query_count( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", br->multicast_startup_query_count); } static int set_startup_query_count(struct net_bridge *br, unsigned long val) { br->multicast_startup_query_count = val; return 0; } static ssize_t store_multicast_startup_query_count( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_startup_query_count); } static DEVICE_ATTR(multicast_startup_query_count, S_IRUGO | S_IWUSR, show_multicast_startup_query_count, store_multicast_startup_query_count); static ssize_t show_multicast_last_member_interval( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->multicast_last_member_interval)); } static int set_last_member_interval(struct net_bridge *br, unsigned long val) { br->multicast_last_member_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_last_member_interval( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_last_member_interval); } static DEVICE_ATTR(multicast_last_member_interval, S_IRUGO | S_IWUSR, show_multicast_last_member_interval, store_multicast_last_member_interval); static ssize_t show_multicast_membership_interval( struct device *d, struct 
device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->multicast_membership_interval)); } static int set_membership_interval(struct net_bridge *br, unsigned long val) { br->multicast_membership_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_membership_interval( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_membership_interval); } static DEVICE_ATTR(multicast_membership_interval, S_IRUGO | S_IWUSR, show_multicast_membership_interval, store_multicast_membership_interval); static ssize_t show_multicast_querier_interval(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->multicast_querier_interval)); } static int set_querier_interval(struct net_bridge *br, unsigned long val) { br->multicast_querier_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_querier_interval(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_querier_interval); } static DEVICE_ATTR(multicast_querier_interval, S_IRUGO | S_IWUSR, show_multicast_querier_interval, store_multicast_querier_interval); static ssize_t show_multicast_query_interval(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->multicast_query_interval)); } static int set_query_interval(struct net_bridge *br, unsigned long val) { br->multicast_query_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_query_interval(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_query_interval); } static DEVICE_ATTR(multicast_query_interval, S_IRUGO | S_IWUSR, 
show_multicast_query_interval, store_multicast_query_interval); static ssize_t show_multicast_query_response_interval( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf( buf, "%lu\n", jiffies_to_clock_t(br->multicast_query_response_interval)); } static int set_query_response_interval(struct net_bridge *br, unsigned long val) { br->multicast_query_response_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_query_response_interval( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_query_response_interval); } static DEVICE_ATTR(multicast_query_response_interval, S_IRUGO | S_IWUSR, show_multicast_query_response_interval, store_multicast_query_response_interval); static ssize_t show_multicast_startup_query_interval( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf( buf, "%lu\n", jiffies_to_clock_t(br->multicast_startup_query_interval)); } static int set_startup_query_interval(struct net_bridge *br, unsigned long val) { br->multicast_startup_query_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_startup_query_interval( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_startup_query_interval); } static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR, show_multicast_startup_query_interval, store_multicast_startup_query_interval); #endif #ifdef CONFIG_BRIDGE_NETFILTER static ssize_t show_nf_call_iptables( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", br->nf_call_iptables); } static int set_nf_call_iptables(struct net_bridge *br, unsigned long val) { br->nf_call_iptables = val ? 
true : false; return 0; } static ssize_t store_nf_call_iptables( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_nf_call_iptables); } static DEVICE_ATTR(nf_call_iptables, S_IRUGO | S_IWUSR, show_nf_call_iptables, store_nf_call_iptables); static ssize_t show_nf_call_ip6tables( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", br->nf_call_ip6tables); } static int set_nf_call_ip6tables(struct net_bridge *br, unsigned long val) { br->nf_call_ip6tables = val ? true : false; return 0; } static ssize_t store_nf_call_ip6tables( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_nf_call_ip6tables); } static DEVICE_ATTR(nf_call_ip6tables, S_IRUGO | S_IWUSR, show_nf_call_ip6tables, store_nf_call_ip6tables); static ssize_t show_nf_call_arptables( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", br->nf_call_arptables); } static int set_nf_call_arptables(struct net_bridge *br, unsigned long val) { br->nf_call_arptables = val ? 
true : false; return 0; } static ssize_t store_nf_call_arptables( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_nf_call_arptables); } static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR, show_nf_call_arptables, store_nf_call_arptables); #endif static struct attribute *bridge_attrs[] = { &dev_attr_forward_delay.attr, &dev_attr_hello_time.attr, &dev_attr_max_age.attr, &dev_attr_ageing_time.attr, &dev_attr_stp_state.attr, &dev_attr_group_fwd_mask.attr, &dev_attr_priority.attr, &dev_attr_bridge_id.attr, &dev_attr_root_id.attr, &dev_attr_root_path_cost.attr, &dev_attr_root_port.attr, &dev_attr_topology_change.attr, &dev_attr_topology_change_detected.attr, &dev_attr_hello_timer.attr, &dev_attr_tcn_timer.attr, &dev_attr_topology_change_timer.attr, &dev_attr_gc_timer.attr, &dev_attr_group_addr.attr, &dev_attr_flush.attr, #ifdef CONFIG_BRIDGE_IGMP_SNOOPING &dev_attr_multicast_router.attr, &dev_attr_multicast_snooping.attr, &dev_attr_hash_elasticity.attr, &dev_attr_hash_max.attr, &dev_attr_multicast_last_member_count.attr, &dev_attr_multicast_startup_query_count.attr, &dev_attr_multicast_last_member_interval.attr, &dev_attr_multicast_membership_interval.attr, &dev_attr_multicast_querier_interval.attr, &dev_attr_multicast_query_interval.attr, &dev_attr_multicast_query_response_interval.attr, &dev_attr_multicast_startup_query_interval.attr, #endif #ifdef CONFIG_BRIDGE_NETFILTER &dev_attr_nf_call_iptables.attr, &dev_attr_nf_call_ip6tables.attr, &dev_attr_nf_call_arptables.attr, #endif NULL }; static struct attribute_group bridge_group = { .name = SYSFS_BRIDGE_ATTR, .attrs = bridge_attrs, }; /* * Export the forwarding information table as a binary file * The records are struct __fdb_entry. * * Returns the number of bytes read. 
*/ static ssize_t brforward_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = to_dev(kobj); struct net_bridge *br = to_bridge(dev); int n; /* must read whole records */ if (off % sizeof(struct __fdb_entry) != 0) return -EINVAL; n = br_fdb_fillbuf(br, buf, count / sizeof(struct __fdb_entry), off / sizeof(struct __fdb_entry)); if (n > 0) n *= sizeof(struct __fdb_entry); return n; } static struct bin_attribute bridge_forward = { .attr = { .name = SYSFS_BRIDGE_FDB, .mode = S_IRUGO, }, .read = brforward_read, }; /* * Add entries in sysfs onto the existing network class device * for the bridge. * Adds a attribute group "bridge" containing tuning parameters. * Binary attribute containing the forward table * Sub directory to hold links to interfaces. * * Note: the ifobj exists only to be a subdirectory * to hold links. The ifobj exists in same data structure * as it's parent the bridge so reference counting works. 
*/ int br_sysfs_addbr(struct net_device *dev) { struct kobject *brobj = &dev->dev.kobj; struct net_bridge *br = netdev_priv(dev); int err; err = sysfs_create_group(brobj, &bridge_group); if (err) { pr_info("%s: can't create group %s/%s\n", __func__, dev->name, bridge_group.name); goto out1; } err = sysfs_create_bin_file(brobj, &bridge_forward); if (err) { pr_info("%s: can't create attribute file %s/%s\n", __func__, dev->name, bridge_forward.attr.name); goto out2; } br->ifobj = kobject_create_and_add(SYSFS_BRIDGE_PORT_SUBDIR, brobj); if (!br->ifobj) { pr_info("%s: can't add kobject (directory) %s/%s\n", __func__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR); goto out3; } return 0; out3: sysfs_remove_bin_file(&dev->dev.kobj, &bridge_forward); out2: sysfs_remove_group(&dev->dev.kobj, &bridge_group); out1: return err; } void br_sysfs_delbr(struct net_device *dev) { struct kobject *kobj = &dev->dev.kobj; struct net_bridge *br = netdev_priv(dev); kobject_put(br->ifobj); sysfs_remove_bin_file(kobj, &bridge_forward); sysfs_remove_group(kobj, &bridge_group); }
gpl-2.0
Renzo-Olivares/android_kernel_htc_vigor
drivers/input/touchscreen/da9034-ts.c
5042
8860
/* * Touchscreen driver for Dialog Semiconductor DA9034 * * Copyright (C) 2006-2008 Marvell International Ltd. * Fengwei Yin <fengwei.yin@marvell.com> * Bin Yang <bin.yang@marvell.com> * Eric Miao <eric.miao@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/workqueue.h> #include <linux/mfd/da903x.h> #include <linux/slab.h> #define DA9034_MANUAL_CTRL 0x50 #define DA9034_LDO_ADC_EN (1 << 4) #define DA9034_AUTO_CTRL1 0x51 #define DA9034_AUTO_CTRL2 0x52 #define DA9034_AUTO_TSI_EN (1 << 3) #define DA9034_PEN_DETECT (1 << 4) #define DA9034_TSI_CTRL1 0x53 #define DA9034_TSI_CTRL2 0x54 #define DA9034_TSI_X_MSB 0x6c #define DA9034_TSI_Y_MSB 0x6d #define DA9034_TSI_XY_LSB 0x6e enum { STATE_IDLE, /* wait for pendown */ STATE_BUSY, /* TSI busy sampling */ STATE_STOP, /* sample available */ STATE_WAIT, /* Wait to start next sample */ }; enum { EVENT_PEN_DOWN, EVENT_PEN_UP, EVENT_TSI_READY, EVENT_TIMEDOUT, }; struct da9034_touch { struct device *da9034_dev; struct input_dev *input_dev; struct delayed_work tsi_work; struct notifier_block notifier; int state; int interval_ms; int x_inverted; int y_inverted; int last_x; int last_y; }; static inline int is_pen_down(struct da9034_touch *touch) { return da903x_query_status(touch->da9034_dev, DA9034_STATUS_PEN_DOWN); } static inline int detect_pen_down(struct da9034_touch *touch, int on) { if (on) return da903x_set_bits(touch->da9034_dev, DA9034_AUTO_CTRL2, DA9034_PEN_DETECT); else return da903x_clr_bits(touch->da9034_dev, DA9034_AUTO_CTRL2, DA9034_PEN_DETECT); } static int read_tsi(struct da9034_touch *touch) { uint8_t _x, _y, _v; int ret; ret = da903x_read(touch->da9034_dev, DA9034_TSI_X_MSB, &_x); if (ret) 
return ret; ret = da903x_read(touch->da9034_dev, DA9034_TSI_Y_MSB, &_y); if (ret) return ret; ret = da903x_read(touch->da9034_dev, DA9034_TSI_XY_LSB, &_v); if (ret) return ret; touch->last_x = ((_x << 2) & 0x3fc) | (_v & 0x3); touch->last_y = ((_y << 2) & 0x3fc) | ((_v & 0xc) >> 2); return 0; } static inline int start_tsi(struct da9034_touch *touch) { return da903x_set_bits(touch->da9034_dev, DA9034_AUTO_CTRL2, DA9034_AUTO_TSI_EN); } static inline int stop_tsi(struct da9034_touch *touch) { return da903x_clr_bits(touch->da9034_dev, DA9034_AUTO_CTRL2, DA9034_AUTO_TSI_EN); } static inline void report_pen_down(struct da9034_touch *touch) { int x = touch->last_x; int y = touch->last_y; x &= 0xfff; if (touch->x_inverted) x = 1024 - x; y &= 0xfff; if (touch->y_inverted) y = 1024 - y; input_report_abs(touch->input_dev, ABS_X, x); input_report_abs(touch->input_dev, ABS_Y, y); input_report_key(touch->input_dev, BTN_TOUCH, 1); input_sync(touch->input_dev); } static inline void report_pen_up(struct da9034_touch *touch) { input_report_key(touch->input_dev, BTN_TOUCH, 0); input_sync(touch->input_dev); } static void da9034_event_handler(struct da9034_touch *touch, int event) { int err; switch (touch->state) { case STATE_IDLE: if (event != EVENT_PEN_DOWN) break; /* Enable auto measurement of the TSI, this will * automatically disable pen down detection */ err = start_tsi(touch); if (err) goto err_reset; touch->state = STATE_BUSY; break; case STATE_BUSY: if (event != EVENT_TSI_READY) break; err = read_tsi(touch); if (err) goto err_reset; /* Disable auto measurement of the TSI, so that * pen down status will be available */ err = stop_tsi(touch); if (err) goto err_reset; touch->state = STATE_STOP; /* FIXME: PEN_{UP/DOWN} events are expected to be * available by stopping TSI, but this is found not * always true, delay and simulate such an event * here is more reliable */ mdelay(1); da9034_event_handler(touch, is_pen_down(touch) ? 
EVENT_PEN_DOWN : EVENT_PEN_UP); break; case STATE_STOP: if (event == EVENT_PEN_DOWN) { report_pen_down(touch); schedule_delayed_work(&touch->tsi_work, msecs_to_jiffies(touch->interval_ms)); touch->state = STATE_WAIT; } if (event == EVENT_PEN_UP) { report_pen_up(touch); touch->state = STATE_IDLE; } break; case STATE_WAIT: if (event != EVENT_TIMEDOUT) break; if (is_pen_down(touch)) { start_tsi(touch); touch->state = STATE_BUSY; } else { report_pen_up(touch); touch->state = STATE_IDLE; } break; } return; err_reset: touch->state = STATE_IDLE; stop_tsi(touch); detect_pen_down(touch, 1); } static void da9034_tsi_work(struct work_struct *work) { struct da9034_touch *touch = container_of(work, struct da9034_touch, tsi_work.work); da9034_event_handler(touch, EVENT_TIMEDOUT); } static int da9034_touch_notifier(struct notifier_block *nb, unsigned long event, void *data) { struct da9034_touch *touch = container_of(nb, struct da9034_touch, notifier); if (event & DA9034_EVENT_TSI_READY) da9034_event_handler(touch, EVENT_TSI_READY); if ((event & DA9034_EVENT_PEN_DOWN) && touch->state == STATE_IDLE) da9034_event_handler(touch, EVENT_PEN_DOWN); return 0; } static int da9034_touch_open(struct input_dev *dev) { struct da9034_touch *touch = input_get_drvdata(dev); int ret; ret = da903x_register_notifier(touch->da9034_dev, &touch->notifier, DA9034_EVENT_PEN_DOWN | DA9034_EVENT_TSI_READY); if (ret) return -EBUSY; /* Enable ADC LDO */ ret = da903x_set_bits(touch->da9034_dev, DA9034_MANUAL_CTRL, DA9034_LDO_ADC_EN); if (ret) return ret; /* TSI_DELAY: 3 slots, TSI_SKIP: 3 slots */ ret = da903x_write(touch->da9034_dev, DA9034_TSI_CTRL1, 0x1b); if (ret) return ret; ret = da903x_write(touch->da9034_dev, DA9034_TSI_CTRL2, 0x00); if (ret) return ret; touch->state = STATE_IDLE; detect_pen_down(touch, 1); return 0; } static void da9034_touch_close(struct input_dev *dev) { struct da9034_touch *touch = input_get_drvdata(dev); da903x_unregister_notifier(touch->da9034_dev, &touch->notifier, 
DA9034_EVENT_PEN_DOWN | DA9034_EVENT_TSI_READY); cancel_delayed_work_sync(&touch->tsi_work); touch->state = STATE_IDLE; stop_tsi(touch); detect_pen_down(touch, 0); /* Disable ADC LDO */ da903x_clr_bits(touch->da9034_dev, DA9034_MANUAL_CTRL, DA9034_LDO_ADC_EN); } static int __devinit da9034_touch_probe(struct platform_device *pdev) { struct da9034_touch_pdata *pdata = pdev->dev.platform_data; struct da9034_touch *touch; struct input_dev *input_dev; int ret; touch = kzalloc(sizeof(struct da9034_touch), GFP_KERNEL); if (touch == NULL) { dev_err(&pdev->dev, "failed to allocate driver data\n"); return -ENOMEM; } touch->da9034_dev = pdev->dev.parent; if (pdata) { touch->interval_ms = pdata->interval_ms; touch->x_inverted = pdata->x_inverted; touch->y_inverted = pdata->y_inverted; } else /* fallback into default */ touch->interval_ms = 10; INIT_DELAYED_WORK(&touch->tsi_work, da9034_tsi_work); touch->notifier.notifier_call = da9034_touch_notifier; input_dev = input_allocate_device(); if (!input_dev) { dev_err(&pdev->dev, "failed to allocate input device\n"); ret = -ENOMEM; goto err_free_touch; } input_dev->name = pdev->name; input_dev->open = da9034_touch_open; input_dev->close = da9034_touch_close; input_dev->dev.parent = &pdev->dev; __set_bit(EV_ABS, input_dev->evbit); __set_bit(ABS_X, input_dev->absbit); __set_bit(ABS_Y, input_dev->absbit); input_set_abs_params(input_dev, ABS_X, 0, 1023, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, 1023, 0, 0); __set_bit(EV_KEY, input_dev->evbit); __set_bit(BTN_TOUCH, input_dev->keybit); touch->input_dev = input_dev; input_set_drvdata(input_dev, touch); ret = input_register_device(input_dev); if (ret) goto err_free_input; platform_set_drvdata(pdev, touch); return 0; err_free_input: input_free_device(input_dev); err_free_touch: kfree(touch); return ret; } static int __devexit da9034_touch_remove(struct platform_device *pdev) { struct da9034_touch *touch = platform_get_drvdata(pdev); input_unregister_device(touch->input_dev); 
kfree(touch); return 0; } static struct platform_driver da9034_touch_driver = { .driver = { .name = "da9034-touch", .owner = THIS_MODULE, }, .probe = da9034_touch_probe, .remove = __devexit_p(da9034_touch_remove), }; module_platform_driver(da9034_touch_driver); MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9034"); MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>, Bin Yang <bin.yang@marvell.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:da9034-touch");
gpl-2.0
TeamBliss-Devices/android_kernel_google_msm
drivers/staging/vt6655/tether.c
8370
2852
/* * Copyright (c) 2003 VIA Networking, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: tether.c * * Purpose: * * Author: Tevin Chen * * Date: May 21, 1996 * * Functions: * ETHbyGetHashIndexByCrc32 - Caculate multicast hash value by CRC32 * ETHbIsBufferCrc32Ok - Check CRC value of the buffer if Ok or not * * Revision History: * */ #include "device.h" #include "tmacro.h" #include "tcrc.h" #include "tether.h" /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ /* * Description: Caculate multicast hash value by CRC32 * * Parameters: * In: * pbyMultiAddr - Multicast Address * Out: * none * * Return Value: Hash value * */ unsigned char ETHbyGetHashIndexByCrc32 (unsigned char *pbyMultiAddr) { int ii; unsigned char byTmpHash; unsigned char byHash = 0; // get the least 6-bits from CRC generator byTmpHash = (unsigned char)(CRCdwCrc32(pbyMultiAddr, ETH_ALEN, 0xFFFFFFFFL) & 0x3F); // reverse most bit to least bit for (ii = 0; ii < (sizeof(byTmpHash) * 8); ii++) { byHash <<= 1; if (byTmpHash & 0x01) 
byHash |= 1; byTmpHash >>= 1; } // adjust 6-bits to the right most return (byHash >> 2); } /* * Description: Check CRC value of the buffer if Ok or not * * Parameters: * In: * pbyBuffer - pointer of buffer (normally is rx buffer) * cbFrameLength - length of buffer, including CRC portion * Out: * none * * Return Value: true if ok; false if error. * */ bool ETHbIsBufferCrc32Ok (unsigned char *pbyBuffer, unsigned int cbFrameLength) { unsigned long dwCRC; dwCRC = CRCdwGetCrc32(pbyBuffer, cbFrameLength - 4); if (cpu_to_le32(*((unsigned long *)(pbyBuffer + cbFrameLength - 4))) != dwCRC) { return false; } return true; }
gpl-2.0
felipito/linux-stable
arch/x86/pci/xen.c
179
15374
/* * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and * initial domain support. We also handle the DSDT _PRT callbacks for GSI's * used in HVM and initial domain mode (PV does not parse ACPI, so it has no * concept of GSIs). Under PV we hook under the pnbbios API for IRQs and * 0xcf8 PCI configuration read/write. * * Author: Ryan Wilson <hap9@epoch.ncsc.mil> * Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> * Stefano Stabellini <stefano.stabellini@eu.citrix.com> */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/acpi.h> #include <linux/io.h> #include <asm/io_apic.h> #include <asm/pci_x86.h> #include <asm/xen/hypervisor.h> #include <xen/features.h> #include <xen/events.h> #include <asm/xen/pci.h> #include <asm/i8259.h> static int xen_pcifront_enable_irq(struct pci_dev *dev) { int rc; int share = 1; int pirq; u8 gsi; rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); if (rc < 0) { dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", rc); return rc; } /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/ pirq = gsi; if (gsi < nr_legacy_irqs()) share = 0; rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront"); if (rc < 0) { dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n", gsi, pirq, rc); return rc; } dev->irq = rc; dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); return 0; } #ifdef CONFIG_ACPI static int xen_register_pirq(u32 gsi, int gsi_override, int triggering, bool set_pirq) { int rc, pirq = -1, irq = -1; struct physdev_map_pirq map_irq; int shareable = 0; char *name; irq = xen_irq_from_gsi(gsi); if (irq > 0) return irq; if (set_pirq) pirq = gsi; map_irq.domid = DOMID_SELF; map_irq.type = MAP_PIRQ_TYPE_GSI; map_irq.index = gsi; map_irq.pirq = pirq; rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (rc) { printk(KERN_WARNING "xen map irq failed %d\n", rc); return -1; } if (triggering == 
ACPI_EDGE_SENSITIVE) { shareable = 0; name = "ioapic-edge"; } else { shareable = 1; name = "ioapic-level"; } if (gsi_override >= 0) gsi = gsi_override; irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name); if (irq < 0) goto out; printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi); out: return irq; } static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, int trigger, int polarity) { if (!xen_hvm_domain()) return -1; return xen_register_pirq(gsi, -1 /* no GSI override */, trigger, false /* no mapping of GSI to PIRQ */); } #ifdef CONFIG_XEN_DOM0 static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity) { int rc, irq; struct physdev_setup_gsi setup_gsi; if (!xen_pv_domain()) return -1; printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n", gsi, triggering, polarity); irq = xen_register_pirq(gsi, gsi_override, triggering, true); setup_gsi.gsi = gsi; setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1); setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 
0 : 1); rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi); if (rc == -EEXIST) printk(KERN_INFO "Already setup the GSI :%d\n", gsi); else if (rc) { printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n", gsi, rc); } return irq; } static int acpi_register_gsi_xen(struct device *dev, u32 gsi, int trigger, int polarity) { return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity); } #endif #endif #if defined(CONFIG_PCI_MSI) #include <linux/msi.h> #include <asm/msidef.h> struct xen_pci_frontend_ops *xen_pci_frontend; EXPORT_SYMBOL_GPL(xen_pci_frontend); static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { int irq, ret, i; struct msi_desc *msidesc; int *v; if (type == PCI_CAP_ID_MSI && nvec > 1) return 1; v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL); if (!v) return -ENOMEM; if (type == PCI_CAP_ID_MSIX) ret = xen_pci_frontend_enable_msix(dev, v, nvec); else ret = xen_pci_frontend_enable_msi(dev, v); if (ret) goto error; i = 0; list_for_each_entry(msidesc, &dev->msi_list, list) { irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], (type == PCI_CAP_ID_MSI) ? nvec : 1, (type == PCI_CAP_ID_MSIX) ? "pcifront-msi-x" : "pcifront-msi", DOMID_SELF); if (irq < 0) { ret = irq; goto free; } i++; } kfree(v); return 0; error: dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); free: kfree(v); return ret; } #define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \ MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0)) static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq, struct msi_msg *msg) { /* We set vector == 0 to tell the hypervisor we don't care about it, * but we want a pirq setup instead. * We use the dest_id field to pass the pirq that we want. 
*/ msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq); msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_MODE_PHYSICAL | MSI_ADDR_REDIRECTION_CPU | MSI_ADDR_DEST_ID(pirq); msg->data = XEN_PIRQ_MSI_DATA; } static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { int irq, pirq; struct msi_desc *msidesc; struct msi_msg msg; if (type == PCI_CAP_ID_MSI && nvec > 1) return 1; list_for_each_entry(msidesc, &dev->msi_list, list) { __read_msi_msg(msidesc, &msg); pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); if (msg.data != XEN_PIRQ_MSI_DATA || xen_irq_from_pirq(pirq) < 0) { pirq = xen_allocate_pirq_msi(dev, msidesc); if (pirq < 0) { irq = -ENODEV; goto error; } xen_msi_compose_msg(dev, pirq, &msg); __write_msi_msg(msidesc, &msg); dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); } else { dev_dbg(&dev->dev, "xen: msi already bound to pirq=%d\n", pirq); } irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, (type == PCI_CAP_ID_MSI) ? nvec : 1, (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi", DOMID_SELF); if (irq < 0) goto error; dev_dbg(&dev->dev, "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq); } return 0; error: dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); return irq; } #ifdef CONFIG_XEN_DOM0 static bool __read_mostly pci_seg_supported = true; static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { int ret = 0; struct msi_desc *msidesc; list_for_each_entry(msidesc, &dev->msi_list, list) { struct physdev_map_pirq map_irq; domid_t domid; domid = ret = xen_find_device_domain_owner(dev); /* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED, * hence check ret value for < 0. 
*/ if (ret < 0) domid = DOMID_SELF; memset(&map_irq, 0, sizeof(map_irq)); map_irq.domid = domid; map_irq.type = MAP_PIRQ_TYPE_MSI_SEG; map_irq.index = -1; map_irq.pirq = -1; map_irq.bus = dev->bus->number | (pci_domain_nr(dev->bus) << 16); map_irq.devfn = dev->devfn; if (type == PCI_CAP_ID_MSI && nvec > 1) { map_irq.type = MAP_PIRQ_TYPE_MULTI_MSI; map_irq.entry_nr = nvec; } else if (type == PCI_CAP_ID_MSIX) { int pos; u32 table_offset, bir; pos = dev->msix_cap; pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, &table_offset); bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); map_irq.table_base = pci_resource_start(dev, bir); map_irq.entry_nr = msidesc->msi_attrib.entry_nr; } ret = -EINVAL; if (pci_seg_supported) ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) { /* * If MAP_PIRQ_TYPE_MULTI_MSI is not available * there's nothing else we can do in this case. * Just set ret > 0 so driver can retry with * single MSI. */ ret = 1; goto out; } if (ret == -EINVAL && !pci_domain_nr(dev->bus)) { map_irq.type = MAP_PIRQ_TYPE_MSI; map_irq.index = -1; map_irq.pirq = -1; map_irq.bus = dev->bus->number; ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (ret != -EINVAL) pci_seg_supported = false; } if (ret) { dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n", ret, domid); goto out; } ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq, (type == PCI_CAP_ID_MSI) ? nvec : 1, (type == PCI_CAP_ID_MSIX) ? 
"msi-x" : "msi", domid); if (ret < 0) goto out; } ret = 0; out: return ret; } static void xen_initdom_restore_msi_irqs(struct pci_dev *dev) { int ret = 0; if (pci_seg_supported) { struct physdev_pci_device restore_ext; restore_ext.seg = pci_domain_nr(dev->bus); restore_ext.bus = dev->bus->number; restore_ext.devfn = dev->devfn; ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext, &restore_ext); if (ret == -ENOSYS) pci_seg_supported = false; WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret); } if (!pci_seg_supported) { struct physdev_restore_msi restore; restore.bus = dev->bus->number; restore.devfn = dev->devfn; ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore); WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret); } } #endif static void xen_teardown_msi_irqs(struct pci_dev *dev) { struct msi_desc *msidesc; msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); if (msidesc->msi_attrib.is_msix) xen_pci_frontend_disable_msix(dev); else xen_pci_frontend_disable_msi(dev); /* Free the IRQ's and the msidesc using the generic code. 
*/ default_teardown_msi_irqs(dev); } static void xen_teardown_msi_irq(unsigned int irq) { xen_destroy_irq(irq); } static u32 xen_nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) { return 0; } static u32 xen_nop_msix_mask_irq(struct msi_desc *desc, u32 flag) { return 0; } #endif int __init pci_xen_init(void) { if (!xen_pv_domain() || xen_initial_domain()) return -ENODEV; printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n"); pcibios_set_cache_line_size(); pcibios_enable_irq = xen_pcifront_enable_irq; pcibios_disable_irq = NULL; #ifdef CONFIG_ACPI /* Keep ACPI out of the picture */ acpi_noirq = 1; #endif #ifdef CONFIG_PCI_MSI x86_msi.setup_msi_irqs = xen_setup_msi_irqs; x86_msi.teardown_msi_irq = xen_teardown_msi_irq; x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; #endif return 0; } int __init pci_xen_hvm_init(void) { if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs)) return 0; #ifdef CONFIG_ACPI /* * We don't want to change the actual ACPI delivery model, * just how GSIs get registered. */ __acpi_register_gsi = acpi_register_gsi_xen_hvm; #endif #ifdef CONFIG_PCI_MSI x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; x86_msi.teardown_msi_irq = xen_teardown_msi_irq; #endif return 0; } #ifdef CONFIG_XEN_DOM0 static __init void xen_setup_acpi_sci(void) { int rc; int trigger, polarity; int gsi = acpi_sci_override_gsi; int irq = -1; int gsi_override = -1; if (!gsi) return; rc = acpi_get_override_irq(gsi, &trigger, &polarity); if (rc) { printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi" " sci, rc=%d\n", rc); return; } trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; polarity = polarity ? 
ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d " "polarity=%d\n", gsi, trigger, polarity); /* Before we bind the GSI to a Linux IRQ, check whether * we need to override it with bus_irq (IRQ) value. Usually for * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so: * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level) * but there are oddballs where the IRQ != GSI: * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level) * which ends up being: gsi_to_irq[9] == 20 * (which is what acpi_gsi_to_irq ends up calling when starting the * the ACPI interpreter and keels over since IRQ 9 has not been * setup as we had setup IRQ 20 for it). */ if (acpi_gsi_to_irq(gsi, &irq) == 0) { /* Use the provided value if it's valid. */ if (irq >= 0) gsi_override = irq; } gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity); printk(KERN_INFO "xen: acpi sci %d\n", gsi); return; } int __init pci_xen_initial_domain(void) { int irq; #ifdef CONFIG_PCI_MSI x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; x86_msi.teardown_msi_irq = xen_teardown_msi_irq; x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; #endif xen_setup_acpi_sci(); __acpi_register_gsi = acpi_register_gsi_xen; /* Pre-allocate legacy irqs */ for (irq = 0; irq < nr_legacy_irqs(); irq++) { int trigger, polarity; if (acpi_get_override_irq(irq, &trigger, &polarity) == -1) continue; xen_register_pirq(irq, -1 /* no GSI override */, trigger ? 
ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE, true /* Map GSI to PIRQ */); } if (0 == nr_ioapics) { for (irq = 0; irq < nr_legacy_irqs(); irq++) xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic"); } return 0; } struct xen_device_domain_owner { domid_t domain; struct pci_dev *dev; struct list_head list; }; static DEFINE_SPINLOCK(dev_domain_list_spinlock); static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list); static struct xen_device_domain_owner *find_device(struct pci_dev *dev) { struct xen_device_domain_owner *owner; list_for_each_entry(owner, &dev_domain_list, list) { if (owner->dev == dev) return owner; } return NULL; } int xen_find_device_domain_owner(struct pci_dev *dev) { struct xen_device_domain_owner *owner; int domain = -ENODEV; spin_lock(&dev_domain_list_spinlock); owner = find_device(dev); if (owner) domain = owner->domain; spin_unlock(&dev_domain_list_spinlock); return domain; } EXPORT_SYMBOL_GPL(xen_find_device_domain_owner); int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain) { struct xen_device_domain_owner *owner; owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL); if (!owner) return -ENODEV; spin_lock(&dev_domain_list_spinlock); if (find_device(dev)) { spin_unlock(&dev_domain_list_spinlock); kfree(owner); return -EEXIST; } owner->domain = domain; owner->dev = dev; list_add_tail(&owner->list, &dev_domain_list); spin_unlock(&dev_domain_list_spinlock); return 0; } EXPORT_SYMBOL_GPL(xen_register_device_domain_owner); int xen_unregister_device_domain_owner(struct pci_dev *dev) { struct xen_device_domain_owner *owner; spin_lock(&dev_domain_list_spinlock); owner = find_device(dev); if (!owner) { spin_unlock(&dev_domain_list_spinlock); return -ENODEV; } list_del(&owner->list); spin_unlock(&dev_domain_list_spinlock); kfree(owner); return 0; } EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner); #endif
gpl-2.0
rcrobles/linux-stable-4.3
drivers/xen/privcmd.c
179
15205
/****************************************************************************** * privcmd.c * * Interface to privileged domain-0 commands. * * Copyright (c) 2002-2004, K A Fraser, B Dragovic */ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/uaccess.h> #include <linux/swap.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/seq_file.h> #include <linux/miscdevice.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/tlb.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include <xen/xen.h> #include <xen/privcmd.h> #include <xen/interface/xen.h> #include <xen/features.h> #include <xen/page.h> #include <xen/xen-ops.h> #include <xen/balloon.h> #include "privcmd.h" MODULE_LICENSE("GPL"); #define PRIV_VMA_LOCKED ((void *)1) static int privcmd_vma_range_is_mapped( struct vm_area_struct *vma, unsigned long addr, unsigned long nr_pages); static long privcmd_ioctl_hypercall(void __user *udata) { struct privcmd_hypercall hypercall; long ret; if (copy_from_user(&hypercall, udata, sizeof(hypercall))) return -EFAULT; xen_preemptible_hcall_begin(); ret = privcmd_call(hypercall.op, hypercall.arg[0], hypercall.arg[1], hypercall.arg[2], hypercall.arg[3], hypercall.arg[4]); xen_preemptible_hcall_end(); return ret; } static void free_page_list(struct list_head *pages) { struct page *p, *n; list_for_each_entry_safe(p, n, pages, lru) __free_page(p); INIT_LIST_HEAD(pages); } /* * Given an array of items in userspace, return a list of pages * containing the data. If copying fails, either because of memory * allocation failure or a problem reading user memory, return an * error code; its up to the caller to dispose of any partial list. 
*/ static int gather_array(struct list_head *pagelist, unsigned nelem, size_t size, const void __user *data) { unsigned pageidx; void *pagedata; int ret; if (size > PAGE_SIZE) return 0; pageidx = PAGE_SIZE; pagedata = NULL; /* quiet, gcc */ while (nelem--) { if (pageidx > PAGE_SIZE-size) { struct page *page = alloc_page(GFP_KERNEL); ret = -ENOMEM; if (page == NULL) goto fail; pagedata = page_address(page); list_add_tail(&page->lru, pagelist); pageidx = 0; } ret = -EFAULT; if (copy_from_user(pagedata + pageidx, data, size)) goto fail; data += size; pageidx += size; } ret = 0; fail: return ret; } /* * Call function "fn" on each element of the array fragmented * over a list of pages. */ static int traverse_pages(unsigned nelem, size_t size, struct list_head *pos, int (*fn)(void *data, void *state), void *state) { void *pagedata; unsigned pageidx; int ret = 0; BUG_ON(size > PAGE_SIZE); pageidx = PAGE_SIZE; pagedata = NULL; /* hush, gcc */ while (nelem--) { if (pageidx > PAGE_SIZE-size) { struct page *page; pos = pos->next; page = list_entry(pos, struct page, lru); pagedata = page_address(page); pageidx = 0; } ret = (*fn)(pagedata + pageidx, state); if (ret) break; pageidx += size; } return ret; } /* * Similar to traverse_pages, but use each page as a "block" of * data to be processed as one unit. 
*/ static int traverse_pages_block(unsigned nelem, size_t size, struct list_head *pos, int (*fn)(void *data, int nr, void *state), void *state) { void *pagedata; unsigned pageidx; int ret = 0; BUG_ON(size > PAGE_SIZE); pageidx = PAGE_SIZE; while (nelem) { int nr = (PAGE_SIZE/size); struct page *page; if (nr > nelem) nr = nelem; pos = pos->next; page = list_entry(pos, struct page, lru); pagedata = page_address(page); ret = (*fn)(pagedata, nr, state); if (ret) break; nelem -= nr; } return ret; } struct mmap_gfn_state { unsigned long va; struct vm_area_struct *vma; domid_t domain; }; static int mmap_gfn_range(void *data, void *state) { struct privcmd_mmap_entry *msg = data; struct mmap_gfn_state *st = state; struct vm_area_struct *vma = st->vma; int rc; /* Do not allow range to wrap the address space. */ if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) || ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va)) return -EINVAL; /* Range chunks must be contiguous in va space. */ if ((msg->va != st->va) || ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) return -EINVAL; rc = xen_remap_domain_gfn_range(vma, msg->va & PAGE_MASK, msg->mfn, msg->npages, vma->vm_page_prot, st->domain, NULL); if (rc < 0) return rc; st->va += msg->npages << PAGE_SHIFT; return 0; } static long privcmd_ioctl_mmap(void __user *udata) { struct privcmd_mmap mmapcmd; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int rc; LIST_HEAD(pagelist); struct mmap_gfn_state state; /* We only support privcmd_ioctl_mmap_batch for auto translated. 
*/ if (xen_feature(XENFEAT_auto_translated_physmap)) return -ENOSYS; if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd))) return -EFAULT; rc = gather_array(&pagelist, mmapcmd.num, sizeof(struct privcmd_mmap_entry), mmapcmd.entry); if (rc || list_empty(&pagelist)) goto out; down_write(&mm->mmap_sem); { struct page *page = list_first_entry(&pagelist, struct page, lru); struct privcmd_mmap_entry *msg = page_address(page); vma = find_vma(mm, msg->va); rc = -EINVAL; if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data) goto out_up; vma->vm_private_data = PRIV_VMA_LOCKED; } state.va = vma->vm_start; state.vma = vma; state.domain = mmapcmd.dom; rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry), &pagelist, mmap_gfn_range, &state); out_up: up_write(&mm->mmap_sem); out: free_page_list(&pagelist); return rc; } struct mmap_batch_state { domid_t domain; unsigned long va; struct vm_area_struct *vma; int index; /* A tristate: * 0 for no errors * 1 if at least one error has happened (and no * -ENOENT errors have happened) * -ENOENT if at least 1 -ENOENT has happened. */ int global_error; int version; /* User-space gfn array to store errors in the second pass for V1. */ xen_pfn_t __user *user_gfn; /* User-space int array to store errors in the second pass for V2. */ int __user *user_err; }; /* auto translated dom0 note: if domU being created is PV, then gfn is * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP). */ static int mmap_batch_fn(void *data, int nr, void *state) { xen_pfn_t *gfnp = data; struct mmap_batch_state *st = state; struct vm_area_struct *vma = st->vma; struct page **pages = vma->vm_private_data; struct page **cur_pages = NULL; int ret; if (xen_feature(XENFEAT_auto_translated_physmap)) cur_pages = &pages[st->index]; BUG_ON(nr < 0); ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr, (int *)gfnp, st->vma->vm_page_prot, st->domain, cur_pages); /* Adjust the global_error? 
*/ if (ret != nr) { if (ret == -ENOENT) st->global_error = -ENOENT; else { /* Record that at least one error has happened. */ if (st->global_error == 0) st->global_error = 1; } } st->va += PAGE_SIZE * nr; st->index += nr; return 0; } static int mmap_return_error(int err, struct mmap_batch_state *st) { int ret; if (st->version == 1) { if (err) { xen_pfn_t gfn; ret = get_user(gfn, st->user_gfn); if (ret < 0) return ret; /* * V1 encodes the error codes in the 32bit top * nibble of the gfn (with its known * limitations vis-a-vis 64 bit callers). */ gfn |= (err == -ENOENT) ? PRIVCMD_MMAPBATCH_PAGED_ERROR : PRIVCMD_MMAPBATCH_MFN_ERROR; return __put_user(gfn, st->user_gfn++); } else st->user_gfn++; } else { /* st->version == 2 */ if (err) return __put_user(err, st->user_err++); else st->user_err++; } return 0; } static int mmap_return_errors(void *data, int nr, void *state) { struct mmap_batch_state *st = state; int *errs = data; int i; int ret; for (i = 0; i < nr; i++) { ret = mmap_return_error(errs[i], st); if (ret < 0) return ret; } return 0; } /* Allocate pfns that are then mapped with gfns from foreign domid. Update * the vma with the page info to use later. 
* Returns: 0 if success, otherwise -errno */ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) { int rc; struct page **pages; pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL); if (pages == NULL) return -ENOMEM; rc = alloc_xenballooned_pages(numpgs, pages, 0); if (rc != 0) { pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__, numpgs, rc); kfree(pages); return -ENOMEM; } BUG_ON(vma->vm_private_data != NULL); vma->vm_private_data = pages; return 0; } static const struct vm_operations_struct privcmd_vm_ops; static long privcmd_ioctl_mmap_batch(void __user *udata, int version) { int ret; struct privcmd_mmapbatch_v2 m; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long nr_pages; LIST_HEAD(pagelist); struct mmap_batch_state state; switch (version) { case 1: if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch))) return -EFAULT; /* Returns per-frame error in m.arr. */ m.err = NULL; if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr))) return -EFAULT; break; case 2: if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2))) return -EFAULT; /* Returns per-frame error code in m.err. */ if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err)))) return -EFAULT; break; default: return -EINVAL; } nr_pages = m.num; if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT))) return -EINVAL; ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr); if (ret) goto out; if (list_empty(&pagelist)) { ret = -EINVAL; goto out; } if (version == 2) { /* Zero error array now to only copy back actual errors. */ if (clear_user(m.err, sizeof(int) * m.num)) { ret = -EFAULT; goto out; } } down_write(&mm->mmap_sem); vma = find_vma(mm, m.addr); if (!vma || vma->vm_ops != &privcmd_vm_ops) { ret = -EINVAL; goto out_unlock; } /* * Caller must either: * * Map the whole VMA range, which will also allocate all the * pages required for the auto_translated_physmap case. 
* * Or * * Map unmapped holes left from a previous map attempt (e.g., * because those foreign frames were previously paged out). */ if (vma->vm_private_data == NULL) { if (m.addr != vma->vm_start || m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) { ret = -EINVAL; goto out_unlock; } if (xen_feature(XENFEAT_auto_translated_physmap)) { ret = alloc_empty_pages(vma, m.num); if (ret < 0) goto out_unlock; } else vma->vm_private_data = PRIV_VMA_LOCKED; } else { if (m.addr < vma->vm_start || m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) { ret = -EINVAL; goto out_unlock; } if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) { ret = -EINVAL; goto out_unlock; } } state.domain = m.dom; state.vma = vma; state.va = m.addr; state.index = 0; state.global_error = 0; state.version = version; /* mmap_batch_fn guarantees ret == 0 */ BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t), &pagelist, mmap_batch_fn, &state)); up_write(&mm->mmap_sem); if (state.global_error) { /* Write back errors in second pass. */ state.user_gfn = (xen_pfn_t *)m.arr; state.user_err = m.err; ret = traverse_pages_block(m.num, sizeof(xen_pfn_t), &pagelist, mmap_return_errors, &state); } else ret = 0; /* If we have not had any EFAULT-like global errors then set the global * error to -ENOENT if necessary. 
*/ if ((ret == 0) && (state.global_error == -ENOENT)) ret = -ENOENT; out: free_page_list(&pagelist); return ret; out_unlock: up_write(&mm->mmap_sem); goto out; } static long privcmd_ioctl(struct file *file, unsigned int cmd, unsigned long data) { int ret = -ENOSYS; void __user *udata = (void __user *) data; switch (cmd) { case IOCTL_PRIVCMD_HYPERCALL: ret = privcmd_ioctl_hypercall(udata); break; case IOCTL_PRIVCMD_MMAP: ret = privcmd_ioctl_mmap(udata); break; case IOCTL_PRIVCMD_MMAPBATCH: ret = privcmd_ioctl_mmap_batch(udata, 1); break; case IOCTL_PRIVCMD_MMAPBATCH_V2: ret = privcmd_ioctl_mmap_batch(udata, 2); break; default: ret = -EINVAL; break; } return ret; } static void privcmd_close(struct vm_area_struct *vma) { struct page **pages = vma->vm_private_data; int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; int rc; if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) return; rc = xen_unmap_domain_gfn_range(vma, numpgs, pages); if (rc == 0) free_xenballooned_pages(numpgs, pages); else pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n", numpgs, rc); kfree(pages); } static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", vma, vma->vm_start, vma->vm_end, vmf->pgoff, vmf->virtual_address); return VM_FAULT_SIGBUS; } static const struct vm_operations_struct privcmd_vm_ops = { .close = privcmd_close, .fault = privcmd_fault }; static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) { /* DONTCOPY is essential for Xen because copy_page_range doesn't know * how to recreate these mappings */ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &privcmd_vm_ops; vma->vm_private_data = NULL; return 0; } /* * For MMAPBATCH*. This allows asserting the singleshot mapping * on a per pfn/pte basis. Mapping calls that fail with ENOENT * can be then retried until success. 
*/ static int is_mapped_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { return pte_none(*pte) ? 0 : -EBUSY; } static int privcmd_vma_range_is_mapped( struct vm_area_struct *vma, unsigned long addr, unsigned long nr_pages) { return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, is_mapped_fn, NULL) != 0; } const struct file_operations xen_privcmd_fops = { .owner = THIS_MODULE, .unlocked_ioctl = privcmd_ioctl, .mmap = privcmd_mmap, }; EXPORT_SYMBOL_GPL(xen_privcmd_fops); static struct miscdevice privcmd_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "xen/privcmd", .fops = &xen_privcmd_fops, }; static int __init privcmd_init(void) { int err; if (!xen_domain()) return -ENODEV; err = misc_register(&privcmd_dev); if (err != 0) { pr_err("Could not register Xen privcmd device\n"); return err; } return 0; } static void __exit privcmd_exit(void) { misc_deregister(&privcmd_dev); } module_init(privcmd_init); module_exit(privcmd_exit);
gpl-2.0
guh/linux-imx6-3.14-tune
drivers/pci/hotplug/acpiphp_ibm.c
435
14390
/* * ACPI PCI Hot Plug IBM Extension * * Copyright (C) 2004 Vernon Mauery <vernux@us.ibm.com> * Copyright (C) 2004 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <vernux@us.ibm.com> * */ #define pr_fmt(fmt) "acpiphp_ibm: " fmt #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/sysfs.h> #include <linux/kobject.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <asm/uaccess.h> #include "acpiphp.h" #include "../pci.h" #define DRIVER_VERSION "1.0.1" #define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>" #define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver IBM extension" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION); #define FOUND_APCI 0x61504349 /* these are the names for the IBM ACPI pseudo-device */ #define IBM_HARDWARE_ID1 "IBM37D0" #define IBM_HARDWARE_ID2 "IBM37D4" #define hpslot_to_sun(A) (((struct slot *)((A)->private))->sun) /* union apci_descriptor - allows access to the * various device descriptors that are embedded in the * aPCI table */ union apci_descriptor { struct { char sig[4]; u8 len; } header; struct { u8 type; u8 len; u16 slot_id; u8 bus_id; u8 
dev_num; u8 slot_num; u8 slot_attr[2]; u8 attn; u8 status[2]; u8 sun; u8 res[3]; } slot; struct { u8 type; u8 len; } generic; }; /* struct notification - keeps info about the device * that cause the ACPI notification event */ struct notification { struct acpi_device *device; u8 event; }; static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status); static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status); static void ibm_handle_events(acpi_handle handle, u32 event, void *context); static int ibm_get_table_from_acpi(char **bufp); static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t size); static acpi_status __init ibm_find_acpi_device(acpi_handle handle, u32 lvl, void *context, void **rv); static int __init ibm_acpiphp_init(void); static void __exit ibm_acpiphp_exit(void); static acpi_handle ibm_acpi_handle; static struct notification ibm_note; static struct bin_attribute ibm_apci_table_attr = { .attr = { .name = "apci_table", .mode = S_IRUGO, }, .read = ibm_read_apci_table, .write = NULL, }; static struct acpiphp_attention_info ibm_attention_info = { .set_attn = ibm_set_attention_status, .get_attn = ibm_get_attention_status, .owner = THIS_MODULE, }; /** * ibm_slot_from_id - workaround for bad ibm hardware * @id: the slot number that linux refers to the slot by * * Description: This method returns the aCPI slot descriptor * corresponding to the Linux slot number. This descriptor * has info about the aPCI slot id and attention status. * This descriptor must be freed using kfree when done. 
*/ static union apci_descriptor *ibm_slot_from_id(int id) { int ind = 0, size; union apci_descriptor *ret = NULL, *des; char *table; size = ibm_get_table_from_acpi(&table); des = (union apci_descriptor *)table; if (memcmp(des->header.sig, "aPCI", 4) != 0) goto ibm_slot_done; des = (union apci_descriptor *)&table[ind += des->header.len]; while (ind < size && (des->generic.type != 0x82 || des->slot.slot_num != id)) { des = (union apci_descriptor *)&table[ind += des->generic.len]; } if (ind < size && des->slot.slot_num == id) ret = des; ibm_slot_done: if (ret) { ret = kmalloc(sizeof(union apci_descriptor), GFP_KERNEL); memcpy(ret, des, sizeof(union apci_descriptor)); } kfree(table); return ret; } /** * ibm_set_attention_status - callback method to set the attention LED * @slot: the hotplug_slot to work with * @status: what to set the LED to (0 or 1) * * Description: This method is registered with the acpiphp module as a * callback to do the device specific task of setting the LED status. */ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) { union acpi_object args[2]; struct acpi_object_list params = { .pointer = args, .count = 2 }; acpi_status stat; unsigned long long rc; union apci_descriptor *ibm_slot; ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot)); pr_debug("%s: set slot %d (%d) attention status to %d\n", __func__, ibm_slot->slot.slot_num, ibm_slot->slot.slot_id, (status ? 1 : 0)); args[0].type = ACPI_TYPE_INTEGER; args[0].integer.value = ibm_slot->slot.slot_id; args[1].type = ACPI_TYPE_INTEGER; args[1].integer.value = (status) ? 
1 : 0; kfree(ibm_slot); stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", &params, &rc); if (ACPI_FAILURE(stat)) { pr_err("APLS evaluation failed: 0x%08x\n", stat); return -ENODEV; } else if (!rc) { pr_err("APLS method failed: 0x%08llx\n", rc); return -ERANGE; } return 0; } /** * ibm_get_attention_status - callback method to get attention LED status * @slot: the hotplug_slot to work with * @status: returns what the LED is set to (0 or 1) * * Description: This method is registered with the acpiphp module as a * callback to do the device specific task of getting the LED status. * * Because there is no direct method of getting the LED status directly * from an ACPI call, we read the aPCI table and parse out our * slot descriptor to read the status from that. */ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status) { union apci_descriptor *ibm_slot; ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot)); if (ibm_slot->slot.attn & 0xa0 || ibm_slot->slot.status[1] & 0x08) *status = 1; else *status = 0; pr_debug("%s: get slot %d (%d) attention status is %d\n", __func__, ibm_slot->slot.slot_num, ibm_slot->slot.slot_id, *status); kfree(ibm_slot); return 0; } /** * ibm_handle_events - listens for ACPI events for the IBM37D0 device * @handle: an ACPI handle to the device that caused the event * @event: the event info (device specific) * @context: passed context (our notification struct) * * Description: This method is registered as a callback with the ACPI * subsystem it is called when this device has an event to notify the OS of. * * The events actually come from the device as two events that get * synthesized into one event with data by this function. The event * ID comes first and then the slot number that caused it. We report * this as one event to the OS. 
 *
 * From section 5.6.2.2 of the ACPI 2.0 spec, I understand that the OSPM will
 * only re-enable the interrupt that causes this event AFTER this method
 * has returned, thereby enforcing serial access for the notification struct.
 */
static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
{
	u8 detail = event & 0x0f;
	u8 subevent = event & 0xf0;
	struct notification *note = context;

	pr_debug("%s: Received notification %02x\n", __func__, event);

	/*
	 * A 0x8x notification is the second half of the pair: its low nibble
	 * carries the detail (slot) for the event id stashed by the first,
	 * non-0x8x notification in note->event below.
	 */
	if (subevent == 0x80) {
		pr_debug("%s: generating bus event\n", __func__);
		acpi_bus_generate_netlink_event(note->device->pnp.device_class,
						  dev_name(&note->device->dev),
						  note->event, detail);
	} else
		note->event = event;
}

/**
 * ibm_get_table_from_acpi - reads the APLS buffer from ACPI
 * @bufp: address to pointer to allocate for the table
 *
 * Description: This method reads the APLS buffer in from ACPI and
 * stores the "stripped" table into a single buffer
 * it allocates and passes the address back in bufp.
 *
 * If NULL is passed in as buffer, this method only calculates
 * the size of the table and returns that without filling
 * in the buffer.
 *
 * Returns < 0 on error or the size of the table on success.
 */
static int ibm_get_table_from_acpi(char **bufp)
{
	union acpi_object *package;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;
	char *lbuf = NULL;
	/* size stays -EIO until the package is validated below */
	int i, size = -EIO;

	status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		pr_err("%s: APCI evaluation failed\n", __func__);
		return -ENODEV;
	}

	package = (union acpi_object *) buffer.pointer;
	if (!(package) ||
			(package->type != ACPI_TYPE_PACKAGE) ||
			!(package->package.elements)) {
		pr_err("%s: Invalid APCI object\n", __func__);
		goto read_table_done;
	}

	/* First pass: validate every element and total up the buffer sizes */
	for(size = 0, i = 0; i < package->package.count; i++) {
		if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
			pr_err("%s: Invalid APCI element %d\n", __func__, i);
			goto read_table_done;
		}
		size += package->package.elements[i].buffer.length;
	}

	/* Size-only query: caller just wants the total */
	if (bufp == NULL)
		goto read_table_done;

	lbuf = kzalloc(size, GFP_KERNEL);
	pr_debug("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
			__func__, package->package.count, size, lbuf);

	if (lbuf) {
		*bufp = lbuf;
	} else {
		size = -ENOMEM;
		goto read_table_done;
	}

	/* Second pass: concatenate the element buffers into one flat table */
	size = 0;
	for (i=0; i<package->package.count; i++) {
		memcpy(&lbuf[size],
				package->package.elements[i].buffer.pointer,
				package->package.elements[i].buffer.length);
		size += package->package.elements[i].buffer.length;
	}

read_table_done:
	kfree(buffer.pointer);
	return size;
}

/**
 * ibm_read_apci_table - callback for the sysfs apci_table file
 * @filp: the open sysfs file
 * @kobj: the kobject this binary attribute is a part of
 * @bin_attr: struct bin_attribute for this file
 * @buffer: the kernel space buffer to fill
 * @pos: the offset into the file
 * @size: the number of bytes requested
 *
 * Description: Gets registered with sysfs as the reader callback
 * to be executed when /sys/bus/pci/slots/apci_table gets read.
 *
 * Since we don't get notified on open and close for this file,
 * things get really tricky here...
 * our solution is to only allow reading the table in all at once.
 */
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *bin_attr,
			   char *buffer, loff_t pos, size_t size)
{
	int bytes_read = -EINVAL;
	char *table = NULL;

	pr_debug("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);

	/* Only a read from offset 0 with a big-enough buffer succeeds */
	if (pos == 0) {
		bytes_read = ibm_get_table_from_acpi(&table);
		if (bytes_read > 0 && bytes_read <= size)
			memcpy(buffer, table, bytes_read);
		kfree(table);
	}
	return bytes_read;
}

/**
 * ibm_find_acpi_device - callback to find our ACPI device
 * @handle: the ACPI handle of the device we are inspecting
 * @lvl: depth into the namespace tree
 * @context: a pointer to our handle to fill when we find the device
 * @rv: a return value to fill if desired
 *
 * Description: Used as a callback when calling acpi_walk_namespace
 * to find our device. When this method returns non-zero
 * acpi_walk_namespace quits its search and returns our value.
 */
static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
		u32 lvl, void *context, void **rv)
{
	acpi_handle *phandle = (acpi_handle *)context;
	acpi_status status;
	struct acpi_device_info *info;
	int retval = 0;

	status = acpi_get_object_info(handle, &info);
	if (ACPI_FAILURE(status)) {
		pr_err("%s: Failed to get device information status=0x%x\n",
			__func__, status);
		/* return 0 (AE_OK) so the namespace walk keeps going */
		return retval;
	}

	if (info->current_status && (info->valid & ACPI_VALID_HID) &&
			(!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
			 !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
		pr_debug("found hardware: %s, handle: %p\n",
			info->hardware_id.string, handle);
		*phandle = handle;
		/* returning non-zero causes the search to stop
		 * and returns this value to the caller of
		 * acpi_walk_namespace, but it also causes some warnings
		 * in the acpi debug code to print...
		 */
		retval = FOUND_APCI;
	}
	kfree(info);
	return retval;
}

/* Module init: locate the IBM pseudo-device, then register the attention
 * callbacks, the ACPI notify handler and the sysfs apci_table file. */
static int __init ibm_acpiphp_init(void)
{
	int retval = 0;
	acpi_status status;
	struct acpi_device *device;
	struct kobject *sysdir = &pci_slots_kset->kobj;

	pr_debug("%s\n", __func__);

	if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
			ACPI_UINT32_MAX, ibm_find_acpi_device, NULL,
			&ibm_acpi_handle, NULL) != FOUND_APCI) {
		pr_err("%s: acpi_walk_namespace failed\n", __func__);
		retval = -ENODEV;
		goto init_return;
	}
	pr_debug("%s: found IBM aPCI device\n", __func__);
	if (acpi_bus_get_device(ibm_acpi_handle, &device)) {
		pr_err("%s: acpi_bus_get_device failed\n", __func__);
		retval = -ENODEV;
		goto init_return;
	}
	if (acpiphp_register_attention(&ibm_attention_info)) {
		retval = -ENODEV;
		goto init_return;
	}

	ibm_note.device = device;
	status = acpi_install_notify_handler(ibm_acpi_handle,
			ACPI_DEVICE_NOTIFY, ibm_handle_events,
			&ibm_note);
	if (ACPI_FAILURE(status)) {
		pr_err("%s: Failed to register notification handler\n",
				__func__);
		retval = -EBUSY;
		goto init_cleanup;
	}

	/* NOTE(review): if sysfs_create_bin_file() fails, the notify
	 * handler and attention info registered above are not torn down
	 * even though init reports failure — verify whether cleanup is
	 * needed here. */
	ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
	retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);

	return retval;

init_cleanup:
	acpiphp_unregister_attention(&ibm_attention_info);
init_return:
	return retval;
}

/* Module exit: unwind everything ibm_acpiphp_init() registered. */
static void __exit ibm_acpiphp_exit(void)
{
	acpi_status status;
	struct kobject *sysdir = &pci_slots_kset->kobj;

	pr_debug("%s\n", __func__);

	if (acpiphp_unregister_attention(&ibm_attention_info))
		pr_err("%s: attention info deregistration failed", __func__);

	status = acpi_remove_notify_handler(
			   ibm_acpi_handle,
			   ACPI_DEVICE_NOTIFY,
			   ibm_handle_events);
	if (ACPI_FAILURE(status))
		pr_err("%s: Notification handler removal failed\n", __func__);
	/* remove the /sys entries */
	sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr);
}

module_init(ibm_acpiphp_init);
module_exit(ibm_acpiphp_exit);
gpl-2.0
lsigithub/axxia_yocto_linux_4.1
drivers/tty/hvc/hvc_tile.c
435
4714
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Tilera TILE Processor hypervisor console
 */

#include <linux/console.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <asm/setup.h>
#include <arch/sim_def.h>

#include <hv/hypervisor.h>

#include "hvc_console.h"

/* Non-zero when booting under the simulator ("sim_console" on cmdline). */
static int use_sim_console;
static int __init sim_console(char *str)
{
	use_sim_console = 1;
	return 0;
}
early_param("sim_console", sim_console);

/*
 * Write console output either to the simulator (one SPR write per byte,
 * plus a flush) or to the hypervisor console.
 */
int tile_console_write(const char *buf, int count)
{
	if (unlikely(use_sim_console)) {
		int i;
		for (i = 0; i < count; ++i)
			__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
				     (buf[i] << _SIM_CONTROL_OPERATOR_BITS));
		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
			     (SIM_PUTC_FLUSH_BINARY <<
			      _SIM_CONTROL_OPERATOR_BITS));
		return 0;
	} else {
		return hv_console_write((HV_VirtAddr)buf, count);
	}
}

/* hvc put_chars callback: the vt argument is unused on this platform. */
static int hvc_tile_put_chars(uint32_t vt, const char *buf, int count)
{
	return tile_console_write(buf, count);
}

/* hvc get_chars callback: drain up to 'count' bytes; a negative value
 * from the hypervisor means "nothing ready", so stop early. */
static int hvc_tile_get_chars(uint32_t vt, char *buf, int count)
{
	int i, c;

	for (i = 0; i < count; ++i) {
		c = hv_console_read_if_ready();
		if (c < 0)
			break;
		buf[i] = c;
	}

	return i;
}

#ifdef __tilegx__
/*
 * IRQ based callbacks.
 */
static int hvc_tile_notifier_add_irq(struct hvc_struct *hp, int irq)
{
	int rc;
	int cpu = raw_smp_processor_id();  /* Choose an arbitrary cpu */
	HV_Coord coord = { .x = cpu_x(cpu), .y = cpu_y(cpu) };

	rc = notifier_add_irq(hp, irq);
	if (rc)
		return rc;

	/*
	 * Request that the hypervisor start sending us interrupts.
	 * If the hypervisor returns an error, we still return 0, so that
	 * we can fall back to polling.
	 */
	if (hv_console_set_ipi(KERNEL_PL, irq, coord) < 0)
		notifier_del_irq(hp, irq);

	return 0;
}

static void hvc_tile_notifier_del_irq(struct hvc_struct *hp, int irq)
{
	HV_Coord coord = { 0, 0 };

	/* Tell the hypervisor to stop sending us interrupts. */
	hv_console_set_ipi(KERNEL_PL, -1, coord);

	notifier_del_irq(hp, irq);
}

/* Hangup just tears down the same interrupt plumbing as deletion. */
static void hvc_tile_notifier_hangup_irq(struct hvc_struct *hp, int irq)
{
	hvc_tile_notifier_del_irq(hp, irq);
}
#endif

static const struct hv_ops hvc_tile_get_put_ops = {
	.get_chars = hvc_tile_get_chars,
	.put_chars = hvc_tile_put_chars,
#ifdef __tilegx__
	.notifier_add = hvc_tile_notifier_add_irq,
	.notifier_del = hvc_tile_notifier_del_irq,
	.notifier_hangup = hvc_tile_notifier_hangup_irq,
#endif
};

#ifdef __tilegx__
static int hvc_tile_probe(struct platform_device *pdev)
{
	struct hvc_struct *hp;
	int tile_hvc_irq;

	/* Create our IRQ and register it. */
	tile_hvc_irq = irq_alloc_hwirq(-1);
	if (!tile_hvc_irq)
		return -ENXIO;

	tile_irq_activate(tile_hvc_irq, TILE_IRQ_PERCPU);
	hp = hvc_alloc(0, tile_hvc_irq, &hvc_tile_get_put_ops, 128);
	if (IS_ERR(hp)) {
		irq_free_hwirq(tile_hvc_irq);
		return PTR_ERR(hp);
	}
	dev_set_drvdata(&pdev->dev, hp);

	return 0;
}

static int hvc_tile_remove(struct platform_device *pdev)
{
	int rc;
	struct hvc_struct *hp = dev_get_drvdata(&pdev->dev);

	rc = hvc_remove(hp);
	/* hp->data holds the hwirq allocated in probe */
	if (rc == 0)
		irq_free_hwirq(hp->data);

	return rc;
}

static void hvc_tile_shutdown(struct platform_device *pdev)
{
	struct hvc_struct *hp = dev_get_drvdata(&pdev->dev);

	hvc_tile_notifier_del_irq(hp, hp->data);
}

static struct platform_device hvc_tile_pdev = {
	.name           = "hvc-tile",
	.id             = 0,
};

static struct platform_driver hvc_tile_driver = {
	.probe          = hvc_tile_probe,
	.remove         = hvc_tile_remove,
	.shutdown       = hvc_tile_shutdown,
	.driver         = {
		.name   = "hvc-tile",
	}
};
#endif

/* Make hvc the preferred boot console as early as possible. */
static int __init hvc_tile_console_init(void)
{
	hvc_instantiate(0, 0, &hvc_tile_get_put_ops);
	add_preferred_console("hvc", 0, NULL);
	return 0;
}
console_initcall(hvc_tile_console_init);

/* tilepro: polled console only; tilegx: IRQ-driven via platform driver. */
static int __init hvc_tile_init(void)
{
#ifndef __tilegx__
	struct hvc_struct *hp;
	hp = hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
	return PTR_ERR_OR_ZERO(hp);
#else
	platform_device_register(&hvc_tile_pdev);
	return platform_driver_register(&hvc_tile_driver);
#endif
}
device_initcall(hvc_tile_init);
gpl-2.0
DirtyUnicorns/android_kernel_sony_apq8064
kernel/time/tick-common.c
1203
9786
/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
/* CPU that owns the do_timer()/jiffies update duty */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
static DEFINE_RAW_SPINLOCK(tick_device_lock);

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	/* Devices that keep ticking in deep C-states need no broadcast */
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}

/*
 * Periodic tick: the do_timer CPU advances jiffies/walltime, every CPU
 * accounts process time and profiling.
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * when then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}

/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		/* Read tick_next_period consistently under the seqlock */
		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		/* Retry until the event lands in the future */
		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		/* Hand over state from the device being replaced */
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

/*
 * Check, if the new registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {

		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer one shot capable devices !
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}

/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled.
 */
static void tick_handover_do_timer(int *cpup)
{
	if (*cpup == tick_do_timer_cpu) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a life CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

/* Stop this CPU's tick device across system suspend. */
static void tick_suspend(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_shutdown(td->evtdev);
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

/* Restart this CPU's tick device after resume; if the broadcast layer
 * did not already resume it, reprogram periodic/oneshot mode locally. */
static void tick_resume(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;
	int broadcast = tick_resume_broadcast();

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

/*
 * Notification about clock event devices
 */
static int tick_notify(struct notifier_block *nb, unsigned long reason,
			       void *dev)
{
	switch (reason) {

	case CLOCK_EVT_NOTIFY_ADD:
		return tick_check_new_device(dev);

	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, dev);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(dev);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(dev);
		tick_shutdown_broadcast(dev);
		tick_shutdown(dev);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};

/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework
 */
void __init tick_init(void)
{
	clockevents_register_notifier(&tick_notifier);
}
gpl-2.0
Split-Screen/android_kernel_yu_msm8916
arch/mips/mti-malta/malta-platform.c
2227
3381
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006, 07 MIPS Technologies, Inc.
 *   written by Ralf Baechle (ralf@linux-mips.org)
 *     written by Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2008 Wind River Systems, Inc.
 *   updated by Tiejun Chen <tiejun.chen@windriver.com>
 *
 * 1. Probe driver for the Malta's UART ports:
 *
 *   o 2 ports in the SMC SuperIO
 *   o 1 port in the CBUS UART, a discrete 16550 which normally is only used
 *     for bringups.
 *
 * We don't use 8250_platform.c on Malta as it would result in the CBUS
 * UART becoming ttyS0.
 *
 * 2. Register RTC-CMOS platform device on Malta.
 */
#include <linux/init.h>
#include <linux/serial_8250.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <asm/mips-boards/maltaint.h>
#include <mtd/mtd-abi.h>

/* Template for one SMC SuperIO 16550 port at a legacy ISA io base/irq. */
#define SMC_PORT(base, int)						\
{									\
	.iobase		= base,						\
	.irq		= int,						\
	.uartclk	= 1843200,					\
	.iotype		= UPIO_PORT,					\
	.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,		\
	.regshift	= 0,						\
}

#define CBUS_UART_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP)

static struct plat_serial8250_port uart8250_data[] = {
	SMC_PORT(0x3F8, 4),
	SMC_PORT(0x2F8, 3),
	{
		.mapbase	= 0x1f000900,	/* The CBUS UART */
		.irq		= MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
		.uartclk	= 3686400,	/* Twice the usual clk! */
		.iotype		= UPIO_MEM32,
		.flags		= CBUS_UART_FLAGS,
		.regshift	= 3,
	},
	{ },	/* terminator for the 8250 core */
};

static struct platform_device malta_uart8250_device = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev			= {
		.platform_data	= uart8250_data,
	},
};

struct resource malta_rtc_resources[] = {
	{
		.start	= RTC_PORT(0),
		.end	= RTC_PORT(7),
		.flags	= IORESOURCE_IO,
	}, {
		.start	= RTC_IRQ,
		.end	= RTC_IRQ,
		.flags	= IORESOURCE_IRQ,
	}
};

static struct platform_device malta_rtc_device = {
	.name		= "rtc_cmos",
	.id		= -1,
	.resource	= malta_rtc_resources,
	.num_resources	= ARRAY_SIZE(malta_rtc_resources),
};

/* Fixed layout of the on-board NOR flash; YAMON and the board config
 * partitions are kept read-only. */
static struct mtd_partition malta_mtd_partitions[] = {
	{
		.name =		"YAMON",
		.offset =	0x0,
		.size =		0x100000,
		.mask_flags =	MTD_WRITEABLE
	}, {
		.name =		"User FS",
		.offset =	0x100000,
		.size =		0x2e0000
	}, {
		.name =		"Board Config",
		.offset =	0x3e0000,
		.size =		0x020000,
		.mask_flags =	MTD_WRITEABLE
	}
};

static struct physmap_flash_data malta_flash_data = {
	.width		= 4,
	.nr_parts	= ARRAY_SIZE(malta_mtd_partitions),
	.parts		= malta_mtd_partitions
};

static struct resource malta_flash_resource = {
	.start		= 0x1e000000,
	.end		= 0x1e3fffff,
	.flags		= IORESOURCE_MEM
};

static struct platform_device malta_flash_device = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &malta_flash_data,
	},
	.num_resources	= 1,
	.resource	= &malta_flash_resource,
};

static struct platform_device *malta_devices[] __initdata = {
	&malta_uart8250_device,
	&malta_rtc_device,
	&malta_flash_device,
};

/* Register all Malta on-board platform devices at device_initcall time. */
static int __init malta_add_devices(void)
{
	int err;

	err = platform_add_devices(malta_devices, ARRAY_SIZE(malta_devices));
	if (err)
		return err;

	return 0;
}

device_initcall(malta_add_devices);
gpl-2.0
TeamExodus/kernel_yu_msm8916
drivers/scsi/mvsas/mv_init.c
2227
22812
/*
 * Marvell 88SE64xx/88SE94xx pci init
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
*/

#include "mv_sas.h"

/* libsas task collector mode knob (module param "collector"). */
static int lldd_max_execute_num = 1;
module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
MODULE_PARM_DESC(collector, "\n"
	"\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
	"\tMode.  If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
	"\tThe mvsas SAS LLDD supports both modes.\n"
	"\tDefault: 1 (Direct Mode).\n");

int interrupt_coalescing = 0x80;

static struct scsi_transport_template *mvs_stt;
struct kmem_cache *mvs_task_list_cache;
/* Per-chip geometry/dispatch table, indexed by enum chip id. */
static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] =	{ 1, 2, 0x400, 17, 16,  6,  9, &mvs_64xx_dispatch, },
	[chip_6440] =	{ 1, 4, 0x400, 17, 16,  6,  9, &mvs_64xx_dispatch, },
	[chip_6485] =	{ 1, 8, 0x800, 33, 32,  6, 10, &mvs_64xx_dispatch, },
	[chip_9180] =	{ 2, 4, 0x800, 17, 64,  8,  9, &mvs_94xx_dispatch, },
	[chip_9480] =	{ 2, 4, 0x800, 17, 64,  8,  9, &mvs_94xx_dispatch, },
	[chip_9445] =	{ 1, 4, 0x800, 17, 64,  8, 11, &mvs_94xx_dispatch, },
	[chip_9485] =	{ 2, 4, 0x800, 17, 64,  8, 11, &mvs_94xx_dispatch, },
	[chip_1300] =	{ 1, 4, 0x400, 17, 16,  6,  9, &mvs_64xx_dispatch, },
	[chip_1320] =	{ 2, 4, 0x800, 17, 64,  8,  9, &mvs_94xx_dispatch, },
};

struct device_attribute *mvst_host_attrs[];

#define SOC_SAS_NUM 2

/* SCSI midlayer template: mostly generic libsas entry points. */
static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= sas_slave_configure,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
	.shost_attrs		= mvst_host_attrs,
};

/* libsas domain callbacks implemented by this LLDD. */
static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_dev_found		= mvs_dev_found,
	.lldd_dev_gone		= mvs_dev_gone,
	.lldd_execute_task	= mvs_queue_command,
	.lldd_control_phy	= mvs_phy_control,
	.lldd_abort_task	= mvs_abort_task,
	.lldd_abort_task_set	= mvs_abort_task_set,
.lldd_clear_aca = mvs_clear_aca, .lldd_clear_task_set = mvs_clear_task_set, .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, .lldd_lu_reset = mvs_lu_reset, .lldd_query_task = mvs_query_task, .lldd_port_formed = mvs_port_formed, .lldd_port_deformed = mvs_port_deformed, }; static void mvs_phy_init(struct mvs_info *mvi, int phy_id) { struct mvs_phy *phy = &mvi->phy[phy_id]; struct asd_sas_phy *sas_phy = &phy->sas_phy; phy->mvi = mvi; phy->port = NULL; init_timer(&phy->timer); sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; sas_phy->class = SAS; sas_phy->iproto = SAS_PROTOCOL_ALL; sas_phy->tproto = 0; sas_phy->type = PHY_TYPE_PHYSICAL; sas_phy->role = PHY_ROLE_INITIATOR; sas_phy->oob_mode = OOB_NOT_CONNECTED; sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; sas_phy->id = phy_id; sas_phy->sas_addr = &mvi->sas_addr[0]; sas_phy->frame_rcvd = &phy->frame_rcvd[0]; sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata; sas_phy->lldd_phy = phy; } static void mvs_free(struct mvs_info *mvi) { struct mvs_wq *mwq; int slot_nr; if (!mvi) return; if (mvi->flags & MVF_FLAG_SOC) slot_nr = MVS_SOC_SLOTS; else slot_nr = MVS_CHIP_SLOT_SZ; if (mvi->dma_pool) pci_pool_destroy(mvi->dma_pool); if (mvi->tx) dma_free_coherent(mvi->dev, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, mvi->tx, mvi->tx_dma); if (mvi->rx_fis) dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ, mvi->rx_fis, mvi->rx_fis_dma); if (mvi->rx) dma_free_coherent(mvi->dev, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), mvi->rx, mvi->rx_dma); if (mvi->slot) dma_free_coherent(mvi->dev, sizeof(*mvi->slot) * slot_nr, mvi->slot, mvi->slot_dma); if (mvi->bulk_buffer) dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, mvi->bulk_buffer, mvi->bulk_buffer_dma); if (mvi->bulk_buffer1) dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, mvi->bulk_buffer1, mvi->bulk_buffer_dma1); MVS_CHIP_DISP->chip_iounmap(mvi); if (mvi->shost) scsi_host_put(mvi->shost); list_for_each_entry(mwq, &mvi->wq_list, entry) cancel_delayed_work(&mwq->work_q); kfree(mvi->tags); kfree(mvi); 
} #ifdef CONFIG_SCSI_MVSAS_TASKLET static void mvs_tasklet(unsigned long opaque) { u32 stat; u16 core_nr, i = 0; struct mvs_info *mvi; struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque; core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; if (unlikely(!mvi)) BUG_ON(1); stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq); if (!stat) goto out; for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat); } out: MVS_CHIP_DISP->interrupt_enable(mvi); } #endif static irqreturn_t mvs_interrupt(int irq, void *opaque) { u32 core_nr; u32 stat; struct mvs_info *mvi; struct sas_ha_struct *sha = opaque; #ifndef CONFIG_SCSI_MVSAS_TASKLET u32 i; #endif core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; if (unlikely(!mvi)) return IRQ_NONE; #ifdef CONFIG_SCSI_MVSAS_TASKLET MVS_CHIP_DISP->interrupt_disable(mvi); #endif stat = MVS_CHIP_DISP->isr_status(mvi, irq); if (!stat) { #ifdef CONFIG_SCSI_MVSAS_TASKLET MVS_CHIP_DISP->interrupt_enable(mvi); #endif return IRQ_NONE; } #ifdef CONFIG_SCSI_MVSAS_TASKLET tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); #else for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; MVS_CHIP_DISP->isr(mvi, irq, stat); } #endif return IRQ_HANDLED; } static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) { int i = 0, slot_nr; char pool_name[32]; if (mvi->flags & MVF_FLAG_SOC) slot_nr = MVS_SOC_SLOTS; else slot_nr = MVS_CHIP_SLOT_SZ; spin_lock_init(&mvi->lock); for (i = 0; i < mvi->chip->n_phy; i++) { mvs_phy_init(mvi, i); mvi->port[i].wide_port_phymap = 0; mvi->port[i].port_attached = 0; INIT_LIST_HEAD(&mvi->port[i].list); } for (i = 0; i < MVS_MAX_DEVICES; i++) { mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; mvi->devices[i].dev_type = SAS_PHY_UNUSED; mvi->devices[i].device_id = i; 
mvi->devices[i].dev_status = MVS_DEV_NORMAL; init_timer(&mvi->devices[i].timer); } /* * alloc and init our DMA areas */ mvi->tx = dma_alloc_coherent(mvi->dev, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, &mvi->tx_dma, GFP_KERNEL); if (!mvi->tx) goto err_out; memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ, &mvi->rx_fis_dma, GFP_KERNEL); if (!mvi->rx_fis) goto err_out; memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); mvi->rx = dma_alloc_coherent(mvi->dev, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), &mvi->rx_dma, GFP_KERNEL); if (!mvi->rx) goto err_out; memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); mvi->rx[0] = cpu_to_le32(0xfff); mvi->rx_cons = 0xfff; mvi->slot = dma_alloc_coherent(mvi->dev, sizeof(*mvi->slot) * slot_nr, &mvi->slot_dma, GFP_KERNEL); if (!mvi->slot) goto err_out; memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr); mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, TRASH_BUCKET_SIZE, &mvi->bulk_buffer_dma, GFP_KERNEL); if (!mvi->bulk_buffer) goto err_out; mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev, TRASH_BUCKET_SIZE, &mvi->bulk_buffer_dma1, GFP_KERNEL); if (!mvi->bulk_buffer1) goto err_out; sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id); mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0); if (!mvi->dma_pool) { printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name); goto err_out; } mvi->tags_num = slot_nr; /* Initialize tags */ mvs_tag_init(mvi); return 0; err_out: return 1; } int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex) { unsigned long res_start, res_len, res_flag, res_flag_ex = 0; struct pci_dev *pdev = mvi->pdev; if (bar_ex != -1) { /* * ioremap main and peripheral registers */ res_start = pci_resource_start(pdev, bar_ex); res_len = pci_resource_len(pdev, bar_ex); if (!res_start || !res_len) goto err_out; res_flag_ex = pci_resource_flags(pdev, bar_ex); if (res_flag_ex & IORESOURCE_MEM) { if (res_flag_ex & IORESOURCE_CACHEABLE) 
mvi->regs_ex = ioremap(res_start, res_len); else mvi->regs_ex = ioremap_nocache(res_start, res_len); } else mvi->regs_ex = (void *)res_start; if (!mvi->regs_ex) goto err_out; } res_start = pci_resource_start(pdev, bar); res_len = pci_resource_len(pdev, bar); if (!res_start || !res_len) goto err_out; res_flag = pci_resource_flags(pdev, bar); if (res_flag & IORESOURCE_CACHEABLE) mvi->regs = ioremap(res_start, res_len); else mvi->regs = ioremap_nocache(res_start, res_len); if (!mvi->regs) { if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM)) iounmap(mvi->regs_ex); mvi->regs_ex = NULL; goto err_out; } return 0; err_out: return -1; } void mvs_iounmap(void __iomem *regs) { iounmap(regs); } static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, struct Scsi_Host *shost, unsigned int id) { struct mvs_info *mvi = NULL; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); mvi = kzalloc(sizeof(*mvi) + (1L << mvs_chips[ent->driver_data].slot_width) * sizeof(struct mvs_slot_info), GFP_KERNEL); if (!mvi) return NULL; mvi->pdev = pdev; mvi->dev = &pdev->dev; mvi->chip_id = ent->driver_data; mvi->chip = &mvs_chips[mvi->chip_id]; INIT_LIST_HEAD(&mvi->wq_list); ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; mvi->id = id; mvi->sas = sha; mvi->shost = shost; mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL); if (!mvi->tags) goto err_out; if (MVS_CHIP_DISP->chip_ioremap(mvi)) goto err_out; if (!mvs_alloc(mvi, shost)) return mvi; err_out: mvs_free(mvi); return NULL; } static int pci_go_64(struct pci_dev *pdev) { int rc; if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "64-bit DMA enable failed\n"); return rc; } } } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_printk(KERN_ERR, &pdev->dev, 
"32-bit DMA enable failed\n"); return rc; } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "32-bit consistent DMA enable failed\n"); return rc; } } return rc; } static int mvs_prep_sas_ha_init(struct Scsi_Host *shost, const struct mvs_chip_info *chip_info) { int phy_nr, port_nr; unsigned short core_nr; struct asd_sas_phy **arr_phy; struct asd_sas_port **arr_port; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); core_nr = chip_info->n_host; phy_nr = core_nr * chip_info->n_phy; port_nr = phy_nr; memset(sha, 0x00, sizeof(struct sas_ha_struct)); arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); if (!arr_phy || !arr_port) goto exit_free; sha->sas_phy = arr_phy; sha->sas_port = arr_port; sha->core.shost = shost; sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL); if (!sha->lldd_ha) goto exit_free; ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; shost->transportt = mvs_stt; shost->max_id = MVS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; shost->max_cmd_len = 16; return 0; exit_free: kfree(arr_phy); kfree(arr_port); return -1; } static void mvs_post_sas_ha_init(struct Scsi_Host *shost, const struct mvs_chip_info *chip_info) { int can_queue, i = 0, j = 0; struct mvs_info *mvi = NULL; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; for (j = 0; j < nr_core; j++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; for (i = 0; i < chip_info->n_phy; i++) { sha->sas_phy[j * chip_info->n_phy + i] = &mvi->phy[i].sas_phy; sha->sas_port[j * chip_info->n_phy + i] = &mvi->port[i].sas_port; } } sha->sas_ha_name = DRV_NAME; sha->dev = mvi->dev; sha->lldd_module = THIS_MODULE; sha->sas_addr = &mvi->sas_addr[0]; sha->num_phys = nr_core * chip_info->n_phy; sha->lldd_max_execute_num = lldd_max_execute_num; if (mvi->flags & MVF_FLAG_SOC) can_queue = 
MVS_SOC_CAN_QUEUE; else can_queue = MVS_CHIP_SLOT_SZ; sha->lldd_queue_size = can_queue; shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); shost->can_queue = can_queue; mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; sha->core.shost = mvi->shost; } static void mvs_init_sas_add(struct mvs_info *mvi) { u8 i; for (i = 0; i < mvi->chip->n_phy; i++) { mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL; mvi->phy[i].dev_sas_addr = cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr)); } memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE); } static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int rc, nhost = 0; struct mvs_info *mvi; struct mvs_prv_info *mpi; irq_handler_t irq_handler = mvs_interrupt; struct Scsi_Host *shost = NULL; const struct mvs_chip_info *chip; dev_printk(KERN_INFO, &pdev->dev, "mvsas: driver version %s\n", DRV_VERSION); rc = pci_enable_device(pdev); if (rc) goto err_out_enable; pci_set_master(pdev); rc = pci_request_regions(pdev, DRV_NAME); if (rc) goto err_out_disable; rc = pci_go_64(pdev); if (rc) goto err_out_regions; shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); if (!shost) { rc = -ENOMEM; goto err_out_regions; } chip = &mvs_chips[ent->driver_data]; SHOST_TO_SAS_HA(shost) = kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); if (!SHOST_TO_SAS_HA(shost)) { kfree(shost); rc = -ENOMEM; goto err_out_regions; } rc = mvs_prep_sas_ha_init(shost, chip); if (rc) { kfree(shost); rc = -ENOMEM; goto err_out_regions; } pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); do { mvi = mvs_pci_alloc(pdev, ent, shost, nhost); if (!mvi) { rc = -ENOMEM; goto err_out_regions; } memset(&mvi->hba_info_param, 0xFF, sizeof(struct hba_info_page)); mvs_init_sas_add(mvi); mvi->instance = nhost; rc = MVS_CHIP_DISP->chip_init(mvi); if (rc) { mvs_free(mvi); goto err_out_regions; } nhost++; } while (nhost < chip->n_host); mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha); #ifdef CONFIG_SCSI_MVSAS_TASKLET 
tasklet_init(&(mpi->mv_tasklet), mvs_tasklet, (unsigned long)SHOST_TO_SAS_HA(shost)); #endif mvs_post_sas_ha_init(shost, chip); rc = scsi_add_host(shost, &pdev->dev); if (rc) goto err_out_shost; rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); if (rc) goto err_out_shost; rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, SHOST_TO_SAS_HA(shost)); if (rc) goto err_not_sas; MVS_CHIP_DISP->interrupt_enable(mvi); scsi_scan_host(mvi->shost); return 0; err_not_sas: sas_unregister_ha(SHOST_TO_SAS_HA(shost)); err_out_shost: scsi_remove_host(mvi->shost); err_out_regions: pci_release_regions(pdev); err_out_disable: pci_disable_device(pdev); err_out_enable: return rc; } static void mvs_pci_remove(struct pci_dev *pdev) { unsigned short core_nr, i = 0; struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct mvs_info *mvi = NULL; core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; #ifdef CONFIG_SCSI_MVSAS_TASKLET tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); #endif pci_set_drvdata(pdev, NULL); sas_unregister_ha(sha); sas_remove_host(mvi->shost); scsi_remove_host(mvi->shost); MVS_CHIP_DISP->interrupt_disable(mvi); free_irq(mvi->pdev->irq, sha); for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; mvs_free(mvi); } kfree(sha->sas_phy); kfree(sha->sas_port); kfree(sha); pci_release_regions(pdev); pci_disable_device(pdev); return; } static struct pci_device_id mvs_pci_table[] = { { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, { .vendor = PCI_VENDOR_ID_MARVELL, .device = 0x6440, .subvendor = PCI_ANY_ID, .subdevice = 0x6480, .class = 0, .class_mask = 0, .driver_data = chip_6485, }, { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 }, { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 }, { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, { 
PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, { PCI_VDEVICE(TTI, 0x2722), chip_9480 }, { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, { .vendor = PCI_VENDOR_ID_MARVELL_EXT, .device = 0x9480, .subvendor = PCI_ANY_ID, .subdevice = 0x9480, .class = 0, .class_mask = 0, .driver_data = chip_9480, }, { .vendor = PCI_VENDOR_ID_MARVELL_EXT, .device = 0x9445, .subvendor = PCI_ANY_ID, .subdevice = 0x9480, .class = 0, .class_mask = 0, .driver_data = chip_9445, }, { .vendor = PCI_VENDOR_ID_MARVELL_EXT, .device = 0x9485, .subvendor = PCI_ANY_ID, .subdevice = 0x9480, .class = 0, .class_mask = 0, .driver_data = chip_9485, }, { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1041), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1042), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1043), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1044), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1080), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1083), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1084), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { } /* terminate list */ }; static struct pci_driver mvs_pci_driver = { .name = DRV_NAME, .id_table = mvs_pci_table, .probe = mvs_pci_init, .remove = mvs_pci_remove, }; static ssize_t mvs_show_driver_version(struct device *cdev, struct 
device_attribute *attr, char *buffer) { return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION); } static DEVICE_ATTR(driver_version, S_IRUGO, mvs_show_driver_version, NULL); static ssize_t mvs_store_interrupt_coalescing(struct device *cdev, struct device_attribute *attr, const char *buffer, size_t size) { int val = 0; struct mvs_info *mvi = NULL; struct Scsi_Host *shost = class_to_shost(cdev); struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); u8 i, core_nr; if (buffer == NULL) return size; if (sscanf(buffer, "%d", &val) != 1) return -EINVAL; if (val >= 0x10000) { mv_dprintk("interrupt coalescing timer %d us is" "too long\n", val); return strlen(buffer); } interrupt_coalescing = val; core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; if (unlikely(!mvi)) return -EINVAL; for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; if (MVS_CHIP_DISP->tune_interrupt) MVS_CHIP_DISP->tune_interrupt(mvi, interrupt_coalescing); } mv_dprintk("set interrupt coalescing time to %d us\n", interrupt_coalescing); return strlen(buffer); } static ssize_t mvs_show_interrupt_coalescing(struct device *cdev, struct device_attribute *attr, char *buffer) { return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing); } static DEVICE_ATTR(interrupt_coalescing, S_IRUGO|S_IWUSR, mvs_show_interrupt_coalescing, mvs_store_interrupt_coalescing); /* task handler */ struct task_struct *mvs_th; static int __init mvs_init(void) { int rc; mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); if (!mvs_stt) return -ENOMEM; mvs_task_list_cache = kmem_cache_create("mvs_task_list", sizeof(struct mvs_task_list), 0, SLAB_HWCACHE_ALIGN, NULL); if (!mvs_task_list_cache) { rc = -ENOMEM; mv_printk("%s: mvs_task_list_cache alloc failed! 
\n", __func__); goto err_out; } rc = pci_register_driver(&mvs_pci_driver); if (rc) goto err_out; return 0; err_out: sas_release_transport(mvs_stt); return rc; } static void __exit mvs_exit(void) { pci_unregister_driver(&mvs_pci_driver); sas_release_transport(mvs_stt); kmem_cache_destroy(mvs_task_list_cache); } struct device_attribute *mvst_host_attrs[] = { &dev_attr_driver_version, &dev_attr_interrupt_coalescing, NULL, }; module_init(mvs_init); module_exit(mvs_exit); MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); #ifdef CONFIG_PCI MODULE_DEVICE_TABLE(pci, mvs_pci_table); #endif
gpl-2.0
allanmatthew/linux-fslc
arch/avr32/oprofile/op_model_avr32.c
2227
5145
/* * AVR32 Performance Counter Driver * * Copyright (C) 2005-2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Author: Ronny Pedersen */ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/oprofile.h> #include <linux/sched.h> #include <linux/types.h> #include <asm/sysreg.h> #define AVR32_PERFCTR_IRQ_GROUP 0 #define AVR32_PERFCTR_IRQ_LINE 1 void avr32_backtrace(struct pt_regs * const regs, unsigned int depth); enum { PCCNT, PCNT0, PCNT1, NR_counter }; struct avr32_perf_counter { unsigned long enabled; unsigned long event; unsigned long count; unsigned long unit_mask; unsigned long kernel; unsigned long user; u32 ie_mask; u32 flag_mask; }; static struct avr32_perf_counter counter[NR_counter] = { { .ie_mask = SYSREG_BIT(IEC), .flag_mask = SYSREG_BIT(FC), }, { .ie_mask = SYSREG_BIT(IE0), .flag_mask = SYSREG_BIT(F0), }, { .ie_mask = SYSREG_BIT(IE1), .flag_mask = SYSREG_BIT(F1), }, }; static void avr32_perf_counter_reset(void) { /* Reset all counter and disable/clear all interrupts */ sysreg_write(PCCR, (SYSREG_BIT(PCCR_R) | SYSREG_BIT(PCCR_C) | SYSREG_BIT(FC) | SYSREG_BIT(F0) | SYSREG_BIT(F1))); } static irqreturn_t avr32_perf_counter_interrupt(int irq, void *dev_id) { struct avr32_perf_counter *ctr = dev_id; struct pt_regs *regs; u32 pccr; if (likely(!(intc_get_pending(AVR32_PERFCTR_IRQ_GROUP) & (1 << AVR32_PERFCTR_IRQ_LINE)))) return IRQ_NONE; regs = get_irq_regs(); pccr = sysreg_read(PCCR); /* Clear the interrupt flags we're about to handle */ sysreg_write(PCCR, pccr); /* PCCNT */ if (ctr->enabled && (pccr & ctr->flag_mask)) { sysreg_write(PCCNT, -ctr->count); oprofile_add_sample(regs, PCCNT); } ctr++; /* PCNT0 */ if (ctr->enabled && (pccr & ctr->flag_mask)) { sysreg_write(PCNT0, -ctr->count); oprofile_add_sample(regs, PCNT0); } ctr++; /* PCNT1 */ if (ctr->enabled && 
(pccr & ctr->flag_mask)) { sysreg_write(PCNT1, -ctr->count); oprofile_add_sample(regs, PCNT1); } return IRQ_HANDLED; } static int avr32_perf_counter_create_files(struct dentry *root) { struct dentry *dir; unsigned int i; char filename[4]; for (i = 0; i < NR_counter; i++) { snprintf(filename, sizeof(filename), "%u", i); dir = oprofilefs_mkdir(root, filename); oprofilefs_create_ulong(dir, "enabled", &counter[i].enabled); oprofilefs_create_ulong(dir, "event", &counter[i].event); oprofilefs_create_ulong(dir, "count", &counter[i].count); /* Dummy entries */ oprofilefs_create_ulong(dir, "kernel", &counter[i].kernel); oprofilefs_create_ulong(dir, "user", &counter[i].user); oprofilefs_create_ulong(dir, "unit_mask", &counter[i].unit_mask); } return 0; } static int avr32_perf_counter_setup(void) { struct avr32_perf_counter *ctr; u32 pccr; int ret; int i; pr_debug("avr32_perf_counter_setup\n"); if (sysreg_read(PCCR) & SYSREG_BIT(PCCR_E)) { printk(KERN_ERR "oprofile: setup: perf counter already enabled\n"); return -EBUSY; } ret = request_irq(AVR32_PERFCTR_IRQ_GROUP, avr32_perf_counter_interrupt, IRQF_SHARED, "oprofile", counter); if (ret) return ret; avr32_perf_counter_reset(); pccr = 0; for (i = PCCNT; i < NR_counter; i++) { ctr = &counter[i]; if (!ctr->enabled) continue; pr_debug("enabling counter %d...\n", i); pccr |= ctr->ie_mask; switch (i) { case PCCNT: /* PCCNT always counts cycles, so no events */ sysreg_write(PCCNT, -ctr->count); break; case PCNT0: pccr |= SYSREG_BF(CONF0, ctr->event); sysreg_write(PCNT0, -ctr->count); break; case PCNT1: pccr |= SYSREG_BF(CONF1, ctr->event); sysreg_write(PCNT1, -ctr->count); break; } } pr_debug("oprofile: writing 0x%x to PCCR...\n", pccr); sysreg_write(PCCR, pccr); return 0; } static void avr32_perf_counter_shutdown(void) { pr_debug("avr32_perf_counter_shutdown\n"); avr32_perf_counter_reset(); free_irq(AVR32_PERFCTR_IRQ_GROUP, counter); } static int avr32_perf_counter_start(void) { pr_debug("avr32_perf_counter_start\n"); 
sysreg_write(PCCR, sysreg_read(PCCR) | SYSREG_BIT(PCCR_E)); return 0; } static void avr32_perf_counter_stop(void) { pr_debug("avr32_perf_counter_stop\n"); sysreg_write(PCCR, sysreg_read(PCCR) & ~SYSREG_BIT(PCCR_E)); } static struct oprofile_operations avr32_perf_counter_ops __initdata = { .create_files = avr32_perf_counter_create_files, .setup = avr32_perf_counter_setup, .shutdown = avr32_perf_counter_shutdown, .start = avr32_perf_counter_start, .stop = avr32_perf_counter_stop, .cpu_type = "avr32", }; int __init oprofile_arch_init(struct oprofile_operations *ops) { if (!(current_cpu_data.features & AVR32_FEATURE_PCTR)) return -ENODEV; memcpy(ops, &avr32_perf_counter_ops, sizeof(struct oprofile_operations)); ops->backtrace = avr32_backtrace; printk(KERN_INFO "oprofile: using AVR32 performance monitoring.\n"); return 0; } void oprofile_arch_exit(void) { }
gpl-2.0
Flemmard/htc7x30-3.0
drivers/tty/serial/crisv10.c
2227
129632
/* * Serial port driver for the ETRAX 100LX chip * * Copyright (C) 1998-2007 Axis Communications AB * * Many, many authors. Based once upon a time on serial.c for 16x50. * */ static char *serial_version = "$Revision: 1.25 $"; #include <linux/types.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/bitops.h> #include <linux/seq_file.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/dma.h> #include <asm/system.h> #include <arch/svinto.h> /* non-arch dependent serial structures are in linux/serial.h */ #include <linux/serial.h> /* while we keep our own stuff (struct e100_serial) in a local .h file */ #include "crisv10.h" #include <asm/fasttimer.h> #include <arch/io_interface_mux.h> #ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER #ifndef CONFIG_ETRAX_FAST_TIMER #error "Enable FAST_TIMER to use SERIAL_FAST_TIMER" #endif #endif #if defined(CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS) && \ (CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS == 0) #error "RX_TIMEOUT_TICKS == 0 not allowed, use 1" #endif #if defined(CONFIG_ETRAX_RS485_ON_PA) && defined(CONFIG_ETRAX_RS485_ON_PORT_G) #error "Disable either CONFIG_ETRAX_RS485_ON_PA or CONFIG_ETRAX_RS485_ON_PORT_G" #endif /* * All of the compatibilty code so we can compile serial.c against * older kernels is hidden in serial_compat.h */ #if defined(LOCAL_HEADERS) #include "serial_compat.h" #endif struct tty_driver *serial_driver; /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 //#define SERIAL_DEBUG_INTR //#define SERIAL_DEBUG_OPEN //#define SERIAL_DEBUG_FLOW //#define SERIAL_DEBUG_DATA 
/*
 * Compile-time debug switches — all normally commented out.  Each one
 * enables a family of printk() traces further down in this file.
 */
//#define SERIAL_DEBUG_THROTTLE
//#define SERIAL_DEBUG_IO  /* Debug for Extra control and status pins */
//#define SERIAL_DEBUG_LINE 0 /* What serport we want to debug */

/* Enable this to use serial interrupts to handle when you
   expect the first received event on the serial port to
   be an error, break or similar. Used to be able to flash IRMA
   from eLinux */
#define SERIAL_HANDLE_EARLY_ERRORS

/* Currently 16 descriptors x 128 bytes = 2048 bytes */
#define SERIAL_DESCR_BUF_SIZE 256

#define SERIAL_PRESCALE_BASE 3125000 /* 3.125MHz */
#define DEF_BAUD_BASE SERIAL_PRESCALE_BASE

/* We don't want to load the system with massive fast timer interrupt
 * on high baudrates so limit it to 250 us (4kHz)
 */
#define MIN_FLUSH_TIME_USEC 250

/*
 * Trace macro families: each expands to nothing by default; put an "x"
 * in the definition (e.g. "#define DFLOW(x) x") to activate that family.
 */
/* Add an x here to log a lot of timer stuff */
#define TIMERD(x)
/* Debug details of interrupt handling */
#define DINTR1(x)  /* irq on/off, errors */
#define DINTR2(x)  /* tx and rx */
/* Debug flip buffer stuff */
#define DFLIP(x)
/* Debug flow control and overview of data flow */
#define DFLOW(x)
#define DBAUD(x)
#define DLOG_INT_TRIG(x)

/*
 * In-memory event log used by the DINTR1/DINTR2/DFLOW traces above.
 * NOTE(review): DEBUG_LOG() compares against SERIAL_DEBUG_LINE, which is
 * commented out at the top of this section — enabling DEBUG_LOG_INCLUDED
 * alone will not compile; SERIAL_DEBUG_LINE must be defined as well.
 */
//#define DEBUG_LOG_INCLUDED
#ifndef DEBUG_LOG_INCLUDED
/* Logging disabled: the macro swallows its arguments. */
#define DEBUG_LOG(line, string, value)
#else
struct debug_log_info {
	unsigned long time;       /* jiffies at the time of the event */
	unsigned long timer_data; /* snapshot of *R_TIMER_DATA */
//	int line;
	const char *string;       /* format/message (pointer only, not copied) */
	int value;
};
#define DEBUG_LOG_SIZE 4096

struct debug_log_info debug_log[DEBUG_LOG_SIZE];
int debug_log_pos = 0;

#define DEBUG_LOG(_line, _string, _value) do { \
  if ((_line) == SERIAL_DEBUG_LINE) {\
    debug_log_func(_line, _string, _value); \
  }\
}while(0)

/*
 * Append one entry to debug_log[].  The log is write-once: when full
 * (debug_log_pos reaches DEBUG_LOG_SIZE) further events are dropped
 * silently; there is no wrap-around.
 */
void debug_log_func(int line, const char *string, int value)
{
	if (debug_log_pos < DEBUG_LOG_SIZE) {
		debug_log[debug_log_pos].time = jiffies;
		debug_log[debug_log_pos].timer_data = *R_TIMER_DATA;
//		debug_log[debug_log_pos].line = line;
		debug_log[debug_log_pos].string = string;
		debug_log[debug_log_pos].value = value;
		debug_log_pos++;
	}
	/*printk(string, value);*/
}
#endif

#ifndef CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS
/* Default number of timer ticks before
   flushing rx fifo
 * When using "little data, low latency applications: use 0
 * When using "much data applications (PPP)" use ~5
 */
#define CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS 5
#endif

/* Forward declarations — all defined later in this file. */
unsigned long timer_data_to_ns(unsigned long timer_data);

static void change_speed(struct e100_serial *info);
static void rs_throttle(struct tty_struct * tty);
static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
static int rs_write(struct tty_struct *tty,
		const unsigned char *buf, int count);
#ifdef CONFIG_ETRAX_RS485
static int e100_write_rs485(struct tty_struct *tty,
		const unsigned char *buf, int count);
#endif
static int get_lsr_info(struct e100_serial *info, unsigned int *value);

#define DEF_BAUD 115200   /* 115.2 kbit/s */
#define STD_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
#define DEF_RX 0x20  /* or SERIAL_CTRL_W >> 8 */
/* Default value of tx_ctrl register: has txd(bit 7)=1 (idle) as default */
#define DEF_TX 0x80  /* or SERIAL_CTRL_B */

/* offsets from R_SERIALx_CTRL */

#define REG_DATA 0
#define REG_DATA_STATUS32 0 /* this is the 32 bit register R_SERIALx_READ */
#define REG_TR_DATA 0
#define REG_STATUS 1
#define REG_TR_CTRL 1
#define REG_REC_CTRL 2
#define REG_BAUD 3
#define REG_XOFF 4  /* this is a 32 bit register */

/* The bitfields are the same for all serial ports */
#define SER_RXD_MASK         IO_MASK(R_SERIAL0_STATUS, rxd)
#define SER_DATA_AVAIL_MASK  IO_MASK(R_SERIAL0_STATUS, data_avail)
#define SER_FRAMING_ERR_MASK IO_MASK(R_SERIAL0_STATUS, framing_err)
#define SER_PAR_ERR_MASK     IO_MASK(R_SERIAL0_STATUS, par_err)
#define SER_OVERRUN_MASK     IO_MASK(R_SERIAL0_STATUS, overrun)

#define SER_ERROR_MASK (SER_OVERRUN_MASK | SER_PAR_ERR_MASK | SER_FRAMING_ERR_MASK)

/* Values for info->errorcode */
#define ERRCODE_SET_BREAK    (TTY_BREAK)
#define ERRCODE_INSERT        0x100
#define ERRCODE_INSERT_BREAK (ERRCODE_INSERT | TTY_BREAK)

/*
 * Force an end-of-packet on the port's input DMA channel.
 * NOTE(review): the expansion ends with a ';' and the argument is used
 * unparenthesized — callers must treat FORCE_EOP as a full statement
 * (an "if (x) FORCE_EOP(info); else ..." would mis-parse).
 */
#define FORCE_EOP(info)  *R_SET_EOP = 1U << info->iseteop;

/*
 * General note regarding the use of IO_* macros in this file:
 *
 * We will use the bits
defined for DMA channel 6 when using various * IO_* macros (e.g. IO_STATE, IO_MASK, IO_EXTRACT) and _assume_ they are * the same for all channels (which of course they are). * * We will also use the bits defined for serial port 0 when writing commands * to the different ports, as these bits too are the same for all ports. */ /* Mask for the irqs possibly enabled in R_IRQ_MASK1_RD etc. */ static const unsigned long e100_ser_int_mask = 0 #ifdef CONFIG_ETRAX_SERIAL_PORT0 | IO_MASK(R_IRQ_MASK1_RD, ser0_data) | IO_MASK(R_IRQ_MASK1_RD, ser0_ready) #endif #ifdef CONFIG_ETRAX_SERIAL_PORT1 | IO_MASK(R_IRQ_MASK1_RD, ser1_data) | IO_MASK(R_IRQ_MASK1_RD, ser1_ready) #endif #ifdef CONFIG_ETRAX_SERIAL_PORT2 | IO_MASK(R_IRQ_MASK1_RD, ser2_data) | IO_MASK(R_IRQ_MASK1_RD, ser2_ready) #endif #ifdef CONFIG_ETRAX_SERIAL_PORT3 | IO_MASK(R_IRQ_MASK1_RD, ser3_data) | IO_MASK(R_IRQ_MASK1_RD, ser3_ready) #endif ; unsigned long r_alt_ser_baudrate_shadow = 0; /* this is the data for the four serial ports in the etrax100 */ /* DMA2(ser2), DMA4(ser3), DMA6(ser0) or DMA8(ser1) */ /* R_DMA_CHx_CLR_INTR, R_DMA_CHx_FIRST, R_DMA_CHx_CMD */ static struct e100_serial rs_table[] = { { .baud = DEF_BAUD, .ioport = (unsigned char *)R_SERIAL0_CTRL, .irq = 1U << 12, /* uses DMA 6 and 7 */ .oclrintradr = R_DMA_CH6_CLR_INTR, .ofirstadr = R_DMA_CH6_FIRST, .ocmdadr = R_DMA_CH6_CMD, .ostatusadr = R_DMA_CH6_STATUS, .iclrintradr = R_DMA_CH7_CLR_INTR, .ifirstadr = R_DMA_CH7_FIRST, .icmdadr = R_DMA_CH7_CMD, .idescradr = R_DMA_CH7_DESCR, .flags = STD_FLAGS, .rx_ctrl = DEF_RX, .tx_ctrl = DEF_TX, .iseteop = 2, .dma_owner = dma_ser0, .io_if = if_serial_0, #ifdef CONFIG_ETRAX_SERIAL_PORT0 .enabled = 1, #ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT .dma_out_enabled = 1, .dma_out_nbr = SER0_TX_DMA_NBR, .dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR, .dma_out_irq_flags = IRQF_DISABLED, .dma_out_irq_description = "serial 0 dma tr", #else .dma_out_enabled = 0, .dma_out_nbr = UINT_MAX, .dma_out_irq_nbr = 0, .dma_out_irq_flags = 0, 
.dma_out_irq_description = NULL, #endif #ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN .dma_in_enabled = 1, .dma_in_nbr = SER0_RX_DMA_NBR, .dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR, .dma_in_irq_flags = IRQF_DISABLED, .dma_in_irq_description = "serial 0 dma rec", #else .dma_in_enabled = 0, .dma_in_nbr = UINT_MAX, .dma_in_irq_nbr = 0, .dma_in_irq_flags = 0, .dma_in_irq_description = NULL, #endif #else .enabled = 0, .io_if_description = NULL, .dma_out_enabled = 0, .dma_in_enabled = 0 #endif }, /* ttyS0 */ #ifndef CONFIG_SVINTO_SIM { .baud = DEF_BAUD, .ioport = (unsigned char *)R_SERIAL1_CTRL, .irq = 1U << 16, /* uses DMA 8 and 9 */ .oclrintradr = R_DMA_CH8_CLR_INTR, .ofirstadr = R_DMA_CH8_FIRST, .ocmdadr = R_DMA_CH8_CMD, .ostatusadr = R_DMA_CH8_STATUS, .iclrintradr = R_DMA_CH9_CLR_INTR, .ifirstadr = R_DMA_CH9_FIRST, .icmdadr = R_DMA_CH9_CMD, .idescradr = R_DMA_CH9_DESCR, .flags = STD_FLAGS, .rx_ctrl = DEF_RX, .tx_ctrl = DEF_TX, .iseteop = 3, .dma_owner = dma_ser1, .io_if = if_serial_1, #ifdef CONFIG_ETRAX_SERIAL_PORT1 .enabled = 1, .io_if_description = "ser1", #ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT .dma_out_enabled = 1, .dma_out_nbr = SER1_TX_DMA_NBR, .dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR, .dma_out_irq_flags = IRQF_DISABLED, .dma_out_irq_description = "serial 1 dma tr", #else .dma_out_enabled = 0, .dma_out_nbr = UINT_MAX, .dma_out_irq_nbr = 0, .dma_out_irq_flags = 0, .dma_out_irq_description = NULL, #endif #ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN .dma_in_enabled = 1, .dma_in_nbr = SER1_RX_DMA_NBR, .dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR, .dma_in_irq_flags = IRQF_DISABLED, .dma_in_irq_description = "serial 1 dma rec", #else .dma_in_enabled = 0, .dma_in_enabled = 0, .dma_in_nbr = UINT_MAX, .dma_in_irq_nbr = 0, .dma_in_irq_flags = 0, .dma_in_irq_description = NULL, #endif #else .enabled = 0, .io_if_description = NULL, .dma_in_irq_nbr = 0, .dma_out_enabled = 0, .dma_in_enabled = 0 #endif }, /* ttyS1 */ { .baud = DEF_BAUD, .ioport = (unsigned char *)R_SERIAL2_CTRL, .irq = 1U << 4, 
/* uses DMA 2 and 3 */ .oclrintradr = R_DMA_CH2_CLR_INTR, .ofirstadr = R_DMA_CH2_FIRST, .ocmdadr = R_DMA_CH2_CMD, .ostatusadr = R_DMA_CH2_STATUS, .iclrintradr = R_DMA_CH3_CLR_INTR, .ifirstadr = R_DMA_CH3_FIRST, .icmdadr = R_DMA_CH3_CMD, .idescradr = R_DMA_CH3_DESCR, .flags = STD_FLAGS, .rx_ctrl = DEF_RX, .tx_ctrl = DEF_TX, .iseteop = 0, .dma_owner = dma_ser2, .io_if = if_serial_2, #ifdef CONFIG_ETRAX_SERIAL_PORT2 .enabled = 1, .io_if_description = "ser2", #ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT .dma_out_enabled = 1, .dma_out_nbr = SER2_TX_DMA_NBR, .dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR, .dma_out_irq_flags = IRQF_DISABLED, .dma_out_irq_description = "serial 2 dma tr", #else .dma_out_enabled = 0, .dma_out_nbr = UINT_MAX, .dma_out_irq_nbr = 0, .dma_out_irq_flags = 0, .dma_out_irq_description = NULL, #endif #ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN .dma_in_enabled = 1, .dma_in_nbr = SER2_RX_DMA_NBR, .dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR, .dma_in_irq_flags = IRQF_DISABLED, .dma_in_irq_description = "serial 2 dma rec", #else .dma_in_enabled = 0, .dma_in_nbr = UINT_MAX, .dma_in_irq_nbr = 0, .dma_in_irq_flags = 0, .dma_in_irq_description = NULL, #endif #else .enabled = 0, .io_if_description = NULL, .dma_out_enabled = 0, .dma_in_enabled = 0 #endif }, /* ttyS2 */ { .baud = DEF_BAUD, .ioport = (unsigned char *)R_SERIAL3_CTRL, .irq = 1U << 8, /* uses DMA 4 and 5 */ .oclrintradr = R_DMA_CH4_CLR_INTR, .ofirstadr = R_DMA_CH4_FIRST, .ocmdadr = R_DMA_CH4_CMD, .ostatusadr = R_DMA_CH4_STATUS, .iclrintradr = R_DMA_CH5_CLR_INTR, .ifirstadr = R_DMA_CH5_FIRST, .icmdadr = R_DMA_CH5_CMD, .idescradr = R_DMA_CH5_DESCR, .flags = STD_FLAGS, .rx_ctrl = DEF_RX, .tx_ctrl = DEF_TX, .iseteop = 1, .dma_owner = dma_ser3, .io_if = if_serial_3, #ifdef CONFIG_ETRAX_SERIAL_PORT3 .enabled = 1, .io_if_description = "ser3", #ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT .dma_out_enabled = 1, .dma_out_nbr = SER3_TX_DMA_NBR, .dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR, .dma_out_irq_flags = IRQF_DISABLED, 
.dma_out_irq_description = "serial 3 dma tr", #else .dma_out_enabled = 0, .dma_out_nbr = UINT_MAX, .dma_out_irq_nbr = 0, .dma_out_irq_flags = 0, .dma_out_irq_description = NULL, #endif #ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN .dma_in_enabled = 1, .dma_in_nbr = SER3_RX_DMA_NBR, .dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR, .dma_in_irq_flags = IRQF_DISABLED, .dma_in_irq_description = "serial 3 dma rec", #else .dma_in_enabled = 0, .dma_in_nbr = UINT_MAX, .dma_in_irq_nbr = 0, .dma_in_irq_flags = 0, .dma_in_irq_description = NULL #endif #else .enabled = 0, .io_if_description = NULL, .dma_out_enabled = 0, .dma_in_enabled = 0 #endif } /* ttyS3 */ #endif }; #define NR_PORTS (sizeof(rs_table)/sizeof(struct e100_serial)) #ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER static struct fast_timer fast_timers[NR_PORTS]; #endif #ifdef CONFIG_ETRAX_SERIAL_PROC_ENTRY #define PROCSTAT(x) x struct ser_statistics_type { int overrun_cnt; int early_errors_cnt; int ser_ints_ok_cnt; int errors_cnt; unsigned long int processing_flip; unsigned long processing_flip_still_room; unsigned long int timeout_flush_cnt; int rx_dma_ints; int tx_dma_ints; int rx_tot; int tx_tot; }; static struct ser_statistics_type ser_stat[NR_PORTS]; #else #define PROCSTAT(x) #endif /* CONFIG_ETRAX_SERIAL_PROC_ENTRY */ /* RS-485 */ #if defined(CONFIG_ETRAX_RS485) #ifdef CONFIG_ETRAX_FAST_TIMER static struct fast_timer fast_timers_rs485[NR_PORTS]; #endif #if defined(CONFIG_ETRAX_RS485_ON_PA) static int rs485_pa_bit = CONFIG_ETRAX_RS485_ON_PA_BIT; #endif #if defined(CONFIG_ETRAX_RS485_ON_PORT_G) static int rs485_port_g_bit = CONFIG_ETRAX_RS485_ON_PORT_G_BIT; #endif #endif /* Info and macros needed for each ports extra control/status signals. */ #define E100_STRUCT_PORT(line, pinname) \ ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \ (R_PORT_PA_DATA): ( \ (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? 
\ (R_PORT_PB_DATA):&dummy_ser[line])) #define E100_STRUCT_SHADOW(line, pinname) \ ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \ (&port_pa_data_shadow): ( \ (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \ (&port_pb_data_shadow):&dummy_ser[line])) #define E100_STRUCT_MASK(line, pinname) \ ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \ (1<<CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT): ( \ (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \ (1<<CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT):DUMMY_##pinname##_MASK)) #define DUMMY_DTR_MASK 1 #define DUMMY_RI_MASK 2 #define DUMMY_DSR_MASK 4 #define DUMMY_CD_MASK 8 static unsigned char dummy_ser[NR_PORTS] = {0xFF, 0xFF, 0xFF,0xFF}; /* If not all status pins are used or disabled, use mixed mode */ #ifdef CONFIG_ETRAX_SERIAL_PORT0 #define SER0_PA_BITSUM (CONFIG_ETRAX_SER0_DTR_ON_PA_BIT+CONFIG_ETRAX_SER0_RI_ON_PA_BIT+CONFIG_ETRAX_SER0_DSR_ON_PA_BIT+CONFIG_ETRAX_SER0_CD_ON_PA_BIT) #if SER0_PA_BITSUM != -4 # if CONFIG_ETRAX_SER0_DTR_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER0_RI_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER0_DSR_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER0_CD_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1 # endif # endif #endif #define SER0_PB_BITSUM (CONFIG_ETRAX_SER0_DTR_ON_PB_BIT+CONFIG_ETRAX_SER0_RI_ON_PB_BIT+CONFIG_ETRAX_SER0_DSR_ON_PB_BIT+CONFIG_ETRAX_SER0_CD_ON_PB_BIT) #if SER0_PB_BITSUM != -4 # if CONFIG_ETRAX_SER0_DTR_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER0_RI_ON_PB_BIT == -1 # 
ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER0_DSR_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER0_CD_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1 # endif # endif #endif #endif /* PORT0 */ #ifdef CONFIG_ETRAX_SERIAL_PORT1 #define SER1_PA_BITSUM (CONFIG_ETRAX_SER1_DTR_ON_PA_BIT+CONFIG_ETRAX_SER1_RI_ON_PA_BIT+CONFIG_ETRAX_SER1_DSR_ON_PA_BIT+CONFIG_ETRAX_SER1_CD_ON_PA_BIT) #if SER1_PA_BITSUM != -4 # if CONFIG_ETRAX_SER1_DTR_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER1_RI_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER1_DSR_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER1_CD_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1 # endif # endif #endif #define SER1_PB_BITSUM (CONFIG_ETRAX_SER1_DTR_ON_PB_BIT+CONFIG_ETRAX_SER1_RI_ON_PB_BIT+CONFIG_ETRAX_SER1_DSR_ON_PB_BIT+CONFIG_ETRAX_SER1_CD_ON_PB_BIT) #if SER1_PB_BITSUM != -4 # if CONFIG_ETRAX_SER1_DTR_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER1_RI_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER1_DSR_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER1_CD_ON_PB_BIT == -1 # ifndef 
CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1 # endif # endif #endif #endif /* PORT1 */ #ifdef CONFIG_ETRAX_SERIAL_PORT2 #define SER2_PA_BITSUM (CONFIG_ETRAX_SER2_DTR_ON_PA_BIT+CONFIG_ETRAX_SER2_RI_ON_PA_BIT+CONFIG_ETRAX_SER2_DSR_ON_PA_BIT+CONFIG_ETRAX_SER2_CD_ON_PA_BIT) #if SER2_PA_BITSUM != -4 # if CONFIG_ETRAX_SER2_DTR_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER2_RI_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER2_DSR_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER2_CD_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1 # endif # endif #endif #define SER2_PB_BITSUM (CONFIG_ETRAX_SER2_DTR_ON_PB_BIT+CONFIG_ETRAX_SER2_RI_ON_PB_BIT+CONFIG_ETRAX_SER2_DSR_ON_PB_BIT+CONFIG_ETRAX_SER2_CD_ON_PB_BIT) #if SER2_PB_BITSUM != -4 # if CONFIG_ETRAX_SER2_DTR_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER2_RI_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER2_DSR_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER2_CD_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1 # endif # endif #endif #endif /* PORT2 */ #ifdef CONFIG_ETRAX_SERIAL_PORT3 #define SER3_PA_BITSUM (CONFIG_ETRAX_SER3_DTR_ON_PA_BIT+CONFIG_ETRAX_SER3_RI_ON_PA_BIT+CONFIG_ETRAX_SER3_DSR_ON_PA_BIT+CONFIG_ETRAX_SER3_CD_ON_PA_BIT) #if SER3_PA_BITSUM != -4 # if 
CONFIG_ETRAX_SER3_DTR_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER3_RI_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER3_DSR_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER3_CD_ON_PA_BIT == -1 # ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1 # endif # endif #endif #define SER3_PB_BITSUM (CONFIG_ETRAX_SER3_DTR_ON_PB_BIT+CONFIG_ETRAX_SER3_RI_ON_PB_BIT+CONFIG_ETRAX_SER3_DSR_ON_PB_BIT+CONFIG_ETRAX_SER3_CD_ON_PB_BIT) #if SER3_PB_BITSUM != -4 # if CONFIG_ETRAX_SER3_DTR_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER3_RI_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER3_DSR_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1 # endif # endif # if CONFIG_ETRAX_SER3_CD_ON_PB_BIT == -1 # ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED # define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1 # endif # endif #endif #endif /* PORT3 */ #if defined(CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED) || \ defined(CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED) || \ defined(CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED) || \ defined(CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED) #define CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED #endif #ifdef CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED /* The pins can be mixed on PA and PB */ #define CONTROL_PINS_PORT_NOT_USED(line) \ &dummy_ser[line], &dummy_ser[line], \ &dummy_ser[line], &dummy_ser[line], \ &dummy_ser[line], &dummy_ser[line], \ &dummy_ser[line], &dummy_ser[line], \ DUMMY_DTR_MASK, 
  DUMMY_RI_MASK, DUMMY_DSR_MASK, DUMMY_CD_MASK

/*
 * MIXED layout: each of DTR/RI/DSR/CD carries its own data-port address,
 * shadow-register address and bit mask, so the four signals may be spread
 * over both PA and PB.  Pins that are not wired point at dummy_ser[].
 */
struct control_pins
{
	volatile unsigned char *dtr_port;
	unsigned char          *dtr_shadow;
	volatile unsigned char *ri_port;
	unsigned char          *ri_shadow;
	volatile unsigned char *dsr_port;
	unsigned char          *dsr_shadow;
	volatile unsigned char *cd_port;
	unsigned char          *cd_shadow;

	unsigned char dtr_mask;
	unsigned char ri_mask;
	unsigned char dsr_mask;
	unsigned char cd_mask;
};

static const struct control_pins e100_modem_pins[NR_PORTS] =
{
	/* Ser 0 */
	{
#ifdef CONFIG_ETRAX_SERIAL_PORT0
	E100_STRUCT_PORT(0,DTR), E100_STRUCT_SHADOW(0,DTR),
	E100_STRUCT_PORT(0,RI),  E100_STRUCT_SHADOW(0,RI),
	E100_STRUCT_PORT(0,DSR), E100_STRUCT_SHADOW(0,DSR),
	E100_STRUCT_PORT(0,CD),  E100_STRUCT_SHADOW(0,CD),
	E100_STRUCT_MASK(0,DTR),
	E100_STRUCT_MASK(0,RI),
	E100_STRUCT_MASK(0,DSR),
	E100_STRUCT_MASK(0,CD)
#else
	CONTROL_PINS_PORT_NOT_USED(0)
#endif
	},

	/* Ser 1 */
	{
#ifdef CONFIG_ETRAX_SERIAL_PORT1
	E100_STRUCT_PORT(1,DTR), E100_STRUCT_SHADOW(1,DTR),
	E100_STRUCT_PORT(1,RI),  E100_STRUCT_SHADOW(1,RI),
	E100_STRUCT_PORT(1,DSR), E100_STRUCT_SHADOW(1,DSR),
	E100_STRUCT_PORT(1,CD),  E100_STRUCT_SHADOW(1,CD),
	E100_STRUCT_MASK(1,DTR),
	E100_STRUCT_MASK(1,RI),
	E100_STRUCT_MASK(1,DSR),
	E100_STRUCT_MASK(1,CD)
#else
	CONTROL_PINS_PORT_NOT_USED(1)
#endif
	},

	/* Ser 2 */
	{
#ifdef CONFIG_ETRAX_SERIAL_PORT2
	E100_STRUCT_PORT(2,DTR), E100_STRUCT_SHADOW(2,DTR),
	E100_STRUCT_PORT(2,RI),  E100_STRUCT_SHADOW(2,RI),
	E100_STRUCT_PORT(2,DSR), E100_STRUCT_SHADOW(2,DSR),
	E100_STRUCT_PORT(2,CD),  E100_STRUCT_SHADOW(2,CD),
	E100_STRUCT_MASK(2,DTR),
	E100_STRUCT_MASK(2,RI),
	E100_STRUCT_MASK(2,DSR),
	E100_STRUCT_MASK(2,CD)
#else
	CONTROL_PINS_PORT_NOT_USED(2)
#endif
	},

	/* Ser 3 */
	{
#ifdef CONFIG_ETRAX_SERIAL_PORT3
	E100_STRUCT_PORT(3,DTR), E100_STRUCT_SHADOW(3,DTR),
	E100_STRUCT_PORT(3,RI),  E100_STRUCT_SHADOW(3,RI),
	E100_STRUCT_PORT(3,DSR), E100_STRUCT_SHADOW(3,DSR),
	E100_STRUCT_PORT(3,CD),  E100_STRUCT_SHADOW(3,CD),
	E100_STRUCT_MASK(3,DTR),
	E100_STRUCT_MASK(3,RI),
	E100_STRUCT_MASK(3,DSR),
	E100_STRUCT_MASK(3,CD)
#else
	CONTROL_PINS_PORT_NOT_USED(3)
#endif
	}
};
#else  /* CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */

/* All pins are on either PA or PB for each serial port */
#define CONTROL_PINS_PORT_NOT_USED(line) \
  &dummy_ser[line], &dummy_ser[line], \
  DUMMY_DTR_MASK, DUMMY_RI_MASK, DUMMY_DSR_MASK, DUMMY_CD_MASK

/*
 * Compact layout: one shared port/shadow pointer for all four signals.
 * The defines below alias the per-signal member names used by the
 * accessor code (e100_dtr() etc.) onto the shared members, so the same
 * accessor source compiles against either struct layout.
 */
struct control_pins
{
	volatile unsigned char *port;
	unsigned char          *shadow;

	unsigned char dtr_mask;
	unsigned char ri_mask;
	unsigned char dsr_mask;
	unsigned char cd_mask;
};

#define dtr_port port
#define dtr_shadow shadow
#define ri_port port
#define ri_shadow shadow
#define dsr_port port
#define dsr_shadow shadow
#define cd_port port
#define cd_shadow shadow

static const struct control_pins e100_modem_pins[NR_PORTS] =
{
	/* Ser 0 */
	{
#ifdef CONFIG_ETRAX_SERIAL_PORT0
	E100_STRUCT_PORT(0,DTR), E100_STRUCT_SHADOW(0,DTR),
	E100_STRUCT_MASK(0,DTR),
	E100_STRUCT_MASK(0,RI),
	E100_STRUCT_MASK(0,DSR),
	E100_STRUCT_MASK(0,CD)
#else
	CONTROL_PINS_PORT_NOT_USED(0)
#endif
	},

	/* Ser 1 */
	{
#ifdef CONFIG_ETRAX_SERIAL_PORT1
	E100_STRUCT_PORT(1,DTR), E100_STRUCT_SHADOW(1,DTR),
	E100_STRUCT_MASK(1,DTR),
	E100_STRUCT_MASK(1,RI),
	E100_STRUCT_MASK(1,DSR),
	E100_STRUCT_MASK(1,CD)
#else
	CONTROL_PINS_PORT_NOT_USED(1)
#endif
	},

	/* Ser 2 */
	{
#ifdef CONFIG_ETRAX_SERIAL_PORT2
	E100_STRUCT_PORT(2,DTR), E100_STRUCT_SHADOW(2,DTR),
	E100_STRUCT_MASK(2,DTR),
	E100_STRUCT_MASK(2,RI),
	E100_STRUCT_MASK(2,DSR),
	E100_STRUCT_MASK(2,CD)
#else
	CONTROL_PINS_PORT_NOT_USED(2)
#endif
	},

	/* Ser 3 */
	{
#ifdef CONFIG_ETRAX_SERIAL_PORT3
	E100_STRUCT_PORT(3,DTR), E100_STRUCT_SHADOW(3,DTR),
	E100_STRUCT_MASK(3,DTR),
	E100_STRUCT_MASK(3,RI),
	E100_STRUCT_MASK(3,DSR),
	E100_STRUCT_MASK(3,CD)
#else
	CONTROL_PINS_PORT_NOT_USED(3)
#endif
	}
};
#endif /* !CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */

#define E100_RTS_MASK 0x20
#define E100_CTS_MASK 0x40

/* All serial port signals are active low:
 * active   = 0 -> 3.3V to RS-232 driver -> -12V on RS-232 level
 * inactive = 1 -> 0V   to RS-232 driver -> +12V on RS-232 level
 *
 * These macros returns the pin
 value: 0=0V, >=1 = 3.3V on ETRAX chip
 */

/* RTS/CTS live in the serial controller registers themselves. */
/* Output */
#define E100_RTS_GET(info) ((info)->rx_ctrl & E100_RTS_MASK)
/* Input */
#define E100_CTS_GET(info) ((info)->ioport[REG_STATUS] & E100_CTS_MASK)

/* These are typically PA or PB and 0 means 0V, 1 means 3.3V */
/* Is an output */
/* DTR is read back from the shadow register, not the pin. */
#define E100_DTR_GET(info) ((*e100_modem_pins[(info)->line].dtr_shadow) & e100_modem_pins[(info)->line].dtr_mask)

/* Normally inputs */
#define E100_RI_GET(info) ((*e100_modem_pins[(info)->line].ri_port) & e100_modem_pins[(info)->line].ri_mask)
#define E100_CD_GET(info) ((*e100_modem_pins[(info)->line].cd_port) & e100_modem_pins[(info)->line].cd_mask)

/* Input */
#define E100_DSR_GET(info) ((*e100_modem_pins[(info)->line].dsr_port) & e100_modem_pins[(info)->line].dsr_mask)


/*
 * tmp_buf is used as a temporary buffer by serial_write.  We need to
 * lock it in case the memcpy_fromfs blocks while swapping in a page,
 * and some other program tries to do a serial write at the same time.
 * Since the lock will only come under contention when the system is
 * swapping and available memory is low, it makes sense to share one
 * buffer across all the serial ports, since it significantly saves
 * memory if large numbers of serial ports are open.
 */
static unsigned char *tmp_buf;
static DEFINE_MUTEX(tmp_buf_mutex);

/* Calculate the chartime depending on baudrate, numbor of bits etc. */
/*
 * Recomputes info->char_time_usec (time to transfer one character, in
 * microseconds, +1 to round up) and info->flush_time_usec (4 chartimes,
 * clamped to MIN_FLUSH_TIME_USEC) from the tty's current c_cflag.
 * NOTE(review): only CS7 is special-cased; CS5/CS6 fall into the
 * 10-bit default together with CS8.
 */
static void update_char_time(struct e100_serial * info)
{
	tcflag_t cflags = info->port.tty->termios->c_cflag;
	int bits;

	/* calc. number of bits / data byte */
	/* databits + startbit and 1 stopbit */
	if ((cflags & CSIZE) == CS7)
		bits = 9;
	else
		bits = 10;

	if (cflags & CSTOPB)     /* 2 stopbits ? */
		bits++;

	if (cflags & PARENB)     /* parity bit ? */
		bits++;

	/* calc timeout */
	info->char_time_usec = ((bits * 1000000) / info->baud) + 1;
	info->flush_time_usec = 4*info->char_time_usec;
	if (info->flush_time_usec < MIN_FLUSH_TIME_USEC)
		info->flush_time_usec = MIN_FLUSH_TIME_USEC;

}

/*
 * This function maps from the Bxxxx defines in asm/termbits.h into real
 * baud rates.
 */

static int
cflag_to_baud(unsigned int cflag)
{
	/* Indexed by the low CBAUD bits; B0..B38400. */
	static int baud_table[] = {
		0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
		4800, 9600, 19200, 38400 };

	/* Indexed likewise when CBAUDEX is set; B57600 and up. */
	static int ext_baud_table[] = {
		0, 57600, 115200, 230400, 460800, 921600, 1843200, 6250000,
		0, 0, 0, 0, 0, 0, 0, 0 };

	if (cflag & CBAUDEX)
		return ext_baud_table[(cflag & CBAUD) & ~CBAUDEX];
	else
		return baud_table[cflag & CBAUD];
}

/* and this maps to an etrax100 hardware baud constant */

/*
 * Returns the hardware baud-rate selector for R_SERIALx_BAUD, duplicated
 * into both nibbles so receiver and transmitter run at the same rate.
 * -1 table entries mark rates the hardware cannot do; those fall back
 * to the 9600 selector (5) with a warning.
 */
static unsigned char
cflag_to_etrax_baud(unsigned int cflag)
{
	char retval;

	static char baud_table[] = {
		-1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, 4, 5, 6, 7 };

	static char ext_baud_table[] = {
		-1, 8, 9, 10, 11, 12, 13, 14, -1, -1, -1, -1, -1, -1, -1, -1 };

	if (cflag & CBAUDEX)
		retval = ext_baud_table[(cflag & CBAUD) & ~CBAUDEX];
	else
		retval = baud_table[cflag & CBAUD];

	if (retval < 0) {
		printk(KERN_WARNING "serdriver tried setting invalid baud rate, flags %x.\n", cflag);
		retval = 5; /* choose default 9600 instead */
	}

	return retval | (retval << 4); /* choose same for both TX and RX */
}

/* Various static support functions */

/* Functions to set or clear DTR/RTS on the requested line */
/* It is complicated by the fact that RTS is a serial port register, while
 * DTR might not be implemented in the HW at all, and if it is, it can be on
 * any general port.
 */
/*
 * Assert (set != 0) or deassert DTR.  The pin is driven through its GPIO
 * shadow register under local_irq_save() so the read-modify-write of the
 * shared port register is not torn by an interrupt.  DTR is active low,
 * hence set -> clear the bit, !set -> set the bit.  No-op when building
 * for the Svinto simulator.
 */
static inline void
e100_dtr(struct e100_serial *info, int set)
{
#ifndef CONFIG_SVINTO_SIM
	unsigned char mask = e100_modem_pins[info->line].dtr_mask;

#ifdef SERIAL_DEBUG_IO
	printk("ser%i dtr %i mask: 0x%02X\n", info->line, set, mask);
	printk("ser%i shadow before 0x%02X get: %i\n",
	       info->line, *e100_modem_pins[info->line].dtr_shadow,
	       E100_DTR_GET(info));
#endif
	/* DTR is active low */
	{
		unsigned long flags;

		local_irq_save(flags);
		*e100_modem_pins[info->line].dtr_shadow &= ~mask;
		*e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask);
		*e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow;
		local_irq_restore(flags);
	}

#ifdef SERIAL_DEBUG_IO
	printk("ser%i shadow after 0x%02X get: %i\n",
	       info->line, *e100_modem_pins[info->line].dtr_shadow,
	       E100_DTR_GET(info));
#endif
#endif
}

/* set = 0 means 3.3V on the pin, bitvalue: 0=active, 1=inactive
 *                                          0=0V    , 1=3.3V
 */
/*
 * Assert/deassert RTS.  Unlike DTR, RTS lives in the serial controller's
 * own receive-control register, so this updates the cached rx_ctrl and
 * writes it back to REG_REC_CTRL.
 */
static inline void
e100_rts(struct e100_serial *info, int set)
{
#ifndef CONFIG_SVINTO_SIM
	unsigned long flags;
	local_irq_save(flags);
	info->rx_ctrl &= ~E100_RTS_MASK;
	info->rx_ctrl |= (set ? 0 : E100_RTS_MASK);  /* RTS is active low */
	info->ioport[REG_REC_CTRL] = info->rx_ctrl;
	local_irq_restore(flags);
#ifdef SERIAL_DEBUG_IO
	printk("ser%i rts %i\n", info->line, set);
#endif
#endif
}


/* If this behaves as a modem, RI and CD is an output */
static inline void
e100_ri_out(struct e100_serial *info, int set)
{
#ifndef CONFIG_SVINTO_SIM
	/* RI is active low */
	{
		unsigned char mask = e100_modem_pins[info->line].ri_mask;
		unsigned long flags;

		local_irq_save(flags);
		*e100_modem_pins[info->line].ri_shadow &= ~mask;
		*e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask);
		*e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
		local_irq_restore(flags);
	}
#endif
}

/* Same shadow-register dance as e100_ri_out(), for the CD pin. */
static inline void
e100_cd_out(struct e100_serial *info, int set)
{
#ifndef CONFIG_SVINTO_SIM
	/* CD is active low */
	{
		unsigned char mask = e100_modem_pins[info->line].cd_mask;
		unsigned long flags;

		local_irq_save(flags);
		*e100_modem_pins[info->line].cd_shadow &= ~mask;
		*e100_modem_pins[info->line].cd_shadow |= (set ? 0 : mask);
		*e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
		local_irq_restore(flags);
	}
#endif
}

/* Clear rec_enable in the cached rx_ctrl and push it to the hardware. */
static inline void
e100_disable_rx(struct e100_serial *info)
{
#ifndef CONFIG_SVINTO_SIM
	/* disable the receiver */
	info->ioport[REG_REC_CTRL] =
		(info->rx_ctrl &= ~IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
#endif
}

/* Set rec_enable in the cached rx_ctrl and push it to the hardware. */
static inline void
e100_enable_rx(struct e100_serial *info)
{
#ifndef CONFIG_SVINTO_SIM
	/* enable the receiver */
	info->ioport[REG_REC_CTRL] =
		(info->rx_ctrl |= IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
#endif
}

/* the rx DMA uses both the dma_descr and the dma_eop interrupts */
/* info->irq is the port's base bit in R_IRQ_MASK2; <<2 selects the input
 * channel's dma_descr bit and <<3 its dma_eop bit. */

static inline void
e100_disable_rxdma_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
	printk("rxdma_irq(%d): 0\n",info->line);
#endif
	DINTR1(DEBUG_LOG(info->line,"IRQ disable_rxdma_irq %i\n", info->line));
	*R_IRQ_MASK2_CLR = (info->irq << 2) | (info->irq << 3);
}

static inline void
e100_enable_rxdma_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
	printk("rxdma_irq(%d): 1\n",info->line);
#endif
	DINTR1(DEBUG_LOG(info->line,"IRQ enable_rxdma_irq %i\n", info->line));
	*R_IRQ_MASK2_SET = (info->irq << 2) | (info->irq << 3);
}

/* the tx DMA uses only dma_descr interrupt */

static void e100_disable_txdma_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
	printk("txdma_irq(%d): 0\n",info->line);
#endif
	DINTR1(DEBUG_LOG(info->line,"IRQ disable_txdma_irq %i\n", info->line));
	*R_IRQ_MASK2_CLR = info->irq;
}

static void e100_enable_txdma_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
	printk("txdma_irq(%d): 1\n",info->line);
#endif
	DINTR1(DEBUG_LOG(info->line,"IRQ enable_txdma_irq %i\n", info->line));
	*R_IRQ_MASK2_SET = info->irq;
}

/*
 * Detach the port's output DMA channel in the R_GEN_CONFIG mux.  Each
 * channel is only re-routed if it is currently assigned to this serial
 * port; the replacement function (unused/usb/par0/par1) matches what the
 * hardware mux offers for that channel.  Runs with local irqs off since
 * genconfig_shadow is a shared read-modify-write.
 */
static void e100_disable_txdma_channel(struct e100_serial *info)
{
	unsigned long flags;

	/* Disable output DMA channel for the serial port in question
	 * ( set to something other than serialX)
	 */
	local_irq_save(flags);
	DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line));
	if (info->line == 0) {
		if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) ==
		    IO_STATE(R_GEN_CONFIG, dma6, serial0)) {
			genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
			genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, unused);
		}
	} else if (info->line == 1) {
		if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma8)) ==
		    IO_STATE(R_GEN_CONFIG, dma8, serial1)) {
			genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
			genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, usb);
		}
	} else if (info->line == 2) {
		if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma2)) ==
		    IO_STATE(R_GEN_CONFIG, dma2, serial2)) {
			genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
			genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, par0);
		}
	} else if (info->line == 3) {
		if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma4)) ==
		    IO_STATE(R_GEN_CONFIG, dma4, serial3)) {
			genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
			genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, par1);
		}
	}
	*R_GEN_CONFIG = genconfig_shadow;
	local_irq_restore(flags);
}

/* Route the port's output DMA channel back to this serial port. */
static void e100_enable_txdma_channel(struct e100_serial *info)
{
	unsigned long flags;

	local_irq_save(flags);
	DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line));
	/* Enable output DMA channel for the serial port in question */
	if (info->line == 0) {
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, serial0);
	} else if (info->line == 1) {
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, serial1);
	} else if (info->line == 2) {
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, serial2);
	} else if (info->line == 3) {
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3);
	}
	*R_GEN_CONFIG = genconfig_shadow;
	local_irq_restore(flags);
}

/* Input-side twin of e100_disable_txdma_channel() (channels 7/9/3/5). */
static void e100_disable_rxdma_channel(struct e100_serial *info)
{
	unsigned long flags;

	/* Disable input DMA channel for the serial port in question
	 * ( set to something other than serialX)
	 */
	local_irq_save(flags);
	if (info->line == 0) {
		if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) ==
		    IO_STATE(R_GEN_CONFIG, dma7, serial0)) {
			genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
			genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma7, unused);
		}
	} else if (info->line == 1) {
		if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma9)) ==
		    IO_STATE(R_GEN_CONFIG, dma9, serial1)) {
			genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma9);
			genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma9, usb);
		}
	} else if (info->line == 2) {
		if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma3)) ==
		    IO_STATE(R_GEN_CONFIG, dma3, serial2)) {
			genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
			genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, par0);
		}
	} else if (info->line == 3) {
		if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma5)) ==
		    IO_STATE(R_GEN_CONFIG, dma5, serial3)) {
			genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
			genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, par1);
		}
	}
	*R_GEN_CONFIG = genconfig_shadow;
	local_irq_restore(flags);
}

/* Route the port's input DMA channel back to this serial port. */
static void e100_enable_rxdma_channel(struct e100_serial *info)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Enable input DMA channel for the serial port in question */
	if (info->line == 0) {
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma7, serial0);
	} else if (info->line == 1) {
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma9);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma9, serial1);
	} else if (info->line == 2) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3); genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, serial2); } else if (info->line == 3) { genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5); genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3); } *R_GEN_CONFIG = genconfig_shadow; local_irq_restore(flags); } #ifdef SERIAL_HANDLE_EARLY_ERRORS /* in order to detect and fix errors on the first byte we have to use the serial interrupts as well. */ static inline void e100_disable_serial_data_irq(struct e100_serial *info) { #ifdef SERIAL_DEBUG_INTR printk("ser_irq(%d): 0\n",info->line); #endif DINTR1(DEBUG_LOG(info->line,"IRQ disable data_irq %i\n", info->line)); *R_IRQ_MASK1_CLR = (1U << (8+2*info->line)); } static inline void e100_enable_serial_data_irq(struct e100_serial *info) { #ifdef SERIAL_DEBUG_INTR printk("ser_irq(%d): 1\n",info->line); printk("**** %d = %d\n", (8+2*info->line), (1U << (8+2*info->line))); #endif DINTR1(DEBUG_LOG(info->line,"IRQ enable data_irq %i\n", info->line)); *R_IRQ_MASK1_SET = (1U << (8+2*info->line)); } #endif static inline void e100_disable_serial_tx_ready_irq(struct e100_serial *info) { #ifdef SERIAL_DEBUG_INTR printk("ser_tx_irq(%d): 0\n",info->line); #endif DINTR1(DEBUG_LOG(info->line,"IRQ disable ready_irq %i\n", info->line)); *R_IRQ_MASK1_CLR = (1U << (8+1+2*info->line)); } static inline void e100_enable_serial_tx_ready_irq(struct e100_serial *info) { #ifdef SERIAL_DEBUG_INTR printk("ser_tx_irq(%d): 1\n",info->line); printk("**** %d = %d\n", (8+1+2*info->line), (1U << (8+1+2*info->line))); #endif DINTR2(DEBUG_LOG(info->line,"IRQ enable ready_irq %i\n", info->line)); *R_IRQ_MASK1_SET = (1U << (8+1+2*info->line)); } static inline void e100_enable_rx_irq(struct e100_serial *info) { if (info->uses_dma_in) e100_enable_rxdma_irq(info); else e100_enable_serial_data_irq(info); } static inline void e100_disable_rx_irq(struct e100_serial *info) { if (info->uses_dma_in) e100_disable_rxdma_irq(info); else e100_disable_serial_data_irq(info); } 
#if defined(CONFIG_ETRAX_RS485)

/* Enable RS-485 mode on selected port. This is UGLY.
 * Drives the board-specific transceiver-enable GPIO(s), stores the
 * user's serial_rs485 settings in the port, and clamps the
 * delay_rts_before_send value to 1000. Always returns 0. */
static int
e100_enable_rs485(struct tty_struct *tty, struct serial_rs485 *r)
{
	struct e100_serial * info = (struct e100_serial *)tty->driver_data;

#if defined(CONFIG_ETRAX_RS485_ON_PA)
	*R_PORT_PA_DATA = port_pa_data_shadow |= (1 << rs485_pa_bit);
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
	REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
		       rs485_port_g_bit, 1);
#endif
#if defined(CONFIG_ETRAX_RS485_LTC1387)
	REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
		       CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 1);
	REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
		       CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 1);
#endif

	info->rs485 = *r;

	/* Maximum delay before RTS equal to 1000 */
	if (info->rs485.delay_rts_before_send >= 1000)
		info->rs485.delay_rts_before_send = 1000;

/*	printk("rts: on send = %i, after = %i, enabled = %i",
		    info->rs485.rts_on_send,
		    info->rs485.rts_after_sent,
		    info->rs485.enabled
	);
*/
	return 0;
}

/* Write for the RS-485 ioctl path: temporarily force SER_RS485_ENABLED
 * around a normal rs_write() so the write path applies RS-485 RTS
 * handling, then restore the flag's previous state. Returns the count
 * reported by rs_write(). */
static int
e100_write_rs485(struct tty_struct *tty,
                 const unsigned char *buf, int count)
{
	struct e100_serial * info = (struct e100_serial *)tty->driver_data;
	int old_value = (info->rs485.flags) & SER_RS485_ENABLED;

	/* rs485 is always implicitly enabled if we're using the ioctl()
	 * but it doesn't have to be set in the serial_rs485
	 * (to be backward compatible with old apps)
	 * So we store, set and restore it.
	 */
	info->rs485.flags |= SER_RS485_ENABLED;
	/* rs_write now deals with RS485 if enabled */
	count = rs_write(tty, buf, count);
	if (!old_value)
		info->rs485.flags &= ~(SER_RS485_ENABLED);

	return count;
}

#ifdef CONFIG_ETRAX_FAST_TIMER
/* Timer function to toggle RTS when using FAST_TIMER.
 * Runs once after transmit completes: clears its timer slot, sets RTS
 * to the configured after-send state, and (optionally) re-enables the
 * receiver that was turned off during the RS-485 transmit. */
static void rs485_toggle_rts_timer_function(unsigned long data)
{
	struct e100_serial *info = (struct e100_serial *)data;

	fast_timers_rs485[info->line].function = NULL;
	e100_rts(info, (info->rs485.flags & SER_RS485_RTS_AFTER_SEND));
#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
	e100_enable_rx(info);
	e100_enable_rx_irq(info);
#endif
}
#endif
#endif /* CONFIG_ETRAX_RS485 */

/*
 * ------------------------------------------------------------
 * rs_stop() and rs_start()
 *
 * This routines are called before setting or resetting tty->stopped.
 * They enable or disable transmitter using the XOFF registers, as necessary.
 * ------------------------------------------------------------
 */

/* Stop transmission: program the XOFF register with the stop character,
 * tx_stop = stop, and (if IXON) automatic XOFF handling. Done with IRQs
 * off since the register is shared with the interrupt paths. */
static void
rs_stop(struct tty_struct *tty)
{
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;
	if (info) {
		unsigned long flags;
		unsigned long xoff;

		local_irq_save(flags);
		DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n",
				CIRC_CNT(info->xmit.head,
					 info->xmit.tail,SERIAL_XMIT_SIZE)));

		xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char,
				STOP_CHAR(info->port.tty));
		xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, stop);
		if (tty->termios->c_iflag & IXON ) {
			xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
		}

		*((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
		local_irq_restore(flags);
	}
}

/* Restart transmission: re-program the XOFF register with tx_stop =
 * enable, and if there is buffered data on a non-DMA port, kick the
 * tx-ready interrupt so char-by-char output resumes. */
static void
rs_start(struct tty_struct *tty)
{
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;
	if (info) {
		unsigned long flags;
		unsigned long xoff;

		local_irq_save(flags);
		DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n",
				CIRC_CNT(info->xmit.head,
					 info->xmit.tail,SERIAL_XMIT_SIZE)));
		xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(tty));
		xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
		if (tty->termios->c_iflag & IXON ) {
			xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
		}

		*((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
		if (!info->uses_dma_out &&
		    info->xmit.head != info->xmit.tail && info->xmit.buf)
			e100_enable_serial_tx_ready_irq(info);

		local_irq_restore(flags);
	}
}

/*
 * ----------------------------------------------------------------------
 *
 * Here starts the interrupt handling routines.  All of the following
 * subroutines are declared as inline and are folded into
 * rs_interrupt().  They were separated out for readability's sake.
 *
 * Note: rs_interrupt() is a "fast" interrupt, which means that it
 * runs with interrupts turned off.  People who may want to modify
 * rs_interrupt() should try to keep the interrupt handler as fast as
 * possible.  After you are done making modifications, it is not a bad
 * idea to do:
 *
 * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
 *
 * and look at the resulting assemble code in serial.s.
 *
 * 				- Ted Ts'o (tytso@mit.edu), 7-Mar-93
 * -----------------------------------------------------------------------
 */

/*
 * This routine is used by the interrupt handler to schedule
 * processing in the software interrupt portion of the driver.
 * The event bit acts as a de-duplication guard so the work item
 * is only queued once per pending event type.
 */
static void rs_sched_event(struct e100_serial *info, int event)
{
	if (info->event & (1 << event))
		return;
	info->event |= 1 << event;
	schedule_work(&info->work);
}

/* The output DMA channel is free - use it to send as many chars as possible
 * NOTES:
 * We don't pay attention to info->x_char, which means if the TTY wants to
 * use XON/XOFF it will set info->x_char but we won't send any X char!
 *
 * To implement this, we'd just start a DMA send of 1 byte pointing at a
 * buffer containing the X char, and skip updating xmit. We'd also have to
 * check if the last sent char was the X char when we enter this function
 * the next time, to avoid updating xmit with the sent X value.
 */
/* Account for the bytes the previous DMA transfer sent (sw_len if it
 * ran to completion, hw_len if it stopped early), advance the circular
 * xmit buffer, wake a blocked writer when the buffer is nearly empty,
 * and start a new DMA transfer for the next contiguous run of bytes.
 * Called from the tx DMA interrupt and from start_transmit(). */
static void
transmit_chars_dma(struct e100_serial *info)
{
	unsigned int c, sentl;
	struct etrax_dma_descr *descr;

#ifdef CONFIG_SVINTO_SIM
	/* This will output too little if tail is not 0 always since
	 * we don't reloop to send the other part. Anyway this SHOULD be a
	 * no-op - transmit_chars_dma would never really be called during sim
	 * since rs_write does not write into the xmit buffer then.
	 */
	if (info->xmit.tail)
		printk("Error in serial.c:transmit_chars-dma(), tail!=0\n");
	if (info->xmit.head != info->xmit.tail) {
		SIMCOUT(info->xmit.buf + info->xmit.tail,
			CIRC_CNT(info->xmit.head,
				 info->xmit.tail,
				 SERIAL_XMIT_SIZE));
		info->xmit.head = info->xmit.tail;  /* move back head */
		info->tr_running = 0;
	}
	return;
#endif
	/* acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
	*info->oclrintradr =
		IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
		IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);

#ifdef SERIAL_DEBUG_INTR
	if (info->line == SERIAL_DEBUG_LINE)
		printk("tc\n");
#endif
	if (!info->tr_running) {
		/* weirdo... we shouldn't get here! */
		printk(KERN_WARNING "Achtung: transmit_chars_dma with !tr_running\n");
		return;
	}

	descr = &info->tr_descr;

	/* first get the amount of bytes sent during the last DMA transfer,
	   and update xmit accordingly */

	/* if the stop bit was not set, all data has been sent */
	if (!(descr->status & d_stop)) {
		sentl = descr->sw_len;
	} else
		/* otherwise we find the amount of data sent here */
		sentl = descr->hw_len;

	DFLOW(DEBUG_LOG(info->line, "TX %i done\n", sentl));

	/* update stats */
	info->icount.tx += sentl;

	/* update xmit buffer */
	info->xmit.tail = (info->xmit.tail + sentl) & (SERIAL_XMIT_SIZE - 1);

	/* if there is only a few chars left in the buf, wake up the blocked
	   write if any */
	if (CIRC_CNT(info->xmit.head,
		     info->xmit.tail,
		     SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
		rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);

	/* find out the largest amount of consecutive bytes we want to send now */

	c = CIRC_CNT_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);

	/* Don't send all in one DMA transfer - divide it so we wake up
	 * application before all is sent
	 */

	if (c >= 4*WAKEUP_CHARS)
		c = c/2;

	if (c <= 0) {
		/* our job here is done, don't schedule any new DMA transfer */
		info->tr_running = 0;

#if defined(CONFIG_ETRAX_RS485) && defined(CONFIG_ETRAX_FAST_TIMER)
		if (info->rs485.flags & SER_RS485_ENABLED) {
			/* Set a short timer to toggle RTS */
			start_one_shot_timer(&fast_timers_rs485[info->line],
			                     rs485_toggle_rts_timer_function,
			                     (unsigned long)info,
			                     info->char_time_usec*2,
			                     "RS-485");
		}
#endif /* RS485 */
		return;
	}

	/* ok we can schedule a dma send of c chars starting at info->xmit.tail */
	/* set up the descriptor correctly for output */
	DFLOW(DEBUG_LOG(info->line, "TX %i\n", c));
	descr->ctrl = d_int | d_eol | d_wait; /* Wait needed for tty_wait_until_sent() */
	descr->sw_len = c;
	descr->buf = virt_to_phys(info->xmit.buf + info->xmit.tail);
	descr->status = 0;

	*info->ofirstadr = virt_to_phys(descr); /* write to R_DMAx_FIRST */
	*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);

	/* DMA is now
running (hopefully) */
} /* transmit_chars_dma */

/* Reset the tx descriptor bookkeeping, mark the transmitter running,
 * and kick off output either via DMA or the tx-ready interrupt. */
static void
start_transmit(struct e100_serial *info)
{
#if 0
	if (info->line == SERIAL_DEBUG_LINE)
		printk("x\n");
#endif

	info->tr_descr.sw_len = 0;
	info->tr_descr.hw_len = 0;
	info->tr_descr.status = 0;
	info->tr_running = 1;
	if (info->uses_dma_out)
		transmit_chars_dma(info);
	else
		e100_enable_serial_tx_ready_irq(info);
} /* start_transmit */

#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
static int serial_fast_timer_started = 0;
static int serial_fast_timer_expired = 0;
static void flush_timeout_function(unsigned long data);
/* Arm the per-line one-shot flush timer unless it is already pending
 * (function != NULL means armed); runs flush_timeout_function() after
 * `usec` microseconds. Interrupts are disabled around the check-and-arm
 * to keep it atomic. */
#define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\
	unsigned long timer_flags; \
	local_irq_save(timer_flags); \
	if (fast_timers[info->line].function == NULL) { \
		serial_fast_timer_started++; \
		TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \
		TIMERD(DEBUG_LOG(info->line, "num started: %i\n", serial_fast_timer_started)); \
		start_one_shot_timer(&fast_timers[info->line], \
		                     flush_timeout_function, \
		                     (unsigned long)info, \
		                     (usec), \
		                     string); \
	} \
	else { \
		TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \
	} \
	local_irq_restore(timer_flags); \
}
#define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec)
#else
/* Without the fast timer the flush macros compile away to nothing. */
#define START_FLUSH_FAST_TIMER_TIME(info, string, usec)
#define START_FLUSH_FAST_TIMER(info, string)
#endif

/* Allocate a receive buffer with `size` bytes of payload after the
 * header (GFP_ATOMIC - callers run in interrupt context). Returns NULL
 * on allocation failure; the buffer starts empty with TTY_NORMAL. */
static struct etrax_recv_buffer *
alloc_recv_buffer(unsigned int size)
{
	struct etrax_recv_buffer *buffer;

	if (!(buffer = kmalloc(sizeof *buffer + size, GFP_ATOMIC)))
		return NULL;

	buffer->next = NULL;
	buffer->length = 0;
	buffer->error = TTY_NORMAL;

	return buffer;
}

/* Append a filled buffer to the port's singly-linked receive list and
 * update the pending byte count (tracking the high-water mark), with
 * IRQs off around the list manipulation. */
static void
append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer)
{
	unsigned long flags;

	local_irq_save(flags);

	if (!info->first_recv_buffer)
		info->first_recv_buffer = buffer;
	else
		info->last_recv_buffer->next = buffer;

	info->last_recv_buffer = buffer;

	info->recv_cnt += buffer->length;
	if (info->recv_cnt > info->max_recv_cnt)
		info->max_recv_cnt = info->recv_cnt;

	local_irq_restore(flags);
}

/* Deliver one received character with its error flag. On a DMA-in port
 * it is queued as a one-byte receive buffer (returns 0 if allocation
 * fails); otherwise it goes straight into the tty flip buffer. Returns
 * 1 on success and bumps the rx count. */
static int
add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char flag)
{
	struct etrax_recv_buffer *buffer;
	if (info->uses_dma_in) {
		if (!(buffer = alloc_recv_buffer(4)))
			return 0;

		buffer->length = 1;
		buffer->error = flag;
		buffer->buffer[0] = data;

		append_recv_buffer(info, buffer);

		info->icount.rx++;
	} else {
		struct tty_struct *tty = info->port.tty;
		tty_insert_flip_char(tty, data, flag);
		info->icount.rx++;
	}

	return 1;
}

/* Hand the `recvl` bytes a DMA descriptor just filled over to the
 * receive list. The etrax_recv_buffer header lives immediately before
 * the descriptor's data area, so it is recovered by pointer arithmetic.
 * Drops the data (returns 0) if more than 64 KiB is already pending.
 * A fresh buffer is allocated and plugged into the descriptor for the
 * next transfer; allocation failure here is fatal (panic). */
static unsigned int
handle_descr_data(struct e100_serial *info, struct etrax_dma_descr *descr,
		  unsigned int recvl)
{
	struct etrax_recv_buffer *buffer = phys_to_virt(descr->buf) - sizeof *buffer;

	if (info->recv_cnt + recvl > 65536) {
		printk(KERN_CRIT
		       "%s: Too much pending incoming serial data! Dropping %u bytes.\n", __func__, recvl);
		return 0;
	}

	buffer->length = recvl;

	if (info->errorcode == ERRCODE_SET_BREAK)
		buffer->error = TTY_BREAK;
	info->errorcode = 0;

	append_recv_buffer(info, buffer);

	if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
		panic("%s: Failed to allocate memory for receive buffer!\n", __func__);

	descr->buf = virt_to_phys(buffer->buffer);

	return recvl;
}

/* Walk the ring of receive descriptors from the driver's cursor up to
 * the descriptor the DMA controller is currently on (*idescradr),
 * collecting the data of each completed descriptor via
 * handle_descr_data(). Returns the total number of bytes consumed. */
static unsigned int
handle_all_descr_data(struct e100_serial *info)
{
	struct etrax_dma_descr *descr;
	unsigned int recvl;
	unsigned int ret = 0;

	while (1)
	{
		descr = &info->rec_descr[info->cur_rec_descr];

		if (descr == phys_to_virt(*info->idescradr))
			break;

		if (++info->cur_rec_descr == SERIAL_RECV_DESCRIPTORS)
			info->cur_rec_descr = 0;

		/* find out how many bytes were read */

		/* if the eop bit was not set, all data has been received */
		if (!(descr->status & d_eop)) {
			recvl = descr->sw_len;
		} else {
			/* otherwise we find the amount of data received here */
			recvl = descr->hw_len;
		}

		/* Reset the status information */
		descr->status = 0;

		DFLOW(  DEBUG_LOG(info->line, "RX %lu\n", recvl);
			if (info->port.tty->stopped) {
				unsigned char *buf = phys_to_virt(descr->buf);
				DEBUG_LOG(info->line, "rx 0x%02X\n", buf[0]);
				DEBUG_LOG(info->line, "rx 0x%02X\n", buf[1]);
				DEBUG_LOG(info->line, "rx 0x%02X\n", buf[2]);
			}
			);

		/* update stats */
		info->icount.rx += recvl;

		ret += handle_descr_data(info, descr, recvl);
	}

	return ret;
}

/* rx DMA interrupt worker: acknowledge the descriptor/EOP interrupts,
 * queue any pending break, drain all completed descriptors, then read
 * the status register and turn any latched line error into a flagged
 * character. Finally arm the flush timer and restart the DMA. */
static void
receive_chars_dma(struct e100_serial *info)
{
	struct tty_struct *tty;
	unsigned char rstat;

#ifdef CONFIG_SVINTO_SIM
	/* No receive in the simulator.  Will probably be when the rest of
	 * the serial interface works, and this piece will just be removed.
	 */
	return;
#endif

	/* Acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
	*info->iclrintradr =
		IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
		IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);

	tty = info->port.tty;
	if (!tty) /* Something wrong... */
		return;

#ifdef SERIAL_HANDLE_EARLY_ERRORS
	if (info->uses_dma_in)
		e100_enable_serial_data_irq(info);
#endif

	if (info->errorcode == ERRCODE_INSERT_BREAK)
		add_char_and_flag(info, '\0', TTY_BREAK);

	handle_all_descr_data(info);

	/* Read the status register to detect errors */
	rstat = info->ioport[REG_STATUS];
	if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
		DFLOW(DEBUG_LOG(info->line, "XOFF detect stat %x\n", rstat));
	}

	if (rstat & SER_ERROR_MASK) {
		/* If we got an error, we must reset it by reading the
		 * data_in field
		 */
		unsigned char data = info->ioport[REG_DATA];

		PROCSTAT(ser_stat[info->line].errors_cnt++);
		DEBUG_LOG(info->line, "#dERR: s d 0x%04X\n",
			  ((rstat & SER_ERROR_MASK) << 8) | data);

		if (rstat & SER_PAR_ERR_MASK)
			add_char_and_flag(info, data, TTY_PARITY);
		else if (rstat & SER_OVERRUN_MASK)
			add_char_and_flag(info, data, TTY_OVERRUN);
		else if (rstat & SER_FRAMING_ERR_MASK)
			add_char_and_flag(info, data, TTY_FRAME);
	}

	START_FLUSH_FAST_TIMER(info, "receive_chars");

	/* Restart the receiving DMA */
	*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
}

/* Build the circular chain of receive descriptors, give each a freshly
 * allocated buffer, and start the input DMA on the first descriptor.
 * Allocation failure is fatal (panic). Always returns 1. */
static int start_recv_dma(struct e100_serial *info)
{
	struct etrax_dma_descr *descr = info->rec_descr;
	struct etrax_recv_buffer *buffer;
	int i;

	/* Set up the receiving
descriptors */
	for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) {
		if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
			panic("%s: Failed to allocate memory for receive buffer!\n", __func__);

		descr[i].ctrl = d_int;
		descr[i].buf = virt_to_phys(buffer->buffer);
		descr[i].sw_len = SERIAL_DESCR_BUF_SIZE;
		descr[i].hw_len = 0;
		descr[i].status = 0;
		descr[i].next = virt_to_phys(&descr[i+1]);
	}

	/* Link the last descriptor to the first */
	descr[i-1].next = virt_to_phys(&descr[0]);

	/* Start with the first descriptor in the list */
	info->cur_rec_descr = 0;

	/* Start the DMA */
	*info->ifirstadr = virt_to_phys(&descr[info->cur_rec_descr]);
	*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);

	/* Input DMA should be running now */
	return 1;
}

/* Begin receiving on this port: if it uses DMA-in, reset the input DMA
 * channel (busy-waiting until the reset command clears) and start the
 * descriptor ring via start_recv_dma(). */
static void
start_receive(struct e100_serial *info)
{
#ifdef CONFIG_SVINTO_SIM
	/* No receive in the simulator.  Will probably be when the rest of
	 * the serial interface works, and this piece will just be removed.
	 */
	return;
#endif
	if (info->uses_dma_in) {
		/* reset the input dma channel to be sure it works */

		*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
		while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->icmdadr) ==
		       IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));

		start_recv_dma(info);
	}
}


/* the bits in the MASK2 register are laid out like this:
   DMAI_EOP DMAI_DESCR DMAO_EOP DMAO_DESCR
   where I is the input channel and O is the output channel for the port.
   info->irq is the bit number for the DMAO_DESCR so to check the others we
   shift info->irq to the left.
*/

/* dma output channel interrupt handler
   this interrupt is called from DMA2(ser2), DMA4(ser3), DMA6(ser0) or
   DMA8(ser1) when they have finished a descriptor with the intr flag set.

   Scans every enabled DMA-out port, and for each whose descriptor bit
   is set in R_IRQ_MASK2_RD records the completion time and schedules
   the next chunk via transmit_chars_dma(). */

static irqreturn_t
tr_interrupt(int irq, void *dev_id)
{
	struct e100_serial *info;
	unsigned long ireg;
	int i;
	int handled = 0;

#ifdef CONFIG_SVINTO_SIM
	/* No receive in the simulator.  Will probably be when the rest of
	 * the serial interface works, and this piece will just be removed.
	 */
	{
		const char *s = "What? tr_interrupt in simulator??\n";
		SIMCOUT(s,strlen(s));
	}
	return IRQ_HANDLED;
#endif

	/* find out the line that caused this irq and get it from rs_table */

	ireg = *R_IRQ_MASK2_RD;  /* get the active irq bits for the dma channels */

	for (i = 0; i < NR_PORTS; i++) {
		info = rs_table + i;
		if (!info->enabled || !info->uses_dma_out)
			continue;
		/* check for dma_descr (don't need to check for dma_eop in output dma for serial */
		if (ireg & info->irq) {
			handled = 1;
			/* we can send a new dma bunch. make it so. */
			DINTR2(DEBUG_LOG(info->line, "tr_interrupt %i\n", i));
			/* Read jiffies_usec first,
			 * we want this time to be as late as possible
			 */
			PROCSTAT(ser_stat[info->line].tx_dma_ints++);
			info->last_tx_active_usec = GET_JIFFIES_USEC();
			info->last_tx_active = jiffies;
			transmit_chars_dma(info);
		}

		/* FIXME: here we should really check for a change in the
		   status lines and if so call status_handle(info) */
	}
	return IRQ_RETVAL(handled);
} /* tr_interrupt */

/* dma input channel interrupt handler
   Scans every enabled DMA-in port and, for each with its descriptor or
   EOP bit set in R_IRQ_MASK2_RD, drains received data via
   receive_chars_dma(). */
static irqreturn_t
rec_interrupt(int irq, void *dev_id)
{
	struct e100_serial *info;
	unsigned long ireg;
	int i;
	int handled = 0;

#ifdef CONFIG_SVINTO_SIM
	/* No receive in the simulator.  Will probably be when the rest of
	 * the serial interface works, and this piece will just be removed.
	 */
	{
		const char *s = "What? rec_interrupt in simulator??\n";
		SIMCOUT(s,strlen(s));
	}
	return IRQ_HANDLED;
#endif

	/* find out the line that caused this irq and get it from rs_table */

	ireg = *R_IRQ_MASK2_RD;  /* get the active irq bits for the dma channels */

	for (i = 0; i < NR_PORTS; i++) {
		info = rs_table + i;
		if (!info->enabled || !info->uses_dma_in)
			continue;
		/* check for both dma_eop and dma_descr for the input dma channel */
		if (ireg & ((info->irq << 2) | (info->irq << 3))) {
			handled = 1;
			/* we have received something */
			receive_chars_dma(info);
		}

		/* FIXME: here we should really check for a change in the
		   status lines and if so call status_handle(info) */
	}
	return IRQ_RETVAL(handled);
} /* rec_interrupt */

/* Flush-timer helper: if no fresh data or error is pending, force an
 * end-of-packet on the input DMA channel (once) so buffered bytes reach
 * the tty. Returns 1 only when the EOP path was taken. */
static int force_eop_if_needed(struct e100_serial *info)
{
	/* We check data_avail bit to determine if data has
	 * arrived since last time
	 */
	unsigned char rstat = info->ioport[REG_STATUS];

	/* error or datavail? */
	if (rstat & SER_ERROR_MASK) {
		/* Some error has occurred. If there has been valid data, an
		 * EOP interrupt will be made automatically. If no data, the
		 * normal ser_interrupt should be enabled and handle it.
		 * So do nothing!
		 */
		DEBUG_LOG(info->line, "timeout err: rstat 0x%03X\n",
		          rstat | (info->line << 8));
		return 0;
	}

	if (rstat & SER_DATA_AVAIL_MASK) {
		/* Ok data, no error, count it */
		TIMERD(DEBUG_LOG(info->line, "timeout: rstat 0x%03X\n",
		          rstat | (info->line << 8)));
		/* Read data to clear status flags */
		(void)info->ioport[REG_DATA];

		info->forced_eop = 0;
		START_FLUSH_FAST_TIMER(info, "magic");
		return 0;
	}

	/* hit the timeout, force an EOP for the input
	 * dma channel if we haven't already
	 */
	if (!info->forced_eop) {
		info->forced_eop = 1;
		PROCSTAT(ser_stat[info->line].timeout_flush_cnt++);
		TIMERD(DEBUG_LOG(info->line, "timeout EOP %i\n", info->line));
		FORCE_EOP(info);
	}

	return 1;
}

/* Move every queued receive buffer into the tty flip buffer and push
 * it to the line discipline. List manipulation is done with IRQs off;
 * the partially-consumed-buffer branch compacts leftover bytes to the
 * front of the buffer. (NOTE(review): tty_insert_flip_string is assumed
 * here to accept all `count` bytes, since `count == buffer->length`
 * always holds as written - the else branch looks unreachable.) */
static void flush_to_flip_buffer(struct e100_serial *info)
{
	struct tty_struct *tty;
	struct etrax_recv_buffer *buffer;
	unsigned long flags;

	local_irq_save(flags);
	tty = info->port.tty;

	if (!tty) {
		local_irq_restore(flags);
		return;
	}

	while ((buffer = info->first_recv_buffer) != NULL) {
		unsigned int count = buffer->length;

		tty_insert_flip_string(tty, buffer->buffer, count);
		info->recv_cnt -= count;

		if (count == buffer->length) {
			info->first_recv_buffer = buffer->next;
			kfree(buffer);
		} else {
			buffer->length -= count;
			memmove(buffer->buffer, buffer->buffer + count, buffer->length);
			buffer->error = TTY_NORMAL;
		}
	}

	if (!info->first_recv_buffer)
		info->last_recv_buffer = NULL;

	local_irq_restore(flags);

	/* This includes a check for low-latency */
	tty_flip_buffer_push(tty);
}

/* Timer-driven flush: push what we have to the tty, re-arm a slower
 * (2 ms) flip timer if data is still queued, and finally force an EOP
 * if the hardware has gone quiet. */
static void check_flush_timeout(struct e100_serial *info)
{
	/* Flip what we've got (if we can) */
	flush_to_flip_buffer(info);

	/* We might need to flip later, but not to fast
	 * since the system is busy processing input... */
	if (info->first_recv_buffer)
		START_FLUSH_FAST_TIMER_TIME(info, "flip", 2000);

	/* Force eop last, since data might have come while we're processing
	 * and if we started the slow timer above, we won't start a fast
	 * below.
	 */
	force_eop_if_needed(info);
}

#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
/* One-shot fast-timer callback: clears its timer slot (so the
 * START_FLUSH_FAST_TIMER macros can re-arm it) and runs the flush. */
static void flush_timeout_function(unsigned long data)
{
	struct e100_serial *info = (struct e100_serial *)data;

	fast_timers[info->line].function = NULL;
	serial_fast_timer_expired++;
	TIMERD(DEBUG_LOG(info->line, "flush_timout %i ", info->line));
	TIMERD(DEBUG_LOG(info->line, "num expired: %i\n", serial_fast_timer_expired));
	check_flush_timeout(info);
}

#else

/* dma fifo/buffer timeout handler
   forces an end-of-packet for the dma input channel if no
   chars have been received for CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS/100 s.
*/

static struct timer_list flush_timer;

/* Periodic fallback (no fast timer): flush every DMA-in port, then
 * re-arm itself CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS jiffies ahead. */
static void
timed_flush_handler(unsigned long ptr)
{
	struct e100_serial *info;
	int i;

#ifdef CONFIG_SVINTO_SIM
	return;
#endif

	for (i = 0; i < NR_PORTS; i++) {
		info = rs_table + i;
		if (info->uses_dma_in)
			check_flush_timeout(info);
	}

	/* restart flush timer */
	mod_timer(&flush_timer, jiffies + CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS);
}
#endif

#ifdef SERIAL_HANDLE_EARLY_ERRORS

/* If there is an error (ie break) when the DMA is running and
 * there are no bytes in the fifo the DMA is stopped and we get no
 * eop interrupt. Thus we have to monitor the first bytes on a DMA
 * transfer, and if it is without error we can turn the serial
 * interrupts off.
 */

/*
BREAK handling on ETRAX 100:
ETRAX will generate interrupt although there is no stop bit between the
characters.

Depending on how long the break sequence is, the end of the breaksequence
will look differently:
| indicates start/end of a character.

B= Break character (0x00) with framing error.
E= Error byte with parity error received after B characters.
F= "Faked" valid byte received immediately after B characters.
V= Valid byte

1.
    B          BL         ___________________________ V
.._|__________|__________|                           |valid data |

Multiple frame errors with data == 0x00 (B),
the timing matches up "perfectly" so no extra ending char
is detected.
The RXD pin is 1 in the last interrupt, in that
case we set info->errorcode = ERRCODE_INSERT_BREAK, but we can't
really know if another byte will come and this really is case 2. below
(e.g F=0xFF or 0xFE)
If RXD pin is 0 we can expect another character (see 2. below).

2.
    B          B          E or F__________________..__ V
.._|__________|__________|______                      |
             |valid data           "valid" or
                                   parity error

Multiple frame errors with data == 0x00 (B),
but the part of the break trigs is interpreted as a start bit (and possibly
some 0 bits followed by a number of 1 bits and a stop bit).
Depending on parity settings etc. this last character can be either
a fake "valid" char (F) or have a parity error (E).

If the character is valid it will be put in the buffer,
we set info->errorcode = ERRCODE_SET_BREAK so the receive interrupt
will set the flags so the tty will handle it,
if it's an error byte it will not be put in the buffer
and we set info->errorcode = ERRCODE_INSERT_BREAK.

To distinguish a V byte in 1. from an F byte in 2. we keep a timestamp
of the last faulty char (B) and compares it with the current time:
If the time elapsed time is less then 2*char_time_usec we will assume
it's a faked F char and not a Valid char and set
info->errorcode = ERRCODE_SET_BREAK.

Flaws in the above solution:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We use the timer to distinguish a F character from a V character,
if a V character is to close after the break we might make the wrong
decision.

TODO: The break will be delayed until an F or V character is received.
*/ static struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info) { unsigned long data_read; struct tty_struct *tty = info->port.tty; if (!tty) { printk("!NO TTY!\n"); return info; } /* Read data and status at the same time */ data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]); more_data: if (data_read & IO_MASK(R_SERIAL0_READ, xoff_detect) ) { DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0)); } DINTR2(DEBUG_LOG(info->line, "ser_rx %c\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read))); if (data_read & ( IO_MASK(R_SERIAL0_READ, framing_err) | IO_MASK(R_SERIAL0_READ, par_err) | IO_MASK(R_SERIAL0_READ, overrun) )) { /* An error */ info->last_rx_active_usec = GET_JIFFIES_USEC(); info->last_rx_active = jiffies; DINTR1(DEBUG_LOG(info->line, "ser_rx err stat_data %04X\n", data_read)); DLOG_INT_TRIG( if (!log_int_trig1_pos) { log_int_trig1_pos = log_int_pos; log_int(rdpc(), 0, 0); } ); if ( ((data_read & IO_MASK(R_SERIAL0_READ, data_in)) == 0) && (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) ) { /* Most likely a break, but we get interrupts over and * over again. */ if (!info->break_detected_cnt) { DEBUG_LOG(info->line, "#BRK start\n", 0); } if (data_read & IO_MASK(R_SERIAL0_READ, rxd)) { /* The RX pin is high now, so the break * must be over, but.... * we can't really know if we will get another * last byte ending the break or not. * And we don't know if the byte (if any) will * have an error or look valid. 
*/ DEBUG_LOG(info->line, "# BL BRK\n", 0); info->errorcode = ERRCODE_INSERT_BREAK; } info->break_detected_cnt++; } else { /* The error does not look like a break, but could be * the end of one */ if (info->break_detected_cnt) { DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt); info->errorcode = ERRCODE_INSERT_BREAK; } else { unsigned char data = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read); char flag = TTY_NORMAL; if (info->errorcode == ERRCODE_INSERT_BREAK) { struct tty_struct *tty = info->port.tty; tty_insert_flip_char(tty, 0, flag); info->icount.rx++; } if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) { info->icount.parity++; flag = TTY_PARITY; } else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) { info->icount.overrun++; flag = TTY_OVERRUN; } else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) { info->icount.frame++; flag = TTY_FRAME; } tty_insert_flip_char(tty, data, flag); info->errorcode = 0; } info->break_detected_cnt = 0; } } else if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) { /* No error */ DLOG_INT_TRIG( if (!log_int_trig1_pos) { if (log_int_pos >= log_int_size) { log_int_pos = 0; } log_int_trig0_pos = log_int_pos; log_int(rdpc(), 0, 0); } ); tty_insert_flip_char(tty, IO_EXTRACT(R_SERIAL0_READ, data_in, data_read), TTY_NORMAL); } else { DEBUG_LOG(info->line, "ser_rx int but no data_avail %08lX\n", data_read); } info->icount.rx++; data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]); if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) { DEBUG_LOG(info->line, "ser_rx %c in loop\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read)); goto more_data; } tty_flip_buffer_push(info->port.tty); return info; } static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info) { unsigned char rstat; #ifdef SERIAL_DEBUG_INTR printk("Interrupt from serport %d\n", i); #endif /* DEBUG_LOG(info->line, "ser_interrupt stat %03X\n", rstat | (i << 8)); */ if (!info->uses_dma_in) { return 
handle_ser_rx_interrupt_no_dma(info);
	}
	/* DMA is used */
	rstat = info->ioport[REG_STATUS];
	if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
		DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
	}

	if (rstat & SER_ERROR_MASK) {
		unsigned char data;

		info->last_rx_active_usec = GET_JIFFIES_USEC();
		info->last_rx_active = jiffies;
		/* If we got an error, we must reset it by reading the
		 * data_in field
		 */
		data = info->ioport[REG_DATA];
		DINTR1(DEBUG_LOG(info->line, "ser_rx! %c\n", data));
		DINTR1(DEBUG_LOG(info->line, "ser_rx err stat %02X\n", rstat));
		if (!data && (rstat & SER_FRAMING_ERR_MASK)) {
			/* Most likely a break, but we get interrupts over and
			 * over again.
			 */
			if (!info->break_detected_cnt) {
				DEBUG_LOG(info->line, "#BRK start\n", 0);
			}
			if (rstat & SER_RXD_MASK) {
				/* The RX pin is high now, so the break
				 * must be over, but....
				 * we can't really know if we will get another
				 * last byte ending the break or not.
				 * And we don't know if the byte (if any) will
				 * have an error or look valid.
				 */
				DEBUG_LOG(info->line, "# BL BRK\n", 0);
				info->errorcode = ERRCODE_INSERT_BREAK;
			}
			info->break_detected_cnt++;
		} else {
			/* The error does not look like a break, but could be
			 * the end of one
			 */
			if (info->break_detected_cnt) {
				DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
				info->errorcode = ERRCODE_INSERT_BREAK;
			} else {
				/* A pending inserted break is delivered to the
				 * tty layer before the erroneous byte itself. */
				if (info->errorcode == ERRCODE_INSERT_BREAK) {
					info->icount.brk++;
					add_char_and_flag(info, '\0', TTY_BREAK);
				}

				/* Classify the error and hand the byte up with
				 * the matching tty flag; counters feed TIOCGICOUNT. */
				if (rstat & SER_PAR_ERR_MASK) {
					info->icount.parity++;
					add_char_and_flag(info, data, TTY_PARITY);
				} else if (rstat & SER_OVERRUN_MASK) {
					info->icount.overrun++;
					add_char_and_flag(info, data, TTY_OVERRUN);
				} else if (rstat & SER_FRAMING_ERR_MASK) {
					info->icount.frame++;
					add_char_and_flag(info, data, TTY_FRAME);
				}

				info->errorcode = 0;
			}
			info->break_detected_cnt = 0;
			DEBUG_LOG(info->line, "#iERR s d %04X\n",
			          ((rstat & SER_ERROR_MASK) << 8) | data);
		}
		PROCSTAT(ser_stat[info->line].early_errors_cnt++);
	} else { /* It was a valid byte, now let the DMA do the rest */
		unsigned long curr_time_u = GET_JIFFIES_USEC();
		unsigned long curr_time = jiffies;

		if (info->break_detected_cnt) {
			/* Detect if this character is a new valid char or the
			 * last char in a break sequence: If LSBits are 0 and
			 * MSBits are high AND the time is close to the
			 * previous interrupt we should discard it.
			 */
			long elapsed_usec =
			  (curr_time - info->last_rx_active) * (1000000/HZ) +
			  curr_time_u - info->last_rx_active_usec;
			if (elapsed_usec < 2*info->char_time_usec) {
				DEBUG_LOG(info->line, "FBRK %i\n", info->line);
				/* Report as BREAK (error) and let
				 * receive_chars_dma() handle it
				 */
				info->errorcode = ERRCODE_SET_BREAK;
			} else {
				DEBUG_LOG(info->line, "Not end of BRK (V)%i\n", info->line);
			}
			DEBUG_LOG(info->line, "num brk %i\n", info->break_detected_cnt);
		}

#ifdef SERIAL_DEBUG_INTR
		printk("** OK, disabling ser_interrupts\n");
#endif
		/* Valid traffic seen: stop taking per-character interrupts
		 * and let the RX DMA channel collect the data instead. */
		e100_disable_serial_data_irq(info);
		DINTR2(DEBUG_LOG(info->line, "ser_rx OK %d\n", info->line));
		info->break_detected_cnt = 0;

		PROCSTAT(ser_stat[info->line].ser_ints_ok_cnt++);
	}
	/* Restarting the DMA never hurts */
	*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
	START_FLUSH_FAST_TIMER(info, "ser_int");
	return info;
} /* handle_ser_rx_interrupt */

/* TX-ready interrupt service for one port.  Three mutually exclusive
 * paths, in priority order:
 *   1. a pending high-priority x_char (XON/XOFF) is written directly
 *      to the transmit data register;
 *   2. on a DMA-driven port the manual TX interrupt is only used for
 *      x_char, so the DMA channel is re-enabled and told to continue;
 *   3. otherwise one byte is sent from the circular xmit buffer
 *      (char-by-char mode), with RS-485 RTS turnaround handled when
 *      the buffer drains.
 */
static void handle_ser_tx_interrupt(struct e100_serial *info)
{
	unsigned long flags;

	if (info->x_char) {
		unsigned char rstat;
		DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char));
		local_irq_save(flags);
		rstat = info->ioport[REG_STATUS];
		DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));

		info->ioport[REG_TR_DATA] = info->x_char;
		info->icount.tx++;
		info->x_char = 0;
		/* We must enable since it is disabled in ser_interrupt */
		e100_enable_serial_tx_ready_irq(info);
		local_irq_restore(flags);
		return;
	}
	if (info->uses_dma_out) {
		unsigned char rstat;
		int i;
		/* We only use normal tx interrupt when sending x_char */
		DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0));
		local_irq_save(flags);
		rstat = info->ioport[REG_STATUS];
		DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
		e100_disable_serial_tx_ready_irq(info);
		if (info->port.tty->stopped)
			rs_stop(info->port.tty);
		/* Enable the DMA channel and tell it to continue */
		e100_enable_txdma_channel(info);
		/* Wait 12 cycles before doing the DMA command */
		for(i = 6; i > 0; i--)
			nop();

		*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue);
		local_irq_restore(flags);
		return;
	}
	/* Normal char-by-char interrupt */
	if (info->xmit.head == info->xmit.tail
	    || info->port.tty->stopped
	    || info->port.tty->hw_stopped) {
		DFLOW(DEBUG_LOG(info->line, "tx_int: stopped %i\n",
				info->port.tty->stopped));
		e100_disable_serial_tx_ready_irq(info);
		info->tr_running = 0;
		return;
	}
	DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail]));
	/* Send a byte, rs485 timing is critical so turn of ints */
	local_irq_save(flags);
	info->ioport[REG_TR_DATA] = info->xmit.buf[info->xmit.tail];
	info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1);
	info->icount.tx++;
	if (info->xmit.head == info->xmit.tail) {
#if defined(CONFIG_ETRAX_RS485) && defined(CONFIG_ETRAX_FAST_TIMER)
		if (info->rs485.flags & SER_RS485_ENABLED) {
			/* Set a short timer to toggle RTS */
			start_one_shot_timer(&fast_timers_rs485[info->line],
			                     rs485_toggle_rts_timer_function,
			                     (unsigned long)info,
			                     info->char_time_usec*2,
			                     "RS-485");
		}
#endif /* RS485 */
		info->last_tx_active_usec = GET_JIFFIES_USEC();
		info->last_tx_active = jiffies;
		e100_disable_serial_tx_ready_irq(info);
		info->tr_running = 0;
		DFLOW(DEBUG_LOG(info->line, "tx_int: stop2\n", 0));
	} else {
		/* We must enable since it is disabled in ser_interrupt */
		e100_enable_serial_tx_ready_irq(info);
	}
	local_irq_restore(flags);

	/* Wake up writers once the buffer has drained below the
	 * WAKEUP_CHARS threshold. */
	if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
		rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);

} /* handle_ser_tx_interrupt */

/* result of time measurements:
 * RX duration 54-60 us when doing something, otherwise 6-9 us
 * ser_int duration: just sending: 8-15 us normally, up to 73 us
 */
static irqreturn_t
ser_interrupt(int irq, void *dev_id)
{
	static volatile int tx_started = 0;
	struct e100_serial *info;
	int i;
	unsigned long flags;
	unsigned long irq_mask1_rd;
	unsigned long data_mask = (1 << (8+2*0)); /* ser0 data_avail */
	int handled = 0;
	static volatile unsigned long reentered_ready_mask = 0;

	local_irq_save(flags);
	irq_mask1_rd = *R_IRQ_MASK1_RD;
	/* First handle all rx interrupts with ints disabled */
	info = rs_table;
	irq_mask1_rd &= e100_ser_int_mask;
	for (i = 0; i < NR_PORTS; i++) {
		/* Which line caused the data irq? */
		if (irq_mask1_rd & data_mask) {
			handled = 1;
			handle_ser_rx_interrupt(info);
		}
		info += 1;
		data_mask <<= 2;
	}
	/* Handle tx interrupts with interrupts enabled so we
	 * can take care of new data interrupts while transmitting
	 * We protect the tx part with the tx_started flag.
	 * We disable the tr_ready interrupts we are about to handle and
	 * unblock the serial interrupt so new serial interrupts may come.
	 *
	 * If we get a new interrupt:
	 *   - it migth be due to synchronous serial ports.
	 *   - serial irq will be blocked by general irq handler.
	 *   - async data will be handled above (sync will be ignored).
	 *   - tx_started flag will prevent us from trying to send again and
	 *     we will exit fast - no need to unblock serial irq.
	 *   - Next (sync) serial interrupt handler will be runned with
	 *     disabled interrupt due to restore_flags() at end of function,
	 *     so sync handler will not be preempted or reentered.
	 */
	if (!tx_started) {
		unsigned long ready_mask;
		/* NOTE(review): this declares a LOCAL tx_started that shadows
		 * the function-static tx_started above, so the static
		 * reentrancy flag is seemingly never set here and the
		 * "tx_started = 0" below also hits the local -- confirm
		 * whether the shadowing is intentional. */
		unsigned long tx_started = 1;
		/* Only the tr_ready interrupts left */
		irq_mask1_rd &= (IO_MASK(R_IRQ_MASK1_RD, ser0_ready) |
				 IO_MASK(R_IRQ_MASK1_RD, ser1_ready) |
				 IO_MASK(R_IRQ_MASK1_RD, ser2_ready) |
				 IO_MASK(R_IRQ_MASK1_RD, ser3_ready));
		while (irq_mask1_rd) {
			/* Disable those we are about to handle */
			*R_IRQ_MASK1_CLR = irq_mask1_rd;
			/* Unblock the serial interrupt */
			*R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set);

			local_irq_enable();
			ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */
			info = rs_table;
			for (i = 0; i < NR_PORTS; i++) {
				/* Which line caused the ready irq? */
				if (irq_mask1_rd & ready_mask) {
					handled = 1;
					handle_ser_tx_interrupt(info);
				}
				info += 1;
				ready_mask <<= 2;
			}
			/* handle_ser_tx_interrupt enables tr_ready interrupts */
			local_irq_disable();
			/* Handle reentered TX interrupt */
			/* NOTE(review): reentered_ready_mask is consumed here
			 * but never cleared in this function -- verify that a
			 * stale mask cannot cause a spurious extra loop pass. */
			irq_mask1_rd = reentered_ready_mask;
		}
		local_irq_disable();
		tx_started = 0;
	} else {
		unsigned long ready_mask;
		/* Reentered while another invocation is in the TX loop:
		 * just record the pending tr_ready lines and mask them;
		 * the outer invocation will service them. */
		ready_mask = irq_mask1_rd & (IO_MASK(R_IRQ_MASK1_RD, ser0_ready) |
					     IO_MASK(R_IRQ_MASK1_RD, ser1_ready) |
					     IO_MASK(R_IRQ_MASK1_RD, ser2_ready) |
					     IO_MASK(R_IRQ_MASK1_RD, ser3_ready));
		if (ready_mask) {
			reentered_ready_mask |= ready_mask;
			/* Disable those we are about to handle */
			*R_IRQ_MASK1_CLR = ready_mask;
			DFLOW(DEBUG_LOG(SERIAL_DEBUG_LINE, "ser_int reentered with TX %X\n", ready_mask));
		}
	}

	local_irq_restore(flags);
	return IRQ_RETVAL(handled);
} /* ser_interrupt */
#endif

/*
 * -------------------------------------------------------------------
 * Here ends the serial interrupt routines.
 * -------------------------------------------------------------------
 */

/*
 * This routine is used to handle the "bottom half" processing for the
 * serial driver, known also the "software interrupt" processing.
 * This processing is done at the kernel interrupt level, after the
 * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This
 * is where time-consuming activities which can not be done in the
 * interrupt driver proper are done; the interrupt driver schedules
 * them using rs_sched_event(), and they get done here.
 */
static void
do_softint(struct work_struct *work)
{
	struct e100_serial	*info;
	struct tty_struct	*tty;

	info = container_of(work, struct e100_serial, work);

	tty = info->port.tty;
	if (!tty)
		return;

	/* Currently the only deferred event is the write wakeup. */
	if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event))
		tty_wakeup(tty);
}

/* One-time port initialization on first open: allocate the xmit page,
 * reset and enable the RX/TX DMA channels (or fall back to interrupt
 * driven I/O), program the line speed and raise RTS/DTR.
 * Returns 0 on success or -ENOMEM if the xmit buffer page cannot be
 * allocated.  Idempotent: bails out early if ASYNC_INITIALIZED is set.
 */
static int
startup(struct e100_serial * info)
{
	unsigned long flags;
	unsigned long xmit_page;
	int i;

	xmit_page = get_zeroed_page(GFP_KERNEL);
	if (!xmit_page)
		return -ENOMEM;

	local_irq_save(flags);

	/* if it was already initialized, skip this */

	if (info->flags & ASYNC_INITIALIZED) {
		local_irq_restore(flags);
		free_page(xmit_page);
		return 0;
	}

	if (info->xmit.buf)
		free_page(xmit_page);
	else
		info->xmit.buf = (unsigned char *) xmit_page;

#ifdef SERIAL_DEBUG_OPEN
	printk("starting up ttyS%d (xmit_buf 0x%p)...\n", info->line, info->xmit.buf);
#endif

#ifdef CONFIG_SVINTO_SIM
	/* Bits and pieces collected from below.  Better to have them
	   in one ifdef:ed clause than to mix in a lot of ifdefs,
	   right? */
	if (info->port.tty)
		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->xmit.head = info->xmit.tail = 0;
	info->first_recv_buffer = info->last_recv_buffer = NULL;
	info->recv_cnt = info->max_recv_cnt = 0;

	for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
		info->rec_descr[i].buf = NULL;

	/* No real action in the simulator, but may set info important
	   to ioctl. */
	change_speed(info);
#else

	/*
	 * Clear the FIFO buffers and disable them
	 * (they will be reenabled in change_speed())
	 */

	/*
	 * Reset the DMA channels and make sure their interrupts are cleared
	 */

	if (info->dma_in_enabled) {
		info->uses_dma_in = 1;
		e100_enable_rxdma_channel(info);

		*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);

		/* Wait until reset cycle is complete */
		while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->icmdadr) ==
		       IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));

		/* Make sure the irqs are cleared */
		*info->iclrintradr =
			IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
			IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
	} else {
		e100_disable_rxdma_channel(info);
	}

	if (info->dma_out_enabled) {
		info->uses_dma_out = 1;
		e100_enable_txdma_channel(info);
		*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
		/* Wait until reset cycle is complete */
		while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->ocmdadr) ==
		       IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
		/* Make sure the irqs are cleared */
		*info->oclrintradr =
			IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
			IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
	} else {
		e100_disable_txdma_channel(info);
	}

	if (info->port.tty)
		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->xmit.head = info->xmit.tail = 0;
	info->first_recv_buffer = info->last_recv_buffer = NULL;
	info->recv_cnt = info->max_recv_cnt = 0;

	for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
		info->rec_descr[i].buf = 0;

	/*
	 * and set the speed and other flags of the serial port
	 * this will start the rx/tx as well
	 */
#ifdef SERIAL_HANDLE_EARLY_ERRORS
	e100_enable_serial_data_irq(info);
#endif
	change_speed(info);

	/* dummy read to reset any serial errors */

	(void)info->ioport[REG_DATA];

	/* enable the interrupts */
	if (info->uses_dma_out)
		e100_enable_txdma_irq(info);

	e100_enable_rx_irq(info);

	info->tr_running = 0; /* to be sure we don't lock up the transmitter */

	/* setup the dma input descriptor and start dma */

	start_receive(info);

	/* for safety, make sure the descriptors last result is 0 bytes written */

	info->tr_descr.sw_len =
0;
	info->tr_descr.hw_len = 0;
	info->tr_descr.status = 0;

	/* enable RTS/DTR last */

	e100_rts(info, 1);
	e100_dtr(info, 1);

#endif /* CONFIG_SVINTO_SIM */

	info->flags |= ASYNC_INITIALIZED;

	local_irq_restore(flags);
	return 0;
}

/*
 * This routine will shutdown a serial port; interrupts are disabled, and
 * DTR is dropped if the hangup on close termio flag is on.
 * Counterpart of startup(): disables RX/TX, resets the DMA channels,
 * frees the xmit page and the receive descriptor buffers, and clears
 * ASYNC_INITIALIZED.
 */
static void
shutdown(struct e100_serial * info)
{
	unsigned long flags;
	struct etrax_dma_descr *descr = info->rec_descr;
	struct etrax_recv_buffer *buffer;
	int i;

#ifndef CONFIG_SVINTO_SIM
	/* shut down the transmitter and receiver */
	DFLOW(DEBUG_LOG(info->line, "shutdown %i\n", info->line));
	e100_disable_rx(info);
	/* bit 6 of tx_ctrl is the transmitter enable */
	info->ioport[REG_TR_CTRL] = (info->tx_ctrl &= ~0x40);

	/* disable interrupts, reset dma channels */
	if (info->uses_dma_in) {
		e100_disable_rxdma_irq(info);
		*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
		info->uses_dma_in = 0;
	} else {
		e100_disable_serial_data_irq(info);
	}

	if (info->uses_dma_out) {
		e100_disable_txdma_irq(info);
		info->tr_running = 0;
		*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
		info->uses_dma_out = 0;
	} else {
		e100_disable_serial_tx_ready_irq(info);
		info->tr_running = 0;
	}
#endif /* CONFIG_SVINTO_SIM */

	if (!(info->flags & ASYNC_INITIALIZED))
		return;

#ifdef SERIAL_DEBUG_OPEN
	printk("Shutting down serial port %d (irq %d)....\n", info->line,
	       info->irq);
#endif

	local_irq_save(flags);

	if (info->xmit.buf) {
		free_page((unsigned long)info->xmit.buf);
		info->xmit.buf = NULL;
	}

	/* Free the receive buffers that hang off the DMA descriptors;
	 * each buffer header sits immediately before the data the
	 * descriptor points at (see the phys_to_virt() offset). */
	for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
		if (descr[i].buf) {
			buffer = phys_to_virt(descr[i].buf) - sizeof *buffer;
			kfree(buffer);
			descr[i].buf = 0;
		}

	if (!info->port.tty || (info->port.tty->termios->c_cflag & HUPCL)) {
		/* hang up DTR and RTS if HUPCL is enabled */
		e100_dtr(info, 0);
		e100_rts(info, 0); /* could check CRTSCTS before doing this */
	}

	if (info->port.tty)
		set_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->flags &= ~ASYNC_INITIALIZED;
	local_irq_restore(flags);
}


/* change baud rate and other assorted parameters.
 * Reads the termios cflag/iflag of the attached tty and programs the
 * baud source (normal, prescaler or external clock), character size,
 * stop bits, parity and CTS/XOFF flow control into the port registers.
 */
static void
change_speed(struct e100_serial *info)
{
	unsigned int cflag;
	unsigned long xoff;
	unsigned long flags;
	/* first some safety checks */

	if (!info->port.tty || !info->port.tty->termios)
		return;
	if (!info->ioport)
		return;

	cflag = info->port.tty->termios->c_cflag;

	/* possibly, the tx/rx should be disabled first to do this safely */

	/* change baud-rate and write it to the hardware */
	if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) {
		/* Special baudrate */
		u32 mask = 0xFF << (info->line*8); /* Each port has 8 bits */
		unsigned long alt_source =
				IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, normal) |
				IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
		/* R_ALT_SER_BAUDRATE selects the source */
		DBAUD(printk("Custom baudrate: baud_base/divisor %lu/%i\n",
		       (unsigned long)info->baud_base, info->custom_divisor));
		if (info->baud_base == SERIAL_PRESCALE_BASE) {
			/* 0, 2-65535 (0=65536) */
			u16 divisor = info->custom_divisor;
			/* R_SERIAL_PRESCALE (upper 16 bits of R_CLOCK_PRESCALE) */
			/* baudrate is 3.125MHz/custom_divisor */
			/* NOTE(review): alt_source is assigned twice here;
			 * the literal 0x11 overrides the IO_STATE value --
			 * presumably equivalent, but confirm. */
			alt_source =
				IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, prescale) |
				IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, prescale);
			alt_source = 0x11;
			DBAUD(printk("Writing SERIAL_PRESCALE: divisor %i\n", divisor));
			*R_SERIAL_PRESCALE = divisor;
			info->baud = SERIAL_PRESCALE_BASE/divisor;
		}
#ifdef CONFIG_ETRAX_EXTERN_PB6CLK_ENABLED
		else if ((info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8 &&
			  info->custom_divisor == 1) ||
			 (info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ &&
			  info->custom_divisor == 8)) {
				/* ext_clk selected */
				alt_source =
					IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, extern) |
					IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, extern);
				DBAUD(printk("using external baudrate: %lu\n", CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8));
				info->baud = CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8;
		}
#endif
		else
		{
			/* Bad baudbase, we don't support using timer0
			 * for baudrate. */
			printk(KERN_WARNING "Bad baud_base/custom_divisor: %lu/%i\n",
			       (unsigned long)info->baud_base, info->custom_divisor);
		}
		r_alt_ser_baudrate_shadow &= ~mask;
		r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
		*R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
	} else {
		/* Normal baudrate */
		/* Make sure we use normal baudrate */
		u32 mask = 0xFF << (info->line*8); /* Each port has 8 bits */
		unsigned long alt_source =
			IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, normal) |
			IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
		r_alt_ser_baudrate_shadow &= ~mask;
		r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
#ifndef CONFIG_SVINTO_SIM
		*R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
#endif /* CONFIG_SVINTO_SIM */

		info->baud = cflag_to_baud(cflag);
#ifndef CONFIG_SVINTO_SIM
		info->ioport[REG_BAUD] = cflag_to_etrax_baud(cflag);
#endif /* CONFIG_SVINTO_SIM */
	}

#ifndef CONFIG_SVINTO_SIM
	/* start with default settings and then fill in changes */
	local_irq_save(flags);
	/* 8 bit, no/even parity */
	info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) |
			   IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) |
			   IO_MASK(R_SERIAL0_REC_CTRL, rec_par));

	/* 8 bit, no/even parity, 1 stop bit, no cts */
	info->tx_ctrl &= ~(IO_MASK(R_SERIAL0_TR_CTRL, tr_bitnr) |
			   IO_MASK(R_SERIAL0_TR_CTRL, tr_par_en) |
			   IO_MASK(R_SERIAL0_TR_CTRL, tr_par) |
			   IO_MASK(R_SERIAL0_TR_CTRL, stop_bits) |
			   IO_MASK(R_SERIAL0_TR_CTRL, auto_cts));

	if ((cflag & CSIZE) == CS7) {
		/* set 7 bit mode */
		info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_bitnr, tr_7bit);
		info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_bitnr, rec_7bit);
	}

	if (cflag & CSTOPB) {
		/* set 2 stop bit mode */
		info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, stop_bits, two_bits);
	}

	if (cflag & PARENB) {
		/* enable parity */
		info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, enable);
		info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, enable);
	}

	if (cflag & CMSPAR) {
		/* enable stick parity, PARODD mean Mark which matches ETRAX */
		info->tx_ctrl |=
IO_STATE(R_SERIAL0_TR_CTRL, tr_stick_par, stick);
		info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_stick_par, stick);
	}
	if (cflag & PARODD) {
		/* set odd parity (or Mark if CMSPAR) */
		info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_par, odd);
		info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_par, odd);
	}

	if (cflag & CRTSCTS) {
		/* enable automatic CTS handling */
		DFLOW(DEBUG_LOG(info->line, "FLOW auto_cts enabled\n", 0));
		info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, auto_cts, active);
	}

	/* make sure the tx and rx are enabled */
	info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_enable, enable);
	info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_enable, enable);

	/* actually write the control regs to the hardware */
	info->ioport[REG_TR_CTRL] = info->tx_ctrl;
	info->ioport[REG_REC_CTRL] = info->rx_ctrl;
	/* Program the XOFF character and optional automatic XON/XOFF
	 * (IXON) handling. */
	xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(info->port.tty));
	xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
	if (info->port.tty->termios->c_iflag & IXON ) {
		DFLOW(DEBUG_LOG(info->line, "FLOW XOFF enabled 0x%02X\n",
				STOP_CHAR(info->port.tty)));
		xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
	}

	*((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
	local_irq_restore(flags);
#endif /* !CONFIG_SVINTO_SIM */

	update_char_time(info);

} /* change_speed */

/* start transmitting chars NOW */

static void
rs_flush_chars(struct tty_struct *tty)
{
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;
	unsigned long flags;

	/* Nothing to do if a transfer is already running, the buffer is
	 * empty/missing, or the tty is (hardware-)stopped. */
	if (info->tr_running ||
	    info->xmit.head == info->xmit.tail ||
	    tty->stopped ||
	    tty->hw_stopped ||
	    !info->xmit.buf)
		return;

#ifdef SERIAL_DEBUG_FLOW
	printk("rs_flush_chars\n");
#endif

	/* this protection might not exactly be necessary here */

	local_irq_save(flags);
	start_transmit(info);
	local_irq_restore(flags);
}

/* Copy up to @count bytes from @buf into the port's circular xmit
 * buffer and kick the transmitter if it is idle.  Returns the number
 * of bytes actually queued (may be less than @count when the buffer
 * fills).  The copy runs with interrupts disabled because the DMA
 * interrupt handler also updates info->xmit.
 */
static int rs_raw_write(struct tty_struct *tty,
			const unsigned char *buf, int count)
{
	int	c, ret = 0;
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;
	unsigned long flags;

	/* first some sanity checks */

	if (!tty || !info->xmit.buf || !tmp_buf)
		return 0;

#ifdef SERIAL_DEBUG_DATA
	if (info->line == SERIAL_DEBUG_LINE)
		printk("rs_raw_write (%d), status %d\n",
		       count, info->ioport[REG_STATUS]);
#endif

#ifdef CONFIG_SVINTO_SIM
	/* Really simple.  The output is here and now. */
	SIMCOUT(buf, count);
	return count;
#endif
	local_save_flags(flags);
	DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
	DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));


	/* The local_irq_disable/restore_flags pairs below are needed
	 * because the DMA interrupt handler moves the info->xmit values.
	 * the memcpy needs to be in the critical region unfortunately,
	 * because we need to read xmit values, memcpy, write xmit values
	 * in one atomic operation... this could perhaps be avoided by
	 * more clever design. */
	local_irq_disable();
		while (count) {
			c = CIRC_SPACE_TO_END(info->xmit.head,
					      info->xmit.tail,
					      SERIAL_XMIT_SIZE);

			if (count < c)
				c = count;
			if (c <= 0)
				break;

			memcpy(info->xmit.buf + info->xmit.head, buf, c);
			info->xmit.head = (info->xmit.head + c) &
				(SERIAL_XMIT_SIZE-1);
			buf += c;
			count -= c;
			ret += c;
		}
	local_irq_restore(flags);

	/* enable transmitter if not running, unless the tty is stopped
	 * this does not need IRQ protection since if tr_running == 0
	 * the IRQ's are not running anyway for this port. */
	DFLOW(DEBUG_LOG(info->line, "write ret %i\n", ret));

	if (info->xmit.head != info->xmit.tail &&
	    !tty->stopped &&
	    !tty->hw_stopped &&
	    !info->tr_running) {
		start_transmit(info);
	}

	return ret;
} /* raw_raw_write() */

/* tty write entry point.  Wraps rs_raw_write() with the RS-485 RTS
 * turnaround protocol when SER_RS485_ENABLED is set: RTS is toggled
 * and the receiver optionally disabled before the transfer, then the
 * function blocks until the shift register is empty before toggling
 * RTS back and re-enabling the receiver.
 */
static int
rs_write(struct tty_struct *tty,
	 const unsigned char *buf, int count)
{
#if defined(CONFIG_ETRAX_RS485)
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;

	if (info->rs485.flags & SER_RS485_ENABLED)
	{
		/* If we are in RS-485 mode, we need to toggle RTS and disable
		 * the receiver before initiating a DMA transfer
		 */
#ifdef CONFIG_ETRAX_FAST_TIMER
		/* Abort any started timer */
		fast_timers_rs485[info->line].function = NULL;
		del_fast_timer(&fast_timers_rs485[info->line]);
#endif
		e100_rts(info, (info->rs485.flags & SER_RS485_RTS_ON_SEND));
#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
		e100_disable_rx(info);
		e100_enable_rx_irq(info);
#endif
		if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) &&
			(info->rs485.delay_rts_before_send > 0))
			msleep(info->rs485.delay_rts_before_send);
	}
#endif /* CONFIG_ETRAX_RS485 */

	count = rs_raw_write(tty, buf, count);

#if defined(CONFIG_ETRAX_RS485)
	if (info->rs485.flags & SER_RS485_ENABLED)
	{
		unsigned int val;
		/* If we are in RS-485 mode the following has to be done:
		 * wait until DMA is ready
		 * wait on transmit shift register
		 * toggle RTS
		 * enable the receiver
		 */

		/* Sleep until all sent */
		tty_wait_until_sent(tty, 0);
#ifdef CONFIG_ETRAX_FAST_TIMER
		/* Now sleep a little more so that shift register is empty */
		schedule_usleep(info->char_time_usec * 2);
#endif
		/* wait on transmit shift register */
		do{
			get_lsr_info(info, &val);
		}while (!(val & TIOCSER_TEMT));

		e100_rts(info, (info->rs485.flags & SER_RS485_RTS_AFTER_SEND));

#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
		e100_enable_rx(info);
		e100_enable_rxdma_irq(info);
#endif
	}
#endif /* CONFIG_ETRAX_RS485 */

	return count;
} /* rs_write */


/* how much space is available in the xmit buffer? */

static int
rs_write_room(struct tty_struct *tty)
{
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;

	return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}

/* How many chars are in the xmit buffer?
 * This does not include any chars in the transmitter FIFO.
 * Use wait_until_sent for waiting for FIFO drain.
 */

static int
rs_chars_in_buffer(struct tty_struct *tty)
{
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;

	return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}

/* discard everything in the xmit buffer */

static void
rs_flush_buffer(struct tty_struct *tty)
{
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;
	unsigned long flags;

	local_irq_save(flags);
	info->xmit.head = info->xmit.tail = 0;
	local_irq_restore(flags);

	tty_wakeup(tty);
}

/*
 * This function is used to send a high-priority XON/XOFF character to
 * the device
 *
 * Since we use DMA we don't check for info->x_char in transmit_chars_dma(),
 * but we do it in handle_ser_tx_interrupt().
 * We disable DMA channel and enable tx ready interrupt and write the
 * character when possible.
 */
static void rs_send_xchar(struct tty_struct *tty, char ch)
{
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;
	unsigned long flags;
	local_irq_save(flags);
	if (info->uses_dma_out) {
		/* Put the DMA on hold and disable the channel */
		*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold);
		while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->ocmdadr) !=
		       IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, hold));
		e100_disable_txdma_channel(info);
	}

	/* Must make sure transmitter is not stopped before we can transmit */
	if (tty->stopped)
		rs_start(tty);

	/* Enable manual transmit interrupt and send from there */
	DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch));
	info->x_char = ch;
	e100_enable_serial_tx_ready_irq(info);
	local_irq_restore(flags);
}

/*
 * ------------------------------------------------------------
 * rs_throttle()
 *
 * This routine is called by the upper-layer tty layer to signal that
 * incoming characters should be throttled.
 * ------------------------------------------------------------
 */
static void
rs_throttle(struct tty_struct * tty)
{
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;
#ifdef SERIAL_DEBUG_THROTTLE
	char	buf[64];

	printk("throttle %s: %lu....\n", tty_name(tty, buf),
	       (unsigned long)tty->ldisc.chars_in_buffer(tty));
#endif
	DFLOW(DEBUG_LOG(info->line,"rs_throttle %lu\n", tty->ldisc.chars_in_buffer(tty)));

	/* Do RTS before XOFF since XOFF might take some time */
	if (tty->termios->c_cflag & CRTSCTS) {
		/* Turn off RTS line */
		e100_rts(info, 0);
	}
	if (I_IXOFF(tty))
		rs_send_xchar(tty, STOP_CHAR(tty));

}

/* Inverse of rs_throttle(): re-assert RTS and/or send XON so the
 * remote end may resume transmitting. */
static void
rs_unthrottle(struct tty_struct * tty)
{
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;
#ifdef SERIAL_DEBUG_THROTTLE
	char	buf[64];

	printk("unthrottle %s: %lu....\n", tty_name(tty, buf),
	       (unsigned long)tty->ldisc.chars_in_buffer(tty));
#endif
	DFLOW(DEBUG_LOG(info->line,"rs_unthrottle ldisc %d\n", tty->ldisc.chars_in_buffer(tty)));
	DFLOW(DEBUG_LOG(info->line,"rs_unthrottle flip.count: %i\n",
tty->flip.count));
	/* Do RTS before XOFF since XOFF might take some time */
	if (tty->termios->c_cflag & CRTSCTS) {
		/* Assert RTS line  */
		e100_rts(info, 1);
	}

	/* If an XOFF is still queued, cancel it instead of queuing an
	 * XON behind it. */
	if (I_IXOFF(tty)) {
		if (info->x_char)
			info->x_char = 0;
		else
			rs_send_xchar(tty, START_CHAR(tty));
	}

}

/*
 * ------------------------------------------------------------
 * rs_ioctl() and friends
 * ------------------------------------------------------------
 */

/* TIOCGSERIAL backend: copy a serial_struct snapshot of the port to
 * user space.  Returns 0 or -EFAULT on a bad user pointer. */
static int
get_serial_info(struct e100_serial * info,
		struct serial_struct * retinfo)
{
	struct serial_struct tmp;

	/* this is all probably wrong, there are a lot of fields
	 * here that we don't have in e100_serial and maybe we
	 * should set them to something else than 0.
	 */

	if (!retinfo)
		return -EFAULT;
	memset(&tmp, 0, sizeof(tmp));
	tmp.type = info->type;
	tmp.line = info->line;
	tmp.port = (int)info->ioport;
	tmp.irq = info->irq;
	tmp.flags = info->flags;
	tmp.baud_base = info->baud_base;
	tmp.close_delay = info->close_delay;
	tmp.closing_wait = info->closing_wait;
	tmp.custom_divisor = info->custom_divisor;
	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
		return -EFAULT;
	return 0;
}

/* TIOCSSERIAL backend: apply user-supplied port settings.  Without
 * CAP_SYS_ADMIN only the ASYNC_USR_MASK flag bits may change; returns
 * -EPERM otherwise, -EBUSY if the port has multiple users, -EFAULT on
 * a bad user pointer, or the result of startup()/change_speed(). */
static int
set_serial_info(struct e100_serial *info,
		struct serial_struct *new_info)
{
	struct serial_struct new_serial;
	struct e100_serial old_info;
	int retval = 0;

	if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
		return -EFAULT;

	old_info = *info;

	if (!capable(CAP_SYS_ADMIN)) {
		if ((new_serial.type != info->type) ||
		    (new_serial.close_delay != info->close_delay) ||
		    ((new_serial.flags & ~ASYNC_USR_MASK) !=
		     (info->flags & ~ASYNC_USR_MASK)))
			return -EPERM;
		info->flags = ((info->flags & ~ASYNC_USR_MASK) |
			       (new_serial.flags & ASYNC_USR_MASK));
		goto check_and_exit;
	}

	if (info->count > 1)
		return -EBUSY;

	/*
	 * OK, past this point, all the error checking has been done.
	 * At this point, we start making changes.....
	 */
	info->baud_base = new_serial.baud_base;
	info->flags = ((info->flags & ~ASYNC_FLAGS) |
		       (new_serial.flags & ASYNC_FLAGS));
	info->custom_divisor = new_serial.custom_divisor;
	info->type = new_serial.type;
	info->close_delay = new_serial.close_delay;
	info->closing_wait = new_serial.closing_wait;
	info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;

 check_and_exit:
	if (info->flags & ASYNC_INITIALIZED) {
		change_speed(info);
	} else
		retval = startup(info);
	return retval;
}

/*
 * get_lsr_info - get line status register info
 *
 * Purpose: Let user call ioctl() to get info when the UART physically
 * 	    is emptied.  On bus types like RS485, the transmitter must
 * 	    release the bus after transmitting. This must be done when
 * 	    the transmit shift register is empty, not be done when the
 * 	    transmit holding register is empty.  This functionality
 * 	    allows an RS485 driver to be written in user space.
 */
static int
get_lsr_info(struct e100_serial * info, unsigned int *value)
{
	unsigned int result = TIOCSER_TEMT;
#ifndef CONFIG_SVINTO_SIM
	unsigned long curr_time = jiffies;
	unsigned long curr_time_usec = GET_JIFFIES_USEC();
	unsigned long elapsed_usec =
		(curr_time - info->last_tx_active) * 1000000/HZ +
		curr_time_usec - info->last_tx_active_usec;

	/* Not empty if bytes remain queued or the last byte left the
	 * register less than two character times ago. */
	if (info->xmit.head != info->xmit.tail ||
	    elapsed_usec < 2*info->char_time_usec) {
		result = 0;
	}
#endif

	if (copy_to_user(value, &result, sizeof(int)))
		return -EFAULT;
	return 0;
}

#ifdef SERIAL_DEBUG_IO
/* Debug-only mapping from TIOCM_* modem-line bits to printable names. */
struct state_str
{
	int state;
	const char *str;
};

const struct state_str control_state_str[] = {
	{TIOCM_DTR, "DTR" },
	{TIOCM_RTS, "RTS"},
	{TIOCM_ST, "ST?" },
	{TIOCM_SR, "SR?"
}, {TIOCM_CTS, "CTS" }, {TIOCM_CD, "CD" }, {TIOCM_RI, "RI" }, {TIOCM_DSR, "DSR" }, {0, NULL } }; char *get_control_state_str(int MLines, char *s) { int i = 0; s[0]='\0'; while (control_state_str[i].str != NULL) { if (MLines & control_state_str[i].state) { if (s[0] != '\0') { strcat(s, ", "); } strcat(s, control_state_str[i].str); } i++; } return s; } #endif static int rs_break(struct tty_struct *tty, int break_state) { struct e100_serial *info = (struct e100_serial *)tty->driver_data; unsigned long flags; if (!info->ioport) return -EIO; local_irq_save(flags); if (break_state == -1) { /* Go to manual mode and set the txd pin to 0 */ /* Clear bit 7 (txd) and 6 (tr_enable) */ info->tx_ctrl &= 0x3F; } else { /* Set bit 7 (txd) and 6 (tr_enable) */ info->tx_ctrl |= (0x80 | 0x40); } info->ioport[REG_TR_CTRL] = info->tx_ctrl; local_irq_restore(flags); return 0; } static int rs_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct e100_serial *info = (struct e100_serial *)tty->driver_data; unsigned long flags; local_irq_save(flags); if (clear & TIOCM_RTS) e100_rts(info, 0); if (clear & TIOCM_DTR) e100_dtr(info, 0); /* Handle FEMALE behaviour */ if (clear & TIOCM_RI) e100_ri_out(info, 0); if (clear & TIOCM_CD) e100_cd_out(info, 0); if (set & TIOCM_RTS) e100_rts(info, 1); if (set & TIOCM_DTR) e100_dtr(info, 1); /* Handle FEMALE behaviour */ if (set & TIOCM_RI) e100_ri_out(info, 1); if (set & TIOCM_CD) e100_cd_out(info, 1); local_irq_restore(flags); return 0; } static int rs_tiocmget(struct tty_struct *tty) { struct e100_serial *info = (struct e100_serial *)tty->driver_data; unsigned int result; unsigned long flags; local_irq_save(flags); result = (!E100_RTS_GET(info) ? TIOCM_RTS : 0) | (!E100_DTR_GET(info) ? TIOCM_DTR : 0) | (!E100_RI_GET(info) ? TIOCM_RNG : 0) | (!E100_DSR_GET(info) ? TIOCM_DSR : 0) | (!E100_CD_GET(info) ? TIOCM_CAR : 0) | (!E100_CTS_GET(info) ? 
TIOCM_CTS : 0);

	local_irq_restore(flags);

#ifdef SERIAL_DEBUG_IO
	printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n",
		info->line, result, result);
	{
		char s[100];

		get_control_state_str(result, s);
		printk(KERN_DEBUG "state: %s\n", s);
	}
#endif
	return result;

}

/* Driver-specific ioctl dispatcher: serial_struct get/set, LSR query,
 * the deprecated debug TIOCSERGSTRUCT dump, and the RS-485
 * configuration/write ioctls.  Unknown commands return -ENOIOCTLCMD
 * so the tty core can fall back to its generic handling. */
static int
rs_ioctl(struct tty_struct *tty,
	 unsigned int cmd, unsigned long arg)
{
	struct e100_serial * info = (struct e100_serial *)tty->driver_data;

	/* Most ioctls are refused while the port is in an I/O-error
	 * state; the listed info/config commands remain allowed. */
	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD)  &&
	    (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
		if (tty->flags & (1 << TTY_IO_ERROR))
			return -EIO;
	}

	switch (cmd) {
	case TIOCGSERIAL:
		return get_serial_info(info,
				       (struct serial_struct *) arg);
	case TIOCSSERIAL:
		return set_serial_info(info,
				       (struct serial_struct *) arg);
	case TIOCSERGETLSR: /* Get line status register */
		return get_lsr_info(info, (unsigned int *) arg);

	case TIOCSERGSTRUCT:
		if (copy_to_user((struct e100_serial *) arg,
				 info, sizeof(struct e100_serial)))
			return -EFAULT;
		return 0;

#if defined(CONFIG_ETRAX_RS485)
	case TIOCSERSETRS485:
	{
		/* In this ioctl we still use the old structure
		 * rs485_control for backward compatibility
		 * (if we use serial_rs485, then old user-level code
		 * wouldn't work anymore...).
		 * The use of this ioctl is deprecated: use TIOCSRS485
		 * instead.*/
		struct rs485_control rs485ctrl;
		struct serial_rs485 rs485data;
		printk(KERN_DEBUG "The use of this ioctl is deprecated. Use TIOCSRS485 instead\n");
		if (copy_from_user(&rs485ctrl, (struct rs485_control *)arg,
				sizeof(rs485ctrl)))
			return -EFAULT;

		/* Translate the legacy rs485_control fields into the
		 * generic serial_rs485 flags. */
		rs485data.delay_rts_before_send =
			rs485ctrl.delay_rts_before_send;
		rs485data.flags = 0;

		if (rs485data.delay_rts_before_send != 0)
			rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
		else
			rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);

		if (rs485ctrl.enabled)
			rs485data.flags |= SER_RS485_ENABLED;
		else
			rs485data.flags &= ~(SER_RS485_ENABLED);

		if (rs485ctrl.rts_on_send)
			rs485data.flags |= SER_RS485_RTS_ON_SEND;
		else
			rs485data.flags &= ~(SER_RS485_RTS_ON_SEND);

		if (rs485ctrl.rts_after_sent)
			rs485data.flags |= SER_RS485_RTS_AFTER_SEND;
		else
			rs485data.flags &= ~(SER_RS485_RTS_AFTER_SEND);

		return e100_enable_rs485(tty, &rs485data);
	}

	case TIOCSRS485:
	{
		/* This is the new version of TIOCSRS485, with new
		 * data structure serial_rs485 */
		struct serial_rs485 rs485data;
		if (copy_from_user(&rs485data,
				(struct rs485_control *)arg,
				sizeof(rs485data)))
			return -EFAULT;

		return e100_enable_rs485(tty, &rs485data);
	}

	case TIOCGRS485:
	{
		struct serial_rs485 *rs485data =
			&(((struct e100_serial *)tty->driver_data)->rs485);
		/* This is the ioctl to get RS485 data from user-space */
		if (copy_to_user((struct serial_rs485 *) arg,
					rs485data,
					sizeof(struct serial_rs485)))
			return -EFAULT;
		break;
	}

	case TIOCSERWRRS485:
	{
		struct rs485_write rs485wr;
		if (copy_from_user(&rs485wr, (struct rs485_write *)arg,
				sizeof(rs485wr)))
			return -EFAULT;

		return e100_write_rs485(tty, rs485wr.outc, rs485wr.outc_size);
	}
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

/* tty set_termios hook: reprogram line parameters and restart output
 * if CRTSCTS was just turned off (the port may have been hw-stopped
 * by CTS). */
static void
rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;

	change_speed(info);

	/* Handle turning off CRTSCTS */
	if ((old_termios->c_cflag & CRTSCTS) &&
	    !(tty->termios->c_cflag & CRTSCTS)) {
		tty->hw_stopped = 0;
		rs_start(tty);
	}
}

/*
 * ------------------------------------------------------------
 * rs_close()
 *
 * This routine is called when the
 * serial port gets closed.  First, we
 * wait for the last remaining data to be sent.  Then, we unlink its
 * S structure from the interrupt chain if necessary, and we free
 * that IRQ if nothing is left in the chain.
 * ------------------------------------------------------------
 */
static void
rs_close(struct tty_struct *tty, struct file * filp)
{
	struct e100_serial * info = (struct e100_serial *)tty->driver_data;
	unsigned long flags;

	if (!info)
		return;

	/* interrupts are disabled for this entire function */

	local_irq_save(flags);

	if (tty_hung_up_p(filp)) {
		local_irq_restore(flags);
		return;
	}

#ifdef SERIAL_DEBUG_OPEN
	printk("[%d] rs_close ttyS%d, count = %d\n", current->pid,
	       info->line, info->count);
#endif
	if ((tty->count == 1) && (info->count != 1)) {
		/*
		 * Uh, oh.  tty->count is 1, which means that the tty
		 * structure will be freed.  Info->count should always
		 * be one in these conditions.  If it's greater than
		 * one, we've got real problems, since it means the
		 * serial port won't be shutdown.
		 */
		printk(KERN_CRIT
		       "rs_close: bad serial port count; tty->count is 1, "
		       "info->count is %d\n", info->count);
		info->count = 1;
	}
	if (--info->count < 0) {
		printk(KERN_CRIT "rs_close: bad serial port count for ttyS%d: %d\n",
		       info->line, info->count);
		info->count = 0;
	}
	if (info->count) {
		/* Not the last close: just drop the reference. */
		local_irq_restore(flags);
		return;
	}
	/* Last reference gone: really shut the port down. */
	info->flags |= ASYNC_CLOSING;
	/*
	 * Save the termios structure, since this port may have
	 * separate termios for callout and dialin.
	 */
	if (info->flags & ASYNC_NORMAL_ACTIVE)
		info->normal_termios = *tty->termios;
	/*
	 * Now we wait for the transmit buffer to clear; and we notify
	 * the line discipline to only process XON/XOFF characters.
	 */
	tty->closing = 1;
	if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
		tty_wait_until_sent(tty, info->closing_wait);
	/*
	 * At this point we stop accepting input.  To do this, we
	 * disable the serial receiver and the DMA receive interrupt.
	 */
#ifdef SERIAL_HANDLE_EARLY_ERRORS
	e100_disable_serial_data_irq(info);
#endif

#ifndef CONFIG_SVINTO_SIM
	e100_disable_rx(info);
	e100_disable_rx_irq(info);

	if (info->flags & ASYNC_INITIALIZED) {
		/*
		 * Before we drop DTR, make sure the UART transmitter
		 * has completely drained; this is especially
		 * important as we have a transmit FIFO!
		 */
		rs_wait_until_sent(tty, HZ);
	}
#endif

	shutdown(info);
	rs_flush_buffer(tty);
	tty_ldisc_flush(tty);
	tty->closing = 0;
	info->event = 0;
	info->port.tty = NULL;
	if (info->blocked_open) {
		/* Someone is parked in block_til_ready(): give them a
		 * chance after the optional close delay. */
		if (info->close_delay)
			schedule_timeout_interruptible(info->close_delay);
		wake_up_interruptible(&info->open_wait);
	}
	info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
	wake_up_interruptible(&info->close_wait);
	local_irq_restore(flags);

	/* port closed */

#if defined(CONFIG_ETRAX_RS485)
	/* Drop the RS485 transceiver enable lines on close. */
	if (info->rs485.flags & SER_RS485_ENABLED) {
		info->rs485.flags &= ~(SER_RS485_ENABLED);
#if defined(CONFIG_ETRAX_RS485_ON_PA)
		*R_PORT_PA_DATA = port_pa_data_shadow &= ~(1 << rs485_pa_bit);
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
		REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
			       rs485_port_g_bit, 0);
#endif
#if defined(CONFIG_ETRAX_RS485_LTC1387)
		REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
			       CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 0);
		REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
			       CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 0);
#endif
	}
#endif

	/*
	 * Release any allocated DMA irq's.
	 */
	if (info->dma_in_enabled) {
		free_irq(info->dma_in_irq_nbr, info);
		cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
		info->uses_dma_in = 0;
#ifdef SERIAL_DEBUG_OPEN
		printk(KERN_DEBUG "DMA irq '%s' freed\n",
		       info->dma_in_irq_description);
#endif
	}
	if (info->dma_out_enabled) {
		free_irq(info->dma_out_irq_nbr, info);
		cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
		info->uses_dma_out = 0;
#ifdef SERIAL_DEBUG_OPEN
		printk(KERN_DEBUG "DMA irq '%s' freed\n",
		       info->dma_out_irq_description);
#endif
	}
}

/*
 * rs_wait_until_sent() --- wait until the transmitter is empty
 *
 * Polls the software xmit ring, the hardware FIFO status register and
 * the time since the last TX activity until all are quiet, a signal
 * arrives, or the (jiffies-based) timeout expires.
 */
static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
{
	unsigned long orig_jiffies;
	struct e100_serial *info = (struct e100_serial *)tty->driver_data;
	unsigned long curr_time = jiffies;
	unsigned long curr_time_usec = GET_JIFFIES_USEC();
	long elapsed_usec =
		(curr_time - info->last_tx_active) * (1000000/HZ) +
		curr_time_usec - info->last_tx_active_usec;

	/*
	 * Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO
	 * R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k)
	 */
	orig_jiffies = jiffies;
	while (info->xmit.head != info->xmit.tail || /* More in send queue */
	       (*info->ostatusadr & 0x007f) ||  /* more in FIFO */
	       (elapsed_usec < 2*info->char_time_usec)) {
		schedule_timeout_interruptible(1);
		if (signal_pending(current))
			break;
		if (timeout && time_after(jiffies, orig_jiffies + timeout))
			break;
		curr_time = jiffies;
		curr_time_usec = GET_JIFFIES_USEC();
		elapsed_usec =
			(curr_time - info->last_tx_active) * (1000000/HZ) +
			curr_time_usec - info->last_tx_active_usec;
	}
	set_current_state(TASK_RUNNING);
}

/*
 * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
 */
void
rs_hangup(struct tty_struct *tty)
{
	struct e100_serial * info = (struct e100_serial *)tty->driver_data;

	/* Drop everything: pending output, hardware, reference count. */
	rs_flush_buffer(tty);
	shutdown(info);
	info->event = 0;
	info->count = 0;
	info->flags &= ~ASYNC_NORMAL_ACTIVE;
	info->port.tty = NULL;
	wake_up_interruptible(&info->open_wait);
}

/*
 * ------------------------------------------------------------
 * rs_open() and friends
 * ------------------------------------------------------------
 */

/*
 * block_til_ready - sleep in open() until the port is usable.
 *
 * Returns 0 when the caller may proceed (ASYNC_NORMAL_ACTIVE set),
 * or -EAGAIN/-ERESTARTSYS if the open was interrupted or the port
 * was hung up.  While sleeping, info->count is temporarily dropped
 * by one so rs_close() can tell when to free things.
 */
static int
block_til_ready(struct tty_struct *tty, struct file * filp,
		struct e100_serial *info)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int retval;
	int do_clocal = 0, extra_count = 0;

	/*
	 * If the device is in the middle of being closed, then block
	 * until it's done, and then try again.
	 */
	if (tty_hung_up_p(filp) ||
	    (info->flags & ASYNC_CLOSING)) {
		wait_event_interruptible_tty(info->close_wait,
			!(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
		if (info->flags & ASYNC_HUP_NOTIFY)
			return -EAGAIN;
		else
			return -ERESTARTSYS;
#else
		return -EAGAIN;
#endif
	}

	/*
	 * If non-blocking mode is set, or the port is not enabled,
	 * then make the check up front and then exit.
	 */
	if ((filp->f_flags & O_NONBLOCK) ||
	    (tty->flags & (1 << TTY_IO_ERROR))) {
		info->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	if (tty->termios->c_cflag & CLOCAL) {
		do_clocal = 1;
	}

	/*
	 * Block waiting for the carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, info->count is dropped by one, so that
	 * rs_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */
	retval = 0;
	add_wait_queue(&info->open_wait, &wait);
#ifdef SERIAL_DEBUG_OPEN
	printk("block_til_ready before block: ttyS%d, count = %d\n",
	       info->line, info->count);
#endif
	local_irq_save(flags);
	if (!tty_hung_up_p(filp)) {
		extra_count++;
		info->count--;
	}
	local_irq_restore(flags);
	info->blocked_open++;
	while (1) {
		local_irq_save(flags);
		/* assert RTS and DTR */
		e100_rts(info, 1);
		e100_dtr(info, 1);
		local_irq_restore(flags);
		set_current_state(TASK_INTERRUPTIBLE);
		if (tty_hung_up_p(filp) ||
		    !(info->flags & ASYNC_INITIALIZED)) {
#ifdef SERIAL_DO_RESTART
			if (info->flags & ASYNC_HUP_NOTIFY)
				retval = -EAGAIN;
			else
				retval = -ERESTARTSYS;
#else
			retval = -EAGAIN;
#endif
			break;
		}
		if (!(info->flags & ASYNC_CLOSING) && do_clocal)
			/* && (do_clocal || DCD_IS_ASSERTED) */
			break;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
#ifdef SERIAL_DEBUG_OPEN
		printk("block_til_ready blocking: ttyS%d, count = %d\n",
		       info->line, info->count);
#endif
		/* Drop the big tty lock while sleeping so other tty
		 * operations (notably close) can make progress. */
		tty_unlock();
		schedule();
		tty_lock();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&info->open_wait, &wait);
	if (extra_count)
		info->count++;
	info->blocked_open--;
#ifdef SERIAL_DEBUG_OPEN
	printk("block_til_ready after blocking: ttyS%d, count = %d\n",
	       info->line, info->count);
#endif
	if (retval)
		return retval;
	info->flags |= ASYNC_NORMAL_ACTIVE;
	return 0;
}

/*
 * deinit_port - release the DMA channels and their irqs for one port.
 * Mirror of the allocations done in rs_open().
 */
static void
deinit_port(struct e100_serial *info)
{
	if (info->dma_out_enabled) {
		cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
		free_irq(info->dma_out_irq_nbr, info);
	}
	if (info->dma_in_enabled) {
		cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
		free_irq(info->dma_in_irq_nbr, info);
	}
}

/*
 * This routine is called whenever a serial port is opened.
 * It performs the serial-specific initialization for the tty structure.
 */
static int
rs_open(struct tty_struct *tty, struct file * filp)
{
	struct e100_serial	*info;
	int			retval, line;
	unsigned long		page;
	int			allocated_resources = 0;

	/* find which port we want to open */
	line = tty->index;

	if (line < 0 || line >= NR_PORTS)
		return -ENODEV;

	/* find the corresponding e100_serial struct in the table */
	info = rs_table + line;

	/* don't allow the opening of ports that are not enabled in the HW config */
	if (!info->enabled)
		return -ENODEV;

#ifdef SERIAL_DEBUG_OPEN
	printk("[%d] rs_open %s, count = %d\n", current->pid, tty->name,
	       info->count);
#endif

	info->count++;
	tty->driver_data = info;
	info->port.tty = tty;

	info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;

	/* Lazily allocate the shared bounce page; the second check
	 * guards against a racing open having beaten us to it. */
	if (!tmp_buf) {
		page = get_zeroed_page(GFP_KERNEL);
		if (!page) {
			/* NOTE(review): returns with info->count already
			 * incremented — appears to leak a reference, like
			 * the FIXME on the startup() failure path below. */
			return -ENOMEM;
		}
		if (tmp_buf)
			free_page(page);
		else
			tmp_buf = (unsigned char *) page;
	}

	/*
	 * If the port is in the middle of closing, bail out now
	 */
	if (tty_hung_up_p(filp) ||
	    (info->flags & ASYNC_CLOSING)) {
		wait_event_interruptible_tty(info->close_wait,
			!(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
		return ((info->flags & ASYNC_HUP_NOTIFY) ?
			-EAGAIN : -ERESTARTSYS);
#else
		return -EAGAIN;
#endif
	}

	/*
	 * If DMA is enabled try to allocate the irq's.
	 * On failure of either the irq or the DMA channel the port
	 * permanently falls back to non-DMA operation.
	 */
	if (info->count == 1) {
		allocated_resources = 1;
		if (info->dma_in_enabled) {
			if (request_irq(info->dma_in_irq_nbr,
					rec_interrupt,
					info->dma_in_irq_flags,
					info->dma_in_irq_description,
					info)) {
				printk(KERN_WARNING "DMA irq '%s' busy; "
					"falling back to non-DMA mode\n",
					info->dma_in_irq_description);
				/* Make sure we never try to use DMA in */
				/* for the port again. */
				info->dma_in_enabled = 0;
			} else if (cris_request_dma(info->dma_in_nbr,
					info->dma_in_irq_description,
					DMA_VERBOSE_ON_ERROR,
					info->dma_owner)) {
				free_irq(info->dma_in_irq_nbr, info);
				printk(KERN_WARNING "DMA '%s' busy; "
					"falling back to non-DMA mode\n",
					info->dma_in_irq_description);
				/* Make sure we never try to use DMA in */
				/* for the port again. */
				info->dma_in_enabled = 0;
			}
#ifdef SERIAL_DEBUG_OPEN
			else
				printk(KERN_DEBUG "DMA irq '%s' allocated\n",
					info->dma_in_irq_description);
#endif
		}
		if (info->dma_out_enabled) {
			if (request_irq(info->dma_out_irq_nbr,
					tr_interrupt,
					info->dma_out_irq_flags,
					info->dma_out_irq_description,
					info)) {
				printk(KERN_WARNING "DMA irq '%s' busy; "
					"falling back to non-DMA mode\n",
					info->dma_out_irq_description);
				/* Make sure we never try to use DMA out */
				/* for the port again. */
				info->dma_out_enabled = 0;
			} else if (cris_request_dma(info->dma_out_nbr,
					info->dma_out_irq_description,
					DMA_VERBOSE_ON_ERROR,
					info->dma_owner)) {
				free_irq(info->dma_out_irq_nbr, info);
				printk(KERN_WARNING "DMA '%s' busy; "
					"falling back to non-DMA mode\n",
					info->dma_out_irq_description);
				/* Make sure we never try to use DMA out */
				/* for the port again. */
				info->dma_out_enabled = 0;
			}
#ifdef SERIAL_DEBUG_OPEN
			else
				printk(KERN_DEBUG "DMA irq '%s' allocated\n",
					info->dma_out_irq_description);
#endif
		}
	}

	/*
	 * Start up the serial port
	 */
	retval = startup(info);
	if (retval) {
		if (allocated_resources)
			deinit_port(info);
		/* FIXME Decrease count info->count here too? */
		return retval;
	}

	retval = block_til_ready(tty, filp, info);
	if (retval) {
#ifdef SERIAL_DEBUG_OPEN
		printk("rs_open returning after block_til_ready with %d\n",
		       retval);
#endif
		if (allocated_resources)
			deinit_port(info);
		return retval;
	}

	if ((info->count == 1) && (info->flags & ASYNC_SPLIT_TERMIOS)) {
		*tty->termios = info->normal_termios;
		change_speed(info);
	}

#ifdef SERIAL_DEBUG_OPEN
	printk("rs_open ttyS%d successful...\n", info->line);
#endif
	DLOG_INT_TRIG( log_int_pos = 0);

	DFLIP( if (info->line == SERIAL_DEBUG_LINE) {
			info->icount.rx = 0;
		} );

	return 0;
}

#ifdef CONFIG_PROC_FS
/*
 * /proc fs routines....
 */
/*
 * seq_line_info - emit one port's status line into the /proc seq_file:
 * baud, tx/rx counters, pending bytes, flow-control state, error
 * counters and the inverted RS-232 modem line states.
 */
static void seq_line_info(struct seq_file *m, struct e100_serial *info)
{
	unsigned long tmp;

	seq_printf(m, "%d: uart:E100 port:%lX irq:%d",
		   info->line, (unsigned long)info->ioport, info->irq);

	if (!info->ioport || (info->type == PORT_UNKNOWN)) {
		seq_printf(m, "\n");
		return;
	}

	seq_printf(m, " baud:%d", info->baud);
	seq_printf(m, " tx:%lu rx:%lu",
		   (unsigned long)info->icount.tx,
		   (unsigned long)info->icount.rx);
	tmp = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
	if (tmp)
		seq_printf(m, " tx_pend:%lu/%lu",
			   (unsigned long)tmp,
			   (unsigned long)SERIAL_XMIT_SIZE);

	seq_printf(m, " rx_pend:%lu/%lu",
		   (unsigned long)info->recv_cnt,
		   (unsigned long)info->max_recv_cnt);

#if 1
	if (info->port.tty) {
		if (info->port.tty->stopped)
			seq_printf(m, " stopped:%i",
				   (int)info->port.tty->stopped);
		if (info->port.tty->hw_stopped)
			seq_printf(m, " hw_stopped:%i",
				   (int)info->port.tty->hw_stopped);
	}

	{
		unsigned char rstat = info->ioport[REG_STATUS];
		if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect))
			seq_printf(m, " xoff_detect:1");
	}

#endif

	if (info->icount.frame)
		seq_printf(m, " fe:%lu", (unsigned long)info->icount.frame);
	if (info->icount.parity)
		seq_printf(m, " pe:%lu", (unsigned long)info->icount.parity);
	if (info->icount.brk)
		seq_printf(m, " brk:%lu", (unsigned long)info->icount.brk);
	if (info->icount.overrun)
		seq_printf(m, " oe:%lu", (unsigned long)info->icount.overrun);

	/*
	 * Last thing is the RS-232 status lines
	 */
	if (!E100_RTS_GET(info))
		seq_puts(m, "|RTS");
	if (!E100_CTS_GET(info))
		seq_puts(m, "|CTS");
	if (!E100_DTR_GET(info))
		seq_puts(m, "|DTR");
	if (!E100_DSR_GET(info))
		seq_puts(m, "|DSR");
	if (!E100_CD_GET(info))
		seq_puts(m, "|CD");
	if (!E100_RI_GET(info))
		seq_puts(m, "|RI");
	seq_puts(m, "\n");
}

/* seq_file show callback: one line per enabled port, plus the debug
 * log when DEBUG_LOG_INCLUDED is set. */
static int crisv10_proc_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version);

	for (i = 0; i < NR_PORTS; i++) {
		if (!rs_table[i].enabled)
			continue;
		seq_line_info(m, &rs_table[i]);
	}
#ifdef DEBUG_LOG_INCLUDED
	for (i = 0; i < debug_log_pos; i++) {
		seq_printf(m, "%-4i %lu.%lu ",
			 i, debug_log[i].time,
			 timer_data_to_ns(debug_log[i].timer_data));
		seq_printf(m, debug_log[i].string, debug_log[i].value);
	}
	seq_printf(m, "debug_log %i/%i\n", i, DEBUG_LOG_SIZE);
	debug_log_pos = 0;
#endif
	return 0;
}

static int crisv10_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, crisv10_proc_show, NULL);
}

static const struct file_operations crisv10_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= crisv10_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif


/* Finally, routines used to initialize the serial driver. */

static void show_serial_version(void)
{
	printk(KERN_INFO
	       "ETRAX 100LX serial-driver %s, "
	       "(c) 2000-2004 Axis Communications AB\r\n",
	       &serial_version[11]); /* "$Revision: x.yy" */
}

/* rs_init inits the driver at boot (using the module_init chain) */

static const struct tty_operations rs_ops = {
	.open = rs_open,
	.close = rs_close,
	.write = rs_write,
	.flush_chars = rs_flush_chars,
	.write_room = rs_write_room,
	.chars_in_buffer = rs_chars_in_buffer,
	.flush_buffer = rs_flush_buffer,
	.ioctl = rs_ioctl,
	.throttle = rs_throttle,
	.unthrottle = rs_unthrottle,
	.set_termios = rs_set_termios,
	.stop = rs_stop,
	.start = rs_start,
	.hangup = rs_hangup,
	.break_ctl = rs_break,
	.send_xchar = rs_send_xchar,
	.wait_until_sent = rs_wait_until_sent,
	.tiocmget = rs_tiocmget,
	.tiocmset = rs_tiocmset,
#ifdef CONFIG_PROC_FS
	.proc_fops = &crisv10_proc_fops,
#endif
};

/*
 * rs_init - boot-time driver initialization: registers the tty driver,
 * claims board IO pins per enabled port, initializes every port's
 * e100_serial state, and hooks the shared serial interrupt.
 */
static int __init rs_init(void)
{
	int i;
	struct e100_serial *info;
	struct tty_driver *driver = alloc_tty_driver(NR_PORTS);

	if (!driver)
		return -ENOMEM;

	show_serial_version();

	/* Setup the timed flush handler system */

#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
	setup_timer(&flush_timer, timed_flush_handler, 0);
	mod_timer(&flush_timer, jiffies + 5);
#endif

#if defined(CONFIG_ETRAX_RS485)
#if defined(CONFIG_ETRAX_RS485_ON_PA)
	if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
			rs485_pa_bit)) {
		printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
			"RS485 pin\n");
		put_tty_driver(driver);
		return -EBUSY;
	}
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
	if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
			rs485_port_g_bit)) {
		printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
			"RS485 pin\n");
		put_tty_driver(driver);
		return -EBUSY;
	}
#endif
#endif

	/* Initialize the tty_driver structure */

	driver->driver_name = "serial";
	driver->name = "ttyS";
	driver->major = TTY_MAJOR;
	driver->minor_start = 64;
	driver->type = TTY_DRIVER_TYPE_SERIAL;
	driver->subtype = SERIAL_TYPE_NORMAL;
	driver->init_termios = tty_std_termios;
	driver->init_termios.c_cflag =
		B115200 | CS8 | CREAD | HUPCL | CLOCAL; /* is normally B9600 default... */
	driver->init_termios.c_ispeed = 115200;
	driver->init_termios.c_ospeed = 115200;
	driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;

	tty_set_operations(driver, &rs_ops);
	serial_driver = driver;
	if (tty_register_driver(driver))
		panic("Couldn't register serial driver\n");
	/* do some initializing for the separate ports */
	for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) {
		if (info->enabled) {
			if (cris_request_io_interface(info->io_if,
					info->io_if_description)) {
				printk(KERN_CRIT "ETRAX100LX async serial: "
					"Could not allocate IO pins for "
					"%s, port %d\n",
					info->io_if_description, i);
				info->enabled = 0;
			}
		}
		info->uses_dma_in = 0;
		info->uses_dma_out = 0;
		info->line = i;
		info->port.tty = NULL;
		info->type = PORT_ETRAX;
		info->tr_running = 0;
		info->forced_eop = 0;
		info->baud_base = DEF_BAUD_BASE;
		info->custom_divisor = 0;
		info->flags = 0;
		info->close_delay = 5*HZ/10;
		info->closing_wait = 30*HZ;
		info->x_char = 0;
		info->event = 0;
		info->count = 0;
		info->blocked_open = 0;
		info->normal_termios = driver->init_termios;
		init_waitqueue_head(&info->open_wait);
		init_waitqueue_head(&info->close_wait);
		info->xmit.buf = NULL;
		info->xmit.tail = info->xmit.head = 0;
		info->first_recv_buffer = info->last_recv_buffer = NULL;
		info->recv_cnt = info->max_recv_cnt = 0;
		info->last_tx_active_usec = 0;
		info->last_tx_active = 0;

#if defined(CONFIG_ETRAX_RS485)
		/* Set sane defaults */
		info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
		info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
		info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
		info->rs485.delay_rts_before_send = 0;
		info->rs485.flags &= ~(SER_RS485_ENABLED);
#endif
		INIT_WORK(&info->work, do_softint);

		if (info->enabled) {
			printk(KERN_INFO "%s%d at %p is a builtin UART with DMA\n",
			       serial_driver->name, info->line, info->ioport);
		}
	}
#ifdef CONFIG_ETRAX_FAST_TIMER
#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
	memset(fast_timers, 0, sizeof(fast_timers));
#endif
#ifdef CONFIG_ETRAX_RS485
	memset(fast_timers_rs485, 0, sizeof(fast_timers_rs485));
#endif
	fast_timer_init();
#endif

#ifndef CONFIG_SVINTO_SIM
#ifndef CONFIG_ETRAX_KGDB
	/* Not needed in simulator.  May only complicate stuff. */
	/* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */

	if (request_irq(SERIAL_IRQ_NBR, ser_interrupt,
			IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
		panic("%s: Failed to request irq8", __func__);

#endif
#endif /* CONFIG_SVINTO_SIM */

	return 0;
}

/* this makes sure that rs_init is called during kernel boot */
module_init(rs_init);
gpl-2.0
budi79/deka-kernel-msm7x30-3.0
drivers/media/video/pwc/pwc-misc.c
2739
4174
/* Linux driver for Philips webcam
   Various miscellaneous functions and tables.
   (C) 1999-2003 Nemosoft Unv.
   (C) 2004-2006 Luc Saillard (luc@saillard.org)

   NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
   driver and thus may have bugs that are not present in the original version.
   Please send bug reports and support requests to <luc@saillard.org>.
   The decompression routines have been implemented by reverse-engineering the
   Nemosoft binary pwcx module. Caveat emptor.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

#include "pwc.h"

/* Supported view sizes, indexed by the PSZ_* constants. */
const struct pwc_coord pwc_image_sizes[PSZ_MAX] =
{
	{ 128,  96, 0 }, /* sqcif */
	{ 160, 120, 0 }, /* qsif */
	{ 176, 144, 0 }, /* qcif */
	{ 320, 240, 0 }, /* sif */
	{ 352, 288, 0 }, /* cif */
	{ 640, 480, 0 }, /* vga */
};

/* x,y -> PSZ_ */
/*
 * pwc_decode_size - map a requested width/height to a PSZ_* index.
 *
 * Rejects requests beyond the camera's maximum (the limit differs for
 * raw and decompressed modes), then returns the largest supported size
 * that still fits inside the requested rectangle, or -1 if none does.
 */
int pwc_decode_size(struct pwc_device *pdev, int width, int height)
{
	int idx;
	int best = -1;

	/* Make sure we don't go beyond our max size.
	   NB: we have different limits for RAW and normal modes. In case
	   you don't have the decompressor loaded or use RAW mode, the
	   maximum viewable size is smaller.
	*/
	if (pdev->pixfmt != V4L2_PIX_FMT_YUV420) {
		if (width > pdev->abs_max.x || height > pdev->abs_max.y) {
			PWC_DEBUG_SIZE("VIDEO_PALETTE_RAW: going beyond abs_max.\n");
			return -1;
		}
	} else if (width > pdev->view_max.x || height > pdev->view_max.y) {
		PWC_DEBUG_SIZE("VIDEO_PALETTE_not RAW: going beyond view_max.\n");
		return -1;
	}

	/* Walk the size table in ascending order; the last entry that is
	   both supported by this camera and no larger than the request
	   wins. */
	for (idx = 0; idx < PSZ_MAX; idx++) {
		if (!(pdev->image_mask & (1 << idx)))
			continue;
		if (pwc_image_sizes[idx].x <= width &&
		    pwc_image_sizes[idx].y <= height)
			best = idx;
	}
	return best;
}

/* initialize variables depending on type and decompressor */
/*
 * pwc_construct - fill in the size limits, USB interface/endpoint
 * numbers and frame header/trailer sizes for the given camera type,
 * then derive the common fields (pixel format, view sizes, per-image
 * buffer length).
 */
void pwc_construct(struct pwc_device *pdev)
{
	if (DEVICE_USE_CODEC1(pdev->type)) {
		pdev->view_min.x = 128;
		pdev->view_min.y = 96;
		pdev->view_max.x = 352;
		pdev->view_max.y = 288;
		pdev->abs_max.x = 352;
		pdev->abs_max.y = 288;
		pdev->image_mask = 1 << PSZ_SQCIF | 1 << PSZ_QCIF | 1 << PSZ_CIF;
		pdev->vcinterface = 2;
		pdev->vendpoint = 4;
		pdev->frame_header_size = 0;
		pdev->frame_trailer_size = 0;
	} else if (DEVICE_USE_CODEC3(pdev->type)) {
		pdev->view_min.x = 160;
		pdev->view_min.y = 120;
		pdev->view_max.x = 640;
		pdev->view_max.y = 480;
		pdev->abs_max.x = 640;
		pdev->abs_max.y = 480;
		pdev->image_mask = 1 << PSZ_QSIF | 1 << PSZ_SIF | 1 << PSZ_VGA;
		pdev->vcinterface = 3;
		pdev->vendpoint = 5;
		pdev->frame_header_size = TOUCAM_HEADER_SIZE;
		pdev->frame_trailer_size = TOUCAM_TRAILER_SIZE;
	} else {
		/* DEVICE_USE_CODEC2(pdev->type) */
		pdev->view_min.x = 128;
		pdev->view_min.y = 96;
		/* Anthill bug #38: PWC always reports max size, even without PWCX */
		pdev->view_max.x = 640;
		pdev->view_max.y = 480;
		pdev->abs_max.x = 640;
		pdev->abs_max.y = 480;
		pdev->image_mask = 1 << PSZ_SQCIF | 1 << PSZ_QSIF | 1 << PSZ_QCIF |
				   1 << PSZ_SIF | 1 << PSZ_CIF | 1 << PSZ_VGA;
		pdev->vcinterface = 3;
		pdev->vendpoint = 4;
		pdev->frame_header_size = 0;
		pdev->frame_trailer_size = 0;
	}
	pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
	pdev->view_min.size = pdev->view_min.x * pdev->view_min.y;
	pdev->view_max.size = pdev->view_max.x * pdev->view_max.y;
	/* length of image, in YUV format; always allocate enough memory. */
	pdev->len_per_image = PAGE_ALIGN((pdev->abs_max.x * pdev->abs_max.y * 3) / 2);
}
gpl-2.0
sakuraba001/android_kernel_samsung_tblte
sound/pci/ice1712/pontis.c
3251
22490
/* * ALSA driver for ICEnsemble VT1724 (Envy24HT) * * Lowlevel functions for Pontis MS300 * * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/info.h> #include <sound/tlv.h> #include "ice1712.h" #include "envy24ht.h" #include "pontis.h" /* I2C addresses */ #define WM_DEV 0x34 #define CS_DEV 0x20 /* WM8776 registers */ #define WM_HP_ATTEN_L 0x00 /* headphone left attenuation */ #define WM_HP_ATTEN_R 0x01 /* headphone left attenuation */ #define WM_HP_MASTER 0x02 /* headphone master (both channels) */ /* override LLR */ #define WM_DAC_ATTEN_L 0x03 /* digital left attenuation */ #define WM_DAC_ATTEN_R 0x04 #define WM_DAC_MASTER 0x05 #define WM_PHASE_SWAP 0x06 /* DAC phase swap */ #define WM_DAC_CTRL1 0x07 #define WM_DAC_MUTE 0x08 #define WM_DAC_CTRL2 0x09 #define WM_DAC_INT 0x0a #define WM_ADC_INT 0x0b #define WM_MASTER_CTRL 0x0c #define WM_POWERDOWN 0x0d #define WM_ADC_ATTEN_L 0x0e #define WM_ADC_ATTEN_R 0x0f #define WM_ALC_CTRL1 0x10 #define WM_ALC_CTRL2 0x11 #define WM_ALC_CTRL3 0x12 #define WM_NOISE_GATE 0x13 #define WM_LIMITER 0x14 #define WM_ADC_MUX 0x15 #define WM_OUT_MUX 0x16 #define WM_RESET 0x17 /* * GPIO 
 */
#define PONTIS_CS_CS	(1<<4)	/* CS */
#define PONTIS_CS_CLK	(1<<5)	/* CLK */
#define PONTIS_CS_RDATA	(1<<6)	/* CS8416 -> VT1720 */
#define PONTIS_CS_WDATA	(1<<7)	/* VT1720 -> CS8416 */

/*
 * get the current register value of WM codec
 * (reads the cached shadow in akm[0].images; registers are stored as
 * two bytes per register, hence the reg <<= 1 indexing)
 */
static unsigned short wm_get(struct snd_ice1712 *ice, int reg)
{
	reg <<= 1;
	return ((unsigned short)ice->akm[0].images[reg] << 8) |
		ice->akm[0].images[reg + 1];
}

/*
 * set the register value of WM codec and remember it
 */
static void wm_put_nocache(struct snd_ice1712 *ice, int reg, unsigned short val)
{
	unsigned short cval;
	/* 7-bit register address and 9-bit value packed into 16 bits */
	cval = (reg << 9) | val;
	snd_vt1724_write_i2c(ice, WM_DEV, cval >> 8, cval & 0xff);
}

/* write to the codec and update the cached shadow copy */
static void wm_put(struct snd_ice1712 *ice, int reg, unsigned short val)
{
	wm_put_nocache(ice, reg, val);
	reg <<= 1;
	ice->akm[0].images[reg] = val >> 8;
	ice->akm[0].images[reg + 1] = val;
}

/*
 * DAC volume attenuation mixer control (-64dB to 0dB)
 */

#define DAC_0dB	0xff
#define DAC_RES	128
#define DAC_MIN	(DAC_0dB - DAC_RES)

static int wm_dac_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;		/* mute */
	uinfo->value.integer.max = DAC_RES;	/* 0dB, 0.5dB step */
	return 0;
}

static int wm_dac_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned short val;
	int i;

	mutex_lock(&ice->gpio_mutex);
	for (i = 0; i < 2; i++) {
		/* map hardware attenuation back to the 0..DAC_RES scale */
		val = wm_get(ice, WM_DAC_ATTEN_L + i) & 0xff;
		val = val > DAC_MIN ? (val - DAC_MIN) : 0;
		ucontrol->value.integer.value[i] = val;
	}
	mutex_unlock(&ice->gpio_mutex);
	return 0;
}

static int wm_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned short oval, nval;
	int i, idx, change = 0;

	mutex_lock(&ice->gpio_mutex);
	for (i = 0; i < 2; i++) {
		nval = ucontrol->value.integer.value[i];
		nval = (nval ? (nval + DAC_MIN) : 0) & 0xff;
		idx = WM_DAC_ATTEN_L + i;
		oval = wm_get(ice, idx) & 0xff;
		if (oval != nval) {
			wm_put(ice, idx, nval);
			/* second write with bit 8 set — NOTE(review):
			 * presumably the WM8776 volume-update latch bit;
			 * confirm against the datasheet */
			wm_put_nocache(ice, idx, nval | 0x100);
			change = 1;
		}
	}
	mutex_unlock(&ice->gpio_mutex);
	return change;
}

/*
 * ADC gain mixer control (-64dB to 0dB)
 */

#define ADC_0dB	0xcf
#define ADC_RES	128
#define ADC_MIN	(ADC_0dB - ADC_RES)

static int wm_adc_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;		/* mute (-64dB) */
	uinfo->value.integer.max = ADC_RES;	/* 0dB, 0.5dB step */
	return 0;
}

static int wm_adc_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned short val;
	int i;

	mutex_lock(&ice->gpio_mutex);
	for (i = 0; i < 2; i++) {
		val = wm_get(ice, WM_ADC_ATTEN_L + i) & 0xff;
		val = val > ADC_MIN ? (val - ADC_MIN) : 0;
		ucontrol->value.integer.value[i] = val;
	}
	mutex_unlock(&ice->gpio_mutex);
	return 0;
}

static int wm_adc_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned short ovol, nvol;
	int i, idx, change = 0;

	mutex_lock(&ice->gpio_mutex);
	for (i = 0; i < 2; i++) {
		nvol = ucontrol->value.integer.value[i];
		nvol = nvol ? (nvol + ADC_MIN) : 0;
		idx = WM_ADC_ATTEN_L + i;
		ovol = wm_get(ice, idx) & 0xff;
		if (ovol != nvol) {
			wm_put(ice, idx, nvol);
			change = 1;
		}
	}
	mutex_unlock(&ice->gpio_mutex);
	return change;
}

/*
 * ADC input mux mixer control
 * (each control toggles one bit of WM_ADC_MUX, selected via
 * kcontrol->private_value)
 */
#define wm_adc_mux_info		snd_ctl_boolean_mono_info

static int wm_adc_mux_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int bit = kcontrol->private_value;

	mutex_lock(&ice->gpio_mutex);
	ucontrol->value.integer.value[0] = (wm_get(ice, WM_ADC_MUX) & (1 << bit)) ? 1 : 0;
	mutex_unlock(&ice->gpio_mutex);
	return 0;
}

static int wm_adc_mux_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	int bit = kcontrol->private_value;
	unsigned short oval, nval;
	int change;

	mutex_lock(&ice->gpio_mutex);
	nval = oval = wm_get(ice, WM_ADC_MUX);
	if (ucontrol->value.integer.value[0])
		nval |= (1 << bit);
	else
		nval &= ~(1 << bit);
	change = nval != oval;
	if (change) {
		wm_put(ice, WM_ADC_MUX, nval);
	}
	mutex_unlock(&ice->gpio_mutex);
	return change;
}

/*
 * Analog bypass (In -> Out)
 * (bit 2 of WM_OUT_MUX)
 */
#define wm_bypass_info		snd_ctl_boolean_mono_info

static int wm_bypass_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	mutex_lock(&ice->gpio_mutex);
	ucontrol->value.integer.value[0] = (wm_get(ice, WM_OUT_MUX) & 0x04) ? 1 : 0;
	mutex_unlock(&ice->gpio_mutex);
	return 0;
}

static int wm_bypass_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned short val, oval;
	int change = 0;

	mutex_lock(&ice->gpio_mutex);
	val = oval = wm_get(ice, WM_OUT_MUX);
	if (ucontrol->value.integer.value[0])
		val |= 0x04;
	else
		val &= ~0x04;
	if (val != oval) {
		wm_put(ice, WM_OUT_MUX, val);
		change = 1;
	}
	mutex_unlock(&ice->gpio_mutex);
	return change;
}

/*
 * Left/Right swap
 * (upper nibble of WM_DAC_CTRL1: 0x90 = normal, 0x60 = swapped)
 */
#define wm_chswap_info		snd_ctl_boolean_mono_info

static int wm_chswap_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	mutex_lock(&ice->gpio_mutex);
	ucontrol->value.integer.value[0] = (wm_get(ice, WM_DAC_CTRL1) & 0xf0) != 0x90;
	mutex_unlock(&ice->gpio_mutex);
	return 0;
}

static int wm_chswap_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned short val, oval;
	int change = 0;

	mutex_lock(&ice->gpio_mutex);
	oval = wm_get(ice, WM_DAC_CTRL1);
	val = oval & 0x0f;
	if (ucontrol->value.integer.value[0])
		val |= 0x60;
	else
		val |= 0x90;
	if (val != oval) {
		wm_put(ice, WM_DAC_CTRL1, val);
		wm_put_nocache(ice, WM_DAC_CTRL1, val);
		change = 1;
	}
	mutex_unlock(&ice->gpio_mutex);
	return change;
}

/*
 * write data in the SPI mode
 * (bit-banged over the VT1720 GPIO pins defined above)
 */
static void set_gpio_bit(struct snd_ice1712 *ice, unsigned int bit, int val)
{
	unsigned int tmp = snd_ice1712_gpio_read(ice);
	if (val)
		tmp |= bit;
	else
		tmp &= ~bit;
	snd_ice1712_gpio_write(ice, tmp);
}

/* clock out one byte, MSB first, data valid on the rising clock edge */
static void spi_send_byte(struct snd_ice1712 *ice, unsigned char data)
{
	int i;
	for (i = 0; i < 8; i++) {
		set_gpio_bit(ice, PONTIS_CS_CLK, 0);
		udelay(1);
		set_gpio_bit(ice, PONTIS_CS_WDATA, data & 0x80);
		udelay(1);
		set_gpio_bit(ice, PONTIS_CS_CLK, 1);
		udelay(1);
		data <<= 1;
	}
}

/* clock in one byte, MSB first, sampled while the clock is low */
static unsigned int spi_read_byte(struct snd_ice1712 *ice)
{
	int i;
	unsigned int val = 0;

	for (i = 0; i < 8; i++) {
		val <<= 1;
		set_gpio_bit(ice, PONTIS_CS_CLK, 0);
		udelay(1);
		if (snd_ice1712_gpio_read(ice) & PONTIS_CS_RDATA)
			val |= 1;
		udelay(1);
		set_gpio_bit(ice, PONTIS_CS_CLK, 1);
		udelay(1);
	}
	return val;
}

/* bit-bang a 3-byte register write (device, register map, data),
 * temporarily taking over the GPIO direction/mask and restoring the
 * cached settings afterwards */
static void spi_write(struct snd_ice1712 *ice, unsigned int dev, unsigned int reg, unsigned int data)
{
	snd_ice1712_gpio_set_dir(ice, PONTIS_CS_CS|PONTIS_CS_WDATA|PONTIS_CS_CLK);
	snd_ice1712_gpio_set_mask(ice, ~(PONTIS_CS_CS|PONTIS_CS_WDATA|PONTIS_CS_CLK));
	set_gpio_bit(ice, PONTIS_CS_CS, 0);
	spi_send_byte(ice, dev & ~1); /* WRITE */
	spi_send_byte(ice, reg); /* MAP */
	spi_send_byte(ice, data); /* DATA */
	/* trigger */
	set_gpio_bit(ice, PONTIS_CS_CS, 1);
	udelay(1);
	/* restore */
	snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask);
	snd_ice1712_gpio_set_dir(ice, ice->gpio.direction);
}

/* register read: write phase (set MAP) followed by a read phase */
static unsigned int spi_read(struct snd_ice1712 *ice, unsigned int dev, unsigned int reg)
{
	unsigned int val;
	snd_ice1712_gpio_set_dir(ice, PONTIS_CS_CS|PONTIS_CS_WDATA|PONTIS_CS_CLK);
	snd_ice1712_gpio_set_mask(ice, ~(PONTIS_CS_CS|PONTIS_CS_WDATA|PONTIS_CS_CLK));
	set_gpio_bit(ice, PONTIS_CS_CS, 0);
	spi_send_byte(ice, dev & ~1); /* WRITE */
	spi_send_byte(ice, reg); /* MAP */
	/* trigger */
	set_gpio_bit(ice, PONTIS_CS_CS, 1);
	udelay(1);
	set_gpio_bit(ice, PONTIS_CS_CS, 0);
	spi_send_byte(ice, dev | 1); /* READ */
	val = spi_read_byte(ice);
	/* trigger */
	set_gpio_bit(ice, PONTIS_CS_CS, 1);
	udelay(1);
	/* restore */
	snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask);
	snd_ice1712_gpio_set_dir(ice, ice->gpio.direction);
	return val;
}

/*
 * SPDIF input source
 * (selects the CS8416 RXP input; last selection cached in
 * ice->gpio.saved[0])
 */
static int cs_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[] = {
		"Coax",		/* RXP0 */
		"Optical",	/* RXP1 */
		"CD",		/* RXP2 */
	};
	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = 3;
	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
	return 0;
}

static int cs_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	mutex_lock(&ice->gpio_mutex);
	ucontrol->value.enumerated.item[0] = ice->gpio.saved[0];
	mutex_unlock(&ice->gpio_mutex);
	return 0;
}

static int cs_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned char val;
	int change = 0;

	mutex_lock(&ice->gpio_mutex);
	if (ucontrol->value.enumerated.item[0] != ice->gpio.saved[0]) {
		ice->gpio.saved[0] = ucontrol->value.enumerated.item[0] & 3;
		val = 0x80 | (ice->gpio.saved[0] << 3);
		spi_write(ice, CS_DEV, 0x04, val);
		change = 1;
	}
	mutex_unlock(&ice->gpio_mutex);
	return change;
}

/*
 * GPIO controls
 * (raw access to the VT1720 GPIO mask/direction/data; bits 4-7 are
 * reserved for the CS8416 SPI lines above)
 */
static int pontis_gpio_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 0xffff; /* 16bit */
	return 0;
}

static int pontis_gpio_mask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	mutex_lock(&ice->gpio_mutex);
	/* 4-7 reserved */
	ucontrol->value.integer.value[0] = (~ice->gpio.write_mask & 0xffff) | 0x00f0;
	mutex_unlock(&ice->gpio_mutex);
	return 0;
}

static int pontis_gpio_mask_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int changed;

	mutex_lock(&ice->gpio_mutex);
	/* 4-7 reserved */
	val = (~ucontrol->value.integer.value[0] & 0xffff) | 0x00f0;
	changed = val != ice->gpio.write_mask;
	ice->gpio.write_mask = val;
	mutex_unlock(&ice->gpio_mutex);
	return changed;
}

static int pontis_gpio_dir_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	mutex_lock(&ice->gpio_mutex);
	/* 4-7 reserved */
	ucontrol->value.integer.value[0] = ice->gpio.direction & 0xff0f;
	mutex_unlock(&ice->gpio_mutex);
	return 0;
}

static int pontis_gpio_dir_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int changed;

	mutex_lock(&ice->gpio_mutex);
	/* 4-7 reserved */
	val = ucontrol->value.integer.value[0] & 0xff0f;
	changed = (val != ice->gpio.direction);
	ice->gpio.direction = val;
	mutex_unlock(&ice->gpio_mutex);
	return changed;
}

static int pontis_gpio_data_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	mutex_lock(&ice->gpio_mutex);
	snd_ice1712_gpio_set_dir(ice, ice->gpio.direction);
	snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask);
	ucontrol->value.integer.value[0] = snd_ice1712_gpio_read(ice) & 0xffff;
	mutex_unlock(&ice->gpio_mutex);
	return 0;
}

static int pontis_gpio_data_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice =
snd_kcontrol_chip(kcontrol); unsigned int val, nval; int changed = 0; mutex_lock(&ice->gpio_mutex); snd_ice1712_gpio_set_dir(ice, ice->gpio.direction); snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask); val = snd_ice1712_gpio_read(ice) & 0xffff; nval = ucontrol->value.integer.value[0] & 0xffff; if (val != nval) { snd_ice1712_gpio_write(ice, nval); changed = 1; } mutex_unlock(&ice->gpio_mutex); return changed; } static const DECLARE_TLV_DB_SCALE(db_scale_volume, -6400, 50, 1); /* * mixers */ static struct snd_kcontrol_new pontis_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "PCM Playback Volume", .info = wm_dac_vol_info, .get = wm_dac_vol_get, .put = wm_dac_vol_put, .tlv = { .p = db_scale_volume }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Capture Volume", .info = wm_adc_vol_info, .get = wm_adc_vol_get, .put = wm_adc_vol_put, .tlv = { .p = db_scale_volume }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "CD Capture Switch", .info = wm_adc_mux_info, .get = wm_adc_mux_get, .put = wm_adc_mux_put, .private_value = 0, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line Capture Switch", .info = wm_adc_mux_info, .get = wm_adc_mux_get, .put = wm_adc_mux_put, .private_value = 1, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Bypass Switch", .info = wm_bypass_info, .get = wm_bypass_get, .put = wm_bypass_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Swap Output Channels", .info = wm_chswap_info, .get = wm_chswap_get, .put = wm_chswap_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "IEC958 Input Source", .info = cs_source_info, .get = cs_source_get, .put = cs_source_put, }, /* FIXME: which interface? 
*/ { .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = "GPIO Mask", .info = pontis_gpio_mask_info, .get = pontis_gpio_mask_get, .put = pontis_gpio_mask_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = "GPIO Direction", .info = pontis_gpio_mask_info, .get = pontis_gpio_dir_get, .put = pontis_gpio_dir_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = "GPIO Data", .info = pontis_gpio_mask_info, .get = pontis_gpio_data_get, .put = pontis_gpio_data_put, }, }; /* * WM codec registers */ static void wm_proc_regs_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ice1712 *ice = entry->private_data; char line[64]; unsigned int reg, val; mutex_lock(&ice->gpio_mutex); while (!snd_info_get_line(buffer, line, sizeof(line))) { if (sscanf(line, "%x %x", &reg, &val) != 2) continue; if (reg <= 0x17 && val <= 0xffff) wm_put(ice, reg, val); } mutex_unlock(&ice->gpio_mutex); } static void wm_proc_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ice1712 *ice = entry->private_data; int reg, val; mutex_lock(&ice->gpio_mutex); for (reg = 0; reg <= 0x17; reg++) { val = wm_get(ice, reg); snd_iprintf(buffer, "%02x = %04x\n", reg, val); } mutex_unlock(&ice->gpio_mutex); } static void wm_proc_init(struct snd_ice1712 *ice) { struct snd_info_entry *entry; if (! 
snd_card_proc_new(ice->card, "wm_codec", &entry)) { snd_info_set_text_ops(entry, ice, wm_proc_regs_read); entry->mode |= S_IWUSR; entry->c.text.write = wm_proc_regs_write; } } static void cs_proc_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ice1712 *ice = entry->private_data; int reg, val; mutex_lock(&ice->gpio_mutex); for (reg = 0; reg <= 0x26; reg++) { val = spi_read(ice, CS_DEV, reg); snd_iprintf(buffer, "%02x = %02x\n", reg, val); } val = spi_read(ice, CS_DEV, 0x7f); snd_iprintf(buffer, "%02x = %02x\n", 0x7f, val); mutex_unlock(&ice->gpio_mutex); } static void cs_proc_init(struct snd_ice1712 *ice) { struct snd_info_entry *entry; if (! snd_card_proc_new(ice->card, "cs_codec", &entry)) snd_info_set_text_ops(entry, ice, cs_proc_regs_read); } static int pontis_add_controls(struct snd_ice1712 *ice) { unsigned int i; int err; for (i = 0; i < ARRAY_SIZE(pontis_controls); i++) { err = snd_ctl_add(ice->card, snd_ctl_new1(&pontis_controls[i], ice)); if (err < 0) return err; } wm_proc_init(ice); cs_proc_init(ice); return 0; } /* * initialize the chip */ static int pontis_init(struct snd_ice1712 *ice) { static const unsigned short wm_inits[] = { /* These come first to reduce init pop noise */ WM_ADC_MUX, 0x00c0, /* ADC mute */ WM_DAC_MUTE, 0x0001, /* DAC softmute */ WM_DAC_CTRL1, 0x0000, /* DAC mute */ WM_POWERDOWN, 0x0008, /* All power-up except HP */ WM_RESET, 0x0000, /* reset */ }; static const unsigned short wm_inits2[] = { WM_MASTER_CTRL, 0x0022, /* 256fs, slave mode */ WM_DAC_INT, 0x0022, /* I2S, normal polarity, 24bit */ WM_ADC_INT, 0x0022, /* I2S, normal polarity, 24bit */ WM_DAC_CTRL1, 0x0090, /* DAC L/R */ WM_OUT_MUX, 0x0001, /* OUT DAC */ WM_HP_ATTEN_L, 0x0179, /* HP 0dB */ WM_HP_ATTEN_R, 0x0179, /* HP 0dB */ WM_DAC_ATTEN_L, 0x0000, /* DAC 0dB */ WM_DAC_ATTEN_L, 0x0100, /* DAC 0dB */ WM_DAC_ATTEN_R, 0x0000, /* DAC 0dB */ WM_DAC_ATTEN_R, 0x0100, /* DAC 0dB */ /* WM_DAC_MASTER, 0x0100, */ /* DAC master muted */ 
WM_PHASE_SWAP, 0x0000, /* phase normal */ WM_DAC_CTRL2, 0x0000, /* no deemphasis, no ZFLG */ WM_ADC_ATTEN_L, 0x0000, /* ADC muted */ WM_ADC_ATTEN_R, 0x0000, /* ADC muted */ #if 0 WM_ALC_CTRL1, 0x007b, /* */ WM_ALC_CTRL2, 0x0000, /* */ WM_ALC_CTRL3, 0x0000, /* */ WM_NOISE_GATE, 0x0000, /* */ #endif WM_DAC_MUTE, 0x0000, /* DAC unmute */ WM_ADC_MUX, 0x0003, /* ADC unmute, both CD/Line On */ }; static const unsigned char cs_inits[] = { 0x04, 0x80, /* RUN, RXP0 */ 0x05, 0x05, /* slave, 24bit */ 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, }; unsigned int i; ice->vt1720 = 1; ice->num_total_dacs = 2; ice->num_total_adcs = 2; /* to remember the register values */ ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL); if (! ice->akm) return -ENOMEM; ice->akm_codecs = 1; /* HACK - use this as the SPDIF source. * don't call snd_ice1712_gpio_get/put(), otherwise it's overwritten */ ice->gpio.saved[0] = 0; /* initialize WM8776 codec */ for (i = 0; i < ARRAY_SIZE(wm_inits); i += 2) wm_put(ice, wm_inits[i], wm_inits[i+1]); schedule_timeout_uninterruptible(1); for (i = 0; i < ARRAY_SIZE(wm_inits2); i += 2) wm_put(ice, wm_inits2[i], wm_inits2[i+1]); /* initialize CS8416 codec */ /* assert PRST#; MT05 bit 7 */ outb(inb(ICEMT1724(ice, AC97_CMD)) | 0x80, ICEMT1724(ice, AC97_CMD)); mdelay(5); /* deassert PRST# */ outb(inb(ICEMT1724(ice, AC97_CMD)) & ~0x80, ICEMT1724(ice, AC97_CMD)); for (i = 0; i < ARRAY_SIZE(cs_inits); i += 2) spi_write(ice, CS_DEV, cs_inits[i], cs_inits[i+1]); return 0; } /* * Pontis boards don't provide the EEPROM data at all. * hence the driver needs to sets up it properly. 
*/ static unsigned char pontis_eeprom[] = { [ICE_EEP2_SYSCONF] = 0x08, /* clock 256, mpu401, spdif-in/ADC, 1DAC */ [ICE_EEP2_ACLINK] = 0x80, /* I2S */ [ICE_EEP2_I2S] = 0xf8, /* vol, 96k, 24bit, 192k */ [ICE_EEP2_SPDIF] = 0xc3, /* out-en, out-int, spdif-in */ [ICE_EEP2_GPIO_DIR] = 0x07, [ICE_EEP2_GPIO_DIR1] = 0x00, [ICE_EEP2_GPIO_DIR2] = 0x00, /* ignored */ [ICE_EEP2_GPIO_MASK] = 0x0f, /* 4-7 reserved for CS8416 */ [ICE_EEP2_GPIO_MASK1] = 0xff, [ICE_EEP2_GPIO_MASK2] = 0x00, /* ignored */ [ICE_EEP2_GPIO_STATE] = 0x06, /* 0-low, 1-high, 2-high */ [ICE_EEP2_GPIO_STATE1] = 0x00, [ICE_EEP2_GPIO_STATE2] = 0x00, /* ignored */ }; /* entry point */ struct snd_ice1712_card_info snd_vt1720_pontis_cards[] = { { .subvendor = VT1720_SUBDEVICE_PONTIS_MS300, .name = "Pontis MS300", .model = "ms300", .chip_init = pontis_init, .build_controls = pontis_add_controls, .eeprom_size = sizeof(pontis_eeprom), .eeprom_data = pontis_eeprom, }, { } /* terminator */ };
gpl-2.0
pknithis/linux
drivers/ide/icside.c
4275
16635
/*
 * Copyright (c) 1996-2004 Russell King.
 *
 * Please note that this platform does not support 32-bit IDE IO.
 */
#include <linux/string.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/io.h>

#include <asm/dma.h>
#include <asm/ecard.h>

#define DRV_NAME "icside"

#define ICS_IDENT_OFFSET		0x2280

#define ICS_ARCIN_V5_INTRSTAT		0x0000
#define ICS_ARCIN_V5_INTROFFSET		0x0004
#define ICS_ARCIN_V5_IDEOFFSET		0x2800
#define ICS_ARCIN_V5_IDEALTOFFSET	0x2b80
#define ICS_ARCIN_V5_IDESTEPPING	6

#define ICS_ARCIN_V6_IDEOFFSET_1	0x2000
#define ICS_ARCIN_V6_INTROFFSET_1	0x2200
#define ICS_ARCIN_V6_INTRSTAT_1		0x2290
#define ICS_ARCIN_V6_IDEALTOFFSET_1	0x2380
#define ICS_ARCIN_V6_IDEOFFSET_2	0x3000
#define ICS_ARCIN_V6_INTROFFSET_2	0x3200
#define ICS_ARCIN_V6_INTRSTAT_2		0x3290
#define ICS_ARCIN_V6_IDEALTOFFSET_2	0x3380
#define ICS_ARCIN_V6_IDESTEPPING	6

/* per-PCB-revision register layout: taskfile base, control base, address step */
struct cardinfo {
	unsigned int dataoffset;
	unsigned int ctrloffset;
	unsigned int stepping;
};

static struct cardinfo icside_cardinfo_v5 = {
	.dataoffset	= ICS_ARCIN_V5_IDEOFFSET,
	.ctrloffset	= ICS_ARCIN_V5_IDEALTOFFSET,
	.stepping	= ICS_ARCIN_V5_IDESTEPPING,
};

static struct cardinfo icside_cardinfo_v6_1 = {
	.dataoffset	= ICS_ARCIN_V6_IDEOFFSET_1,
	.ctrloffset	= ICS_ARCIN_V6_IDEALTOFFSET_1,
	.stepping	= ICS_ARCIN_V6_IDESTEPPING,
};

static struct cardinfo icside_cardinfo_v6_2 = {
	.dataoffset	= ICS_ARCIN_V6_IDEOFFSET_2,
	.ctrloffset	= ICS_ARCIN_V6_IDEALTOFFSET_2,
	.stepping	= ICS_ARCIN_V6_IDESTEPPING,
};

/* per-card driver state, stored via ecard_set_drvdata() */
struct icside_state {
	unsigned int channel;
	unsigned int enabled;
	void __iomem *irq_port;
	void __iomem *ioc_base;
	unsigned int sel;
	unsigned int type;
	struct ide_host *host;
};

#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)

/* ---------------- Version 5 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

/* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	/* a read of the INTROFFSET register disables the IRQ on this PCB */
	readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

static const expansioncard_ops_t icside_ops_arcin_v5 = {
	.irqenable	= icside_irqenable_arcin_v5,
	.irqdisable	= icside_irqdisable_arcin_v5,
};

/* ---------------- Version 6 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;
	void __iomem *base = state->irq_port;

	state->enabled = 1;

	/* write enables one channel's IRQ, read disables the other's */
	switch (state->channel) {
	case 0:
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
		readb(base + ICS_ARCIN_V6_INTROFFSET_2);
		break;
	case 1:
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
		readb(base + ICS_ARCIN_V6_INTROFFSET_1);
		break;
	}
}

/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	state->enabled = 0;

	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}

/* Prototype: icside_irqprobe(struct expansion_card *ec)
 * Purpose  : detect an active interrupt from card
 */
static int icside_irqpending_arcin_v6(struct expansion_card *ec)
{
	struct icside_state *state = ec->irq_data;

	return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
	       readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
}

static const expansioncard_ops_t icside_ops_arcin_v6 = {
	.irqenable	= icside_irqenable_arcin_v6,
	.irqdisable	= icside_irqdisable_arcin_v6,
	.irqpending	= icside_irqpending_arcin_v6,
};

/*
 * Handle routing of interrupts.  This is called before
 * we write the command to the drive.
 */
static void icside_maskproc(ide_drive_t *drive, int mask)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned long flags;

	local_irq_save(flags);

	state->channel = hwif->channel;

	if (state->enabled && !mask) {
		switch (hwif->channel) {
		case 0:
			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
			break;
		case 1:
			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
			break;
		}
	} else {
		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	}

	local_irq_restore(flags);
}

static const struct ide_port_ops icside_v6_no_dma_port_ops = {
	.maskproc	= icside_maskproc,
};

#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
 * SG-DMA support.
 *
 * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers.
 * There is only one DMA controller per card, which means that only
 * one drive can be accessed at one time.  NOTE! We do not enforce that
 * here, but we rely on the main IDE driver spotting that both
 * interfaces use the same IRQ, which should guarantee this.
 */

/*
 * Configure the IOMD to give the appropriate timings for the transfer
 * mode being requested.  We take the advice of the ATA standards, and
 * calculate the cycle time based on the transfer mode, and the EIDE
 * MW DMA specs that the drive provides in the IDENTIFY command.
 *
 * We have the following IOMD DMA modes to choose from:
 *
 *	Type	Active		Recovery	Cycle
 *	A	250 (250)	312 (550)	562 (800)
 *	B	187		250		437
 *	C	125 (125)	125 (375)	250 (500)
 *	D	62		125		187
 *
 * (figures in brackets are actual measured timings)
 *
 * However, we also need to take care of the read/write active and
 * recovery timings:
 *
 *			Read	Write
 *	Mode	Active	-- Recovery --	Cycle	IOMD type
 *	MW0	215	50	215	480	A
 *	MW1	80	50	50	150	C
 *	MW2	70	25	25	120	C
 */
static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	unsigned long cycle_time = 0;
	int use_dma_info = 0;
	const u8 xfer_mode = drive->dma_mode;

	switch (xfer_mode) {
	case XFER_MW_DMA_2:
		cycle_time = 250;
		use_dma_info = 1;
		break;

	case XFER_MW_DMA_1:
		cycle_time = 250;
		use_dma_info = 1;
		break;

	case XFER_MW_DMA_0:
		cycle_time = 480;
		break;

	case XFER_SW_DMA_2:
	case XFER_SW_DMA_1:
	case XFER_SW_DMA_0:
		cycle_time = 480;
		break;
	}

	/*
	 * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
	 * take care to note the values in the ID...
	 */
	if (use_dma_info && drive->id[ATA_ID_EIDE_DMA_TIME] > cycle_time)
		cycle_time = drive->id[ATA_ID_EIDE_DMA_TIME];

	ide_set_drivedata(drive, (void *)cycle_time);

	printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
	       drive->name, ide_xfer_verbose(xfer_mode),
	       2000 / (cycle_time ? cycle_time : (unsigned long) -1));
}

static const struct ide_port_ops icside_v6_port_ops = {
	.set_dma_mode	= icside_set_dma_mode,
	.maskproc	= icside_maskproc,
};

static void icside_dma_host_set(ide_drive_t *drive, int on)
{
}

static int icside_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);

	disable_dma(ec->dma);

	return get_dma_residue(ec->dma) != 0;
}

static void icside_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);

	/* We can not enable DMA on both channels simultaneously. */
	BUG_ON(dma_channel_active(ec->dma));
	enable_dma(ec->dma);
}

static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned int dma_mode;

	if (cmd->tf_flags & IDE_TFLAG_WRITE)
		dma_mode = DMA_MODE_WRITE;
	else
		dma_mode = DMA_MODE_READ;

	/*
	 * We can not enable DMA on both channels.
	 */
	BUG_ON(dma_channel_active(ec->dma));

	/*
	 * Ensure that we have the right interrupt routed.
	 */
	icside_maskproc(drive, 0);

	/*
	 * Route the DMA signals to the correct interface.
	 */
	writeb(state->sel | hwif->channel, state->ioc_base);

	/*
	 * Select the correct timing for this drive.
	 */
	set_dma_speed(ec->dma, (unsigned long)ide_get_drivedata(drive));

	/*
	 * Tell the DMA engine about the SG table and
	 * data direction.
	 */
	set_dma_sg(ec->dma, hwif->sg_table, cmd->sg_nents);
	set_dma_mode(ec->dma, dma_mode);

	return 0;
}

static int icside_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct icside_state *state = ecard_get_drvdata(ec);

	return readb(state->irq_port +
		     (hwif->channel ?
			ICS_ARCIN_V6_INTRSTAT_2 :
			ICS_ARCIN_V6_INTRSTAT_1)) & 1;
}

static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	/* no BM-DMA descriptor table: the IOMD SG-DMA engine is used instead */
	hwif->dmatable_cpu = NULL;
	hwif->dmatable_dma = 0;

	return 0;
}

static const struct ide_dma_ops icside_v6_dma_ops = {
	.dma_host_set		= icside_dma_host_set,
	.dma_setup		= icside_dma_setup,
	.dma_start		= icside_dma_start,
	.dma_end		= icside_dma_end,
	.dma_test_irq		= icside_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
};
#endif

static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	return -EOPNOTSUPP;
}

/* fill an ide_hw from the PCB's cardinfo: registers are (n << stepping) apart */
static void icside_setup_ports(struct ide_hw *hw, void __iomem *base,
			       struct cardinfo *info, struct expansion_card *ec)
{
	unsigned long port = (unsigned long)base + info->dataoffset;

	hw->io_ports.data_addr	 = port;
	hw->io_ports.error_addr	 = port + (1 << info->stepping);
	hw->io_ports.nsect_addr	 = port + (2 << info->stepping);
	hw->io_ports.lbal_addr	 = port + (3 << info->stepping);
	hw->io_ports.lbam_addr	 = port + (4 << info->stepping);
	hw->io_ports.lbah_addr	 = port + (5 << info->stepping);
	hw->io_ports.device_addr = port + (6 << info->stepping);
	hw->io_ports.status_addr = port + (7 << info->stepping);
	hw->io_ports.ctl_addr	 = (unsigned long)base + info->ctrloffset;

	hw->irq = ec->irq;
	hw->dev = &ec->dev;
}

static const struct ide_port_info icside_v5_port_info = {
	.host_flags		= IDE_HFLAG_NO_DMA,
	.chipset		= ide_acorn,
};

static int icside_register_v5(struct icside_state *state,
			      struct expansion_card *ec)
{
	void __iomem *base;
	struct ide_host *host;
	struct ide_hw hw, *hws[] = { &hw };
	int ret;

	base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
	if (!base)
		return -ENOMEM;

	state->irq_port = base;

	ec->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
	ec->irqmask = 1;

	ecard_setirq(ec, &icside_ops_arcin_v5, state);

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v5(ec, 0);

	icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);

	host = ide_host_alloc(&icside_v5_port_info, hws, 1);
	if (host == NULL)
		return -ENODEV;

	state->host = host;

	ecard_set_drvdata(ec, state);

	ret = ide_host_register(host, &icside_v5_port_info, hws);
	if (ret)
		goto err_free;

	return 0;
err_free:
	ide_host_free(host);
	ecard_set_drvdata(ec, NULL);
	return ret;
}

static const struct ide_port_info icside_v6_port_info __initconst = {
	.init_dma		= icside_dma_off_init,
	.port_ops		= &icside_v6_no_dma_port_ops,
	.host_flags		= IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
	.mwdma_mask		= ATA_MWDMA2,
	.swdma_mask		= ATA_SWDMA2,
	.chipset		= ide_acorn,
};

static int icside_register_v6(struct icside_state *state,
			      struct expansion_card *ec)
{
	void __iomem *ioc_base, *easi_base;
	struct ide_host *host;
	unsigned int sel = 0;
	int ret;
	struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] };
	struct ide_port_info d = icside_v6_port_info;

	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (!ioc_base) {
		ret = -ENOMEM;
		goto out;
	}

	easi_base = ioc_base;

	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
		easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
		if (!easi_base) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * Enable access to the EASI region.
		 */
		sel = 1 << 5;
	}

	writeb(sel, ioc_base);

	ecard_setirq(ec, &icside_ops_arcin_v6, state);

	state->irq_port	= easi_base;
	state->ioc_base	= ioc_base;
	state->sel	= sel;

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v6(ec, 0);

	icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
	icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);

	host = ide_host_alloc(&d, hws, 2);
	if (host == NULL)
		return -ENODEV;

	state->host = host;

	ecard_set_drvdata(ec, state);

#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
	if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
		d.init_dma = icside_dma_init;
		d.port_ops = &icside_v6_port_ops;
		d.dma_ops  = &icside_v6_dma_ops;
	}
#endif

	ret = ide_host_register(host, &d, hws);
	if (ret)
		goto err_free;

	return 0;
err_free:
	ide_host_free(host);
	if (d.dma_ops)
		free_dma(ec->dma);
	ecard_set_drvdata(ec, NULL);
out:
	return ret;
}

static int icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct icside_state *state;
	void __iomem *idmem;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
	if (!state) {
		ret = -ENOMEM;
		goto release;
	}

	state->type = ICS_TYPE_NOTYPE;

	/* PCB revision is encoded in 4 one-bit ID locations */
	idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (idmem) {
		unsigned int type;

		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
		ecardm_iounmap(ec, idmem);

		state->type = type;
	}

	switch (state->type) {
	case ICS_TYPE_A3IN:
		dev_warn(&ec->dev, "A3IN unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_A3USER:
		dev_warn(&ec->dev, "A3USER unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_V5:
		ret = icside_register_v5(state, ec);
		break;

	case ICS_TYPE_V6:
		ret = icside_register_v6(state, ec);
		break;

	default:
		dev_warn(&ec->dev, "unknown interface type\n");
		ret = -ENODEV;
		break;
	}

	if (ret == 0)
		goto out;

	kfree(state);
 release:
	ecard_release_resources(ec);
 out:
	return ret;
}

static void icside_remove(struct expansion_card *ec)
{
	struct icside_state *state = ecard_get_drvdata(ec);

	switch (state->type) {
	case ICS_TYPE_V5:
		/* FIXME: tell IDE to stop using the interface */

		/* Disable interrupts */
		icside_irqdisable_arcin_v5(ec, 0);
		break;

	case ICS_TYPE_V6:
		/* FIXME: tell IDE to stop using the interface */
		if (ec->dma != NO_DMA)
			free_dma(ec->dma);

		/* Disable interrupts */
		icside_irqdisable_arcin_v6(ec, 0);

		/* Reset the ROM pointer/EASI selection */
		writeb(0, state->ioc_base);
		break;
	}

	ecard_set_drvdata(ec, NULL);

	kfree(state);
	ecard_release_resources(ec);
}

static void icside_shutdown(struct expansion_card *ec)
{
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned long flags;

	/*
	 * Disable interrupts from this card.  We need to do
	 * this before disabling EASI since we may be accessing
	 * this register via that region.
	 */
	local_irq_save(flags);
	ec->ops->irqdisable(ec, 0);
	local_irq_restore(flags);

	/*
	 * Reset the ROM pointer so that we can read the ROM
	 * after a soft reboot.  This also disables access to
	 * the IDE taskfile via the EASI region.
	 */
	if (state->ioc_base)
		writeb(0, state->ioc_base);
}

static const struct ecard_id icside_ids[] = {
	{ MANU_ICS,  PROD_ICS_IDE  },
	{ MANU_ICS2, PROD_ICS2_IDE },
	{ 0xffff, 0xffff }
};

static struct ecard_driver icside_driver = {
	.probe		= icside_probe,
	.remove		= icside_remove,
	.shutdown	= icside_shutdown,
	.id_table	= icside_ids,
	.drv = {
		.name	= "icside",
	},
};

static int __init icside_init(void)
{
	return ecard_register_driver(&icside_driver);
}

static void __exit icside_exit(void)
{
	ecard_remove_driver(&icside_driver);
}

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS IDE driver");

module_init(icside_init);
module_exit(icside_exit);
gpl-2.0
deedwar/evolve
drivers/gpu/drm/exynos/exynos_drm_plane.c
4787
4240
/*
 * Copyright (C) 2011 Samsung Electronics Co.Ltd
 * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include "drmP.h"

#include "exynos_drm.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"

/* wrapper around drm_plane carrying the exynos overlay description */
struct exynos_plane {
	struct drm_plane base;
	struct exynos_drm_overlay overlay;
	bool enabled;
};

/* pixel formats this plane accepts */
static const uint32_t formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV12M,
	DRM_FORMAT_NV12MT,
};

/*
 * drm_plane_funcs.update_plane callback: program the overlay from the fb
 * and crtc/src coordinates and commit it through the encoder ops.
 * src_* are 16.16 fixed point; only the integer part is used (no scaling).
 */
static int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct drm_framebuffer *fb, int crtc_x, int crtc_y,
			       unsigned int crtc_w, unsigned int crtc_h,
			       uint32_t src_x, uint32_t src_y,
			       uint32_t src_w, uint32_t src_h)
{
	struct exynos_plane *exynos_plane =
		container_of(plane, struct exynos_plane, base);
	struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
	struct exynos_drm_crtc_pos pos;
	unsigned int x = src_x >> 16;
	unsigned int y = src_y >> 16;
	int ret;

	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);

	memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
	pos.crtc_x = crtc_x;
	pos.crtc_y = crtc_y;
	pos.crtc_w = crtc_w;
	pos.crtc_h = crtc_h;

	pos.fb_x = x;
	pos.fb_y = y;

	/* TODO: scale feature */
	ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
	if (ret < 0)
		return ret;

	exynos_drm_fn_encoder(crtc, overlay,
			exynos_drm_encoder_crtc_mode_set);
	exynos_drm_fn_encoder(crtc, &overlay->zpos,
			exynos_drm_encoder_crtc_plane_commit);

	exynos_plane->enabled = true;

	return 0;
}

/*
 * drm_plane_funcs.disable_plane callback: turn the overlay off and
 * reset its zpos.  A no-op when the plane was never enabled.
 */
static int exynos_disable_plane(struct drm_plane *plane)
{
	struct exynos_plane *exynos_plane =
		container_of(plane, struct exynos_plane, base);
	struct exynos_drm_overlay *overlay = &exynos_plane->overlay;

	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);

	if (!exynos_plane->enabled)
		return 0;

	exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
			exynos_drm_encoder_crtc_disable);

	exynos_plane->enabled = false;
	exynos_plane->overlay.zpos = DEFAULT_ZPOS;

	return 0;
}

/* drm_plane_funcs.destroy callback: disable, unregister and free */
static void exynos_plane_destroy(struct drm_plane *plane)
{
	struct exynos_plane *exynos_plane =
		container_of(plane, struct exynos_plane, base);

	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);

	exynos_disable_plane(plane);
	drm_plane_cleanup(plane);
	kfree(exynos_plane);
}

static struct drm_plane_funcs exynos_plane_funcs = {
	.update_plane	= exynos_update_plane,
	.disable_plane	= exynos_disable_plane,
	.destroy	= exynos_plane_destroy,
};

/*
 * Allocate and register one overlay plane, usable on every CRTC.
 * Returns 0 on success or a negative errno.
 */
int exynos_plane_init(struct drm_device *dev, unsigned int nr)
{
	struct exynos_plane *exynos_plane;
	uint32_t possible_crtcs;
	int ret;

	exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
	if (!exynos_plane)
		return -ENOMEM;

	/* all CRTCs are available */
	possible_crtcs = (1 << MAX_CRTC) - 1;

	exynos_plane->overlay.zpos = DEFAULT_ZPOS;

	ret = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
			     &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
			     false);
	/*
	 * Fix: the original returned drm_plane_init()'s result directly,
	 * leaking exynos_plane when registration failed.
	 */
	if (ret)
		kfree(exynos_plane);

	return ret;
}

/*
 * DRM_EXYNOS_PLANE_SET_ZPOS ioctl: set the stacking position of a plane.
 * DEFAULT_ZPOS is accepted even though it lies outside [0, MAX_PLANE).
 */
int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_plane_set_zpos *zpos_req = data;
	struct drm_mode_object *obj;
	struct drm_plane *plane;
	struct exynos_plane *exynos_plane;
	int ret = 0;

	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) {
		if (zpos_req->zpos != DEFAULT_ZPOS) {
			DRM_ERROR("zpos not within limits\n");
			return -EINVAL;
		}
	}

	mutex_lock(&dev->mode_config.mutex);

	obj = drm_mode_object_find(dev, zpos_req->plane_id,
			DRM_MODE_OBJECT_PLANE);
	if (!obj) {
		DRM_DEBUG_KMS("Unknown plane ID %d\n",
			      zpos_req->plane_id);
		ret = -EINVAL;
		goto out;
	}

	plane = obj_to_plane(obj);
	exynos_plane = container_of(plane, struct exynos_plane, base);

	exynos_plane->overlay.zpos = zpos_req->zpos;

out:
	mutex_unlock(&dev->mode_config.mutex);
	return ret;
}
gpl-2.0
wzhy90/sony_msm8x60
arch/arm/mach-prima2/timer.c
4787
7066
/*
 * System timer for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <mach/map.h>
#include <asm/sched_clock.h>
#include <asm/mach/time.h>

#define SIRFSOC_TIMER_COUNTER_LO	0x0000
#define SIRFSOC_TIMER_COUNTER_HI	0x0004
#define SIRFSOC_TIMER_MATCH_0		0x0008
#define SIRFSOC_TIMER_MATCH_1		0x000C
#define SIRFSOC_TIMER_MATCH_2		0x0010
#define SIRFSOC_TIMER_MATCH_3		0x0014
#define SIRFSOC_TIMER_MATCH_4		0x0018
#define SIRFSOC_TIMER_MATCH_5		0x001C
#define SIRFSOC_TIMER_STATUS		0x0020
#define SIRFSOC_TIMER_INT_EN		0x0024
#define SIRFSOC_TIMER_WATCHDOG_EN	0x0028
#define SIRFSOC_TIMER_DIV		0x002C
#define SIRFSOC_TIMER_LATCH		0x0030
#define SIRFSOC_TIMER_LATCHED_LO	0x0034
#define SIRFSOC_TIMER_LATCHED_HI	0x0038

#define SIRFSOC_TIMER_WDT_INDEX		5

#define SIRFSOC_TIMER_LATCH_BIT		BIT(0)

#define SIRFSOC_TIMER_REG_CNT		11

/* registers saved/restored across suspend; LATCHED_LO/HI must stay last
 * (sirfsoc_clocksource_resume() indexes them as [CNT-2]/[CNT-1]) */
static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = {
	SIRFSOC_TIMER_MATCH_0, SIRFSOC_TIMER_MATCH_1, SIRFSOC_TIMER_MATCH_2,
	SIRFSOC_TIMER_MATCH_3, SIRFSOC_TIMER_MATCH_4, SIRFSOC_TIMER_MATCH_5,
	SIRFSOC_TIMER_INT_EN, SIRFSOC_TIMER_WATCHDOG_EN, SIRFSOC_TIMER_DIV,
	SIRFSOC_TIMER_LATCHED_LO, SIRFSOC_TIMER_LATCHED_HI,
};

static u32 sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT];

static void __iomem *sirfsoc_timer_base;
static void __init sirfsoc_of_timer_map(void);

/* timer0 interrupt handler */
static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *ce = dev_id;

	WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) &
		BIT(0)));

	/* clear timer0 interrupt */
	writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);

	ce->event_handler(ce);

	return IRQ_HANDLED;
}

/* read 64-bit timer counter */
static cycle_t sirfsoc_timer_read(struct clocksource *cs)
{
	u64 cycles;

	/* latch the 64-bit timer counter */
	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
	cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_HI);
	cycles = (cycles << 32) | readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);

	return cycles;
}

/* program MATCH_0; re-read the counter afterwards to detect a missed event */
static int sirfsoc_timer_set_next_event(unsigned long delta, struct clock_event_device *ce)
{
	unsigned long now, next;

	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
	now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
	next = now + delta;
	writel_relaxed(next, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0);
	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
	now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);

	return next - now > delta ? -ETIME : 0;
}

/* only oneshot is supported: the INT_EN bit gates the timer0 interrupt */
static void sirfsoc_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *ce)
{
	u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		writel_relaxed(val | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
		writel_relaxed(val & ~BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static void sirfsoc_clocksource_suspend(struct clocksource *cs)
{
	int i;

	/* latch first so LATCHED_LO/HI capture the current counter value */
	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);

	for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
		sirfsoc_timer_reg_val[i] = readl_relaxed(sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
}

static void sirfsoc_clocksource_resume(struct clocksource *cs)
{
	int i;

	for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
		writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);

	/* after the loop i == REG_CNT, so [i-2]/[i-1] are the latched LO/HI
	 * values saved at suspend; use them to restore the running counter */
	writel_relaxed(sirfsoc_timer_reg_val[i - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
	writel_relaxed(sirfsoc_timer_reg_val[i - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
}

static struct clock_event_device sirfsoc_clockevent = {
	.name = "sirfsoc_clockevent",
	.rating = 200,
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.set_mode = sirfsoc_timer_set_mode,
	.set_next_event = sirfsoc_timer_set_next_event,
};

static struct clocksource sirfsoc_clocksource = {
	.name = "sirfsoc_clocksource",
	.rating = 200,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.read = sirfsoc_timer_read,
	.suspend = sirfsoc_clocksource_suspend,
	.resume = sirfsoc_clocksource_resume,
};

static struct irqaction sirfsoc_timer_irq = {
	.name = "sirfsoc_timer0",
	.flags = IRQF_TIMER,
	.irq = 0,
	.handler = sirfsoc_timer_interrupt,
	.dev_id = &sirfsoc_clockevent,
};

/* Overwrite weak default sched_clock with more precise one */
static u32 notrace sirfsoc_read_sched_clock(void)
{
	return (u32)(sirfsoc_timer_read(NULL) & 0xffffffff);
}

static void __init sirfsoc_clockevent_init(void)
{
	clockevents_calc_mult_shift(&sirfsoc_clockevent, CLOCK_TICK_RATE, 60);

	sirfsoc_clockevent.max_delta_ns =
		clockevent_delta2ns(-2, &sirfsoc_clockevent);
	sirfsoc_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &sirfsoc_clockevent);

	sirfsoc_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&sirfsoc_clockevent);
}

/* initialize the kernel jiffy timer source */
/* NOTE(review): this function is truncated at the end of the visible chunk */
static void __init sirfsoc_timer_init(void)
{
	unsigned long rate;

	/* timer's input clock is io clock */
	struct clk *clk = clk_get_sys("io", NULL);

	BUG_ON(IS_ERR(clk));

	rate = clk_get_rate(clk);

	BUG_ON(rate < CLOCK_TICK_RATE);
	BUG_ON(rate % CLOCK_TICK_RATE);

	sirfsoc_of_timer_map();

	writel_relaxed(rate / CLOCK_TICK_RATE / 2 - 1, sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE)); setup_sched_clock(sirfsoc_read_sched_clock, 32, CLOCK_TICK_RATE); BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq)); sirfsoc_clockevent_init(); } static struct of_device_id timer_ids[] = { { .compatible = "sirf,prima2-tick" }, {}, }; static void __init sirfsoc_of_timer_map(void) { struct device_node *np; const unsigned int *intspec; np = of_find_matching_node(NULL, timer_ids); if (!np) panic("unable to find compatible timer node in dtb\n"); sirfsoc_timer_base = of_iomap(np, 0); if (!sirfsoc_timer_base) panic("unable to map timer cpu registers\n"); /* Get the interrupts property */ intspec = of_get_property(np, "interrupts", NULL); BUG_ON(!intspec); sirfsoc_timer_irq.irq = be32_to_cpup(intspec); of_node_put(np); } struct sys_timer sirfsoc_timer = { .init = sirfsoc_timer_init, };
gpl-2.0
Talustus/android_kernel_samsung_galaxys4
drivers/input/touchscreen/cy8ctmg110_ts.c
4787
8480
/* * Driver for cypress touch screen controller * * Copyright (c) 2009 Aava Mobile * * Some cleanups by Alan Cox <alan@linux.intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/input/cy8ctmg110_pdata.h> #define CY8CTMG110_DRIVER_NAME "cy8ctmg110" /* Touch coordinates */ #define CY8CTMG110_X_MIN 0 #define CY8CTMG110_Y_MIN 0 #define CY8CTMG110_X_MAX 759 #define CY8CTMG110_Y_MAX 465 /* cy8ctmg110 register definitions */ #define CY8CTMG110_TOUCH_WAKEUP_TIME 0 #define CY8CTMG110_TOUCH_SLEEP_TIME 2 #define CY8CTMG110_TOUCH_X1 3 #define CY8CTMG110_TOUCH_Y1 5 #define CY8CTMG110_TOUCH_X2 7 #define CY8CTMG110_TOUCH_Y2 9 #define CY8CTMG110_FINGERS 11 #define CY8CTMG110_GESTURE 12 #define CY8CTMG110_REG_MAX 13 /* * The touch driver structure. */ struct cy8ctmg110 { struct input_dev *input; char phys[32]; struct i2c_client *client; int reset_pin; int irq_pin; }; /* * cy8ctmg110_power is the routine that is called when touch hardware * will powered off or on. 
*/ static void cy8ctmg110_power(struct cy8ctmg110 *ts, bool poweron) { if (ts->reset_pin) gpio_direction_output(ts->reset_pin, 1 - poweron); } static int cy8ctmg110_write_regs(struct cy8ctmg110 *tsc, unsigned char reg, unsigned char len, unsigned char *value) { struct i2c_client *client = tsc->client; int ret; unsigned char i2c_data[6]; BUG_ON(len > 5); i2c_data[0] = reg; memcpy(i2c_data + 1, value, len); ret = i2c_master_send(client, i2c_data, len + 1); if (ret != len + 1) { dev_err(&client->dev, "i2c write data cmd failed\n"); return ret < 0 ? ret : -EIO; } return 0; } static int cy8ctmg110_read_regs(struct cy8ctmg110 *tsc, unsigned char *data, unsigned char len, unsigned char cmd) { struct i2c_client *client = tsc->client; int ret; struct i2c_msg msg[2] = { /* first write slave position to i2c devices */ { client->addr, 0, 1, &cmd }, /* Second read data from position */ { client->addr, I2C_M_RD, len, data } }; ret = i2c_transfer(client->adapter, msg, 2); if (ret < 0) return ret; return 0; } static int cy8ctmg110_touch_pos(struct cy8ctmg110 *tsc) { struct input_dev *input = tsc->input; unsigned char reg_p[CY8CTMG110_REG_MAX]; int x, y; memset(reg_p, 0, CY8CTMG110_REG_MAX); /* Reading coordinates */ if (cy8ctmg110_read_regs(tsc, reg_p, 9, CY8CTMG110_TOUCH_X1) != 0) return -EIO; y = reg_p[2] << 8 | reg_p[3]; x = reg_p[0] << 8 | reg_p[1]; /* Number of touch */ if (reg_p[8] == 0) { input_report_key(input, BTN_TOUCH, 0); } else { input_report_key(input, BTN_TOUCH, 1); input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); } input_sync(input); return 0; } static int cy8ctmg110_set_sleepmode(struct cy8ctmg110 *ts, bool sleep) { unsigned char reg_p[3]; if (sleep) { reg_p[0] = 0x00; reg_p[1] = 0xff; reg_p[2] = 5; } else { reg_p[0] = 0x10; reg_p[1] = 0xff; reg_p[2] = 0; } return cy8ctmg110_write_regs(ts, CY8CTMG110_TOUCH_WAKEUP_TIME, 3, reg_p); } static irqreturn_t cy8ctmg110_irq_thread(int irq, void *dev_id) { struct cy8ctmg110 *tsc = dev_id; 
cy8ctmg110_touch_pos(tsc); return IRQ_HANDLED; } static int __devinit cy8ctmg110_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct cy8ctmg110_pdata *pdata = client->dev.platform_data; struct cy8ctmg110 *ts; struct input_dev *input_dev; int err; /* No pdata no way forward */ if (pdata == NULL) { dev_err(&client->dev, "no pdata\n"); return -ENODEV; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) return -EIO; ts = kzalloc(sizeof(struct cy8ctmg110), GFP_KERNEL); input_dev = input_allocate_device(); if (!ts || !input_dev) { err = -ENOMEM; goto err_free_mem; } ts->client = client; ts->input = input_dev; ts->reset_pin = pdata->reset_pin; ts->irq_pin = pdata->irq_pin; snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&client->dev)); input_dev->name = CY8CTMG110_DRIVER_NAME " Touchscreen"; input_dev->phys = ts->phys; input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, CY8CTMG110_X_MIN, CY8CTMG110_X_MAX, 4, 0); input_set_abs_params(input_dev, ABS_Y, CY8CTMG110_Y_MIN, CY8CTMG110_Y_MAX, 4, 0); if (ts->reset_pin) { err = gpio_request(ts->reset_pin, NULL); if (err) { dev_err(&client->dev, "Unable to request GPIO pin %d.\n", ts->reset_pin); goto err_free_mem; } } cy8ctmg110_power(ts, true); cy8ctmg110_set_sleepmode(ts, false); err = gpio_request(ts->irq_pin, "touch_irq_key"); if (err < 0) { dev_err(&client->dev, "Failed to request GPIO %d, error %d\n", ts->irq_pin, err); goto err_shutoff_device; } err = gpio_direction_input(ts->irq_pin); if (err < 0) { dev_err(&client->dev, "Failed to configure input direction for GPIO %d, error %d\n", ts->irq_pin, err); goto err_free_irq_gpio; } client->irq = gpio_to_irq(ts->irq_pin); if (client->irq < 0) { err = client->irq; dev_err(&client->dev, "Unable to get irq number for GPIO %d, error %d\n", 
ts->irq_pin, err); goto err_free_irq_gpio; } err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread, IRQF_TRIGGER_RISING, "touch_reset_key", ts); if (err < 0) { dev_err(&client->dev, "irq %d busy? error %d\n", client->irq, err); goto err_free_irq_gpio; } err = input_register_device(input_dev); if (err) goto err_free_irq; i2c_set_clientdata(client, ts); device_init_wakeup(&client->dev, 1); return 0; err_free_irq: free_irq(client->irq, ts); err_free_irq_gpio: gpio_free(ts->irq_pin); err_shutoff_device: cy8ctmg110_set_sleepmode(ts, true); cy8ctmg110_power(ts, false); if (ts->reset_pin) gpio_free(ts->reset_pin); err_free_mem: input_free_device(input_dev); kfree(ts); return err; } #ifdef CONFIG_PM static int cy8ctmg110_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct cy8ctmg110 *ts = i2c_get_clientdata(client); if (device_may_wakeup(&client->dev)) enable_irq_wake(client->irq); else { cy8ctmg110_set_sleepmode(ts, true); cy8ctmg110_power(ts, false); } return 0; } static int cy8ctmg110_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct cy8ctmg110 *ts = i2c_get_clientdata(client); if (device_may_wakeup(&client->dev)) disable_irq_wake(client->irq); else { cy8ctmg110_power(ts, true); cy8ctmg110_set_sleepmode(ts, false); } return 0; } static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume); #endif static int __devexit cy8ctmg110_remove(struct i2c_client *client) { struct cy8ctmg110 *ts = i2c_get_clientdata(client); cy8ctmg110_set_sleepmode(ts, true); cy8ctmg110_power(ts, false); free_irq(client->irq, ts); input_unregister_device(ts->input); gpio_free(ts->irq_pin); if (ts->reset_pin) gpio_free(ts->reset_pin); kfree(ts); return 0; } static const struct i2c_device_id cy8ctmg110_idtable[] = { { CY8CTMG110_DRIVER_NAME, 1 }, { } }; MODULE_DEVICE_TABLE(i2c, cy8ctmg110_idtable); static struct i2c_driver cy8ctmg110_driver = { .driver = { .owner = THIS_MODULE, .name = 
CY8CTMG110_DRIVER_NAME, #ifdef CONFIG_PM .pm = &cy8ctmg110_pm, #endif }, .id_table = cy8ctmg110_idtable, .probe = cy8ctmg110_probe, .remove = __devexit_p(cy8ctmg110_remove), }; module_i2c_driver(cy8ctmg110_driver); MODULE_AUTHOR("Samuli Konttila <samuli.konttila@aavamobile.com>"); MODULE_DESCRIPTION("cy8ctmg110 TouchScreen Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
kennysgithub/sm-p607t-kernel
arch/arm/mach-nomadik/cpu-8815.c
4787
4495
/* * Copyright STMicroelectronics, 2007. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/device.h> #include <linux/amba/bus.h> #include <linux/platform_device.h> #include <linux/io.h> #include <plat/gpio-nomadik.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <asm/mach/map.h> #include <asm/hardware/vic.h> #include <asm/cacheflush.h> #include <asm/hardware/cache-l2x0.h> #include "clock.h" #include "cpu-8815.h" #define __MEM_4K_RESOURCE(x) \ .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM} /* The 8815 has 4 GPIO blocks, let's register them immediately */ #define GPIO_RESOURCE(block) \ { \ .start = NOMADIK_GPIO##block##_BASE, \ .end = NOMADIK_GPIO##block##_BASE + SZ_4K - 1, \ .flags = IORESOURCE_MEM, \ }, \ { \ .start = IRQ_GPIO##block, \ .end = IRQ_GPIO##block, \ .flags = IORESOURCE_IRQ, \ } #define GPIO_DEVICE(block) \ { \ .name = "gpio", \ .id = block, \ .num_resources = 2, \ .resource = &cpu8815_gpio_resources[block * 2], \ .dev = { \ .platform_data = &cpu8815_gpio[block], \ }, \ } static struct nmk_gpio_platform_data cpu8815_gpio[] = { { .name = "GPIO-0-31", .first_gpio = 0, .first_irq = NOMADIK_GPIO_TO_IRQ(0), }, { .name = "GPIO-32-63", .first_gpio = 32, .first_irq = NOMADIK_GPIO_TO_IRQ(32), }, { .name = "GPIO-64-95", 
.first_gpio = 64, .first_irq = NOMADIK_GPIO_TO_IRQ(64), }, { .name = "GPIO-96-127", /* 124..127 not routed to pin */ .first_gpio = 96, .first_irq = NOMADIK_GPIO_TO_IRQ(96), } }; static struct resource cpu8815_gpio_resources[] = { GPIO_RESOURCE(0), GPIO_RESOURCE(1), GPIO_RESOURCE(2), GPIO_RESOURCE(3), }; static struct platform_device cpu8815_platform_gpio[] = { GPIO_DEVICE(0), GPIO_DEVICE(1), GPIO_DEVICE(2), GPIO_DEVICE(3), }; static AMBA_APB_DEVICE(cpu8815_amba_rng, "rng", 0, NOMADIK_RNG_BASE, { }, NULL); static struct platform_device *platform_devs[] __initdata = { cpu8815_platform_gpio + 0, cpu8815_platform_gpio + 1, cpu8815_platform_gpio + 2, cpu8815_platform_gpio + 3, }; static struct amba_device *amba_devs[] __initdata = { &cpu8815_amba_rng_device }; static int __init cpu8815_init(void) { int i; platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); for (i = 0; i < ARRAY_SIZE(amba_devs); i++) amba_device_register(amba_devs[i], &iomem_resource); return 0; } arch_initcall(cpu8815_init); /* All SoC devices live in the same area (see hardware.h) */ static struct map_desc nomadik_io_desc[] __initdata = { { .virtual = NOMADIK_IO_VIRTUAL, .pfn = __phys_to_pfn(NOMADIK_IO_PHYSICAL), .length = NOMADIK_IO_SIZE, .type = MT_DEVICE, } /* static ram and secured ram may be added later */ }; void __init cpu8815_map_io(void) { iotable_init(nomadik_io_desc, ARRAY_SIZE(nomadik_io_desc)); } void __init cpu8815_init_irq(void) { /* This modified VIC cell has two register blocks, at 0 and 0x20 */ vic_init(io_p2v(NOMADIK_IC_BASE + 0x00), IRQ_VIC_START + 0, ~0, 0); vic_init(io_p2v(NOMADIK_IC_BASE + 0x20), IRQ_VIC_START + 32, ~0, 0); /* * Init clocks here so that they are available for system timer * initialization. */ clk_init(); } /* * This function is called from the board init ("init_machine"). 
*/ void __init cpu8815_platform_init(void) { #ifdef CONFIG_CACHE_L2X0 /* At full speed latency must be >=2, so 0x249 in low bits */ l2x0_init(io_p2v(NOMADIK_L2CC_BASE), 0x00730249, 0xfe000fff); #endif return; } void cpu8815_restart(char mode, const char *cmd) { void __iomem *src_rstsr = io_p2v(NOMADIK_SRC_BASE + 0x18); /* FIXME: use egpio when implemented */ /* Write anything to Reset status register */ writel(1, src_rstsr); }
gpl-2.0
ausdim/TW-ll-I9505-jfltexx
drivers/usb/musb/musb_gadget_ep0.c
5043
27747
/* * MUSB OTG peripheral driver ep0 handling * * Copyright 2005 Mentor Graphics Corporation * Copyright (C) 2005-2006 by Texas Instruments * Copyright (C) 2006-2007 Nokia Corporation * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/interrupt.h> #include "musb_core.h" /* ep0 is always musb->endpoints[0].ep_in */ #define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0]) /* * locking note: we use only the controller lock, for simpler correctness. 
* It's always held with IRQs blocked. * * It protects the ep0 request queue as well as ep0_state, not just the * controller and indexed registers. And that lock stays held unless it * needs to be dropped to allow reentering this driver ... like upcalls to * the gadget driver, or adjusting endpoint halt status. */ static char *decode_ep0stage(u8 stage) { switch (stage) { case MUSB_EP0_STAGE_IDLE: return "idle"; case MUSB_EP0_STAGE_SETUP: return "setup"; case MUSB_EP0_STAGE_TX: return "in"; case MUSB_EP0_STAGE_RX: return "out"; case MUSB_EP0_STAGE_ACKWAIT: return "wait"; case MUSB_EP0_STAGE_STATUSIN: return "in/status"; case MUSB_EP0_STAGE_STATUSOUT: return "out/status"; default: return "?"; } } /* handle a standard GET_STATUS request * Context: caller holds controller lock */ static int service_tx_status_request( struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) { void __iomem *mbase = musb->mregs; int handled = 1; u8 result[2], epnum = 0; const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; result[1] = 0; switch (recip) { case USB_RECIP_DEVICE: result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED; result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; if (musb->g.is_otg) { result[0] |= musb->g.b_hnp_enable << USB_DEVICE_B_HNP_ENABLE; result[0] |= musb->g.a_alt_hnp_support << USB_DEVICE_A_ALT_HNP_SUPPORT; result[0] |= musb->g.a_hnp_support << USB_DEVICE_A_HNP_SUPPORT; } break; case USB_RECIP_INTERFACE: result[0] = 0; break; case USB_RECIP_ENDPOINT: { int is_in; struct musb_ep *ep; u16 tmp; void __iomem *regs; epnum = (u8) ctrlrequest->wIndex; if (!epnum) { result[0] = 0; break; } is_in = epnum & USB_DIR_IN; if (is_in) { epnum &= 0x0f; ep = &musb->endpoints[epnum].ep_in; } else { ep = &musb->endpoints[epnum].ep_out; } regs = musb->endpoints[epnum].regs; if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { handled = -EINVAL; break; } musb_ep_select(mbase, epnum); if (is_in) tmp = musb_readw(regs, MUSB_TXCSR) & MUSB_TXCSR_P_SENDSTALL; else tmp = 
musb_readw(regs, MUSB_RXCSR) & MUSB_RXCSR_P_SENDSTALL; musb_ep_select(mbase, 0); result[0] = tmp ? 1 : 0; } break; default: /* class, vendor, etc ... delegate */ handled = 0; break; } /* fill up the fifo; caller updates csr0 */ if (handled > 0) { u16 len = le16_to_cpu(ctrlrequest->wLength); if (len > 2) len = 2; musb_write_fifo(&musb->endpoints[0], len, result); } return handled; } /* * handle a control-IN request, the end0 buffer contains the current request * that is supposed to be a standard control request. Assumes the fifo to * be at least 2 bytes long. * * @return 0 if the request was NOT HANDLED, * < 0 when error * > 0 when the request is processed * * Context: caller holds controller lock */ static int service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) { int handled = 0; /* not handled */ if ((ctrlrequest->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (ctrlrequest->bRequest) { case USB_REQ_GET_STATUS: handled = service_tx_status_request(musb, ctrlrequest); break; /* case USB_REQ_SYNC_FRAME: */ default: break; } } return handled; } /* * Context: caller holds controller lock */ static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) { musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); } /* * Tries to start B-device HNP negotiation if enabled via sysfs */ static inline void musb_try_b_hnp_enable(struct musb *musb) { void __iomem *mbase = musb->mregs; u8 devctl; dev_dbg(musb->controller, "HNP: Setting HR\n"); devctl = musb_readb(mbase, MUSB_DEVCTL); musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR); } /* * Handle all control requests with no DATA stage, including standard * requests such as: * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized * always delegated to the gadget driver * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE * always handled here, except for class/vendor/... 
features * * Context: caller holds controller lock */ static int service_zero_data_request(struct musb *musb, struct usb_ctrlrequest *ctrlrequest) __releases(musb->lock) __acquires(musb->lock) { int handled = -EINVAL; void __iomem *mbase = musb->mregs; const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; /* the gadget driver handles everything except what we MUST handle */ if ((ctrlrequest->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (ctrlrequest->bRequest) { case USB_REQ_SET_ADDRESS: /* change it after the status stage */ musb->set_address = true; musb->address = (u8) (ctrlrequest->wValue & 0x7f); handled = 1; break; case USB_REQ_CLEAR_FEATURE: switch (recip) { case USB_RECIP_DEVICE: if (ctrlrequest->wValue != USB_DEVICE_REMOTE_WAKEUP) break; musb->may_wakeup = 0; handled = 1; break; case USB_RECIP_INTERFACE: break; case USB_RECIP_ENDPOINT:{ const u8 epnum = ctrlrequest->wIndex & 0x0f; struct musb_ep *musb_ep; struct musb_hw_ep *ep; struct musb_request *request; void __iomem *regs; int is_in; u16 csr; if (epnum == 0 || epnum >= MUSB_C_NUM_EPS || ctrlrequest->wValue != USB_ENDPOINT_HALT) break; ep = musb->endpoints + epnum; regs = ep->regs; is_in = ctrlrequest->wIndex & USB_DIR_IN; if (is_in) musb_ep = &ep->ep_in; else musb_ep = &ep->ep_out; if (!musb_ep->desc) break; handled = 1; /* Ignore request if endpoint is wedged */ if (musb_ep->wedged) break; musb_ep_select(mbase, epnum); if (is_in) { csr = musb_readw(regs, MUSB_TXCSR); csr |= MUSB_TXCSR_CLRDATATOG | MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_P_SENDSTALL | MUSB_TXCSR_P_SENTSTALL | MUSB_TXCSR_TXPKTRDY); musb_writew(regs, MUSB_TXCSR, csr); } else { csr = musb_readw(regs, MUSB_RXCSR); csr |= MUSB_RXCSR_CLRDATATOG | MUSB_RXCSR_P_WZC_BITS; csr &= ~(MUSB_RXCSR_P_SENDSTALL | MUSB_RXCSR_P_SENTSTALL); musb_writew(regs, MUSB_RXCSR, csr); } /* Maybe start the first request in the queue */ request = next_request(musb_ep); if (!musb_ep->busy && request) { dev_dbg(musb->controller, "restarting 
the request\n"); musb_ep_restart(musb, request); } /* select ep0 again */ musb_ep_select(mbase, 0); } break; default: /* class, vendor, etc ... delegate */ handled = 0; break; } break; case USB_REQ_SET_FEATURE: switch (recip) { case USB_RECIP_DEVICE: handled = 1; switch (ctrlrequest->wValue) { case USB_DEVICE_REMOTE_WAKEUP: musb->may_wakeup = 1; break; case USB_DEVICE_TEST_MODE: if (musb->g.speed != USB_SPEED_HIGH) goto stall; if (ctrlrequest->wIndex & 0xff) goto stall; switch (ctrlrequest->wIndex >> 8) { case 1: pr_debug("TEST_J\n"); /* TEST_J */ musb->test_mode_nr = MUSB_TEST_J; break; case 2: /* TEST_K */ pr_debug("TEST_K\n"); musb->test_mode_nr = MUSB_TEST_K; break; case 3: /* TEST_SE0_NAK */ pr_debug("TEST_SE0_NAK\n"); musb->test_mode_nr = MUSB_TEST_SE0_NAK; break; case 4: /* TEST_PACKET */ pr_debug("TEST_PACKET\n"); musb->test_mode_nr = MUSB_TEST_PACKET; break; case 0xc0: /* TEST_FORCE_HS */ pr_debug("TEST_FORCE_HS\n"); musb->test_mode_nr = MUSB_TEST_FORCE_HS; break; case 0xc1: /* TEST_FORCE_FS */ pr_debug("TEST_FORCE_FS\n"); musb->test_mode_nr = MUSB_TEST_FORCE_FS; break; case 0xc2: /* TEST_FIFO_ACCESS */ pr_debug("TEST_FIFO_ACCESS\n"); musb->test_mode_nr = MUSB_TEST_FIFO_ACCESS; break; case 0xc3: /* TEST_FORCE_HOST */ pr_debug("TEST_FORCE_HOST\n"); musb->test_mode_nr = MUSB_TEST_FORCE_HOST; break; default: goto stall; } /* enter test mode after irq */ if (handled > 0) musb->test_mode = true; break; case USB_DEVICE_B_HNP_ENABLE: if (!musb->g.is_otg) goto stall; musb->g.b_hnp_enable = 1; musb_try_b_hnp_enable(musb); break; case USB_DEVICE_A_HNP_SUPPORT: if (!musb->g.is_otg) goto stall; musb->g.a_hnp_support = 1; break; case USB_DEVICE_A_ALT_HNP_SUPPORT: if (!musb->g.is_otg) goto stall; musb->g.a_alt_hnp_support = 1; break; case USB_DEVICE_DEBUG_MODE: handled = 0; break; stall: default: handled = -EINVAL; break; } break; case USB_RECIP_INTERFACE: break; case USB_RECIP_ENDPOINT:{ const u8 epnum = ctrlrequest->wIndex & 0x0f; struct musb_ep *musb_ep; struct 
musb_hw_ep *ep; void __iomem *regs; int is_in; u16 csr; if (epnum == 0 || epnum >= MUSB_C_NUM_EPS || ctrlrequest->wValue != USB_ENDPOINT_HALT) break; ep = musb->endpoints + epnum; regs = ep->regs; is_in = ctrlrequest->wIndex & USB_DIR_IN; if (is_in) musb_ep = &ep->ep_in; else musb_ep = &ep->ep_out; if (!musb_ep->desc) break; musb_ep_select(mbase, epnum); if (is_in) { csr = musb_readw(regs, MUSB_TXCSR); if (csr & MUSB_TXCSR_FIFONOTEMPTY) csr |= MUSB_TXCSR_FLUSHFIFO; csr |= MUSB_TXCSR_P_SENDSTALL | MUSB_TXCSR_CLRDATATOG | MUSB_TXCSR_P_WZC_BITS; musb_writew(regs, MUSB_TXCSR, csr); } else { csr = musb_readw(regs, MUSB_RXCSR); csr |= MUSB_RXCSR_P_SENDSTALL | MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG | MUSB_RXCSR_P_WZC_BITS; musb_writew(regs, MUSB_RXCSR, csr); } /* select ep0 again */ musb_ep_select(mbase, 0); handled = 1; } break; default: /* class, vendor, etc ... delegate */ handled = 0; break; } break; default: /* delegate SET_CONFIGURATION, etc */ handled = 0; } } else handled = 0; return handled; } /* we have an ep0out data packet * Context: caller holds controller lock */ static void ep0_rxstate(struct musb *musb) { void __iomem *regs = musb->control_ep->regs; struct musb_request *request; struct usb_request *req; u16 count, csr; request = next_ep0_request(musb); req = &request->request; /* read packet and ack; or stall because of gadget driver bug: * should have provided the rx buffer before setup() returned. 
*/ if (req) { void *buf = req->buf + req->actual; unsigned len = req->length - req->actual; /* read the buffer */ count = musb_readb(regs, MUSB_COUNT0); if (count > len) { req->status = -EOVERFLOW; count = len; } musb_read_fifo(&musb->endpoints[0], count, buf); req->actual += count; csr = MUSB_CSR0_P_SVDRXPKTRDY; if (count < 64 || req->actual == req->length) { musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; csr |= MUSB_CSR0_P_DATAEND; } else req = NULL; } else csr = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL; /* Completion handler may choose to stall, e.g. because the * message just received holds invalid data. */ if (req) { musb->ackpend = csr; musb_g_ep0_giveback(musb, req); if (!musb->ackpend) return; musb->ackpend = 0; } musb_ep_select(musb->mregs, 0); musb_writew(regs, MUSB_CSR0, csr); } /* * transmitting to the host (IN), this code might be called from IRQ * and from kernel thread. * * Context: caller holds controller lock */ static void ep0_txstate(struct musb *musb) { void __iomem *regs = musb->control_ep->regs; struct musb_request *req = next_ep0_request(musb); struct usb_request *request; u16 csr = MUSB_CSR0_TXPKTRDY; u8 *fifo_src; u8 fifo_count; if (!req) { /* WARN_ON(1); */ dev_dbg(musb->controller, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0)); return; } request = &req->request; /* load the data */ fifo_src = (u8 *) request->buf + request->actual; fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE, request->length - request->actual); musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src); request->actual += fifo_count; /* update the flags */ if (fifo_count < MUSB_MAX_END0_PACKET || (request->actual == request->length && !request->zero)) { musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; csr |= MUSB_CSR0_P_DATAEND; } else request = NULL; /* report completions as soon as the fifo's loaded; there's no * win in waiting till this last packet gets acked. 
(other than * very precise fault reporting, needed by USB TMC; possible with * this hardware, but not usable from portable gadget drivers.) */ if (request) { musb->ackpend = csr; musb_g_ep0_giveback(musb, request); if (!musb->ackpend) return; musb->ackpend = 0; } /* send it out, triggering a "txpktrdy cleared" irq */ musb_ep_select(musb->mregs, 0); musb_writew(regs, MUSB_CSR0, csr); } /* * Read a SETUP packet (struct usb_ctrlrequest) from the hardware. * Fields are left in USB byte-order. * * Context: caller holds controller lock. */ static void musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) { struct musb_request *r; void __iomem *regs = musb->control_ep->regs; musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req); /* NOTE: earlier 2.6 versions changed setup packets to host * order, but now USB packets always stay in USB byte order. */ dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n", req->bRequestType, req->bRequest, le16_to_cpu(req->wValue), le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength)); /* clean up any leftover transfers */ r = next_ep0_request(musb); if (r) musb_g_ep0_giveback(musb, &r->request); /* For zero-data requests we want to delay the STATUS stage to * avoid SETUPEND errors. If we read data (OUT), delay accepting * packets until there's a buffer to store them in. * * If we write data, the controller acts happier if we enable * the TX FIFO right away, and give the controller a moment * to switch modes... 
*/ musb->set_address = false; musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY; if (req->wLength == 0) { if (req->bRequestType & USB_DIR_IN) musb->ackpend |= MUSB_CSR0_TXPKTRDY; musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT; } else if (req->bRequestType & USB_DIR_IN) { musb->ep0_state = MUSB_EP0_STAGE_TX; musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY); while ((musb_readw(regs, MUSB_CSR0) & MUSB_CSR0_RXPKTRDY) != 0) cpu_relax(); musb->ackpend = 0; } else musb->ep0_state = MUSB_EP0_STAGE_RX; } static int forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) __releases(musb->lock) __acquires(musb->lock) { int retval; if (!musb->gadget_driver) return -EOPNOTSUPP; spin_unlock(&musb->lock); retval = musb->gadget_driver->setup(&musb->g, ctrlrequest); spin_lock(&musb->lock); return retval; } /* * Handle peripheral ep0 interrupt * * Context: irq handler; we won't re-enter the driver that way. */ irqreturn_t musb_g_ep0_irq(struct musb *musb) { u16 csr; u16 len; void __iomem *mbase = musb->mregs; void __iomem *regs = musb->endpoints[0].regs; irqreturn_t retval = IRQ_NONE; musb_ep_select(mbase, 0); /* select ep0 */ csr = musb_readw(regs, MUSB_CSR0); len = musb_readb(regs, MUSB_COUNT0); dev_dbg(musb->controller, "csr %04x, count %d, myaddr %d, ep0stage %s\n", csr, len, musb_readb(mbase, MUSB_FADDR), decode_ep0stage(musb->ep0_state)); if (csr & MUSB_CSR0_P_DATAEND) { /* * If DATAEND is set we should not call the callback, * hence the status stage is not complete. */ return IRQ_HANDLED; } /* I sent a stall.. need to acknowledge it now.. 
/*
 * Handle peripheral ep0 interrupt
 *
 * Dispatches on musb->ep0_state after first servicing any SENTSTALL /
 * SETUPEND conditions the hardware reports in CSR0.
 *
 * Context: irq handler; we won't re-enter the driver that way.
 */
irqreturn_t musb_g_ep0_irq(struct musb *musb)
{
	u16		csr;
	u16		len;
	void __iomem	*mbase = musb->mregs;
	void __iomem	*regs = musb->endpoints[0].regs;
	irqreturn_t	retval = IRQ_NONE;

	musb_ep_select(mbase, 0);	/* select ep0 */
	csr = musb_readw(regs, MUSB_CSR0);
	len = musb_readb(regs, MUSB_COUNT0);

	dev_dbg(musb->controller, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
			csr, len,
			musb_readb(mbase, MUSB_FADDR),
			decode_ep0stage(musb->ep0_state));

	if (csr & MUSB_CSR0_P_DATAEND) {
		/*
		 * If DATAEND is set we should not call the callback,
		 * hence the status stage is not complete.
		 */
		return IRQ_HANDLED;
	}

	/* I sent a stall.. need to acknowledge it now.. */
	if (csr & MUSB_CSR0_P_SENTSTALL) {
		musb_writew(regs, MUSB_CSR0,
				csr & ~MUSB_CSR0_P_SENTSTALL);
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		csr = musb_readw(regs, MUSB_CSR0);	/* re-read after the ack */
	}

	/* request ended "early" */
	if (csr & MUSB_CSR0_P_SETUPEND) {
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
		retval = IRQ_HANDLED;
		/* Transition into the early status phase */
		switch (musb->ep0_state) {
		case MUSB_EP0_STAGE_TX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
			break;
		case MUSB_EP0_STAGE_RX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			break;
		default:
			ERR("SetupEnd came in a wrong ep0stage %s\n",
			    decode_ep0stage(musb->ep0_state));
		}
		csr = musb_readw(regs, MUSB_CSR0);
		/* NOTE: request may need completion */
	}

	/* docs from Mentor only describe tx, rx, and idle/setup states.
	 * we need to handle nuances around status stages, and also the
	 * case where status and setup stages come back-to-back ...
	 */
	switch (musb->ep0_state) {

	case MUSB_EP0_STAGE_TX:
		/* irq on clearing txpktrdy */
		if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
			ep0_txstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_RX:
		/* irq on set rxpktrdy */
		if (csr & MUSB_CSR0_RXPKTRDY) {
			ep0_rxstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_STATUSIN:
		/* end of sequence #2 (OUT/RX state) or #3 (no data) */

		/* update address (if needed) only @ the end of the
		 * status phase per usb spec, which also guarantees
		 * we get 10 msec to receive this irq... until this
		 * is done we won't see the next packet.
		 */
		if (musb->set_address) {
			musb->set_address = false;
			musb_writeb(mbase, MUSB_FADDR, musb->address);
		}

		/* enter test mode if needed (exit by reset) */
		else if (musb->test_mode) {
			dev_dbg(musb->controller, "entering TESTMODE\n");

			if (MUSB_TEST_PACKET == musb->test_mode_nr)
				musb_load_testpacket(musb);

			musb_writeb(mbase, MUSB_TESTMODE,
					musb->test_mode_nr);
		}
		/* FALLTHROUGH */

	case MUSB_EP0_STAGE_STATUSOUT:
		/* end of sequence #1: write to host (TX state) */
		{
			struct musb_request	*req;

			req = next_ep0_request(musb);
			if (req)
				musb_g_ep0_giveback(musb, &req->request);
		}

		/*
		 * In case when several interrupts can get coalesced,
		 * check to see if we've already received a SETUP packet...
		 */
		if (csr & MUSB_CSR0_RXPKTRDY)
			goto setup;

		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;

	case MUSB_EP0_STAGE_IDLE:
		/*
		 * This state is typically (but not always) indiscernible
		 * from the status states since the corresponding interrupts
		 * tend to happen within too little period of time (with only
		 * a zero-length packet in between) and so get coalesced...
		 */
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		/* FALLTHROUGH */

	case MUSB_EP0_STAGE_SETUP:
setup:
		if (csr & MUSB_CSR0_RXPKTRDY) {
			struct usb_ctrlrequest	setup;
			int			handled = 0;

			/* a SETUP transaction is always exactly 8 bytes */
			if (len != 8) {
				ERR("SETUP packet len %d != 8 ?\n", len);
				break;
			}
			musb_read_setup(musb, &setup);
			retval = IRQ_HANDLED;

			/* sometimes the RESET won't be reported */
			if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
				u8	power;

				printk(KERN_NOTICE "%s: peripheral reset "
						"irq lost!\n",
						musb_driver_name);
				power = musb_readb(mbase, MUSB_POWER);
				musb->g.speed = (power & MUSB_POWER_HSMODE)
					? USB_SPEED_HIGH : USB_SPEED_FULL;

			}

			/* musb_read_setup() already chose the next ep0
			 * stage; dispatch on it to service the request
			 */
			switch (musb->ep0_state) {

			/* sequence #3 (no data stage), includes requests
			 * we can't forward (notably SET_ADDRESS and the
			 * device/endpoint feature set/clear operations)
			 * plus SET_CONFIGURATION and others we must
			 */
			case MUSB_EP0_STAGE_ACKWAIT:
				handled = service_zero_data_request(
						musb, &setup);

				/*
				 * We're expecting no data in any case, so
				 * always set the DATAEND bit -- doing this
				 * here helps avoid SetupEnd interrupt coming
				 * in the idle stage when we're stalling...
				 */
				musb->ackpend |= MUSB_CSR0_P_DATAEND;

				/* status stage might be immediate */
				if (handled > 0)
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSIN;
				break;

			/* sequence #1 (IN to host), includes GET_STATUS
			 * requests that we can't forward, GET_DESCRIPTOR
			 * and others that we must
			 */
			case MUSB_EP0_STAGE_TX:
				handled = service_in_request(musb, &setup);
				if (handled > 0) {
					musb->ackpend = MUSB_CSR0_TXPKTRDY
						| MUSB_CSR0_P_DATAEND;
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSOUT;
				}
				break;

			/* sequence #2 (OUT from host), always forward */
			default:		/* MUSB_EP0_STAGE_RX */
				break;
			}

			dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n",
				handled, csr,
				decode_ep0stage(musb->ep0_state));

			/* unless we need to delegate this to the gadget
			 * driver, we know how to wrap this up: csr0 has
			 * not yet been written.
			 */
			if (handled < 0)
				goto stall;
			else if (handled > 0)
				goto finish;

			handled = forward_to_driver(musb, &setup);
			if (handled < 0) {
				/* re-select ep0; the driver callback ran
				 * with the lock dropped and may have
				 * touched other endpoints
				 */
				musb_ep_select(mbase, 0);
stall:
				dev_dbg(musb->controller, "stall (%d)\n", handled);
				musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
				musb->ep0_state = MUSB_EP0_STAGE_IDLE;
finish:
				musb_writew(regs, MUSB_CSR0,
						musb->ackpend);
				musb->ackpend = 0;
			}
		}
		break;

	case MUSB_EP0_STAGE_ACKWAIT:
		/* This should not happen. But happens with tusb6010 with
		 * g_file_storage and high speed. Do nothing.
		 */
		retval = IRQ_HANDLED;
		break;

	default:
		/* "can't happen" */
		WARN_ON(1);
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;
	}

	return retval;
}
*/ retval = IRQ_HANDLED; break; default: /* "can't happen" */ WARN_ON(1); musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL); musb->ep0_state = MUSB_EP0_STAGE_IDLE; break; } return retval; } static int musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) { /* always enabled */ return -EINVAL; } static int musb_g_ep0_disable(struct usb_ep *e) { /* always enabled */ return -EINVAL; } static int musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags) { struct musb_ep *ep; struct musb_request *req; struct musb *musb; int status; unsigned long lockflags; void __iomem *regs; if (!e || !r) return -EINVAL; ep = to_musb_ep(e); musb = ep->musb; regs = musb->control_ep->regs; req = to_musb_request(r); req->musb = musb; req->request.actual = 0; req->request.status = -EINPROGRESS; req->tx = ep->is_in; spin_lock_irqsave(&musb->lock, lockflags); if (!list_empty(&ep->req_list)) { status = -EBUSY; goto cleanup; } switch (musb->ep0_state) { case MUSB_EP0_STAGE_RX: /* control-OUT data */ case MUSB_EP0_STAGE_TX: /* control-IN data */ case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */ status = 0; break; default: dev_dbg(musb->controller, "ep0 request queued in state %d\n", musb->ep0_state); status = -EINVAL; goto cleanup; } /* add request to the list */ list_add_tail(&req->list, &ep->req_list); dev_dbg(musb->controller, "queue to %s (%s), length=%d\n", ep->name, ep->is_in ? "IN/TX" : "OUT/RX", req->request.length); musb_ep_select(musb->mregs, 0); /* sequence #1, IN ... start writing the data */ if (musb->ep0_state == MUSB_EP0_STAGE_TX) ep0_txstate(musb); /* sequence #3, no-data ... 
issue IN status */ else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) { if (req->request.length) status = -EINVAL; else { musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; musb_writew(regs, MUSB_CSR0, musb->ackpend | MUSB_CSR0_P_DATAEND); musb->ackpend = 0; musb_g_ep0_giveback(ep->musb, r); } /* else for sequence #2 (OUT), caller provides a buffer * before the next packet arrives. deferred responses * (after SETUP is acked) are racey. */ } else if (musb->ackpend) { musb_writew(regs, MUSB_CSR0, musb->ackpend); musb->ackpend = 0; } cleanup: spin_unlock_irqrestore(&musb->lock, lockflags); return status; } static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) { /* we just won't support this */ return -EINVAL; } static int musb_g_ep0_halt(struct usb_ep *e, int value) { struct musb_ep *ep; struct musb *musb; void __iomem *base, *regs; unsigned long flags; int status; u16 csr; if (!e || !value) return -EINVAL; ep = to_musb_ep(e); musb = ep->musb; base = musb->mregs; regs = musb->control_ep->regs; status = 0; spin_lock_irqsave(&musb->lock, flags); if (!list_empty(&ep->req_list)) { status = -EBUSY; goto cleanup; } musb_ep_select(base, 0); csr = musb->ackpend; switch (musb->ep0_state) { /* Stalls are usually issued after parsing SETUP packet, either * directly in irq context from setup() or else later. */ case MUSB_EP0_STAGE_TX: /* control-IN data */ case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */ case MUSB_EP0_STAGE_RX: /* control-OUT data */ csr = musb_readw(regs, MUSB_CSR0); /* FALLTHROUGH */ /* It's also OK to issue stalls during callbacks when a non-empty * DATA stage buffer has been read (or even written). 
*/ case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */ case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */ csr |= MUSB_CSR0_P_SENDSTALL; musb_writew(regs, MUSB_CSR0, csr); musb->ep0_state = MUSB_EP0_STAGE_IDLE; musb->ackpend = 0; break; default: dev_dbg(musb->controller, "ep0 can't halt in state %d\n", musb->ep0_state); status = -EINVAL; } cleanup: spin_unlock_irqrestore(&musb->lock, flags); return status; } const struct usb_ep_ops musb_g_ep0_ops = { .enable = musb_g_ep0_enable, .disable = musb_g_ep0_disable, .alloc_request = musb_alloc_request, .free_request = musb_free_request, .queue = musb_g_ep0_queue, .dequeue = musb_g_ep0_dequeue, .set_halt = musb_g_ep0_halt, };
gpl-2.0
iHateWEBos/kernel_htc_m8_LolliSense
arch/microblaze/kernel/heartbeat.c
7859
1668
/* * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/sched.h> #include <linux/io.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/prom.h> static unsigned int base_addr; void heartbeat(void) { static unsigned int cnt, period, dist; if (base_addr) { if (cnt == 0 || cnt == dist) out_be32(base_addr, 1); else if (cnt == 7 || cnt == dist + 7) out_be32(base_addr, 0); if (++cnt > period) { cnt = 0; /* * The hyperbolic function below modifies the heartbeat * period length in dependency of the current (5min) * load. It goes through the points f(0)=126, f(1)=86, * f(5)=51, f(inf)->30. */ period = ((672 << FSHIFT) / (5 * avenrun[0] + (7 << FSHIFT))) + 30; dist = period / 4; } } } void setup_heartbeat(void) { struct device_node *gpio = NULL; int *prop; int j; const char * const gpio_list[] = { "xlnx,xps-gpio-1.00.a", NULL }; for (j = 0; gpio_list[j] != NULL; j++) { gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]); if (gpio) break; } if (gpio) { base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL)); base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE); printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr); /* GPIO is configured as output */ prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL); if (prop) out_be32(base_addr + 4, 0); } }
gpl-2.0
omnirom/android_kernel_asus_me301t
arch/arm/plat-s3c24xx/clock.c
8115
1663
/* linux/arch/arm/plat-s3c24xx/clock.c * * Copyright (c) 2004-2005 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C24XX Core clock control support * * Based on, and code from linux/arch/arm/mach-versatile/clock.c ** ** Copyright (C) 2004 ARM Limited. ** Written by Deep Blue Solutions Limited. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <mach/regs-clock.h> #include <mach/regs-gpio.h> #include <plat/cpu-freq.h> #include <plat/clock.h> #include <plat/cpu.h> #include <plat/pll.h> /* initialise all the clocks */ void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk, unsigned long hclk, unsigned long pclk) { clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON), clk_xtal.rate); clk_mpll.rate = fclk; clk_h.rate = hclk; clk_p.rate = pclk; clk_f.rate = fclk; }
gpl-2.0