| repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
dalingrin/android_kernel_2.6.32 | drivers/net/igbvf/vf.c | 1489 | 11866 | /*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "vf.h"
static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
u16 *duplex);
static s32 e1000_init_hw_vf(struct e1000_hw *hw);
static s32 e1000_reset_hw_vf(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
u32, u32, u32);
static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
/**
* e1000_init_mac_params_vf - Inits MAC params
* @hw: pointer to the HW structure
**/
static s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
/* VFs have no MTA Registers - PF feature only */
mac->mta_reg_count = 128;
/* VFs have no access to RAR entries */
mac->rar_entry_count = 1;
/* Function pointers */
/* reset */
mac->ops.reset_hw = e1000_reset_hw_vf;
/* hw initialization */
mac->ops.init_hw = e1000_init_hw_vf;
/* check for link */
mac->ops.check_for_link = e1000_check_for_link_vf;
/* link info */
mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
/* set mac address */
mac->ops.rar_set = e1000_rar_set_vf;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
/* set vlan filter table array */
mac->ops.set_vfta = e1000_set_vfta_vf;
return E1000_SUCCESS;
}
/**
* e1000_init_function_pointers_vf - Inits function pointers
* @hw: pointer to the HW structure
**/
void e1000_init_function_pointers_vf(struct e1000_hw *hw)
{
hw->mac.ops.init_params = e1000_init_mac_params_vf;
hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
}
/**
* e1000_get_link_up_info_vf - Gets link info.
* @hw: pointer to the HW structure
* @speed: pointer to 16 bit value to store link speed.
* @duplex: pointer to 16 bit value to store duplex.
*
* Since we cannot read the PHY and get accurate link info, we must rely upon
* the status register's data which is often stale and inaccurate.
**/
static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
u16 *duplex)
{
s32 status;
status = er32(STATUS);
if (status & E1000_STATUS_SPEED_1000)
*speed = SPEED_1000;
else if (status & E1000_STATUS_SPEED_100)
*speed = SPEED_100;
else
*speed = SPEED_10;
if (status & E1000_STATUS_FD)
*duplex = FULL_DUPLEX;
else
*duplex = HALF_DUPLEX;
return E1000_SUCCESS;
}
/**
* e1000_reset_hw_vf - Resets the HW
* @hw: pointer to the HW structure
*
* VFs provide a function-level reset. This is done using bit 26 of ctrl_reg.
* This is all the reset we can perform on a VF.
**/
static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 timeout = E1000_VF_INIT_TIMEOUT;
s32 ret_val = -E1000_ERR_MAC_INIT;
u32 msgbuf[3];
u8 *addr = (u8 *)(&msgbuf[1]);
u32 ctrl;
/* assert vf queue/interrupt reset */
ctrl = er32(CTRL);
ew32(CTRL, ctrl | E1000_CTRL_RST);
/* we cannot initialize while the RSTI / RSTD bits are asserted */
while (!mbx->ops.check_for_rst(hw) && timeout) {
timeout--;
udelay(5);
}
if (timeout) {
/* mailbox timeout can now become active */
mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
/* notify pf of vf reset completion */
msgbuf[0] = E1000_VF_RESET;
mbx->ops.write_posted(hw, msgbuf, 1);
msleep(10);
/* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
memcpy(hw->mac.perm_addr, addr, 6);
else
ret_val = -E1000_ERR_MAC_INIT;
}
}
return ret_val;
}
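/*
* Illustrative note (not part of the original driver): on a successful
* reset handshake the 3-word PF reply read above is laid out as
* msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK, with msgbuf[1..2]
* holding the VF's permanent MAC address packed as 6 bytes, which is why
* "addr" aliases &msgbuf[1] and 6 bytes are copied into hw->mac.perm_addr.
*/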
/**
* e1000_init_hw_vf - Inits the HW
* @hw: pointer to the HW structure
*
* Not much to do here except clear the PF Reset indication if there is one.
**/
static s32 e1000_init_hw_vf(struct e1000_hw *hw)
{
/* attempt to set and restore our mac address */
e1000_rar_set_vf(hw, hw->mac.addr, 0);
return E1000_SUCCESS;
}
/**
* e1000_hash_mc_addr_vf - Generate a multicast hash value
* @hw: pointer to the HW structure
* @mc_addr: pointer to a multicast address
*
* Generates a multicast address hash value which is used to determine
* the multicast filter table array address and new table value. See
* e1000_mta_set_generic()
**/
static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
u8 bit_shift = 0;
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
/*
* The bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
bit_shift++;
hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
(((u16) mc_addr[5]) << bit_shift)));
return hash_value;
}
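/*
* Worked example (illustrative only): with mta_reg_count = 128 the mask is
* 128 * 32 - 1 = 0xFFF, so bit_shift settles at 4 (0xFFF >> 4 == 0xFF).
* For the multicast address 01:00:5e:00:00:01, mc_addr[4] = 0x00 and
* mc_addr[5] = 0x01, so hash_value = 0xFFF & ((0x00 >> 4) | (0x01 << 4))
* = 0x010.
*/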
/**
* e1000_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
* @rar_used_count: the first RAR register free to program
* @rar_count: total number of supported Receive Address Registers
*
* Updates the Receive Address Registers and Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 msgbuf[E1000_VFMAILBOX_SIZE];
u16 *hash_list = (u16 *)&msgbuf[1];
u32 hash_value;
u32 cnt, i;
/* Each entry in the list uses one 16-bit word. We have 30
16-bit words available in our HW msg buffer (minus 1 for the
msg type). That's 30 hash values if we pack 'em right. If
there are more than 30 MC addresses to add then punt the
extras for now and add code later to handle more than 30.
It would be unusual for a server to request that many multicast
addresses except in large enterprise network environments.
*/
cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
msgbuf[0] = E1000_VF_SET_MULTICAST;
msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT;
for (i = 0; i < cnt; i++) {
hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
hash_list[i] = hash_value & 0x0FFFF;
mc_addr_list += ETH_ADDR_LEN;
}
mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE);
}
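/*
* Illustrative sketch of the mailbox message built above: for, say, three
* multicast addresses, msgbuf[0] = E1000_VF_SET_MULTICAST |
* (3 << E1000_VT_MSGINFO_SHIFT) and the three hash values occupy the
* consecutive 16-bit words hash_list[0..2] starting at msgbuf[1].
*/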
/**
* e1000_set_vfta_vf - Set/Unset vlan filter table address
* @hw: pointer to the HW structure
* @vid: determines the vfta register and bit to set/unset
* @set: if true then set bit, else clear bit
**/
static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
s32 err;
msgbuf[0] = E1000_VF_SET_VLAN;
msgbuf[1] = vid;
/* Setting the 8 bit field MSG INFO to true indicates "add" */
if (set)
msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
mbx->ops.write_posted(hw, msgbuf, 2);
err = mbx->ops.read_posted(hw, msgbuf, 2);
msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
/* if nacked the vlan was rejected */
if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)))
err = -E1000_ERR_MAC_INIT;
return err;
}
/**
* e1000_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
msgbuf[0] = E1000_VF_SET_LPE;
msgbuf[1] = max_size;
mbx->ops.write_posted(hw, msgbuf, 2);
}
/**
* e1000_rar_set_vf - set device MAC address
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
* @index: receive address array register
**/
static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
memset(msgbuf, 0, 12);
msgbuf[0] = E1000_VF_SET_MAC_ADDR;
memcpy(msg_addr, addr, 6);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
(msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
e1000_read_mac_addr_vf(hw);
}
/**
* e1000_read_mac_addr_vf - Read device MAC address
* @hw: pointer to the HW structure
**/
static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
{
int i;
for (i = 0; i < ETH_ADDR_LEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
return E1000_SUCCESS;
}
/**
* e1000_check_for_link_vf - Check for link for a virtual interface
* @hw: pointer to the HW structure
*
* Checks to see if the underlying PF is still talking to the VF and
* if it is then it reports the link state to the hardware, otherwise
* it reports link down and returns an error.
**/
static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = E1000_SUCCESS;
u32 in_msg = 0;
/*
* We only want to run this if there has been a rst asserted.
* in this case that could mean a link change, device reset,
* or a virtual function reset
*/
/* If we were hit with a reset drop the link */
if (!mbx->ops.check_for_rst(hw))
mac->get_link_status = true;
if (!mac->get_link_status)
goto out;
/* if link status is down no point in checking to see if pf is up */
if (!(er32(STATUS) & E1000_STATUS_LU))
goto out;
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error */
if (mbx->ops.read(hw, &in_msg, 1))
goto out;
/* if incoming message isn't clear to send we are waiting on response */
if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
/* message is not CTS and is a NACK, so we must have lost CTS status */
if (in_msg & E1000_VT_MSGTYPE_NACK)
ret_val = -E1000_ERR_MAC_INIT;
goto out;
}
/* the pf is talking, if we timed out in the past we reinit */
if (!mbx->timeout) {
ret_val = -E1000_ERR_MAC_INIT;
goto out;
}
/* if we passed all the tests above then the link is up and we no
* longer need to check for link */
mac->get_link_status = false;
out:
return ret_val;
}
| gpl-2.0 |
puppybane/linux-cyrus | drivers/hwmon/max1619.c | 1745 | 9413 | /*
* max1619.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
* Jean Delvare <jdelvare@suse.de>
*
* Based on the lm90 driver. The MAX1619 is a sensor chip made by Maxim.
* It reports up to two temperatures (its own plus up to
* one external one). Complete datasheet can be
* obtained from Maxim's website at:
* http://pdfserv.maxim-ic.com/en/ds/MAX1619.pdf
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
/*
* The MAX1619 registers
*/
#define MAX1619_REG_R_MAN_ID 0xFE
#define MAX1619_REG_R_CHIP_ID 0xFF
#define MAX1619_REG_R_CONFIG 0x03
#define MAX1619_REG_W_CONFIG 0x09
#define MAX1619_REG_R_CONVRATE 0x04
#define MAX1619_REG_W_CONVRATE 0x0A
#define MAX1619_REG_R_STATUS 0x02
#define MAX1619_REG_R_LOCAL_TEMP 0x00
#define MAX1619_REG_R_REMOTE_TEMP 0x01
#define MAX1619_REG_R_REMOTE_HIGH 0x07
#define MAX1619_REG_W_REMOTE_HIGH 0x0D
#define MAX1619_REG_R_REMOTE_LOW 0x08
#define MAX1619_REG_W_REMOTE_LOW 0x0E
#define MAX1619_REG_R_REMOTE_CRIT 0x10
#define MAX1619_REG_W_REMOTE_CRIT 0x12
#define MAX1619_REG_R_TCRIT_HYST 0x11
#define MAX1619_REG_W_TCRIT_HYST 0x13
/*
* Conversions
*/
static int temp_from_reg(int val)
{
return (val & 0x80 ? val-0x100 : val) * 1000;
}
static int temp_to_reg(int val)
{
return (val < 0 ? val+0x100*1000 : val) / 1000;
}
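/*
* Worked example (illustrative only): register values are 8-bit two's
* complement degrees Celsius, sysfs values are millidegrees Celsius.
* temp_from_reg(0x19) = 25000, temp_from_reg(0xE7) = -25000;
* temp_to_reg(25000) = 0x19, temp_to_reg(-25000) = 0xE7.
*/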
enum temp_index {
t_input1 = 0,
t_input2,
t_low2,
t_high2,
t_crit2,
t_hyst2,
t_num_regs
};
/*
* Client data (each client gets its own)
*/
struct max1619_data {
struct i2c_client *client;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
u8 temp[t_num_regs]; /* index with enum temp_index */
u8 alarms;
};
static const u8 regs_read[t_num_regs] = {
[t_input1] = MAX1619_REG_R_LOCAL_TEMP,
[t_input2] = MAX1619_REG_R_REMOTE_TEMP,
[t_low2] = MAX1619_REG_R_REMOTE_LOW,
[t_high2] = MAX1619_REG_R_REMOTE_HIGH,
[t_crit2] = MAX1619_REG_R_REMOTE_CRIT,
[t_hyst2] = MAX1619_REG_R_TCRIT_HYST,
};
static const u8 regs_write[t_num_regs] = {
[t_low2] = MAX1619_REG_W_REMOTE_LOW,
[t_high2] = MAX1619_REG_W_REMOTE_HIGH,
[t_crit2] = MAX1619_REG_W_REMOTE_CRIT,
[t_hyst2] = MAX1619_REG_W_TCRIT_HYST,
};
static struct max1619_data *max1619_update_device(struct device *dev)
{
struct max1619_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
int config, i;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
dev_dbg(&client->dev, "Updating max1619 data.\n");
for (i = 0; i < t_num_regs; i++)
data->temp[i] = i2c_smbus_read_byte_data(client,
regs_read[i]);
data->alarms = i2c_smbus_read_byte_data(client,
MAX1619_REG_R_STATUS);
/* If OVERT polarity is low, reverse alarm bit */
config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG);
if (!(config & 0x20))
data->alarms ^= 0x02;
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
/*
* Sysfs stuff
*/
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max1619_data *data = max1619_update_device(dev);
return sprintf(buf, "%d\n", temp_from_reg(data->temp[attr->index]));
}
static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max1619_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long val;
int err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->temp[attr->index] = temp_to_reg(val);
i2c_smbus_write_byte_data(client, regs_write[attr->index],
data->temp[attr->index]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct max1619_data *data = max1619_update_device(dev);
return sprintf(buf, "%d\n", data->alarms);
}
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct max1619_data *data = max1619_update_device(dev);
return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, t_input1);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, t_input2);
static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp, set_temp,
t_low2);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp, set_temp,
t_high2);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp, set_temp,
t_crit2);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp,
set_temp, t_hyst2);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static struct attribute *max1619_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_crit.dev_attr.attr,
&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
&dev_attr_alarms.attr,
&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(max1619);
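/*
* Example usage from userspace (the hwmon instance number is hypothetical):
*
*   cat /sys/class/hwmon/hwmon2/temp2_input     # remote temp, millidegrees C
*   echo 70000 > /sys/class/hwmon/hwmon2/temp2_max
*
* Reads are served from the register cache, which max1619_update_device()
* refreshes at most once every two seconds.
*/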
/* Return 0 if detection is successful, -ENODEV otherwise */
static int max1619_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
u8 reg_config, reg_convrate, reg_status, man_id, chip_id;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
/* detection */
reg_config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG);
reg_convrate = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONVRATE);
reg_status = i2c_smbus_read_byte_data(client, MAX1619_REG_R_STATUS);
if ((reg_config & 0x03) != 0x00
|| reg_convrate > 0x07 || (reg_status & 0x61) != 0x00) {
dev_dbg(&adapter->dev, "MAX1619 detection failed at 0x%02x\n",
client->addr);
return -ENODEV;
}
/* identification */
man_id = i2c_smbus_read_byte_data(client, MAX1619_REG_R_MAN_ID);
chip_id = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CHIP_ID);
if (man_id != 0x4D || chip_id != 0x04) {
dev_info(&adapter->dev,
"Unsupported chip (man_id=0x%02X, chip_id=0x%02X).\n",
man_id, chip_id);
return -ENODEV;
}
strlcpy(info->type, "max1619", I2C_NAME_SIZE);
return 0;
}
static void max1619_init_client(struct i2c_client *client)
{
u8 config;
/*
* Start the conversions.
*/
i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONVRATE,
5); /* 2 Hz */
config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG);
if (config & 0x40)
i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONFIG,
config & 0xBF); /* run */
}
static int max1619_probe(struct i2c_client *new_client,
const struct i2c_device_id *id)
{
struct max1619_data *data;
struct device *hwmon_dev;
data = devm_kzalloc(&new_client->dev, sizeof(struct max1619_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = new_client;
mutex_init(&data->update_lock);
/* Initialize the MAX1619 chip */
max1619_init_client(new_client);
hwmon_dev = devm_hwmon_device_register_with_groups(&new_client->dev,
new_client->name,
data,
max1619_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id max1619_id[] = {
{ "max1619", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max1619_id);
static struct i2c_driver max1619_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max1619",
},
.probe = max1619_probe,
.id_table = max1619_id,
.detect = max1619_detect,
.address_list = normal_i2c,
};
module_i2c_driver(max1619_driver);
MODULE_AUTHOR("Oleksij Rempel <bug-track@fisher-privat.net>, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("MAX1619 sensor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Hundsbuah/tf700t_10_6_1_14_4 | drivers/s390/char/raw3270.c | 2769 | 35255 | /*
* IBM/3270 Driver - core functions.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* Copyright IBM Corp. 2003, 2009
*/
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include "raw3270.h"
#include <linux/major.h>
#include <linux/kdev_t.h>
#include <linux/device.h>
#include <linux/mutex.h>
static struct class *class3270;
/* The main 3270 data structure. */
struct raw3270 {
struct list_head list;
struct ccw_device *cdev;
int minor;
short model, rows, cols;
unsigned long flags;
struct list_head req_queue; /* Request queue. */
struct list_head view_list; /* List of available views. */
struct raw3270_view *view; /* Active view. */
struct timer_list timer; /* Device timer. */
unsigned char *ascebc; /* ascii -> ebcdic table */
struct device *clttydev; /* 3270-class tty device ptr */
struct device *cltubdev; /* 3270-class tub device ptr */
struct raw3270_request init_request;
unsigned char init_data[256];
};
/* raw3270->flags */
#define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */
#define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */
#define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */
#define RAW3270_FLAGS_READY 4 /* Device is usable by views */
#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
#define RAW3270_FLAGS_FROZEN 16 /* set if 3270 is frozen for suspend */
/* Semaphore to protect global data of raw3270 (devices, views, etc). */
static DEFINE_MUTEX(raw3270_mutex);
/* List of 3270 devices. */
static LIST_HEAD(raw3270_devices);
/*
* Flag to indicate if the driver has been registered. Some operations
* like waiting for the end of i/o need to be done differently as long
* as the kernel is still starting up (console support).
*/
static int raw3270_registered;
/* Module parameters */
static int tubxcorrect = 0;
module_param(tubxcorrect, bool, 0);
/*
* Wait queue for device init/delete, view delete.
*/
DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
/*
* Encode array for 12 bit 3270 addresses.
*/
static unsigned char raw3270_ebcgraf[64] = {
0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
0xd8, 0xd9, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
0xe8, 0xe9, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
};
void
raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
{
if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
cp[0] = (addr >> 8) & 0x3f;
cp[1] = addr & 0xff;
} else {
cp[0] = raw3270_ebcgraf[(addr >> 6) & 0x3f];
cp[1] = raw3270_ebcgraf[addr & 0x3f];
}
}
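/*
* Worked example (illustrative only): for buffer address 81 (second row,
* second column of an 80-column screen) the 12-bit encoding yields
* cp[0] = raw3270_ebcgraf[81 >> 6] = raw3270_ebcgraf[1] = 0xc1 and
* cp[1] = raw3270_ebcgraf[81 & 0x3f] = raw3270_ebcgraf[17] = 0xd1, while
* 14-bit mode simply stores 0x00, 0x51.
*/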
/*
* Allocate a new 3270 ccw request
*/
struct raw3270_request *
raw3270_request_alloc(size_t size)
{
struct raw3270_request *rq;
/* Allocate request structure */
rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
if (!rq)
return ERR_PTR(-ENOMEM);
/* alloc output buffer. */
if (size > 0) {
rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
if (!rq->buffer) {
kfree(rq);
return ERR_PTR(-ENOMEM);
}
}
rq->size = size;
INIT_LIST_HEAD(&rq->list);
/*
* Setup ccw.
*/
rq->ccw.cda = __pa(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
return rq;
}
/*
* Free 3270 ccw request
*/
void
raw3270_request_free (struct raw3270_request *rq)
{
kfree(rq->buffer);
kfree(rq);
}
/*
* Reset request to initial state.
*/
void
raw3270_request_reset(struct raw3270_request *rq)
{
BUG_ON(!list_empty(&rq->list));
rq->ccw.cmd_code = 0;
rq->ccw.count = 0;
rq->ccw.cda = __pa(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
rq->rescnt = 0;
rq->rc = 0;
}
/*
* Set command code to ccw of a request.
*/
void
raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
{
rq->ccw.cmd_code = cmd;
}
/*
* Add data fragment to output buffer.
*/
int
raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
{
if (size + rq->ccw.count > rq->size)
return -E2BIG;
memcpy(rq->buffer + rq->ccw.count, data, size);
rq->ccw.count += size;
return 0;
}
/*
* Set address/length pair to ccw of a request.
*/
void
raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
{
rq->ccw.cda = __pa(data);
rq->ccw.count = size;
}
/*
* Set idal buffer to ccw of a request.
*/
void
raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
{
rq->ccw.cda = __pa(ib->data);
rq->ccw.count = ib->size;
rq->ccw.flags |= CCW_FLAG_IDA;
}
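/*
* Typical request life cycle as seen by a view driver (illustrative sketch
* only; "my_callback" is a placeholder, not a function in this file):
*
*	rq = raw3270_request_alloc(size);
*	raw3270_request_set_cmd(rq, TC_EWRITEA);
*	raw3270_request_add_data(rq, buf, len);
*	rq->callback = my_callback;	-- invoked from raw3270_irq()
*	rc = raw3270_start(view, rq);
*	...
*	raw3270_request_free(rq);
*/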
/*
* Stop running ccw.
*/
static int
raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
{
int retries;
int rc;
if (raw3270_request_final(rq))
return 0;
/* Check if interrupt has already been processed */
for (retries = 0; retries < 5; retries++) {
if (retries < 2)
rc = ccw_device_halt(rp->cdev, (long) rq);
else
rc = ccw_device_clear(rp->cdev, (long) rq);
if (rc == 0)
break; /* termination successful */
}
return rc;
}
static int
raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
{
unsigned long flags;
int rc;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rc = raw3270_halt_io_nolock(rp, rq);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return rc;
}
/*
* Add the request to the request queue, try to start it if the
* 3270 device is idle. Return without waiting for end of i/o.
*/
static int
__raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
struct raw3270_request *rq)
{
rq->view = view;
raw3270_get_view(view);
if (list_empty(&rp->req_queue) &&
!test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
/* No other requests are on the queue. Start this one. */
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
(unsigned long) rq, 0, 0);
if (rq->rc) {
raw3270_put_view(view);
return rq->rc;
}
}
list_add_tail(&rq->list, &rp->req_queue);
return 0;
}
int
raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
{
unsigned long flags;
struct raw3270 *rp;
int rc;
spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
rp = view->dev;
if (!rp || rp->view != view ||
test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
rc = -ENODEV;
else
rc = __raw3270_start(rp, view, rq);
spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
return rc;
}
int
raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
{
struct raw3270 *rp;
int rc;
rp = view->dev;
if (!rp || rp->view != view ||
test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
rc = -ENODEV;
else
rc = __raw3270_start(rp, view, rq);
return rc;
}
int
raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
{
struct raw3270 *rp;
rp = view->dev;
rq->view = view;
raw3270_get_view(view);
list_add_tail(&rq->list, &rp->req_queue);
return 0;
}
/*
* 3270 interrupt routine, called from the ccw_device layer
*/
static void
raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
struct raw3270 *rp;
struct raw3270_view *view;
struct raw3270_request *rq;
int rc;
kstat_cpu(smp_processor_id()).irqs[IOINT_C70]++;
rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return;
rq = (struct raw3270_request *) intparm;
view = rq ? rq->view : rp->view;
if (IS_ERR(irb))
rc = RAW3270_IO_RETRY;
else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
rq->rc = -EIO;
rc = RAW3270_IO_DONE;
} else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
DEV_STAT_UNIT_EXCEP)) {
/* Handle CE-DE-UE and subsequent UDE */
set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
rc = RAW3270_IO_BUSY;
} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
/* Wait for UDE if busy flag is set. */
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
/* Got it, now retry. */
rc = RAW3270_IO_RETRY;
} else
rc = RAW3270_IO_BUSY;
} else if (view)
rc = view->fn->intv(view, rq, irb);
else
rc = RAW3270_IO_DONE;
switch (rc) {
case RAW3270_IO_DONE:
break;
case RAW3270_IO_BUSY:
/*
* Intervention required by the operator. We have to wait
* for unsolicited device end.
*/
return;
case RAW3270_IO_RETRY:
if (!rq)
break;
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
(unsigned long) rq, 0, 0);
if (rq->rc == 0)
return; /* Successfully restarted. */
break;
case RAW3270_IO_STOP:
if (!rq)
break;
raw3270_halt_io_nolock(rp, rq);
rq->rc = -EIO;
break;
default:
BUG();
}
if (rq) {
BUG_ON(list_empty(&rq->list));
/* The request completed, remove from queue and do callback. */
list_del_init(&rq->list);
if (rq->callback)
rq->callback(rq, rq->callback_data);
/* Do put_device for get_device in raw3270_start. */
raw3270_put_view(view);
}
/*
* Try to start each request on request queue until one is
* started successful.
*/
while (!list_empty(&rp->req_queue)) {
rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
(unsigned long) rq, 0, 0);
if (rq->rc == 0)
break;
/* Start failed. Remove request and do callback. */
list_del_init(&rq->list);
if (rq->callback)
rq->callback(rq, rq->callback_data);
/* Do put_device for get_device in raw3270_start. */
raw3270_put_view(view);
}
}
/*
* Size sensing.
*/
struct raw3270_ua { /* Query Reply structure for Usable Area */
struct { /* Usable Area Query Reply Base */
short l; /* Length of this structured field */
char sfid; /* 0x81 if Query Reply */
char qcode; /* 0x81 if Usable Area */
char flags0;
char flags1;
short w; /* Width of usable area */
short h; /* Height of usable area */
char units; /* 0x00:in; 0x01:mm */
int xr;
int yr;
char aw;
char ah;
short buffsz; /* Character buffer size, bytes */
char xmin;
char ymin;
char xmax;
char ymax;
} __attribute__ ((packed)) uab;
struct { /* Alternate Usable Area Self-Defining Parameter */
char l; /* Length of this Self-Defining Parm */
char sdpid; /* 0x02 if Alternate Usable Area */
char res;
char auaid; /* 0x01 is Id for the A U A */
short wauai; /* Width of AUAi */
short hauai; /* Height of AUAi */
char auaunits; /* 0x00:in, 0x01:mm */
int auaxr;
int auayr;
char awauai;
char ahauai;
} __attribute__ ((packed)) aua;
} __attribute__ ((packed));
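/*
* Illustrative note: __raw3270_size_device() below maps this query reply
* onto the device geometry -- rp->rows/cols come from uab.h/uab.w, the bit
* pattern (flags0 & 0x0d) == 0x01 enables 14-bit buffer addressing, and an
* Alternate Usable Area self-defining parameter (sdpid == 0x02) overrides
* the default size with aua.hauai/aua.wauai.
*/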
static struct diag210 raw3270_init_diag210;
static DEFINE_MUTEX(raw3270_init_mutex);
static int
raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
struct irb *irb)
{
/*
* Unit-Check Processing:
* Expect Command Reject or Intervention Required.
*/
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
/* Request finished abnormally. */
if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
return RAW3270_IO_BUSY;
}
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
rq->rc = -EOPNOTSUPP;
else
rq->rc = -EIO;
} else
/* Request finished normally. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
wake_up(&raw3270_wait_queue);
}
return RAW3270_IO_DONE;
}
static struct raw3270_fn raw3270_init_fn = {
.intv = raw3270_init_irq
};
static struct raw3270_view raw3270_init_view = {
.fn = &raw3270_init_fn
};
/*
* raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup
* Wait for end of request. The request must have been started
* with raw3270_start, rc = 0. The device lock may NOT have been
* released between calling raw3270_start and raw3270_wait.
*/
static void
raw3270_wake_init(struct raw3270_request *rq, void *data)
{
wake_up((wait_queue_head_t *) data);
}
/*
* Special wait function that can cope with console initialization.
*/
static int
raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
struct raw3270_request *rq)
{
unsigned long flags;
int rc;
#ifdef CONFIG_TN3270_CONSOLE
if (raw3270_registered == 0) {
spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
rq->callback = NULL;
rc = __raw3270_start(rp, view, rq);
if (rc == 0)
while (!raw3270_request_final(rq)) {
wait_cons_dev();
barrier();
}
spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
return rq->rc;
}
#endif
rq->callback = raw3270_wake_init;
rq->callback_data = &raw3270_wait_queue;
spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
rc = __raw3270_start(rp, view, rq);
spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
if (rc)
return rc;
/* Now wait for the completion. */
rc = wait_event_interruptible(raw3270_wait_queue,
raw3270_request_final(rq));
if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */
raw3270_halt_io(view->dev, rq);
/* Now wait for the halt to complete. */
wait_event(raw3270_wait_queue, raw3270_request_final(rq));
return -ERESTARTSYS;
}
return rq->rc;
}
static int
__raw3270_size_device_vm(struct raw3270 *rp)
{
int rc, model;
struct ccw_dev_id dev_id;
ccw_device_get_id(rp->cdev, &dev_id);
raw3270_init_diag210.vrdcdvno = dev_id.devno;
raw3270_init_diag210.vrdclen = sizeof(struct diag210);
rc = diag210(&raw3270_init_diag210);
if (rc)
return rc;
model = raw3270_init_diag210.vrdccrmd;
switch (model) {
case 2:
rp->model = model;
rp->rows = 24;
rp->cols = 80;
break;
case 3:
rp->model = model;
rp->rows = 32;
rp->cols = 80;
break;
case 4:
rp->model = model;
rp->rows = 43;
rp->cols = 80;
break;
case 5:
rp->model = model;
rp->rows = 27;
rp->cols = 132;
break;
default:
rc = -EOPNOTSUPP;
break;
}
return rc;
}
static int
__raw3270_size_device(struct raw3270 *rp)
{
static const unsigned char wbuf[] =
{ 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
struct raw3270_ua *uap;
int rc;
/*
* To determine the size of the 3270 device we need to do:
* 1) send a 'read partition' data stream to the device
* 2) wait for the attn interrupt that precedes the query reply
* 3) do a read modified to get the query reply
* To make things worse we have to cope with intervention
* required (3270 device switched to 'stand-by') and command
* rejects (old devices that can't do 'read partition').
*/
memset(&rp->init_request, 0, sizeof(rp->init_request));
memset(&rp->init_data, 0, 256);
/* Store 'read partition' data stream to init_data */
memcpy(&rp->init_data, wbuf, sizeof(wbuf));
INIT_LIST_HEAD(&rp->init_request.list);
rp->init_request.ccw.cmd_code = TC_WRITESF;
rp->init_request.ccw.flags = CCW_FLAG_SLI;
rp->init_request.ccw.count = sizeof(wbuf);
rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data);
rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
if (rc)
/* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
return rc;
/* Wait for attention interrupt. */
#ifdef CONFIG_TN3270_CONSOLE
if (raw3270_registered == 0) {
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags))
wait_cons_dev();
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
} else
#endif
rc = wait_event_interruptible(raw3270_wait_queue,
test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
if (rc)
return rc;
/*
* The device accepted the 'read partition' command. Now
* set up a read ccw and issue it.
*/
rp->init_request.ccw.cmd_code = TC_READMOD;
rp->init_request.ccw.flags = CCW_FLAG_SLI;
rp->init_request.ccw.count = sizeof(rp->init_data);
rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
if (rc)
return rc;
/* Got a Query Reply */
uap = (struct raw3270_ua *) (rp->init_data + 1);
/* Paranoia check. */
if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81)
return -EOPNOTSUPP;
/* Copy rows/columns of default Usable Area */
rp->rows = uap->uab.h;
rp->cols = uap->uab.w;
/* Check for 14 bit addressing */
if ((uap->uab.flags0 & 0x0d) == 0x01)
set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
/* Check for Alternate Usable Area */
if (uap->uab.l == sizeof(struct raw3270_ua) &&
uap->aua.sdpid == 0x02) {
rp->rows = uap->aua.hauai;
rp->cols = uap->aua.wauai;
}
return 0;
}
static int
raw3270_size_device(struct raw3270 *rp)
{
int rc;
mutex_lock(&raw3270_init_mutex);
rp->view = &raw3270_init_view;
raw3270_init_view.dev = rp;
if (MACHINE_IS_VM)
rc = __raw3270_size_device_vm(rp);
else
rc = __raw3270_size_device(rp);
raw3270_init_view.dev = NULL;
rp->view = NULL;
mutex_unlock(&raw3270_init_mutex);
if (rc == 0) { /* Found something. */
/* Try to find a model. */
rp->model = 0;
if (rp->rows == 24 && rp->cols == 80)
rp->model = 2;
if (rp->rows == 32 && rp->cols == 80)
rp->model = 3;
if (rp->rows == 43 && rp->cols == 80)
rp->model = 4;
if (rp->rows == 27 && rp->cols == 132)
rp->model = 5;
} else {
/* Couldn't detect size. Use default model 2. */
rp->model = 2;
rp->rows = 24;
rp->cols = 80;
return 0;
}
return rc;
}
static int
raw3270_reset_device(struct raw3270 *rp)
{
int rc;
mutex_lock(&raw3270_init_mutex);
memset(&rp->init_request, 0, sizeof(rp->init_request));
memset(&rp->init_data, 0, sizeof(rp->init_data));
/* Store reset data stream to init_data/init_request */
rp->init_data[0] = TW_KR;
INIT_LIST_HEAD(&rp->init_request.list);
rp->init_request.ccw.cmd_code = TC_EWRITEA;
rp->init_request.ccw.flags = CCW_FLAG_SLI;
rp->init_request.ccw.count = 1;
rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
rp->view = &raw3270_init_view;
raw3270_init_view.dev = rp;
rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
raw3270_init_view.dev = NULL;
rp->view = NULL;
mutex_unlock(&raw3270_init_mutex);
return rc;
}
int
raw3270_reset(struct raw3270_view *view)
{
struct raw3270 *rp;
int rc;
rp = view->dev;
if (!rp || rp->view != view ||
test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
rc = -ENODEV;
else
rc = raw3270_reset_device(view->dev);
return rc;
}
/*
* Setup new 3270 device.
*/
static int
raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
{
struct list_head *l;
struct raw3270 *tmp;
int minor;
memset(rp, 0, sizeof(struct raw3270));
/* Copy ebcdic -> ascii translation table. */
memcpy(ascebc, _ascebc, 256);
if (tubxcorrect) {
/* correct brackets and circumflex */
ascebc['['] = 0xad;
ascebc[']'] = 0xbd;
ascebc['^'] = 0xb0;
}
rp->ascebc = ascebc;
/* Set defaults. */
rp->rows = 24;
rp->cols = 80;
INIT_LIST_HEAD(&rp->req_queue);
INIT_LIST_HEAD(&rp->view_list);
/*
* Add device to list and find the smallest unused minor
* number for it. Note: there is no device with minor 0,
* see special case for fs3270.c:fs3270_open().
*/
mutex_lock(&raw3270_mutex);
/* Keep the list sorted. */
minor = RAW3270_FIRSTMINOR;
rp->minor = -1;
list_for_each(l, &raw3270_devices) {
tmp = list_entry(l, struct raw3270, list);
if (tmp->minor > minor) {
rp->minor = minor;
__list_add(&rp->list, l->prev, l);
break;
}
minor++;
}
if (rp->minor == -1 && minor < RAW3270_MAXDEVS + RAW3270_FIRSTMINOR) {
rp->minor = minor;
list_add_tail(&rp->list, &raw3270_devices);
}
mutex_unlock(&raw3270_mutex);
/* No free minor number? Then give up. */
if (rp->minor == -1)
return -EUSERS;
rp->cdev = cdev;
dev_set_drvdata(&cdev->dev, rp);
cdev->handler = raw3270_irq;
return 0;
}
#ifdef CONFIG_TN3270_CONSOLE
/*
* Setup 3270 device configured as console.
*/
struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
{
struct raw3270 *rp;
char *ascebc;
int rc;
rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
ascebc = kzalloc(256, GFP_KERNEL);
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc)
return ERR_PTR(rc);
set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
rc = raw3270_reset_device(rp);
if (rc)
return ERR_PTR(rc);
rc = raw3270_size_device(rp);
if (rc)
return ERR_PTR(rc);
rc = raw3270_reset_device(rp);
if (rc)
return ERR_PTR(rc);
set_bit(RAW3270_FLAGS_READY, &rp->flags);
return rp;
}
void
raw3270_wait_cons_dev(struct raw3270 *rp)
{
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
wait_cons_dev();
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
#endif
/*
* Create a 3270 device structure.
*/
static struct raw3270 *
raw3270_create_device(struct ccw_device *cdev)
{
struct raw3270 *rp;
char *ascebc;
int rc;
rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
if (!rp)
return ERR_PTR(-ENOMEM);
ascebc = kmalloc(256, GFP_KERNEL);
if (!ascebc) {
kfree(rp);
return ERR_PTR(-ENOMEM);
}
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc) {
kfree(rp->ascebc);
kfree(rp);
rp = ERR_PTR(rc);
}
/* Get reference to ccw_device structure. */
get_device(&cdev->dev);
return rp;
}
/*
* Activate a view.
*/
int
raw3270_activate_view(struct raw3270_view *view)
{
struct raw3270 *rp;
struct raw3270_view *oldview, *nv;
unsigned long flags;
int rc;
rp = view->dev;
if (!rp)
return -ENODEV;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
if (rp->view == view)
rc = 0;
else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
rc = -ENODEV;
else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
else {
oldview = NULL;
if (rp->view) {
oldview = rp->view;
oldview->fn->deactivate(oldview);
}
rp->view = view;
rc = view->fn->activate(view);
if (rc) {
/* Didn't work. Try to reactivate the old view. */
rp->view = oldview;
if (!oldview || oldview->fn->activate(oldview) != 0) {
/* Didn't work as well. Try any other view. */
list_for_each_entry(nv, &rp->view_list, list)
if (nv != view && nv != oldview) {
rp->view = nv;
if (nv->fn->activate(nv) == 0)
break;
rp->view = NULL;
}
}
}
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return rc;
}
/*
* Deactivate current view.
*/
void
raw3270_deactivate_view(struct raw3270_view *view)
{
unsigned long flags;
struct raw3270 *rp;
rp = view->dev;
if (!rp)
return;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
if (rp->view == view) {
view->fn->deactivate(view);
rp->view = NULL;
/* Move deactivated view to end of list. */
list_del_init(&view->list);
list_add_tail(&view->list, &rp->view_list);
/* Try to activate another view. */
if (test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
!test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
list_for_each_entry(view, &rp->view_list, list) {
rp->view = view;
if (view->fn->activate(view) == 0)
break;
rp->view = NULL;
}
}
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
/*
* Add view to device with minor "minor".
*/
int
raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
{
unsigned long flags;
struct raw3270 *rp;
int rc;
if (minor <= 0)
return -ENODEV;
mutex_lock(&raw3270_mutex);
rc = -ENODEV;
list_for_each_entry(rp, &raw3270_devices, list) {
if (rp->minor != minor)
continue;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
atomic_set(&view->ref_count, 2);
view->dev = rp;
view->fn = fn;
view->model = rp->model;
view->rows = rp->rows;
view->cols = rp->cols;
view->ascebc = rp->ascebc;
spin_lock_init(&view->lock);
list_add(&view->list, &rp->view_list);
rc = 0;
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
break;
}
mutex_unlock(&raw3270_mutex);
return rc;
}
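/*
* Illustrative sketch of how a higher-level driver (e.g. tty3270 or fs3270)
* hooks in: it fills a struct raw3270_fn with activate/deactivate/intv/
* release handlers, attaches with raw3270_add_view(view, &my_fn, minor),
* switches the display with raw3270_activate_view(view) and detaches with
* raw3270_del_view(view).  ("my_fn" is a placeholder name.)
*/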
/*
* Find specific view of device with minor "minor".
*/
struct raw3270_view *
raw3270_find_view(struct raw3270_fn *fn, int minor)
{
struct raw3270 *rp;
struct raw3270_view *view, *tmp;
unsigned long flags;
mutex_lock(&raw3270_mutex);
view = ERR_PTR(-ENODEV);
list_for_each_entry(rp, &raw3270_devices, list) {
if (rp->minor != minor)
continue;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
view = ERR_PTR(-ENOENT);
list_for_each_entry(tmp, &rp->view_list, list) {
if (tmp->fn == fn) {
raw3270_get_view(tmp);
view = tmp;
break;
}
}
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
break;
}
mutex_unlock(&raw3270_mutex);
return view;
}
/*
* Remove view from device and free view structure via call to view->fn->free.
*/
void
raw3270_del_view(struct raw3270_view *view)
{
unsigned long flags;
struct raw3270 *rp;
struct raw3270_view *nv;
rp = view->dev;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
if (rp->view == view) {
view->fn->deactivate(view);
rp->view = NULL;
}
list_del_init(&view->list);
if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
!test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
/* Try to activate another view. */
list_for_each_entry(nv, &rp->view_list, list) {
if (nv->fn->activate(nv) == 0) {
rp->view = nv;
break;
}
}
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
/* Wait for reference counter to drop to zero. */
atomic_dec(&view->ref_count);
wait_event(raw3270_wait_queue, atomic_read(&view->ref_count) == 0);
if (view->fn->free)
view->fn->free(view);
}
/*
* Remove a 3270 device structure.
*/
static void
raw3270_delete_device(struct raw3270 *rp)
{
struct ccw_device *cdev;
/* Remove from device chain. */
mutex_lock(&raw3270_mutex);
if (rp->clttydev && !IS_ERR(rp->clttydev))
device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
if (rp->cltubdev && !IS_ERR(rp->cltubdev))
device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor));
list_del_init(&rp->list);
mutex_unlock(&raw3270_mutex);
/* Disconnect from ccw_device. */
cdev = rp->cdev;
rp->cdev = NULL;
dev_set_drvdata(&cdev->dev, NULL);
cdev->handler = NULL;
/* Put ccw_device structure. */
put_device(&cdev->dev);
/* Now free raw3270 structure. */
kfree(rp->ascebc);
kfree(rp);
}
static int
raw3270_probe (struct ccw_device *cdev)
{
return 0;
}
/*
* Additional attributes for a 3270 device
*/
static ssize_t
raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%i\n",
((struct raw3270 *) dev_get_drvdata(dev))->model);
}
static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
static ssize_t
raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%i\n",
((struct raw3270 *) dev_get_drvdata(dev))->rows);
}
static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
static ssize_t
raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%i\n",
((struct raw3270 *) dev_get_drvdata(dev))->cols);
}
static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
static struct attribute * raw3270_attrs[] = {
&dev_attr_model.attr,
&dev_attr_rows.attr,
&dev_attr_columns.attr,
NULL,
};
static struct attribute_group raw3270_attr_group = {
.attrs = raw3270_attrs,
};
static int raw3270_create_attributes(struct raw3270 *rp)
{
int rc;
rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
if (rc)
goto out;
rp->clttydev = device_create(class3270, &rp->cdev->dev,
MKDEV(IBM_TTY3270_MAJOR, rp->minor), NULL,
"tty%s", dev_name(&rp->cdev->dev));
if (IS_ERR(rp->clttydev)) {
rc = PTR_ERR(rp->clttydev);
goto out_ttydev;
}
rp->cltubdev = device_create(class3270, &rp->cdev->dev,
MKDEV(IBM_FS3270_MAJOR, rp->minor), NULL,
"tub%s", dev_name(&rp->cdev->dev));
if (!IS_ERR(rp->cltubdev))
goto out;
rc = PTR_ERR(rp->cltubdev);
device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
out_ttydev:
sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
out:
return rc;
}
/*
* Notifier for device addition/removal
*/
struct raw3270_notifier {
struct list_head list;
void (*notifier)(int, int);
};
static LIST_HEAD(raw3270_notifier);
int raw3270_register_notifier(void (*notifier)(int, int))
{
struct raw3270_notifier *np;
struct raw3270 *rp;
np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL);
if (!np)
return -ENOMEM;
np->notifier = notifier;
mutex_lock(&raw3270_mutex);
list_add_tail(&np->list, &raw3270_notifier);
list_for_each_entry(rp, &raw3270_devices, list) {
get_device(&rp->cdev->dev);
notifier(rp->minor, 1);
}
mutex_unlock(&raw3270_mutex);
return 0;
}
void raw3270_unregister_notifier(void (*notifier)(int, int))
{
struct raw3270_notifier *np;
mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
if (np->notifier == notifier) {
list_del(&np->list);
kfree(np);
break;
}
mutex_unlock(&raw3270_mutex);
}
/*
* Set 3270 device online.
*/
static int
raw3270_set_online (struct ccw_device *cdev)
{
struct raw3270 *rp;
struct raw3270_notifier *np;
int rc;
rp = raw3270_create_device(cdev);
if (IS_ERR(rp))
return PTR_ERR(rp);
rc = raw3270_reset_device(rp);
if (rc)
goto failure;
rc = raw3270_size_device(rp);
if (rc)
goto failure;
rc = raw3270_reset_device(rp);
if (rc)
goto failure;
rc = raw3270_create_attributes(rp);
if (rc)
goto failure;
set_bit(RAW3270_FLAGS_READY, &rp->flags);
mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
np->notifier(rp->minor, 1);
mutex_unlock(&raw3270_mutex);
return 0;
failure:
raw3270_delete_device(rp);
return rc;
}
/*
* Remove 3270 device structure.
*/
static void
raw3270_remove (struct ccw_device *cdev)
{
unsigned long flags;
struct raw3270 *rp;
struct raw3270_view *v;
struct raw3270_notifier *np;
rp = dev_get_drvdata(&cdev->dev);
/*
* _remove is the opposite of _probe; it's probe that
* should set up rp. raw3270_remove gets entered for
* devices even if they haven't been varied online.
* Thus, rp may validly be NULL here.
*/
if (rp == NULL)
return;
clear_bit(RAW3270_FLAGS_READY, &rp->flags);
sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
/* Deactivate current view and remove all views. */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
if (rp->view) {
rp->view->fn->deactivate(rp->view);
rp->view = NULL;
}
while (!list_empty(&rp->view_list)) {
v = list_entry(rp->view_list.next, struct raw3270_view, list);
if (v->fn->release)
v->fn->release(v);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
raw3270_del_view(v);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
}
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
np->notifier(rp->minor, 0);
mutex_unlock(&raw3270_mutex);
/* Reset 3270 device. */
raw3270_reset_device(rp);
/* And finally remove it. */
raw3270_delete_device(rp);
}
/*
* Set 3270 device offline.
*/
static int
raw3270_set_offline (struct ccw_device *cdev)
{
struct raw3270 *rp;
rp = dev_get_drvdata(&cdev->dev);
if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
return -EBUSY;
raw3270_remove(cdev);
return 0;
}
static int raw3270_pm_stop(struct ccw_device *cdev)
{
struct raw3270 *rp;
struct raw3270_view *view;
unsigned long flags;
rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
if (rp->view)
rp->view->fn->deactivate(rp->view);
if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
/*
* Release tty and fullscreen for all non-console
* devices.
*/
list_for_each_entry(view, &rp->view_list, list) {
if (view->fn->release)
view->fn->release(view);
}
}
set_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return 0;
}
static int raw3270_pm_start(struct ccw_device *cdev)
{
struct raw3270 *rp;
unsigned long flags;
rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
if (rp->view)
rp->view->fn->activate(rp->view);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return 0;
}
void raw3270_pm_unfreeze(struct raw3270_view *view)
{
#ifdef CONFIG_TN3270_CONSOLE
struct raw3270 *rp;
rp = view->dev;
if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
ccw_device_force_console();
#endif
}
static struct ccw_device_id raw3270_id[] = {
{ CCW_DEVICE(0x3270, 0) },
{ CCW_DEVICE(0x3271, 0) },
{ CCW_DEVICE(0x3272, 0) },
{ CCW_DEVICE(0x3273, 0) },
{ CCW_DEVICE(0x3274, 0) },
{ CCW_DEVICE(0x3275, 0) },
{ CCW_DEVICE(0x3276, 0) },
{ CCW_DEVICE(0x3277, 0) },
{ CCW_DEVICE(0x3278, 0) },
{ CCW_DEVICE(0x3279, 0) },
{ CCW_DEVICE(0x3174, 0) },
{ /* end of list */ },
};
static struct ccw_driver raw3270_ccw_driver = {
.driver = {
.name = "3270",
.owner = THIS_MODULE,
},
.ids = raw3270_id,
.probe = &raw3270_probe,
.remove = &raw3270_remove,
.set_online = &raw3270_set_online,
.set_offline = &raw3270_set_offline,
.freeze = &raw3270_pm_stop,
.thaw = &raw3270_pm_start,
.restore = &raw3270_pm_start,
};
static int
raw3270_init(void)
{
struct raw3270 *rp;
int rc;
if (raw3270_registered)
return 0;
raw3270_registered = 1;
rc = ccw_driver_register(&raw3270_ccw_driver);
if (rc == 0) {
/* Create attributes for early (= console) device. */
mutex_lock(&raw3270_mutex);
class3270 = class_create(THIS_MODULE, "3270");
list_for_each_entry(rp, &raw3270_devices, list) {
get_device(&rp->cdev->dev);
raw3270_create_attributes(rp);
}
mutex_unlock(&raw3270_mutex);
}
return rc;
}
static void
raw3270_exit(void)
{
ccw_driver_unregister(&raw3270_ccw_driver);
class_destroy(class3270);
}
MODULE_LICENSE("GPL");
module_init(raw3270_init);
module_exit(raw3270_exit);
EXPORT_SYMBOL(raw3270_request_alloc);
EXPORT_SYMBOL(raw3270_request_free);
EXPORT_SYMBOL(raw3270_request_reset);
EXPORT_SYMBOL(raw3270_request_set_cmd);
EXPORT_SYMBOL(raw3270_request_add_data);
EXPORT_SYMBOL(raw3270_request_set_data);
EXPORT_SYMBOL(raw3270_request_set_idal);
EXPORT_SYMBOL(raw3270_buffer_address);
EXPORT_SYMBOL(raw3270_add_view);
EXPORT_SYMBOL(raw3270_del_view);
EXPORT_SYMBOL(raw3270_find_view);
EXPORT_SYMBOL(raw3270_activate_view);
EXPORT_SYMBOL(raw3270_deactivate_view);
EXPORT_SYMBOL(raw3270_start);
EXPORT_SYMBOL(raw3270_start_locked);
EXPORT_SYMBOL(raw3270_start_irq);
EXPORT_SYMBOL(raw3270_reset);
EXPORT_SYMBOL(raw3270_register_notifier);
EXPORT_SYMBOL(raw3270_unregister_notifier);
EXPORT_SYMBOL(raw3270_wait_queue);
| gpl-2.0 |
M1cha/android_kernel_xiaomi_aries | drivers/atm/solos-pci.c | 2769 | 35129 | /*
* Driver for the Solos PCI ADSL2+ card, designed to support Linux by
* Traverse Technologies -- http://www.traverse.com.au/
* Xrio Limited -- http://www.xrio.com/
*
*
* Copyright © 2008 Traverse Technologies
* Copyright © 2008 Intel Corporation
*
* Authors: Nathan Williams <nathan@traverse.com.au>
* David Woodhouse <dwmw2@infradead.org>
* Treker Chen <treker@xrio.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define DEBUG
#define VERBOSE_DEBUG
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/skbuff.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/firmware.h>
#include <linux/ctype.h>
#include <linux/swab.h>
#include <linux/slab.h>
#define VERSION "0.07"
#define PTAG "solos-pci"
#define CONFIG_RAM_SIZE 128
#define FLAGS_ADDR 0x7C
#define IRQ_EN_ADDR 0x78
#define FPGA_VER 0x74
#define IRQ_CLEAR 0x70
#define WRITE_FLASH 0x6C
#define PORTS 0x68
#define FLASH_BLOCK 0x64
#define FLASH_BUSY 0x60
#define FPGA_MODE 0x5C
#define FLASH_MODE 0x58
#define TX_DMA_ADDR(port) (0x40 + (4 * (port)))
#define RX_DMA_ADDR(port) (0x30 + (4 * (port)))
#define DATA_RAM_SIZE 32768
#define BUF_SIZE 2048
#define OLD_BUF_SIZE 4096 /* For FPGA versions <= 2*/
#define FPGA_PAGE 528 /* FPGA flash page size*/
#define SOLOS_PAGE 512 /* Solos flash page size*/
#define FPGA_BLOCK (FPGA_PAGE * 8) /* FPGA flash block size*/
#define SOLOS_BLOCK (SOLOS_PAGE * 8) /* Solos flash block size*/
#define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2)
#define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size))
#define FLASH_BUF ((card->buffers) + 4*(card->buffer_size)*2)
#define RX_DMA_SIZE 2048
#define FPGA_VERSION(a,b) (((a) << 8) + (b))
#define LEGACY_BUFFERS 2
#define DMA_SUPPORTED 4
static int reset = 0;
static int atmdebug = 0;
static int firmware_upgrade = 0;
static int fpga_upgrade = 0;
static int db_firmware_upgrade = 0;
static int db_fpga_upgrade = 0;
struct pkt_hdr {
__le16 size;
__le16 vpi;
__le16 vci;
__le16 type;
};
struct solos_skb_cb {
struct atm_vcc *vcc;
uint32_t dma_addr;
};
#define SKB_CB(skb) ((struct solos_skb_cb *)skb->cb)
#define PKT_DATA 0
#define PKT_COMMAND 1
#define PKT_POPEN 3
#define PKT_PCLOSE 4
#define PKT_STATUS 5
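/*
* Illustrative note: every buffer exchanged with the FPGA starts with a
* little-endian struct pkt_hdr.  A command such as the one built in
* solos_param_show() below is therefore laid out as
* { .size = payload length, .vpi = 0, .vci = 0, .type = PKT_COMMAND }
* followed by the ASCII command text.
*/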
struct solos_card {
void __iomem *config_regs;
void __iomem *buffers;
int nr_ports;
int tx_mask;
struct pci_dev *dev;
struct atm_dev *atmdev[4];
struct tasklet_struct tlet;
spinlock_t tx_lock;
spinlock_t tx_queue_lock;
spinlock_t cli_queue_lock;
spinlock_t param_queue_lock;
struct list_head param_queue;
struct sk_buff_head tx_queue[4];
struct sk_buff_head cli_queue[4];
struct sk_buff *tx_skb[4];
struct sk_buff *rx_skb[4];
wait_queue_head_t param_wq;
wait_queue_head_t fw_wq;
int using_dma;
int fpga_version;
int buffer_size;
};
struct solos_param {
struct list_head list;
pid_t pid;
int port;
struct sk_buff *response;
};
#define SOLOS_CHAN(atmdev) ((int)(unsigned long)(atmdev)->phy_data)
MODULE_AUTHOR("Traverse Technologies <support@traverse.com.au>");
MODULE_DESCRIPTION("Solos PCI driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("solos-FPGA.bin");
MODULE_FIRMWARE("solos-Firmware.bin");
MODULE_FIRMWARE("solos-db-FPGA.bin");
MODULE_PARM_DESC(reset, "Reset Solos chips on startup");
MODULE_PARM_DESC(atmdebug, "Print ATM data");
MODULE_PARM_DESC(firmware_upgrade, "Initiate Solos firmware upgrade");
MODULE_PARM_DESC(fpga_upgrade, "Initiate FPGA upgrade");
MODULE_PARM_DESC(db_firmware_upgrade, "Initiate daughter board Solos firmware upgrade");
MODULE_PARM_DESC(db_fpga_upgrade, "Initiate daughter board FPGA upgrade");
module_param(reset, int, 0444);
module_param(atmdebug, int, 0644);
module_param(firmware_upgrade, int, 0444);
module_param(fpga_upgrade, int, 0444);
module_param(db_firmware_upgrade, int, 0444);
module_param(db_fpga_upgrade, int, 0444);
static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
struct atm_vcc *vcc);
static uint32_t fpga_tx(struct solos_card *);
static irqreturn_t solos_irq(int irq, void *dev_id);
static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci);
static int list_vccs(int vci);
static int atm_init(struct solos_card *, struct device *);
static void atm_remove(struct solos_card *);
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size);
static void solos_bh(unsigned long);
static int print_buffer(struct sk_buff *buf);
static inline void solos_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
if (vcc->pop)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
}
static ssize_t solos_param_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
struct solos_card *card = atmdev->dev_data;
struct solos_param prm;
struct sk_buff *skb;
struct pkt_hdr *header;
int buflen;
buflen = strlen(attr->attr.name) + 10;
skb = alloc_skb(sizeof(*header) + buflen, GFP_KERNEL);
if (!skb) {
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in solos_param_show()\n");
return -ENOMEM;
}
header = (void *)skb_put(skb, sizeof(*header));
buflen = snprintf((void *)&header[1], buflen - 1,
"L%05d\n%s\n", current->pid, attr->attr.name);
skb_put(skb, buflen);
header->size = cpu_to_le16(buflen);
header->vpi = cpu_to_le16(0);
header->vci = cpu_to_le16(0);
header->type = cpu_to_le16(PKT_COMMAND);
prm.pid = current->pid;
prm.response = NULL;
prm.port = SOLOS_CHAN(atmdev);
spin_lock_irq(&card->param_queue_lock);
list_add(&prm.list, &card->param_queue);
spin_unlock_irq(&card->param_queue_lock);
fpga_queue(card, prm.port, skb, NULL);
wait_event_timeout(card->param_wq, prm.response, 5 * HZ);
spin_lock_irq(&card->param_queue_lock);
list_del(&prm.list);
spin_unlock_irq(&card->param_queue_lock);
if (!prm.response)
return -EIO;
buflen = prm.response->len;
memcpy(buf, prm.response->data, buflen);
kfree_skb(prm.response);
return buflen;
}
static ssize_t solos_param_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
struct solos_card *card = atmdev->dev_data;
struct solos_param prm;
struct sk_buff *skb;
struct pkt_hdr *header;
int buflen;
ssize_t ret;
buflen = strlen(attr->attr.name) + 11 + count;
skb = alloc_skb(sizeof(*header) + buflen, GFP_KERNEL);
if (!skb) {
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in solos_param_store()\n");
return -ENOMEM;
}
header = (void *)skb_put(skb, sizeof(*header));
buflen = snprintf((void *)&header[1], buflen - 1,
"L%05d\n%s\n%s\n", current->pid, attr->attr.name, buf);
skb_put(skb, buflen);
header->size = cpu_to_le16(buflen);
header->vpi = cpu_to_le16(0);
header->vci = cpu_to_le16(0);
header->type = cpu_to_le16(PKT_COMMAND);
prm.pid = current->pid;
prm.response = NULL;
prm.port = SOLOS_CHAN(atmdev);
spin_lock_irq(&card->param_queue_lock);
list_add(&prm.list, &card->param_queue);
spin_unlock_irq(&card->param_queue_lock);
fpga_queue(card, prm.port, skb, NULL);
wait_event_timeout(card->param_wq, prm.response, 5 * HZ);
spin_lock_irq(&card->param_queue_lock);
list_del(&prm.list);
spin_unlock_irq(&card->param_queue_lock);
skb = prm.response;
if (!skb)
return -EIO;
buflen = skb->len;
/* Sometimes it has a newline, sometimes it doesn't. */
if (skb->data[buflen - 1] == '\n')
buflen--;
if (buflen == 2 && !strncmp(skb->data, "OK", 2))
ret = count;
else if (buflen == 5 && !strncmp(skb->data, "ERROR", 5))
ret = -EIO;
else {
/* We know we have enough space allocated for this; we allocated
it ourselves */
skb->data[buflen] = 0;
dev_warn(&card->dev->dev, "Unexpected parameter response: '%s'\n",
skb->data);
ret = -EIO;
}
kfree_skb(skb);
return ret;
}
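/*
 * Pull the next '\n'-terminated string off the front of the skb,
 * NUL-terminating it in place. Returns NULL if a non-printable
 * character is found or the buffer contains no newline.
 */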
static char *next_string(struct sk_buff *skb)
{
int i = 0;
char *this = skb->data;
for (i = 0; i < skb->len; i++) {
if (this[i] == '\n') {
this[i] = 0;
skb_pull(skb, i + 1);
return this;
}
if (!isprint(this[i]))
return NULL;
}
return NULL;
}
/*
* Status packet has fields separated by \n, starting with a version number
* for the information therein. Fields are....
*
* packet version
* RxBitRate (version >= 1)
* TxBitRate (version >= 1)
* State (version >= 1)
* LocalSNRMargin (version >= 1)
* LocalLineAttn (version >= 1)
*/
static int process_status(struct solos_card *card, int port, struct sk_buff *skb)
{
char *str, *end, *state_str, *snr, *attn;
int ver, rate_up, rate_down;
if (!card->atmdev[port])
return -ENODEV;
str = next_string(skb);
if (!str)
return -EIO;
ver = simple_strtol(str, NULL, 10);
if (ver < 1) {
dev_warn(&card->dev->dev, "Unexpected status interrupt version %d\n",
ver);
return -EIO;
}
str = next_string(skb);
if (!str)
return -EIO;
if (!strcmp(str, "ERROR")) {
dev_dbg(&card->dev->dev, "Status packet indicated Solos error on port %d (starting up?)\n",
port);
return 0;
}
rate_down = simple_strtol(str, &end, 10);
if (*end)
return -EIO;
str = next_string(skb);
if (!str)
return -EIO;
rate_up = simple_strtol(str, &end, 10);
if (*end)
return -EIO;
state_str = next_string(skb);
if (!state_str)
return -EIO;
/* Anything but 'Showtime' is down */
if (strcmp(state_str, "Showtime")) {
atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_LOST);
dev_info(&card->dev->dev, "Port %d: %s\n", port, state_str);
return 0;
}
snr = next_string(skb);
if (!snr)
return -EIO;
attn = next_string(skb);
if (!attn)
return -EIO;
dev_info(&card->dev->dev, "Port %d: %s @%d/%d kb/s%s%s%s%s\n",
port, state_str, rate_down/1000, rate_up/1000,
snr[0]?", SNR ":"", snr, attn[0]?", Attn ":"", attn);
card->atmdev[port]->link_rate = rate_down / 424;
atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_FOUND);
return 0;
}
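/*
 * Match a command response (prefixed "Lnnnnn\n" with the requester's pid)
 * against the list of waiting solos_param requests for this port. On a
 * match the skb is handed to the waiter and the wait queue is woken.
 * Returns 1 if the response was claimed, 0 otherwise.
 */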
static int process_command(struct solos_card *card, int port, struct sk_buff *skb)
{
struct solos_param *prm;
unsigned long flags;
int cmdpid;
int found = 0;
if (skb->len < 7)
return 0;
if (skb->data[0] != 'L' || !isdigit(skb->data[1]) ||
!isdigit(skb->data[2]) || !isdigit(skb->data[3]) ||
!isdigit(skb->data[4]) || !isdigit(skb->data[5]) ||
skb->data[6] != '\n')
return 0;
cmdpid = simple_strtol(&skb->data[1], NULL, 10);
spin_lock_irqsave(&card->param_queue_lock, flags);
list_for_each_entry(prm, &card->param_queue, list) {
if (prm->port == port && prm->pid == cmdpid) {
prm->response = skb;
skb_pull(skb, 7);
wake_up(&card->param_wq);
found = 1;
break;
}
}
spin_unlock_irqrestore(&card->param_queue_lock, flags);
return found;
}
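/*
 * sysfs 'console' read: return one queued CLI response for this port,
 * or "No data." if the queue is empty.
 */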
static ssize_t console_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
struct solos_card *card = atmdev->dev_data;
struct sk_buff *skb;
unsigned int len;
spin_lock(&card->cli_queue_lock);
skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
spin_unlock(&card->cli_queue_lock);
if(skb == NULL)
return sprintf(buf, "No data.\n");
len = skb->len;
memcpy(buf, skb->data, len);
dev_dbg(&card->dev->dev, "len: %d\n", len);
kfree_skb(skb);
return len;
}
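/*
 * Wrap a command string in a PKT_COMMAND header and queue it for
 * transmission to the given port.
 */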
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size)
{
struct sk_buff *skb;
struct pkt_hdr *header;
if (size > (BUF_SIZE - sizeof(*header))) {
dev_dbg(&card->dev->dev, "Command is too big. Dropping request\n");
return 0;
}
skb = alloc_skb(size + sizeof(*header), GFP_ATOMIC);
if (!skb) {
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in send_command()\n");
return 0;
}
header = (void *)skb_put(skb, sizeof(*header));
header->size = cpu_to_le16(size);
header->vpi = cpu_to_le16(0);
header->vci = cpu_to_le16(0);
header->type = cpu_to_le16(PKT_COMMAND);
memcpy(skb_put(skb, size), buf, size);
fpga_queue(card, dev, skb, NULL);
return 0;
}
static ssize_t console_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
struct solos_card *card = atmdev->dev_data;
int err;
err = send_command(card, SOLOS_CHAN(atmdev), buf, count);
return err?:count;
}
static DEVICE_ATTR(console, 0644, console_show, console_store);
#define SOLOS_ATTR_RO(x) static DEVICE_ATTR(x, 0444, solos_param_show, NULL);
#define SOLOS_ATTR_RW(x) static DEVICE_ATTR(x, 0644, solos_param_show, solos_param_store);
#include "solos-attrlist.c"
#undef SOLOS_ATTR_RO
#undef SOLOS_ATTR_RW
#define SOLOS_ATTR_RO(x) &dev_attr_##x.attr,
#define SOLOS_ATTR_RW(x) &dev_attr_##x.attr,
static struct attribute *solos_attrs[] = {
#include "solos-attrlist.c"
NULL
};
static struct attribute_group solos_attr_group = {
.attrs = solos_attrs,
.name = "parameters",
};
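/*
 * Upgrade the FPGA or Solos firmware (main board or daughter board) from
 * a request_firmware() image: switch the FPGA into update mode, erase the
 * selected chip, then write the image one flash block at a time through
 * the shared buffer (swapping each 16 bits), waiting for FLASH_BUSY to
 * clear after each block.
 */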
static int flash_upgrade(struct solos_card *card, int chip)
{
const struct firmware *fw;
const char *fw_name;
int blocksize = 0;
int numblocks = 0;
int offset;
switch (chip) {
case 0:
fw_name = "solos-FPGA.bin";
blocksize = FPGA_BLOCK;
break;
case 1:
fw_name = "solos-Firmware.bin";
blocksize = SOLOS_BLOCK;
break;
case 2:
if (card->fpga_version > LEGACY_BUFFERS){
fw_name = "solos-db-FPGA.bin";
blocksize = FPGA_BLOCK;
} else {
dev_info(&card->dev->dev, "FPGA version doesn't support"
" daughter board upgrades\n");
return -EPERM;
}
break;
case 3:
if (card->fpga_version > LEGACY_BUFFERS){
fw_name = "solos-Firmware.bin";
blocksize = SOLOS_BLOCK;
} else {
dev_info(&card->dev->dev, "FPGA version doesn't support"
" daughter board upgrades\n");
return -EPERM;
}
break;
default:
return -ENODEV;
}
if (request_firmware(&fw, fw_name, &card->dev->dev))
return -ENOENT;
dev_info(&card->dev->dev, "Flash upgrade starting\n");
numblocks = fw->size / blocksize;
dev_info(&card->dev->dev, "Firmware size: %zd\n", fw->size);
dev_info(&card->dev->dev, "Number of blocks: %d\n", numblocks);
dev_info(&card->dev->dev, "Changing FPGA to Update mode\n");
iowrite32(1, card->config_regs + FPGA_MODE);
(void) ioread32(card->config_regs + FPGA_MODE);
/* Set mode to Chip Erase */
if(chip == 0 || chip == 2)
dev_info(&card->dev->dev, "Set FPGA Flash mode to FPGA Chip Erase\n");
if(chip == 1 || chip == 3)
dev_info(&card->dev->dev, "Set FPGA Flash mode to Solos Chip Erase\n");
iowrite32((chip * 2), card->config_regs + FLASH_MODE);
iowrite32(1, card->config_regs + WRITE_FLASH);
wait_event(card->fw_wq, !ioread32(card->config_regs + FLASH_BUSY));
for (offset = 0; offset < fw->size; offset += blocksize) {
int i;
/* Clear write flag */
iowrite32(0, card->config_regs + WRITE_FLASH);
/* Set mode to Block Write */
/* dev_info(&card->dev->dev, "Set FPGA Flash mode to Block Write\n"); */
iowrite32(((chip * 2) + 1), card->config_regs + FLASH_MODE);
/* Copy block to buffer, swapping each 16 bits */
for(i = 0; i < blocksize; i += 4) {
uint32_t word = swahb32p((uint32_t *)(fw->data + offset + i));
if(card->fpga_version > LEGACY_BUFFERS)
iowrite32(word, FLASH_BUF + i);
else
iowrite32(word, RX_BUF(card, 3) + i);
}
/* Specify block number and then trigger flash write */
iowrite32(offset / blocksize, card->config_regs + FLASH_BLOCK);
iowrite32(1, card->config_regs + WRITE_FLASH);
wait_event(card->fw_wq, !ioread32(card->config_regs + FLASH_BUSY));
}
release_firmware(fw);
iowrite32(0, card->config_regs + WRITE_FLASH);
iowrite32(0, card->config_regs + FPGA_MODE);
iowrite32(0, card->config_regs + FLASH_MODE);
dev_info(&card->dev->dev, "Returning FPGA to Data mode\n");
return 0;
}
static irqreturn_t solos_irq(int irq, void *dev_id)
{
struct solos_card *card = dev_id;
int handled = 1;
iowrite32(0, card->config_regs + IRQ_CLEAR);
/* If we're up and running, just kick the tasklet to process TX/RX */
if (card->atmdev[0])
tasklet_schedule(&card->tlet);
else
wake_up(&card->fw_wq);
return IRQ_RETVAL(handled);
}
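/*
 * Tasklet body: kick the transmit path via fpga_tx(), then for each port
 * whose RX flag is set pull in the received packet (by DMA completion or
 * MMIO copy) and dispatch it by type -- data to the matching VCC, status
 * to process_status(), and anything else to process_command() or the
 * per-port console queue. Also replenishes RX skbs when using DMA.
 */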
static void solos_bh(unsigned long card_arg)
{
struct solos_card *card = (void *)card_arg;
uint32_t card_flags;
uint32_t rx_done = 0;
int port;
/*
* Since fpga_tx() is going to need to read the flags under its lock,
* it can return them to us so that we don't have to hit PCI MMIO
* again for the same information
*/
card_flags = fpga_tx(card);
for (port = 0; port < card->nr_ports; port++) {
if (card_flags & (0x10 << port)) {
struct pkt_hdr _hdr, *header;
struct sk_buff *skb;
struct atm_vcc *vcc;
int size;
if (card->using_dma) {
skb = card->rx_skb[port];
card->rx_skb[port] = NULL;
pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
header = (void *)skb->data;
size = le16_to_cpu(header->size);
skb_put(skb, size + sizeof(*header));
skb_pull(skb, sizeof(*header));
} else {
header = &_hdr;
rx_done |= 0x10 << port;
memcpy_fromio(header, RX_BUF(card, port), sizeof(*header));
size = le16_to_cpu(header->size);
if (size > (card->buffer_size - sizeof(*header))){
dev_warn(&card->dev->dev, "Invalid buffer size\n");
continue;
}
skb = alloc_skb(size + 1, GFP_ATOMIC);
if (!skb) {
if (net_ratelimit())
dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n");
continue;
}
memcpy_fromio(skb_put(skb, size),
RX_BUF(card, port) + sizeof(*header),
size);
}
if (atmdebug) {
dev_info(&card->dev->dev, "Received: port %d\n", port);
dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n",
size, le16_to_cpu(header->vpi),
le16_to_cpu(header->vci));
print_buffer(skb);
}
switch (le16_to_cpu(header->type)) {
case PKT_DATA:
vcc = find_vcc(card->atmdev[port], le16_to_cpu(header->vpi),
le16_to_cpu(header->vci));
if (!vcc) {
if (net_ratelimit())
dev_warn(&card->dev->dev, "Received packet for unknown VPI.VCI %d.%d on port %d\n",
le16_to_cpu(header->vpi), le16_to_cpu(header->vci),
port);
continue;
}
atm_charge(vcc, skb->truesize);
vcc->push(vcc, skb);
atomic_inc(&vcc->stats->rx);
break;
case PKT_STATUS:
if (process_status(card, port, skb) &&
net_ratelimit()) {
dev_warn(&card->dev->dev, "Bad status packet of %d bytes on port %d:\n", skb->len, port);
print_buffer(skb);
}
dev_kfree_skb_any(skb);
break;
case PKT_COMMAND:
default: /* FIXME: Not really, surely? */
if (process_command(card, port, skb))
break;
spin_lock(&card->cli_queue_lock);
if (skb_queue_len(&card->cli_queue[port]) > 10) {
if (net_ratelimit())
dev_warn(&card->dev->dev, "Dropping console response on port %d\n",
port);
dev_kfree_skb_any(skb);
} else
skb_queue_tail(&card->cli_queue[port], skb);
spin_unlock(&card->cli_queue_lock);
break;
}
}
/* Allocate RX skbs for any ports which need them */
if (card->using_dma && card->atmdev[port] &&
!card->rx_skb[port]) {
struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC);
if (skb) {
SKB_CB(skb)->dma_addr =
pci_map_single(card->dev, skb->data,
RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + RX_DMA_ADDR(port));
card->rx_skb[port] = skb;
} else {
if (net_ratelimit())
dev_warn(&card->dev->dev, "Failed to allocate RX skb");
/* We'll have to try again later */
tasklet_schedule(&card->tlet);
}
}
}
if (rx_done)
iowrite32(rx_done, card->config_regs + FLAGS_ADDR);
return;
}
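/*
 * Look up an open, ready, RX-capable VCC matching the given device and
 * VPI/VCI in the global VCC hash table.
 */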
static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
{
struct hlist_head *head;
struct atm_vcc *vcc = NULL;
struct hlist_node *node;
struct sock *s;
read_lock(&vcc_sklist_lock);
head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
sk_for_each(s, node, head) {
vcc = atm_sk(s);
if (vcc->dev == dev && vcc->vci == vci &&
vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&
test_bit(ATM_VF_READY, &vcc->flags))
goto out;
}
vcc = NULL;
out:
read_unlock(&vcc_sklist_lock);
return vcc;
}
static int list_vccs(int vci)
{
struct hlist_head *head;
struct atm_vcc *vcc;
struct hlist_node *node;
struct sock *s;
int num_found = 0;
int i;
read_lock(&vcc_sklist_lock);
if (vci != 0){
head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
sk_for_each(s, node, head) {
num_found ++;
vcc = atm_sk(s);
printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n",
vcc->dev->number,
vcc->vpi,
vcc->vci);
}
} else {
for(i = 0; i < VCC_HTABLE_SIZE; i++){
head = &vcc_hash[i];
sk_for_each(s, node, head) {
num_found ++;
vcc = atm_sk(s);
printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n",
vcc->dev->number,
vcc->vpi,
vcc->vci);
}
}
}
read_unlock(&vcc_sklist_lock);
return num_found;
}
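/*
 * ATM open: only AAL5 is supported. Sends a PKT_POPEN header to the card
 * for this VPI/VCI and marks the VCC as addressed and ready.
 */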
static int popen(struct atm_vcc *vcc)
{
struct solos_card *card = vcc->dev->dev_data;
struct sk_buff *skb;
struct pkt_hdr *header;
if (vcc->qos.aal != ATM_AAL5) {
dev_warn(&card->dev->dev, "Unsupported ATM type %d\n",
vcc->qos.aal);
return -EINVAL;
}
skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
if (!skb) {
if (net_ratelimit())
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
return -ENOMEM;
}
header = (void *)skb_put(skb, sizeof(*header));
header->size = cpu_to_le16(0);
header->vpi = cpu_to_le16(vcc->vpi);
header->vci = cpu_to_le16(vcc->vci);
header->type = cpu_to_le16(PKT_POPEN);
fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL);
set_bit(ATM_VF_ADDR, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
list_vccs(0);
return 0;
}
static void pclose(struct atm_vcc *vcc)
{
struct solos_card *card = vcc->dev->dev_data;
struct sk_buff *skb;
struct pkt_hdr *header;
skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
if (!skb) {
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in pclose()\n");
return;
}
header = (void *)skb_put(skb, sizeof(*header));
header->size = cpu_to_le16(0);
header->vpi = cpu_to_le16(vcc->vpi);
header->vci = cpu_to_le16(vcc->vci);
header->type = cpu_to_le16(PKT_PCLOSE);
fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL);
clear_bit(ATM_VF_ADDR, &vcc->flags);
clear_bit(ATM_VF_READY, &vcc->flags);
/* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the
tasklet has finished processing any incoming packets (and, more to
the point, using the vcc pointer). */
tasklet_unlock_wait(&card->tlet);
return;
}
static int print_buffer(struct sk_buff *buf)
{
int len,i;
char msg[500];
char item[10];
len = buf->len;
for (i = 0; i < len; i++){
if(i % 8 == 0)
sprintf(msg, "%02X: ", i);
sprintf(item,"%02X ",*(buf->data + i));
strcat(msg, item);
if(i % 8 == 7) {
sprintf(item, "\n");
strcat(msg, item);
printk(KERN_DEBUG "%s", msg);
}
}
if (i % 8 != 0) {
sprintf(item, "\n");
strcat(msg, item);
printk(KERN_DEBUG "%s", msg);
}
printk(KERN_DEBUG "\n");
return 0;
}
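/*
 * Append an skb to the per-port TX queue. If the queue was previously
 * empty, set the port's bit in tx_mask and start transmission at once.
 */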
static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
struct atm_vcc *vcc)
{
int old_len;
unsigned long flags;
SKB_CB(skb)->vcc = vcc;
spin_lock_irqsave(&card->tx_queue_lock, flags);
old_len = skb_queue_len(&card->tx_queue[port]);
skb_queue_tail(&card->tx_queue[port], skb);
if (!old_len)
card->tx_mask |= (1 << port);
spin_unlock_irqrestore(&card->tx_queue_lock, flags);
/* Theoretically we could just schedule the tasklet here, but
that introduces latency we don't want -- it's noticeable */
if (!old_len)
fpga_tx(card);
}
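/*
 * Push queued skbs out on every port whose TX buffer is free according to
 * the FLAGS register, either by MMIO copy or by programming the TX DMA
 * address, and complete the skb that previously occupied the buffer.
 * Returns the FLAGS value that was read so the caller can reuse it for RX.
 */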
static uint32_t fpga_tx(struct solos_card *card)
{
uint32_t tx_pending, card_flags;
uint32_t tx_started = 0;
struct sk_buff *skb;
struct atm_vcc *vcc;
unsigned char port;
unsigned long flags;
spin_lock_irqsave(&card->tx_lock, flags);
card_flags = ioread32(card->config_regs + FLAGS_ADDR);
/*
* The queue lock is required for _writing_ to tx_mask, but we're
* OK to read it here without locking. The only potential update
* that we could race with is in fpga_queue() where it sets a bit
* for a new port... but it's going to call this function again if
* it's doing that, anyway.
*/
tx_pending = card->tx_mask & ~card_flags;
for (port = 0; tx_pending; tx_pending >>= 1, port++) {
if (tx_pending & 1) {
struct sk_buff *oldskb = card->tx_skb[port];
if (oldskb)
pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr,
oldskb->len, PCI_DMA_TODEVICE);
spin_lock(&card->tx_queue_lock);
skb = skb_dequeue(&card->tx_queue[port]);
if (!skb)
card->tx_mask &= ~(1 << port);
spin_unlock(&card->tx_queue_lock);
if (skb && !card->using_dma) {
memcpy_toio(TX_BUF(card, port), skb->data, skb->len);
tx_started |= 1 << port;
oldskb = skb; /* We're done with this skb already */
} else if (skb && card->using_dma) {
SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
skb->len, PCI_DMA_TODEVICE);
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + TX_DMA_ADDR(port));
}
if (!oldskb)
continue;
/* Clean up and free oldskb now it's gone */
if (atmdebug) {
struct pkt_hdr *header = (void *)oldskb->data;
int size = le16_to_cpu(header->size);
skb_pull(oldskb, sizeof(*header));
dev_info(&card->dev->dev, "Transmitted: port %d\n",
port);
dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n",
size, le16_to_cpu(header->vpi),
le16_to_cpu(header->vci));
print_buffer(oldskb);
}
vcc = SKB_CB(oldskb)->vcc;
if (vcc) {
atomic_inc(&vcc->stats->tx);
solos_pop(vcc, oldskb);
} else
dev_kfree_skb_irq(oldskb);
}
}
/* For non-DMA TX, write the 'TX start' bit for all four ports simultaneously */
if (tx_started)
iowrite32(tx_started, card->config_regs + FLAGS_ADDR);
spin_unlock_irqrestore(&card->tx_lock, flags);
return card_flags;
}
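/*
 * ATM send: prepend a PKT_DATA header (expanding headroom first if the skb
 * is not writable) and queue the PDU on the VCC's port.
 */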
static int psend(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct solos_card *card = vcc->dev->dev_data;
struct pkt_hdr *header;
int pktlen;
pktlen = skb->len;
if (pktlen > (BUF_SIZE - sizeof(*header))) {
dev_warn(&card->dev->dev, "Length of PDU is too large. Dropping PDU.\n");
solos_pop(vcc, skb);
return 0;
}
if (!skb_clone_writable(skb, sizeof(*header))) {
int expand_by = 0;
int ret;
if (skb_headroom(skb) < sizeof(*header))
expand_by = sizeof(*header) - skb_headroom(skb);
ret = pskb_expand_head(skb, expand_by, 0, GFP_ATOMIC);
if (ret) {
dev_warn(&card->dev->dev, "pskb_expand_head failed.\n");
solos_pop(vcc, skb);
return ret;
}
}
header = (void *)skb_push(skb, sizeof(*header));
/* This does _not_ include the size of the header */
header->size = cpu_to_le16(pktlen);
header->vpi = cpu_to_le16(vcc->vpi);
header->vci = cpu_to_le16(vcc->vci);
header->type = cpu_to_le16(PKT_DATA);
fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, vcc);
return 0;
}
static struct atmdev_ops fpga_ops = {
.open = popen,
.close = pclose,
.ioctl = NULL,
.getsockopt = NULL,
.setsockopt = NULL,
.send = psend,
.send_oam = NULL,
.phy_put = NULL,
.phy_get = NULL,
.change_qos = NULL,
.proc_read = NULL,
.owner = THIS_MODULE
};
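/*
 * PCI probe: map the config and buffer BARs, read the FPGA version to
 * select buffer size and DMA capability, set up locks and the tasklet,
 * hook the shared interrupt, run any flash upgrades requested via module
 * parameters, and finally register the per-port ATM devices.
 */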
static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int err;
uint16_t fpga_ver;
uint8_t major_ver, minor_ver;
uint32_t data32;
struct solos_card *card;
card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
card->dev = dev;
init_waitqueue_head(&card->fw_wq);
init_waitqueue_head(&card->param_wq);
err = pci_enable_device(dev);
if (err) {
dev_warn(&dev->dev, "Failed to enable PCI device\n");
goto out;
}
err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
if (err) {
dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n");
goto out;
}
err = pci_request_regions(dev, "solos");
if (err) {
dev_warn(&dev->dev, "Failed to request regions\n");
goto out;
}
card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE);
if (!card->config_regs) {
dev_warn(&dev->dev, "Failed to ioremap config registers\n");
goto out_release_regions;
}
card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE);
if (!card->buffers) {
dev_warn(&dev->dev, "Failed to ioremap data buffers\n");
goto out_unmap_config;
}
if (reset) {
iowrite32(1, card->config_regs + FPGA_MODE);
data32 = ioread32(card->config_regs + FPGA_MODE);
iowrite32(0, card->config_regs + FPGA_MODE);
data32 = ioread32(card->config_regs + FPGA_MODE);
}
data32 = ioread32(card->config_regs + FPGA_VER);
fpga_ver = (data32 & 0x0000FFFF);
major_ver = ((data32 & 0xFF000000) >> 24);
minor_ver = ((data32 & 0x00FF0000) >> 16);
card->fpga_version = FPGA_VERSION(major_ver,minor_ver);
if (card->fpga_version > LEGACY_BUFFERS)
card->buffer_size = BUF_SIZE;
else
card->buffer_size = OLD_BUF_SIZE;
dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
major_ver, minor_ver, fpga_ver);
if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade ||
db_fpga_upgrade || db_firmware_upgrade)) {
dev_warn(&dev->dev,
"FPGA too old; cannot upgrade flash. Use JTAG.\n");
fpga_upgrade = firmware_upgrade = 0;
db_fpga_upgrade = db_firmware_upgrade = 0;
}
if (card->fpga_version >= DMA_SUPPORTED){
card->using_dma = 1;
} else {
card->using_dma = 0;
/* Set RX empty flag for all ports */
iowrite32(0xF0, card->config_regs + FLAGS_ADDR);
}
data32 = ioread32(card->config_regs + PORTS);
card->nr_ports = (data32 & 0x000000FF);
pci_set_drvdata(dev, card);
tasklet_init(&card->tlet, solos_bh, (unsigned long)card);
spin_lock_init(&card->tx_lock);
spin_lock_init(&card->tx_queue_lock);
spin_lock_init(&card->cli_queue_lock);
spin_lock_init(&card->param_queue_lock);
INIT_LIST_HEAD(&card->param_queue);
err = request_irq(dev->irq, solos_irq, IRQF_SHARED,
"solos-pci", card);
if (err) {
dev_dbg(&card->dev->dev, "Failed to request interrupt IRQ: %d\n", dev->irq);
goto out_unmap_both;
}
iowrite32(1, card->config_regs + IRQ_EN_ADDR);
if (fpga_upgrade)
flash_upgrade(card, 0);
if (firmware_upgrade)
flash_upgrade(card, 1);
if (db_fpga_upgrade)
flash_upgrade(card, 2);
if (db_firmware_upgrade)
flash_upgrade(card, 3);
err = atm_init(card, &dev->dev);
if (err)
goto out_free_irq;
return 0;
out_free_irq:
iowrite32(0, card->config_regs + IRQ_EN_ADDR);
free_irq(dev->irq, card);
tasklet_kill(&card->tlet);
out_unmap_both:
pci_set_drvdata(dev, NULL);
pci_iounmap(dev, card->buffers);
out_unmap_config:
pci_iounmap(dev, card->config_regs);
out_release_regions:
pci_release_regions(dev);
out:
kfree(card);
return err;
}
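/*
 * Register one ATM device per port, create its console attribute and
 * parameter sysfs group, and queue an initial PKT_STATUS request so the
 * link state is reported as soon as the card answers.
 */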
static int atm_init(struct solos_card *card, struct device *parent)
{
int i;
for (i = 0; i < card->nr_ports; i++) {
struct sk_buff *skb;
struct pkt_hdr *header;
skb_queue_head_init(&card->tx_queue[i]);
skb_queue_head_init(&card->cli_queue[i]);
card->atmdev[i] = atm_dev_register("solos-pci", parent, &fpga_ops, -1, NULL);
if (!card->atmdev[i]) {
dev_err(&card->dev->dev, "Could not register ATM device %d\n", i);
atm_remove(card);
return -ENODEV;
}
if (device_create_file(&card->atmdev[i]->class_dev, &dev_attr_console))
dev_err(&card->dev->dev, "Could not register console for ATM device %d\n", i);
if (sysfs_create_group(&card->atmdev[i]->class_dev.kobj, &solos_attr_group))
dev_err(&card->dev->dev, "Could not register parameter group for ATM device %d\n", i);
dev_info(&card->dev->dev, "Registered ATM device %d\n", card->atmdev[i]->number);
card->atmdev[i]->ci_range.vpi_bits = 8;
card->atmdev[i]->ci_range.vci_bits = 16;
card->atmdev[i]->dev_data = card;
card->atmdev[i]->phy_data = (void *)(unsigned long)i;
atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND);
skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
if (!skb) {
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in atm_init()\n");
continue;
}
header = (void *)skb_put(skb, sizeof(*header));
header->size = cpu_to_le16(0);
header->vpi = cpu_to_le16(0);
header->vci = cpu_to_le16(0);
header->type = cpu_to_le16(PKT_STATUS);
fpga_queue(card, i, skb, NULL);
}
return 0;
}
static void atm_remove(struct solos_card *card)
{
int i;
for (i = 0; i < card->nr_ports; i++) {
if (card->atmdev[i]) {
struct sk_buff *skb;
dev_info(&card->dev->dev, "Unregistering ATM device %d\n", card->atmdev[i]->number);
sysfs_remove_group(&card->atmdev[i]->class_dev.kobj, &solos_attr_group);
atm_dev_deregister(card->atmdev[i]);
skb = card->rx_skb[i];
if (skb) {
pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
}
skb = card->tx_skb[i];
if (skb) {
pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
}
while ((skb = skb_dequeue(&card->tx_queue[i])))
dev_kfree_skb(skb);
}
}
}
static void fpga_remove(struct pci_dev *dev)
{
struct solos_card *card = pci_get_drvdata(dev);
/* Disable IRQs */
iowrite32(0, card->config_regs + IRQ_EN_ADDR);
/* Reset FPGA */
iowrite32(1, card->config_regs + FPGA_MODE);
(void)ioread32(card->config_regs + FPGA_MODE);
atm_remove(card);
free_irq(dev->irq, card);
tasklet_kill(&card->tlet);
/* Release device from reset */
iowrite32(0, card->config_regs + FPGA_MODE);
(void)ioread32(card->config_regs + FPGA_MODE);
pci_iounmap(dev, card->buffers);
pci_iounmap(dev, card->config_regs);
pci_release_regions(dev);
pci_disable_device(dev);
pci_set_drvdata(dev, NULL);
kfree(card);
}
static struct pci_device_id fpga_pci_tbl[] __devinitdata = {
{ 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci,fpga_pci_tbl);
static struct pci_driver fpga_driver = {
.name = "solos",
.id_table = fpga_pci_tbl,
.probe = fpga_probe,
.remove = fpga_remove,
};
static int __init solos_pci_init(void)
{
printk(KERN_INFO "Solos PCI Driver Version %s\n", VERSION);
return pci_register_driver(&fpga_driver);
}
static void __exit solos_pci_exit(void)
{
pci_unregister_driver(&fpga_driver);
printk(KERN_INFO "Solos PCI Driver %s Unloaded\n", VERSION);
}
module_init(solos_pci_init);
module_exit(solos_pci_exit);
| gpl-2.0 |
hallovveen31/ICED_COLD_Hercules_JB_Kernel | drivers/platform/x86/intel_mid_powerbtn.c | 3025 | 3343 | /*
* Power button driver for Medfield.
*
* Copyright (C) 2010 Intel Corp
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <asm/intel_scu_ipc.h>
#define DRIVER_NAME "msic_power_btn"
#define MSIC_PB_STATUS 0x3f
#define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */
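/*
 * Threaded interrupt handler: read the MSIC power button status register
 * over SCU IPC and report the KEY_POWER state to the input layer.
 */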
static irqreturn_t mfld_pb_isr(int irq, void *dev_id)
{
struct input_dev *input = dev_id;
int ret;
u8 pbstat;
ret = intel_scu_ipc_ioread8(MSIC_PB_STATUS, &pbstat);
if (ret < 0) {
dev_err(input->dev.parent, "Read error %d while reading"
" MSIC_PB_STATUS\n", ret);
} else {
input_event(input, EV_KEY, KEY_POWER,
!(pbstat & MSIC_PB_LEVEL));
input_sync(input);
}
return IRQ_HANDLED;
}
static int __devinit mfld_pb_probe(struct platform_device *pdev)
{
struct input_dev *input;
int irq = platform_get_irq(pdev, 0);
int error;
if (irq < 0)
return -EINVAL;
input = input_allocate_device();
if (!input) {
dev_err(&pdev->dev, "Input device allocation error\n");
return -ENOMEM;
}
input->name = pdev->name;
input->phys = "power-button/input0";
input->id.bustype = BUS_HOST;
input->dev.parent = &pdev->dev;
input_set_capability(input, EV_KEY, KEY_POWER);
error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
DRIVER_NAME, input);
if (error) {
dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
"button\n", irq);
goto err_free_input;
}
error = input_register_device(input);
if (error) {
dev_err(&pdev->dev, "Unable to register input dev, error "
"%d\n", error);
goto err_free_irq;
}
platform_set_drvdata(pdev, input);
return 0;
err_free_irq:
free_irq(irq, input);
err_free_input:
input_free_device(input);
return error;
}
static int __devexit mfld_pb_remove(struct platform_device *pdev)
{
struct input_dev *input = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
free_irq(irq, input);
input_unregister_device(input);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver mfld_pb_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
.probe = mfld_pb_probe,
.remove = __devexit_p(mfld_pb_remove),
};
static int __init mfld_pb_init(void)
{
return platform_driver_register(&mfld_pb_driver);
}
module_init(mfld_pb_init);
static void __exit mfld_pb_exit(void)
{
platform_driver_unregister(&mfld_pb_driver);
}
module_exit(mfld_pb_exit);
MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>");
MODULE_DESCRIPTION("Intel Medfield Power Button Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
| gpl-2.0 |
VentureROM-L/android_kernel_asus_grouper | drivers/s390/cio/qdio_setup.c | 3025 | 13065 | /*
* drivers/s390/cio/qdio_setup.c
*
* qdio queue initialization
*
* Copyright (C) IBM Corp. 2008
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/qdio.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"
static struct kmem_cache *qdio_q_cache;
/*
* qebsm is only available under 64bit but the adapter sets the feature
* flag anyway, so we manually override it.
*/
static inline int qebsm_possible(void)
{
#ifdef CONFIG_64BIT
return css_general_characteristics.qebsm;
#endif
return 0;
}
/*
* qib_param_field: pointer to 128 bytes or NULL, if no param field
* nr_input_qs: pointer to nr_queues*128 words of data or NULL
*/
static void set_impl_params(struct qdio_irq *irq_ptr,
unsigned int qib_param_field_format,
unsigned char *qib_param_field,
unsigned long *input_slib_elements,
unsigned long *output_slib_elements)
{
struct qdio_q *q;
int i, j;
if (!irq_ptr)
return;
irq_ptr->qib.pfmt = qib_param_field_format;
if (qib_param_field)
memcpy(irq_ptr->qib.parm, qib_param_field,
QDIO_MAX_BUFFERS_PER_Q);
if (!input_slib_elements)
goto output;
for_each_input_queue(irq_ptr, q, i) {
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->slib->slibe[j].parms =
input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
}
output:
if (!output_slib_elements)
return;
for_each_output_queue(irq_ptr, q, i) {
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->slib->slibe[j].parms =
output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
}
}
static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
struct qdio_q *q;
int i;
for (i = 0; i < nr_queues; i++) {
q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
kmem_cache_free(qdio_q_cache, q);
return -ENOMEM;
}
irq_ptr_qs[i] = q;
}
return 0;
}
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
int rc;
rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
if (rc)
return rc;
rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
return rc;
}
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
struct slib *slib = q->slib;
/* queue must be cleared for qdio_establish */
memset(q, 0, sizeof(*q));
memset(slib, 0, PAGE_SIZE);
q->slib = slib;
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
void **sbals_array, int i)
{
struct qdio_q *prev;
int j;
DBF_HEX(&q, sizeof(void *));
q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
q->sbal[j] = *sbals_array++;
BUG_ON((unsigned long)q->sbal[j] & 0xff);
}
/* fill in slib */
if (i > 0) {
prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
: irq_ptr->output_qs[i - 1];
prev->slib->nsliba = (unsigned long)q->slib;
}
q->slib->sla = (unsigned long)q->sl;
q->slib->slsba = (unsigned long)&q->slsb.val[0];
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sl->element[j].sbal = (unsigned long)q->sbal[j];
}
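/*
 * Initialize every input and output queue from the qdio_initialize data:
 * per-queue handler, SBAL/SLIB storage lists and the inbound/outbound
 * processing tasklets (thin-interrupt aware for the input queues).
 */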
static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
struct qdio_q *q;
void **input_sbal_array = qdio_init->input_sbal_addr_array;
void **output_sbal_array = qdio_init->output_sbal_addr_array;
int i;
for_each_input_queue(irq_ptr, q, i) {
DBF_EVENT("in-q:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
q->u.in.queue_start_poll = qdio_init->queue_start_poll;
setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
if (is_thinint_irq(irq_ptr))
tasklet_init(&q->tasklet, tiqdio_inbound_processing,
(unsigned long) q);
else
tasklet_init(&q->tasklet, qdio_inbound_processing,
(unsigned long) q);
}
for_each_output_queue(irq_ptr, q, i) {
DBF_EVENT("outq:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
q->is_input_q = 0;
q->u.out.scan_threshold = qdio_init->scan_threshold;
setup_storage_lists(q, irq_ptr, output_sbal_array, i);
output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
tasklet_init(&q->tasklet, qdio_outbound_processing,
(unsigned long) q);
setup_timer(&q->u.out.timer, (void(*)(unsigned long))
&qdio_outbound_timer, (unsigned long)q);
}
}
static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
if (qdioac & AC1_SIGA_INPUT_NEEDED)
irq_ptr->siga_flag.input = 1;
if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
irq_ptr->siga_flag.output = 1;
if (qdioac & AC1_SIGA_SYNC_NEEDED)
irq_ptr->siga_flag.sync = 1;
if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
irq_ptr->siga_flag.sync_after_ai = 1;
if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
irq_ptr->siga_flag.sync_out_after_pci = 1;
}
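/*
 * Keep QEBSM enabled only if the subchannel reports it as both available
 * and enabled; otherwise clear the rflag and the subchannel token.
 */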
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
unsigned char qdioac, unsigned long token)
{
if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
goto no_qebsm;
if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
(!(qdioac & AC1_SC_QEBSM_ENABLED)))
goto no_qebsm;
irq_ptr->sch_token = token;
DBF_EVENT("V=V:1");
DBF_EVENT("%8lx", irq_ptr->sch_token);
return;
no_qebsm:
irq_ptr->sch_token = 0;
irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
DBF_EVENT("noV=V");
}
/*
* If there is a qdio_irq we use the chsc_page and store the information
* in the qdio_irq, otherwise we copy it to the specified structure.
*/
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
struct subchannel_id *schid,
struct qdio_ssqd_desc *data)
{
struct chsc_ssqd_area *ssqd;
int rc;
DBF_EVENT("getssqd:%4x", schid->sch_no);
if (irq_ptr != NULL)
ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
else
ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
memset(ssqd, 0, PAGE_SIZE);
ssqd->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x0024,
};
ssqd->first_sch = schid->sch_no;
ssqd->last_sch = schid->sch_no;
ssqd->ssid = schid->ssid;
if (chsc(ssqd))
return -EIO;
rc = chsc_error_from_response(ssqd->response.code);
if (rc)
return rc;
if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
(ssqd->qdio_ssqd.sch != schid->sch_no))
return -EINVAL;
if (irq_ptr != NULL)
memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
sizeof(struct qdio_ssqd_desc));
else {
memcpy(data, &ssqd->qdio_ssqd,
sizeof(struct qdio_ssqd_desc));
free_page((unsigned long)ssqd);
}
return 0;
}
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
unsigned char qdioac;
int rc;
rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL);
if (rc) {
DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%x", rc);
/* all flags set, worst case */
qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
AC1_SIGA_SYNC_NEEDED;
} else
qdioac = irq_ptr->ssqd_desc.qdioac1;
check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
process_ac_flags(irq_ptr, qdioac);
DBF_EVENT("qdioac:%4x", qdioac);
}
void qdio_release_memory(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
/*
* Must check queue array manually since irq_ptr->nr_input_queues /
* irq_ptr->nr_output_queues may not yet be set.
*/
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
q = irq_ptr->input_qs[i];
if (q) {
free_page((unsigned long) q->slib);
kmem_cache_free(qdio_q_cache, q);
}
}
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
q = irq_ptr->output_qs[i];
if (q) {
free_page((unsigned long) q->slib);
kmem_cache_free(qdio_q_cache, q);
}
}
free_page((unsigned long) irq_ptr->qdr);
free_page(irq_ptr->chsc_page);
free_page((unsigned long) irq_ptr);
}
static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
struct qdio_q **irq_ptr_qs,
int i, int nr)
{
irq_ptr->qdr->qdf0[i + nr].sliba =
(unsigned long)irq_ptr_qs[i]->slib;
irq_ptr->qdr->qdf0[i + nr].sla =
(unsigned long)irq_ptr_qs[i]->sl;
irq_ptr->qdr->qdf0[i + nr].slsba =
(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}
static void setup_qdr(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
int i;
irq_ptr->qdr->qfmt = qdio_init->q_format;
irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
for (i = 0; i < qdio_init->no_input_qs; i++)
__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
for (i = 0; i < qdio_init->no_output_qs; i++)
__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
qdio_init->no_input_qs);
}
static void setup_qib(struct qdio_irq *irq_ptr,
struct qdio_initialize *init_data)
{
if (qebsm_possible())
irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
irq_ptr->qib.rflags |= init_data->qib_rflags;
irq_ptr->qib.qfmt = init_data->q_format;
if (init_data->no_input_qs)
irq_ptr->qib.isliba =
(unsigned long)(irq_ptr->input_qs[0]->slib);
if (init_data->no_output_qs)
irq_ptr->qib.osliba =
(unsigned long)(irq_ptr->output_qs[0]->slib);
memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}
int qdio_setup_irq(struct qdio_initialize *init_data)
{
struct ciw *ciw;
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
int rc;
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
/* wipes qib.ac, required by ar7063 */
memset(irq_ptr->qdr, 0, sizeof(struct qdr));
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
irq_ptr->cdev = init_data->cdev;
setup_queues(irq_ptr, init_data);
setup_qib(irq_ptr, init_data);
qdio_setup_thinint(irq_ptr);
set_impl_params(irq_ptr, init_data->qib_param_field_format,
init_data->qib_param_field,
init_data->input_slib_elements,
init_data->output_slib_elements);
/* fill input and output descriptors */
setup_qdr(irq_ptr, init_data);
/* qdr, qib, sls, slsbs, slibs, sbales are filled now */
/* get qdio commands */
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
rc = -EINVAL;
goto out_err;
}
irq_ptr->equeue = *ciw;
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
rc = -EINVAL;
goto out_err;
}
irq_ptr->aqueue = *ciw;
/* set new interrupt handler */
irq_ptr->orig_handler = init_data->cdev->handler;
init_data->cdev->handler = qdio_int_handler;
return 0;
out_err:
qdio_release_memory(irq_ptr);
return rc;
}
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
struct ccw_device *cdev)
{
char s[80];
snprintf(s, 80, "qdio: %s %s on SC %x using "
"AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
dev_name(&cdev->dev),
(irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
irq_ptr->schid.sch_no,
is_thinint_irq(irq_ptr),
(irq_ptr->sch_token) ? 1 : 0,
(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
css_general_characteristics.aif_tdd,
(irq_ptr->siga_flag.input) ? "R" : " ",
(irq_ptr->siga_flag.output) ? "W" : " ",
(irq_ptr->siga_flag.sync) ? "S" : " ",
(irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
(irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
printk(KERN_INFO "%s", s);
}
int __init qdio_setup_init(void)
{
qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
256, 0, NULL);
if (!qdio_q_cache)
return -ENOMEM;
/* Check for OSA/FCP thin interrupts (bit 67). */
DBF_EVENT("thinint:%1d",
(css_general_characteristics.aif_osa) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
return 0;
}
void qdio_setup_exit(void)
{
kmem_cache_destroy(qdio_q_cache);
}
| gpl-2.0 |
angpysha/KitKatExtendedKernel | drivers/power/ab8500_btemp.c | 3281 | 29987 | /*
* Copyright (C) ST-Ericsson SA 2012
*
* Battery temperature driver for AB8500
*
* License Terms: GNU General Public License v2
* Author:
* Johan Palsson <johan.palsson@stericsson.com>
* Karl Komierowski <karl.komierowski@stericsson.com>
* Arun R Murthy <arun.murthy@stericsson.com>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
#include <linux/mfd/abx500/ab8500-gpadc.h>
#include <linux/jiffies.h>
#define VTVOUT_V 1800
#define BTEMP_THERMAL_LOW_LIMIT -10
#define BTEMP_THERMAL_MED_LIMIT 0
#define BTEMP_THERMAL_HIGH_LIMIT_52 52
#define BTEMP_THERMAL_HIGH_LIMIT_57 57
#define BTEMP_THERMAL_HIGH_LIMIT_62 62
#define BTEMP_BATCTRL_CURR_SRC_7UA 7
#define BTEMP_BATCTRL_CURR_SRC_20UA 20
#define to_ab8500_btemp_device_info(x) container_of((x), \
struct ab8500_btemp, btemp_psy);
/**
* struct ab8500_btemp_interrupts - ab8500 interrupts
* @name: name of the interrupt
* @isr: function pointer to the isr
*/
struct ab8500_btemp_interrupts {
char *name;
irqreturn_t (*isr)(int irq, void *data);
};
struct ab8500_btemp_events {
bool batt_rem;
bool btemp_high;
bool btemp_medhigh;
bool btemp_lowmed;
bool btemp_low;
bool ac_conn;
bool usb_conn;
};
struct ab8500_btemp_ranges {
int btemp_high_limit;
int btemp_med_limit;
int btemp_low_limit;
};
/**
* struct ab8500_btemp - ab8500 BTEMP device information
* @dev: Pointer to the structure device
* @node: List of AB8500 BTEMPs, hence prepared for reentrance
* @curr_source: What current source we use, in uA
* @bat_temp: Battery temperature in degree Celsius
* @prev_bat_temp: Last dispatched battery temperature
* @parent: Pointer to the struct ab8500
* @gpadc: Pointer to the struct gpadc
* @fg: Pointer to the struct fg
* @pdata: Pointer to the abx500_btemp platform data
* @bat: Pointer to the abx500_bm platform data
* @btemp_psy: Structure for BTEMP specific battery properties
* @events: Structure for information about events triggered
* @btemp_ranges: Battery temperature range structure
* @btemp_wq: Work queue for measuring the temperature periodically
* @btemp_periodic_work: Work for measuring the temperature periodically
*/
struct ab8500_btemp {
struct device *dev;
struct list_head node;
int curr_source;
int bat_temp;
int prev_bat_temp;
struct ab8500 *parent;
struct ab8500_gpadc *gpadc;
struct ab8500_fg *fg;
struct abx500_btemp_platform_data *pdata;
struct abx500_bm_data *bat;
struct power_supply btemp_psy;
struct ab8500_btemp_events events;
struct ab8500_btemp_ranges btemp_ranges;
struct workqueue_struct *btemp_wq;
struct delayed_work btemp_periodic_work;
};
/* BTEMP power supply properties */
static enum power_supply_property ab8500_btemp_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_TEMP,
};
static LIST_HEAD(ab8500_btemp_list);
/**
* ab8500_btemp_get() - returns a reference to the primary AB8500 BTEMP
* (i.e. the first BTEMP in the instance list)
*/
struct ab8500_btemp *ab8500_btemp_get(void)
{
struct ab8500_btemp *btemp;
btemp = list_first_entry(&ab8500_btemp_list, struct ab8500_btemp, node);
return btemp;
}
/**
* ab8500_btemp_batctrl_volt_to_res() - convert batctrl voltage to resistance
* @di: pointer to the ab8500_btemp structure
* @v_batctrl: measured batctrl voltage
* @inst_curr: measured instant current
*
* This function returns the battery resistance that is
* derived from the BATCTRL voltage.
* Returns value in Ohms.
*/
static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
int v_batctrl, int inst_curr)
{
int rbs;
if (is_ab8500_1p1_or_earlier(di->parent)) {
/*
* For ABB cut1.0 and 1.1 BAT_CTRL is internally
* connected to 1.8V through a 450k resistor
*/
return (450000 * (v_batctrl)) / (1800 - v_batctrl);
}
if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL) {
/*
* If the battery has internal NTC, we use the current
* source to calculate the resistance, 7uA or 20uA
*/
rbs = (v_batctrl * 1000
- di->bat->gnd_lift_resistance * inst_curr)
/ di->curr_source;
} else {
/*
* BAT_CTRL is internally
* connected to 1.8V through a 80k resistor
*/
rbs = (80000 * (v_batctrl)) / (1800 - v_batctrl);
}
return rbs;
}
/**
* ab8500_btemp_read_batctrl_voltage() - measure batctrl voltage
* @di: pointer to the ab8500_btemp structure
*
* This function returns the voltage on BATCTRL. Returns value in mV.
*/
static int ab8500_btemp_read_batctrl_voltage(struct ab8500_btemp *di)
{
int vbtemp;
static int prev;
vbtemp = ab8500_gpadc_convert(di->gpadc, BAT_CTRL);
if (vbtemp < 0) {
dev_err(di->dev,
"%s gpadc conversion failed, using previous value",
__func__);
return prev;
}
prev = vbtemp;
return vbtemp;
}
/**
* ab8500_btemp_curr_source_enable() - enable/disable batctrl current source
* @di: pointer to the ab8500_btemp structure
* @enable: enable or disable the current source
*
* Enable or disable the current sources for the BatCtrl AD channel
*/
static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
bool enable)
{
int curr;
int ret = 0;
/*
* BATCTRL current sources are included on AB8500 cut2.0
* and future versions
*/
if (is_ab8500_1p1_or_earlier(di->parent))
return 0;
/* Only do this for batteries with internal NTC */
if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
curr = BAT_CTRL_7U_ENA;
else
curr = BAT_CTRL_20U_ENA;
dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
FORCE_BAT_CTRL_CMP_HIGH, FORCE_BAT_CTRL_CMP_HIGH);
if (ret) {
dev_err(di->dev, "%s failed setting cmp_force\n",
__func__);
return ret;
}
/*
* We have to wait one 32kHz cycle before enabling
* the current source, since ForceBatCtrlCmpHigh needs
* to be written in a separate cycle
*/
udelay(32);
ret = abx500_set_register_interruptible(di->dev,
AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
FORCE_BAT_CTRL_CMP_HIGH | curr);
if (ret) {
dev_err(di->dev, "%s failed enabling current source\n",
__func__);
goto disable_curr_source;
}
} else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
dev_dbg(di->dev, "Disable BATCTRL curr source\n");
/* Write 0 to the curr bits */
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
if (ret) {
dev_err(di->dev, "%s failed disabling current source\n",
__func__);
goto disable_curr_source;
}
/* Enable Pull-Up and comparator */
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
if (ret) {
dev_err(di->dev, "%s failed enabling PU and comp\n",
__func__);
goto enable_pu_comp;
}
/*
* We have to wait one 32kHz cycle before disabling
* ForceBatCtrlCmpHigh since this needs to be written
* in a separate cycle
*/
udelay(32);
/* Disable 'force comparator' */
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
if (ret) {
dev_err(di->dev, "%s failed disabling force comp\n",
__func__);
goto disable_force_comp;
}
}
return ret;
/*
* We have to try unsetting FORCE_BAT_CTRL_CMP_HIGH one more time
* if we got an error above
*/
disable_curr_source:
/* Write 0 to the curr bits */
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
if (ret) {
dev_err(di->dev, "%s failed disabling current source\n",
__func__);
return ret;
}
enable_pu_comp:
/* Enable Pull-Up and comparator */
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
if (ret) {
dev_err(di->dev, "%s failed enabling PU and comp\n",
__func__);
return ret;
}
disable_force_comp:
/*
* We have to wait one 32kHz cycle before disabling
* ForceBatCtrlCmpHigh since this needs to be written
* in a separate cycle
*/
udelay(32);
/* Disable 'force comparator' */
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
if (ret) {
dev_err(di->dev, "%s failed disabling force comp\n",
__func__);
return ret;
}
return ret;
}
/**
* ab8500_btemp_get_batctrl_res() - get battery resistance
* @di: pointer to the ab8500_btemp structure
*
* This function returns the battery pack identification resistance.
* Returns value in Ohms.
*/
static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
{
int ret;
int batctrl = 0;
int res;
int inst_curr;
int i;
/*
* BATCTRL current sources are included on AB8500 cut2.0
* and future versions
*/
ret = ab8500_btemp_curr_source_enable(di, true);
if (ret) {
dev_err(di->dev, "%s curr source enabled failed\n", __func__);
return ret;
}
if (!di->fg)
di->fg = ab8500_fg_get();
if (!di->fg) {
dev_err(di->dev, "No fg found\n");
return -EINVAL;
}
ret = ab8500_fg_inst_curr_start(di->fg);
if (ret) {
dev_err(di->dev, "Failed to start current measurement\n");
return ret;
}
/*
* Since there is no interrupt when current measurement is done,
* loop for over 250ms (250ms is one sample conversion time
* with 32.768 Khz RTC clock). Note that a stop time must be set
* since the ab8500_btemp_read_batctrl_voltage call can block and
* take an unknown amount of time to complete.
*/
i = 0;
do {
batctrl += ab8500_btemp_read_batctrl_voltage(di);
i++;
msleep(20);
} while (!ab8500_fg_inst_curr_done(di->fg));
batctrl /= i;
ret = ab8500_fg_inst_curr_finalize(di->fg, &inst_curr);
if (ret) {
dev_err(di->dev, "Failed to finalize current measurement\n");
return ret;
}
res = ab8500_btemp_batctrl_volt_to_res(di, batctrl, inst_curr);
ret = ab8500_btemp_curr_source_enable(di, false);
if (ret) {
dev_err(di->dev, "%s curr source disable failed\n", __func__);
return ret;
}
dev_dbg(di->dev, "%s batctrl: %d res: %d inst_curr: %d samples: %d\n",
__func__, batctrl, res, inst_curr, i);
return res;
}
/**
* ab8500_btemp_res_to_temp() - resistance to temperature
* @di: pointer to the ab8500_btemp structure
* @tbl: pointer to the resistance to temperature table
* @tbl_size: size of the resistance to temperature table
* @res: resistance to calculate the temperature from
*
* This function returns the battery temperature in degrees Celsius
* based on the NTC resistance.
*/
static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
const struct abx500_res_to_temp *tbl, int tbl_size, int res)
{
int i, temp;
/*
* Calculate the formula for the straight line
* Simple interpolation if we are within
* the resistance table limits, extrapolate
* if resistance is outside the limits.
*/
if (res > tbl[0].resist)
i = 0;
else if (res <= tbl[tbl_size - 1].resist)
i = tbl_size - 2;
else {
i = 0;
while (!(res <= tbl[i].resist &&
res > tbl[i + 1].resist))
i++;
}
temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
(res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
return temp;
}
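/*
 * Illustrative sketch (not part of the original driver), assuming the
 * abx500_res_to_temp layout of { .temp (deg C), .resist (Ohm) }: with a
 * two-entry table
 *
 * { .temp = 20, .resist = 100000 },
 * { .temp = 25, .resist = 80000 },
 *
 * a measured res of 90000 Ohm picks i = 0 and the straight-line formula
 * above evaluates 20 + (25 - 20) * (90000 - 100000) / (80000 - 100000),
 * i.e. 22 deg C after integer truncation.
 */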
/**
* ab8500_btemp_measure_temp() - measure battery temperature
* @di: pointer to the ab8500_btemp structure
*
* Returns battery temperature (on success) else the previous temperature
*/
static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
{
int temp;
static int prev;
int rbat, rntc, vntc;
u8 id;
id = di->bat->batt_id;
if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
id != BATTERY_UNKNOWN) {
rbat = ab8500_btemp_get_batctrl_res(di);
if (rbat < 0) {
dev_err(di->dev, "%s get batctrl res failed\n",
__func__);
/*
* Return out-of-range temperature so that
* charging is stopped
*/
return BTEMP_THERMAL_LOW_LIMIT;
}
temp = ab8500_btemp_res_to_temp(di,
di->bat->bat_type[id].r_to_t_tbl,
di->bat->bat_type[id].n_temp_tbl_elements, rbat);
} else {
vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
if (vntc < 0) {
dev_err(di->dev,
"%s gpadc conversion failed,"
" using previous value\n", __func__);
return prev;
}
/*
* The PCB NTC is sourced from VTVOUT via a 230kOhm
* resistor.
*/
rntc = 230000 * vntc / (VTVOUT_V - vntc);
temp = ab8500_btemp_res_to_temp(di,
di->bat->bat_type[id].r_to_t_tbl,
di->bat->bat_type[id].n_temp_tbl_elements, rntc);
prev = temp;
}
dev_dbg(di->dev, "Battery temperature is %d\n", temp);
return temp;
}
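/*
 * Illustrative helper (not part of the original driver): the rntc value
 * above follows from the voltage divider formed by the 230 kOhm pull-up
 * to VTVOUT and the NTC to ground, Vntc = VTVOUT * Rntc / (Rntc + 230k),
 * rearranged to Rntc = 230k * Vntc / (VTVOUT - Vntc). The helper name is
 * made up and only restates that calculation; assuming VTVOUT_V is about
 * 1800 (mV), vntc = 600 mV gives 115000 Ohm.
 */
static inline int ab8500_btemp_example_ntc_res(int vntc)
{
return 230000 * vntc / (VTVOUT_V - vntc);
}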
/**
* ab8500_btemp_id() - Identify the connected battery
* @di: pointer to the ab8500_btemp structure
*
* This function will try to identify the battery by reading the ID
* resistor. Some brands use a combined ID resistor with an NTC resistor,
* making it possible both to identify the battery and to read its temperature.
*/
static int ab8500_btemp_id(struct ab8500_btemp *di)
{
int res;
u8 i;
di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
di->bat->batt_id = BATTERY_UNKNOWN;
res = ab8500_btemp_get_batctrl_res(di);
if (res < 0) {
dev_err(di->dev, "%s get batctrl res failed\n", __func__);
return -ENXIO;
}
/* BATTERY_UNKNOWN is defined on position 0, skip it! */
for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
if ((res <= di->bat->bat_type[i].resis_high) &&
(res >= di->bat->bat_type[i].resis_low)) {
dev_dbg(di->dev, "Battery detected on %s"
" low %d < res %d < high: %d"
" index: %d\n",
di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ?
"BATCTRL" : "BATTEMP",
di->bat->bat_type[i].resis_low, res,
di->bat->bat_type[i].resis_high, i);
di->bat->batt_id = i;
break;
}
}
if (di->bat->batt_id == BATTERY_UNKNOWN) {
dev_warn(di->dev, "Battery identified as unknown"
", resistance %d Ohm\n", res);
return -ENXIO;
}
/*
* We only have to change current source if the
* detected type is Type 1, else we use the 7uA source
*/
if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
di->bat->batt_id == 1) {
dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
}
return di->bat->batt_id;
}
/**
* ab8500_btemp_periodic_work() - Measuring the temperature periodically
* @work: pointer to the work_struct structure
*
* Work function for measuring the temperature periodically
*/
static void ab8500_btemp_periodic_work(struct work_struct *work)
{
int interval;
struct ab8500_btemp *di = container_of(work,
struct ab8500_btemp, btemp_periodic_work.work);
di->bat_temp = ab8500_btemp_measure_temp(di);
if (di->bat_temp != di->prev_bat_temp) {
di->prev_bat_temp = di->bat_temp;
power_supply_changed(&di->btemp_psy);
}
if (di->events.ac_conn || di->events.usb_conn)
interval = di->bat->temp_interval_chg;
else
interval = di->bat->temp_interval_nochg;
/* Schedule a new measurement */
queue_delayed_work(di->btemp_wq,
&di->btemp_periodic_work,
round_jiffies(interval * HZ));
}
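/*
 * Illustrative sketch (not part of the original driver): with a charger
 * connected and, for example, temp_interval_chg = 20 the requeue above
 * becomes
 *
 * queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work,
 * round_jiffies(20 * HZ));
 *
 * i.e. roughly a 20 second period, rounded to a whole second so timer
 * wakeups can be batched. The interval values come from the battery
 * platform data; 20 is only an example.
 */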
/**
* ab8500_btemp_batctrlindb_handler() - battery removal detected
* @irq: interrupt number
* @_di: void pointer that has the address of ab8500_btemp
*
* Returns IRQ status (IRQ_HANDLED)
*/
static irqreturn_t ab8500_btemp_batctrlindb_handler(int irq, void *_di)
{
struct ab8500_btemp *di = _di;
dev_err(di->dev, "Battery removal detected!\n");
di->events.batt_rem = true;
power_supply_changed(&di->btemp_psy);
return IRQ_HANDLED;
}
/**
* ab8500_btemp_templow_handler() - battery temp lower than -10 degrees
* @irq: interrupt number
* @_di: void pointer that has the address of ab8500_btemp
*
* Returns IRQ status (IRQ_HANDLED)
*/
static irqreturn_t ab8500_btemp_templow_handler(int irq, void *_di)
{
struct ab8500_btemp *di = _di;
if (is_ab8500_2p0_or_earlier(di->parent)) {
dev_dbg(di->dev, "Ignore false btemp low irq"
" for ABB cut 1.0, 1.1 and 2.0\n");
} else {
dev_crit(di->dev, "Battery temperature lower than -10deg c\n");
di->events.btemp_low = true;
di->events.btemp_high = false;
di->events.btemp_medhigh = false;
di->events.btemp_lowmed = false;
power_supply_changed(&di->btemp_psy);
}
return IRQ_HANDLED;
}
/**
* ab8500_btemp_temphigh_handler() - battery temp higher than max temp
* @irq: interrupt number
* @_di: void pointer that has the address of ab8500_btemp
*
* Returns IRQ status (IRQ_HANDLED)
*/
static irqreturn_t ab8500_btemp_temphigh_handler(int irq, void *_di)
{
struct ab8500_btemp *di = _di;
dev_crit(di->dev, "Battery temperature is higher than MAX temp\n");
di->events.btemp_high = true;
di->events.btemp_medhigh = false;
di->events.btemp_lowmed = false;
di->events.btemp_low = false;
power_supply_changed(&di->btemp_psy);
return IRQ_HANDLED;
}
/**
* ab8500_btemp_lowmed_handler() - battery temp between low and medium
* @irq: interrupt number
* @_di: void pointer that has the address of ab8500_btemp
*
* Returns IRQ status (IRQ_HANDLED)
*/
static irqreturn_t ab8500_btemp_lowmed_handler(int irq, void *_di)
{
struct ab8500_btemp *di = _di;
dev_dbg(di->dev, "Battery temperature is between low and medium\n");
di->events.btemp_lowmed = true;
di->events.btemp_medhigh = false;
di->events.btemp_high = false;
di->events.btemp_low = false;
power_supply_changed(&di->btemp_psy);
return IRQ_HANDLED;
}
/**
* ab8500_btemp_medhigh_handler() - battery temp between medium and high
* @irq: interrupt number
* @_di: void pointer that has the address of ab8500_btemp
*
* Returns IRQ status (IRQ_HANDLED)
*/
static irqreturn_t ab8500_btemp_medhigh_handler(int irq, void *_di)
{
struct ab8500_btemp *di = _di;
dev_dbg(di->dev, "Battery temperature is between medium and high\n");
di->events.btemp_medhigh = true;
di->events.btemp_lowmed = false;
di->events.btemp_high = false;
di->events.btemp_low = false;
power_supply_changed(&di->btemp_psy);
return IRQ_HANDLED;
}
/**
* ab8500_btemp_periodic() - Periodic temperature measurements
* @di: pointer to the ab8500_btemp structure
* @enable: enable or disable periodic temperature measurements
*
* Starts or stops periodic temperature measurements. Periodic measurements
* should only be done when a charger is connected.
*/
static void ab8500_btemp_periodic(struct ab8500_btemp *di,
bool enable)
{
dev_dbg(di->dev, "Enable periodic temperature measurements: %d\n",
enable);
/*
* Make sure a new measurement is done directly by cancelling
* any pending work
*/
cancel_delayed_work_sync(&di->btemp_periodic_work);
if (enable)
queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
}
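/*
 * Illustrative usage sketch (not part of the original driver): probe and
 * the external power notifier start measurements with
 * ab8500_btemp_periodic(di, true), and the suspend path stops them with
 * ab8500_btemp_periodic(di, false). The cancel_delayed_work_sync() above
 * makes either call safe while a measurement is already queued or running.
 */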
/**
* ab8500_btemp_get_temp() - get battery temperature
* @di: pointer to the ab8500_btemp structure
*
* Returns battery temperature
*/
static int ab8500_btemp_get_temp(struct ab8500_btemp *di)
{
int temp = 0;
/*
* The BTEMP events are not reliable on AB8500 cut2.0
* and prior versions
*/
if (is_ab8500_2p0_or_earlier(di->parent)) {
temp = di->bat_temp * 10;
} else {
if (di->events.btemp_low) {
if (temp > di->btemp_ranges.btemp_low_limit)
temp = di->btemp_ranges.btemp_low_limit;
else
temp = di->bat_temp * 10;
} else if (di->events.btemp_high) {
if (temp < di->btemp_ranges.btemp_high_limit)
temp = di->btemp_ranges.btemp_high_limit;
else
temp = di->bat_temp * 10;
} else if (di->events.btemp_lowmed) {
if (temp > di->btemp_ranges.btemp_med_limit)
temp = di->btemp_ranges.btemp_med_limit;
else
temp = di->bat_temp * 10;
} else if (di->events.btemp_medhigh) {
if (temp < di->btemp_ranges.btemp_med_limit)
temp = di->btemp_ranges.btemp_med_limit;
else
temp = di->bat_temp * 10;
} else
temp = di->bat_temp * 10;
}
return temp;
}
/**
* ab8500_btemp_get_batctrl_temp() - get the temperature
* @btemp: pointer to the btemp structure
*
* Returns the batctrl temperature in millidegrees
*/
int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
{
return btemp->bat_temp * 1000;
}
/**
* ab8500_btemp_get_property() - get the btemp properties
* @psy: pointer to the power_supply structure
* @psp: pointer to the power_supply_property structure
* @val: pointer to the power_supply_propval union
*
* This function gets called when an application tries to get the btemp
* properties by reading the sysfs files.
* online: presence of the battery
* present: presence of the battery
* technology: battery technology
* temp: battery temperature
* Returns error code in case of failure else 0 (on success)
*/
static int ab8500_btemp_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct ab8500_btemp *di;
di = to_ab8500_btemp_device_info(psy);
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_ONLINE:
if (di->events.batt_rem)
val->intval = 0;
else
val->intval = 1;
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
val->intval = di->bat->bat_type[di->bat->batt_id].name;
break;
case POWER_SUPPLY_PROP_TEMP:
val->intval = ab8500_btemp_get_temp(di);
break;
default:
return -EINVAL;
}
return 0;
}
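/*
 * Illustrative usage sketch (not part of the original driver): the
 * power-supply core exposes these properties under sysfs, typically
 * /sys/class/power_supply/ab8500_btemp/temp, so a userspace read of that
 * file ends up here with psp == POWER_SUPPLY_PROP_TEMP and returns
 * ab8500_btemp_get_temp(di) in tenths of a degree Celsius.
 */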
static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
{
struct power_supply *psy;
struct power_supply *ext;
struct ab8500_btemp *di;
union power_supply_propval ret;
int i, j;
bool psy_found = false;
psy = (struct power_supply *)data;
ext = dev_get_drvdata(dev);
di = to_ab8500_btemp_device_info(psy);
/*
* For all psy where the name of your driver
* appears in any supplied_to
*/
for (i = 0; i < ext->num_supplicants; i++) {
if (!strcmp(ext->supplied_to[i], psy->name))
psy_found = true;
}
if (!psy_found)
return 0;
/* Go through all properties for the psy */
for (j = 0; j < ext->num_properties; j++) {
enum power_supply_property prop;
prop = ext->properties[j];
if (ext->get_property(ext, prop, &ret))
continue;
switch (prop) {
case POWER_SUPPLY_PROP_PRESENT:
switch (ext->type) {
case POWER_SUPPLY_TYPE_MAINS:
/* AC disconnected */
if (!ret.intval && di->events.ac_conn) {
di->events.ac_conn = false;
}
/* AC connected */
else if (ret.intval && !di->events.ac_conn) {
di->events.ac_conn = true;
if (!di->events.usb_conn)
ab8500_btemp_periodic(di, true);
}
break;
case POWER_SUPPLY_TYPE_USB:
/* USB disconnected */
if (!ret.intval && di->events.usb_conn) {
di->events.usb_conn = false;
}
/* USB connected */
else if (ret.intval && !di->events.usb_conn) {
di->events.usb_conn = true;
if (!di->events.ac_conn)
ab8500_btemp_periodic(di, true);
}
break;
default:
break;
}
break;
default:
break;
}
}
return 0;
}
/**
* ab8500_btemp_external_power_changed() - callback for power supply changes
* @psy: pointer to the structure power_supply
*
* This function is pointing to the function pointer external_power_changed
* of the structure power_supply.
* This function gets executed when there is a change in the external power
* supply to the btemp.
*/
static void ab8500_btemp_external_power_changed(struct power_supply *psy)
{
struct ab8500_btemp *di = to_ab8500_btemp_device_info(psy);
class_for_each_device(power_supply_class, NULL,
&di->btemp_psy, ab8500_btemp_get_ext_psy_data);
}
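/*
 * Illustrative sketch (not part of the original driver):
 * class_for_each_device() above calls ab8500_btemp_get_ext_psy_data()
 * once for every device registered in power_supply_class, passing
 * &di->btemp_psy as the data argument. That is how AC/USB presence
 * changes reach di->events and restart the periodic measurements.
 */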
/* ab8500 btemp driver interrupts and their respective isr */
static struct ab8500_btemp_interrupts ab8500_btemp_irq[] = {
{"BAT_CTRL_INDB", ab8500_btemp_batctrlindb_handler},
{"BTEMP_LOW", ab8500_btemp_templow_handler},
{"BTEMP_HIGH", ab8500_btemp_temphigh_handler},
{"BTEMP_LOW_MEDIUM", ab8500_btemp_lowmed_handler},
{"BTEMP_MEDIUM_HIGH", ab8500_btemp_medhigh_handler},
};
#if defined(CONFIG_PM)
static int ab8500_btemp_resume(struct platform_device *pdev)
{
struct ab8500_btemp *di = platform_get_drvdata(pdev);
ab8500_btemp_periodic(di, true);
return 0;
}
static int ab8500_btemp_suspend(struct platform_device *pdev,
pm_message_t state)
{
struct ab8500_btemp *di = platform_get_drvdata(pdev);
ab8500_btemp_periodic(di, false);
return 0;
}
#else
#define ab8500_btemp_suspend NULL
#define ab8500_btemp_resume NULL
#endif
static int __devexit ab8500_btemp_remove(struct platform_device *pdev)
{
struct ab8500_btemp *di = platform_get_drvdata(pdev);
int i, irq;
/* Disable interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
free_irq(irq, di);
}
/* Delete the work queue */
destroy_workqueue(di->btemp_wq);
flush_scheduled_work();
power_supply_unregister(&di->btemp_psy);
platform_set_drvdata(pdev, NULL);
kfree(di);
return 0;
}
static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
{
int irq, i, ret = 0;
u8 val;
struct abx500_bm_plat_data *plat_data;
struct ab8500_btemp *di =
kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
if (!di)
return -ENOMEM;
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
/* get btemp specific platform data */
plat_data = pdev->dev.platform_data;
di->pdata = plat_data->btemp;
if (!di->pdata) {
dev_err(di->dev, "no btemp platform data supplied\n");
ret = -EINVAL;
goto free_device_info;
}
/* get battery specific platform data */
di->bat = plat_data->battery;
if (!di->bat) {
dev_err(di->dev, "no battery platform data supplied\n");
ret = -EINVAL;
goto free_device_info;
}
/* BTEMP supply */
di->btemp_psy.name = "ab8500_btemp";
di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
di->btemp_psy.properties = ab8500_btemp_props;
di->btemp_psy.num_properties = ARRAY_SIZE(ab8500_btemp_props);
di->btemp_psy.get_property = ab8500_btemp_get_property;
di->btemp_psy.supplied_to = di->pdata->supplied_to;
di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
di->btemp_psy.external_power_changed =
ab8500_btemp_external_power_changed;
/* Create a work queue for the btemp */
di->btemp_wq =
create_singlethread_workqueue("ab8500_btemp_wq");
if (di->btemp_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
ret = -ENOMEM;
goto free_device_info;
}
/* Init work for measuring temperature periodically */
INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
ab8500_btemp_periodic_work);
/* Identify the battery */
if (ab8500_btemp_id(di) < 0)
dev_warn(di->dev, "failed to identify the battery\n");
/* Set BTEMP thermal limits. Low and Med are fixed */
di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
AB8500_BTEMP_HIGH_TH, &val);
if (ret < 0) {
dev_err(di->dev, "%s ab8500 read failed\n", __func__);
goto free_btemp_wq;
}
switch (val) {
case BTEMP_HIGH_TH_57_0:
case BTEMP_HIGH_TH_57_1:
di->btemp_ranges.btemp_high_limit =
BTEMP_THERMAL_HIGH_LIMIT_57;
break;
case BTEMP_HIGH_TH_52:
di->btemp_ranges.btemp_high_limit =
BTEMP_THERMAL_HIGH_LIMIT_52;
break;
case BTEMP_HIGH_TH_62:
di->btemp_ranges.btemp_high_limit =
BTEMP_THERMAL_HIGH_LIMIT_62;
break;
}
/* Register BTEMP power supply class */
ret = power_supply_register(di->dev, &di->btemp_psy);
if (ret) {
dev_err(di->dev, "failed to register BTEMP psy\n");
goto free_btemp_wq;
}
/* Register interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_btemp_irq[i].name, di);
if (ret) {
dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
, ab8500_btemp_irq[i].name, irq, ret);
goto free_irq;
}
dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
ab8500_btemp_irq[i].name, irq, ret);
}
platform_set_drvdata(pdev, di);
/* Kick off periodic temperature measurements */
ab8500_btemp_periodic(di, true);
list_add_tail(&di->node, &ab8500_btemp_list);
return ret;
free_irq:
power_supply_unregister(&di->btemp_psy);
/* We also have to free all successfully registered irqs */
for (i = i - 1; i >= 0; i--) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
free_irq(irq, di);
}
free_btemp_wq:
destroy_workqueue(di->btemp_wq);
free_device_info:
kfree(di);
return ret;
}
static struct platform_driver ab8500_btemp_driver = {
.probe = ab8500_btemp_probe,
.remove = __devexit_p(ab8500_btemp_remove),
.suspend = ab8500_btemp_suspend,
.resume = ab8500_btemp_resume,
.driver = {
.name = "ab8500-btemp",
.owner = THIS_MODULE,
},
};
static int __init ab8500_btemp_init(void)
{
return platform_driver_register(&ab8500_btemp_driver);
}
static void __exit ab8500_btemp_exit(void)
{
platform_driver_unregister(&ab8500_btemp_driver);
}
subsys_initcall_sync(ab8500_btemp_init);
module_exit(ab8500_btemp_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
MODULE_ALIAS("platform:ab8500-btemp");
MODULE_DESCRIPTION("AB8500 battery temperature driver");
| gpl-2.0 |
flar2/jewel-bulletproof | drivers/pcmcia/at91_cf.c | 3793 | 10622 | /*
* at91_cf.c -- AT91 CompactFlash controller driver
*
* Copyright (C) 2005 David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <pcmcia/ss.h>
#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <mach/board.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
/*
* A0..A10 work in each range; A23 indicates I/O space; A25 is CFRNW;
* some other bit in {A24,A22..A11} is nREG to flag memory access
* (vs attributes). So more than 2KB/region would just be waste.
* Note: These are offsets from the physical base address.
*/
#define CF_ATTR_PHYS (0)
#define CF_IO_PHYS (1 << 23)
#define CF_MEM_PHYS (0x017ff800)
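/*
 * Illustrative sketch (not part of the original driver): with the chip
 * select mapped at, say, 0x50000000 the three windows above become
 * attribute memory at 0x50000000 + CF_ATTR_PHYS = 0x50000000,
 * I/O space at 0x50000000 + CF_IO_PHYS = 0x50800000 and
 * common memory at 0x50000000 + CF_MEM_PHYS = 0x517ff800, which is how
 * at91_cf_set_mem_map() and the ioremap() in probe use them.
 */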
/*--------------------------------------------------------------------------*/
static const char driver_name[] = "at91_cf";
struct at91_cf_socket {
struct pcmcia_socket socket;
unsigned present:1;
struct platform_device *pdev;
struct at91_cf_data *board;
unsigned long phys_baseaddr;
};
static inline int at91_cf_present(struct at91_cf_socket *cf)
{
return !gpio_get_value(cf->board->det_pin);
}
/*--------------------------------------------------------------------------*/
static int at91_cf_ss_init(struct pcmcia_socket *s)
{
return 0;
}
static irqreturn_t at91_cf_irq(int irq, void *_cf)
{
struct at91_cf_socket *cf = _cf;
if (irq == gpio_to_irq(cf->board->det_pin)) {
unsigned present = at91_cf_present(cf);
/* kick pccard as needed */
if (present != cf->present) {
cf->present = present;
pr_debug("%s: card %s\n", driver_name,
present ? "present" : "gone");
pcmcia_parse_events(&cf->socket, SS_DETECT);
}
}
return IRQ_HANDLED;
}
static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp)
{
struct at91_cf_socket *cf;
if (!sp)
return -EINVAL;
cf = container_of(s, struct at91_cf_socket, socket);
/* NOTE: CF is always 3VCARD */
if (at91_cf_present(cf)) {
int rdy = gpio_is_valid(cf->board->irq_pin); /* RDY/nIRQ */
int vcc = gpio_is_valid(cf->board->vcc_pin);
*sp = SS_DETECT | SS_3VCARD;
if (!rdy || gpio_get_value(cf->board->irq_pin))
*sp |= SS_READY;
if (!vcc || gpio_get_value(cf->board->vcc_pin))
*sp |= SS_POWERON;
} else
*sp = 0;
return 0;
}
static int
at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
{
struct at91_cf_socket *cf;
cf = container_of(sock, struct at91_cf_socket, socket);
/* switch Vcc if needed and possible */
if (gpio_is_valid(cf->board->vcc_pin)) {
switch (s->Vcc) {
case 0:
gpio_set_value(cf->board->vcc_pin, 0);
break;
case 33:
gpio_set_value(cf->board->vcc_pin, 1);
break;
default:
return -EINVAL;
}
}
/* toggle reset if needed */
gpio_set_value(cf->board->rst_pin, s->flags & SS_RESET);
pr_debug("%s: Vcc %d, io_irq %d, flags %04x csc %04x\n",
driver_name, s->Vcc, s->io_irq, s->flags, s->csc_mask);
return 0;
}
static int at91_cf_ss_suspend(struct pcmcia_socket *s)
{
return at91_cf_set_socket(s, &dead_socket);
}
/* we already mapped the I/O region */
static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
{
struct at91_cf_socket *cf;
u32 csr;
cf = container_of(s, struct at91_cf_socket, socket);
io->flags &= (MAP_ACTIVE | MAP_16BIT | MAP_AUTOSZ);
/*
* Use 16 bit accesses unless/until we need 8-bit i/o space.
*/
csr = at91_ramc_read(0, AT91_SMC_CSR(cf->board->chipselect)) & ~AT91_SMC_DBW;
/*
* NOTE: this CF controller ignores IOIS16, so we can't really do
* MAP_AUTOSZ. The 16bit mode allows single byte access on either
* D0-D7 (even addr) or D8-D15 (odd), so it's close enough for many
* purposes (and handles ide-cs).
*
* The 8bit mode is needed for odd byte access on D0-D7. It seems
* some cards only like that way to get at the odd byte, despite
* CF 3.0 spec table 35 also giving the D8-D15 option.
*/
if (!(io->flags & (MAP_16BIT | MAP_AUTOSZ))) {
csr |= AT91_SMC_DBW_8;
pr_debug("%s: 8bit i/o bus\n", driver_name);
} else {
csr |= AT91_SMC_DBW_16;
pr_debug("%s: 16bit i/o bus\n", driver_name);
}
at91_ramc_write(0, AT91_SMC_CSR(cf->board->chipselect), csr);
io->start = cf->socket.io_offset;
io->stop = io->start + SZ_2K - 1;
return 0;
}
/* pcmcia layer maps/unmaps mem regions */
static int
at91_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
{
struct at91_cf_socket *cf;
if (map->card_start)
return -EINVAL;
cf = container_of(s, struct at91_cf_socket, socket);
map->flags &= (MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT);
if (map->flags & MAP_ATTRIB)
map->static_start = cf->phys_baseaddr + CF_ATTR_PHYS;
else
map->static_start = cf->phys_baseaddr + CF_MEM_PHYS;
return 0;
}
static struct pccard_operations at91_cf_ops = {
.init = at91_cf_ss_init,
.suspend = at91_cf_ss_suspend,
.get_status = at91_cf_get_status,
.set_socket = at91_cf_set_socket,
.set_io_map = at91_cf_set_io_map,
.set_mem_map = at91_cf_set_mem_map,
};
/*--------------------------------------------------------------------------*/
static int __init at91_cf_probe(struct platform_device *pdev)
{
struct at91_cf_socket *cf;
struct at91_cf_data *board = pdev->dev.platform_data;
struct resource *io;
int status;
if (!board || !gpio_is_valid(board->det_pin) || !gpio_is_valid(board->rst_pin))
return -ENODEV;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io)
return -ENODEV;
cf = kzalloc(sizeof *cf, GFP_KERNEL);
if (!cf)
return -ENOMEM;
cf->board = board;
cf->pdev = pdev;
cf->phys_baseaddr = io->start;
platform_set_drvdata(pdev, cf);
/* must be a GPIO; ergo must trigger on both edges */
status = gpio_request(board->det_pin, "cf_det");
if (status < 0)
goto fail0;
status = request_irq(gpio_to_irq(board->det_pin), at91_cf_irq, 0, driver_name, cf);
if (status < 0)
goto fail00;
device_init_wakeup(&pdev->dev, 1);
status = gpio_request(board->rst_pin, "cf_rst");
if (status < 0)
goto fail0a;
if (gpio_is_valid(board->vcc_pin)) {
status = gpio_request(board->vcc_pin, "cf_vcc");
if (status < 0)
goto fail0b;
}
/*
* The card driver will request this irq later as needed,
* but it causes lots of "irqNN: nobody cared" messages
* unless we report that we handle everything (sigh).
* (Note: DK board doesn't wire the IRQ pin...)
*/
if (gpio_is_valid(board->irq_pin)) {
status = gpio_request(board->irq_pin, "cf_irq");
if (status < 0)
goto fail0c;
status = request_irq(gpio_to_irq(board->irq_pin), at91_cf_irq,
IRQF_SHARED, driver_name, cf);
if (status < 0)
goto fail0d;
cf->socket.pci_irq = gpio_to_irq(board->irq_pin);
} else
cf->socket.pci_irq = nr_irqs + 1;
/* pcmcia layer only remaps "real" memory not iospace */
cf->socket.io_offset = (unsigned long)
ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K);
if (!cf->socket.io_offset) {
status = -ENXIO;
goto fail1;
}
/* reserve chip-select regions */
if (!request_mem_region(io->start, resource_size(io), driver_name)) {
status = -ENXIO;
goto fail1;
}
pr_info("%s: irqs det #%d, io #%d\n", driver_name,
gpio_to_irq(board->det_pin), gpio_to_irq(board->irq_pin));
cf->socket.owner = THIS_MODULE;
cf->socket.dev.parent = &pdev->dev;
cf->socket.ops = &at91_cf_ops;
cf->socket.resource_ops = &pccard_static_ops;
cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP
| SS_CAP_MEM_ALIGN;
cf->socket.map_size = SZ_2K;
cf->socket.io[0].res = io;
status = pcmcia_register_socket(&cf->socket);
if (status < 0)
goto fail2;
return 0;
fail2:
release_mem_region(io->start, resource_size(io));
fail1:
if (cf->socket.io_offset)
iounmap((void __iomem *) cf->socket.io_offset);
if (gpio_is_valid(board->irq_pin)) {
free_irq(gpio_to_irq(board->irq_pin), cf);
fail0d:
gpio_free(board->irq_pin);
}
fail0c:
if (gpio_is_valid(board->vcc_pin))
gpio_free(board->vcc_pin);
fail0b:
gpio_free(board->rst_pin);
fail0a:
device_init_wakeup(&pdev->dev, 0);
free_irq(gpio_to_irq(board->det_pin), cf);
fail00:
gpio_free(board->det_pin);
fail0:
kfree(cf);
return status;
}
static int __exit at91_cf_remove(struct platform_device *pdev)
{
struct at91_cf_socket *cf = platform_get_drvdata(pdev);
struct at91_cf_data *board = cf->board;
struct resource *io = cf->socket.io[0].res;
pcmcia_unregister_socket(&cf->socket);
release_mem_region(io->start, resource_size(io));
iounmap((void __iomem *) cf->socket.io_offset);
if (gpio_is_valid(board->irq_pin)) {
free_irq(gpio_to_irq(board->irq_pin), cf);
gpio_free(board->irq_pin);
}
if (gpio_is_valid(board->vcc_pin))
gpio_free(board->vcc_pin);
gpio_free(board->rst_pin);
device_init_wakeup(&pdev->dev, 0);
free_irq(gpio_to_irq(board->det_pin), cf);
gpio_free(board->det_pin);
kfree(cf);
return 0;
}
#ifdef CONFIG_PM
static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct at91_cf_socket *cf = platform_get_drvdata(pdev);
struct at91_cf_data *board = cf->board;
if (device_may_wakeup(&pdev->dev)) {
enable_irq_wake(gpio_to_irq(board->det_pin));
if (gpio_is_valid(board->irq_pin))
enable_irq_wake(gpio_to_irq(board->irq_pin));
}
return 0;
}
static int at91_cf_resume(struct platform_device *pdev)
{
struct at91_cf_socket *cf = platform_get_drvdata(pdev);
struct at91_cf_data *board = cf->board;
if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(gpio_to_irq(board->det_pin));
if (gpio_is_valid(board->irq_pin))
disable_irq_wake(gpio_to_irq(board->irq_pin));
}
return 0;
}
#else
#define at91_cf_suspend NULL
#define at91_cf_resume NULL
#endif
static struct platform_driver at91_cf_driver = {
.driver = {
.name = (char *) driver_name,
.owner = THIS_MODULE,
},
.remove = __exit_p(at91_cf_remove),
.suspend = at91_cf_suspend,
.resume = at91_cf_resume,
};
/*--------------------------------------------------------------------------*/
static int __init at91_cf_init(void)
{
return platform_driver_probe(&at91_cf_driver, at91_cf_probe);
}
module_init(at91_cf_init);
static void __exit at91_cf_exit(void)
{
platform_driver_unregister(&at91_cf_driver);
}
module_exit(at91_cf_exit);
MODULE_DESCRIPTION("AT91 Compact Flash Driver");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_cf");
| gpl-2.0 |
SimpleAOSP-Kernel/kernel_hammerhead | kernel/power/fbearlysuspend.c | 3793 | 4455 | /* kernel/power/fbearlysuspend.c
*
* Copyright (C) 2005-2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/earlysuspend.h>
#include <linux/module.h>
#include <linux/wait.h>
#include "power.h"
#define MAX_BUF 100
static wait_queue_head_t fb_state_wq;
static int display = 1;
static DEFINE_SPINLOCK(fb_state_lock);
static enum {
FB_STATE_STOPPED_DRAWING,
FB_STATE_REQUEST_STOP_DRAWING,
FB_STATE_DRAWING_OK,
} fb_state;
/* tell userspace to stop drawing, wait for it to stop */
static void stop_drawing_early_suspend(struct early_suspend *h)
{
int ret;
unsigned long irq_flags;
spin_lock_irqsave(&fb_state_lock, irq_flags);
fb_state = FB_STATE_REQUEST_STOP_DRAWING;
spin_unlock_irqrestore(&fb_state_lock, irq_flags);
wake_up_all(&fb_state_wq);
ret = wait_event_timeout(fb_state_wq,
fb_state == FB_STATE_STOPPED_DRAWING,
HZ);
if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING))
pr_warning("stop_drawing_early_suspend: timeout waiting for "
"userspace to stop drawing\n");
}
/* tell userspace to start drawing */
static void start_drawing_late_resume(struct early_suspend *h)
{
unsigned long irq_flags;
spin_lock_irqsave(&fb_state_lock, irq_flags);
fb_state = FB_STATE_DRAWING_OK;
spin_unlock_irqrestore(&fb_state_lock, irq_flags);
wake_up(&fb_state_wq);
}
static struct early_suspend stop_drawing_early_suspend_desc = {
.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
.suspend = stop_drawing_early_suspend,
.resume = start_drawing_late_resume,
};
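/*
 * Illustrative usage sketch (not part of the original file): a userspace
 * compositor typically blocks reading /sys/power/wait_for_fb_sleep (which
 * returns "sleeping" once drawing must stop) and /sys/power/wait_for_fb_wake
 * (which returns "awake" on resume), completing the FB_STATE_* handshake
 * driven by the early_suspend hooks above.
 */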
static ssize_t wait_for_fb_sleep_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *s = buf;
int ret;
ret = wait_event_interruptible(fb_state_wq,
fb_state != FB_STATE_DRAWING_OK);
if (ret && fb_state == FB_STATE_DRAWING_OK) {
return ret;
} else {
s += sprintf(buf, "sleeping");
if (display == 1) {
display = 0;
sysfs_notify(power_kobj, NULL, "wait_for_fb_status");
}
}
return s - buf;
}
static ssize_t wait_for_fb_wake_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *s = buf;
int ret;
unsigned long irq_flags;
spin_lock_irqsave(&fb_state_lock, irq_flags);
if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) {
fb_state = FB_STATE_STOPPED_DRAWING;
wake_up(&fb_state_wq);
}
spin_unlock_irqrestore(&fb_state_lock, irq_flags);
ret = wait_event_interruptible(fb_state_wq,
fb_state == FB_STATE_DRAWING_OK);
if (ret && fb_state != FB_STATE_DRAWING_OK)
return ret;
else {
s += sprintf(buf, "awake");
if (display == 0) {
display = 1;
sysfs_notify(power_kobj, NULL, "wait_for_fb_status");
}
}
return s - buf;
}
static ssize_t wait_for_fb_status_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int ret = 0;
if (display == 1)
ret = snprintf(buf, strnlen("on", MAX_BUF) + 1, "on");
else
ret = snprintf(buf, strnlen("off", MAX_BUF) + 1, "off");
return ret;
}
#define power_ro_attr(_name) \
static struct kobj_attribute _name##_attr = { \
.attr = { \
.name = __stringify(_name), \
.mode = 0444, \
}, \
.show = _name##_show, \
.store = NULL, \
}
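/*
 * Illustrative expansion (not part of the original file):
 * power_ro_attr(foo) defines a read-only sysfs attribute backed by
 * foo_show(), roughly
 *
 * static struct kobj_attribute foo_attr = {
 * .attr = { .name = "foo", .mode = 0444 },
 * .show = foo_show,
 * .store = NULL,
 * };
 *
 * so the three invocations below create /sys/power/wait_for_fb_sleep,
 * wait_for_fb_wake and wait_for_fb_status.
 */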
power_ro_attr(wait_for_fb_sleep);
power_ro_attr(wait_for_fb_wake);
power_ro_attr(wait_for_fb_status);
static struct attribute *g[] = {
&wait_for_fb_sleep_attr.attr,
&wait_for_fb_wake_attr.attr,
&wait_for_fb_status_attr.attr,
NULL,
};
static struct attribute_group attr_group = {
.attrs = g,
};
static int __init android_power_init(void)
{
int ret;
init_waitqueue_head(&fb_state_wq);
fb_state = FB_STATE_DRAWING_OK;
ret = sysfs_create_group(power_kobj, &attr_group);
if (ret) {
pr_err("android_power_init: sysfs_create_group failed\n");
return ret;
}
register_early_suspend(&stop_drawing_early_suspend_desc);
return 0;
}
static void __exit android_power_exit(void)
{
unregister_early_suspend(&stop_drawing_early_suspend_desc);
sysfs_remove_group(power_kobj, &attr_group);
}
module_init(android_power_init);
module_exit(android_power_exit);
| gpl-2.0 |
aatjitra/sgs3 | arch/ia64/sn/kernel/sn2/sn2_smp.c | 4049 | 15766 | /*
* SN2 Platform specific SMP Support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/nodemask.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/tlb.h>
#include <asm/numa.h>
#include <asm/hw_irq.h>
#include <asm/current.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>
#include <asm/sn/sn_feature_sets.h>
DEFINE_PER_CPU(struct ptc_stats, ptcstats);
DECLARE_PER_CPU(struct ptc_stats, ptcstats);
static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
/* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */
static int sn2_flush_opt = 0;
extern unsigned long
sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
volatile unsigned long *, unsigned long,
volatile unsigned long *, unsigned long);
void
sn2_ptc_deadlock_recovery(short *, short, short, int,
volatile unsigned long *, unsigned long,
volatile unsigned long *, unsigned long);
/*
* Note: some of the following is captured here to make debugging easier
* (the macros make more sense if you see the debug patch - not posted)
*/
#define sn2_ptctest 0
#define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0)
#define max_active_pio(sh1) ((sh1) ? 32 : 7)
#define reset_max_active_on_deadlock() 1
#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
struct ptc_stats {
unsigned long ptc_l;
unsigned long change_rid;
unsigned long shub_ptc_flushes;
unsigned long nodes_flushed;
unsigned long deadlocks;
unsigned long deadlocks2;
unsigned long lock_itc_clocks;
unsigned long shub_itc_clocks;
unsigned long shub_itc_clocks_max;
unsigned long shub_ptc_flushes_not_my_mm;
unsigned long shub_ipi_flushes;
unsigned long shub_ipi_flushes_itc_clocks;
};
#define sn2_ptctest 0
static inline unsigned long wait_piowc(void)
{
volatile unsigned long *piows;
unsigned long zeroval, ws;
piows = pda->pio_write_status_addr;
zeroval = pda->pio_write_status_val;
do {
cpu_relax();
} while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
}
/**
* sn_migrate - SN-specific task migration actions
* @task: Task being migrated to new CPU
*
* SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
* Context switching user threads which have memory-mapped MMIO may cause
* PIOs to issue from separate CPUs, thus the PIO writes must be drained
* from the previous CPU's Shub before execution resumes on the new CPU.
*/
void sn_migrate(struct task_struct *task)
{
pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
volatile unsigned long *adr = last_pda->pio_write_status_addr;
unsigned long val = last_pda->pio_write_status_val;
/* Drain PIO writes from old CPU's Shub */
while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
!= val))
cpu_relax();
}
void sn_tlb_migrate_finish(struct mm_struct *mm)
{
/* flush_tlb_mm is inefficient if more than 1 user of the mm */
if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
flush_tlb_mm(mm);
}
static void
sn2_ipi_flush_all_tlb(struct mm_struct *mm)
{
unsigned long itc;
itc = ia64_get_itc();
smp_flush_tlb_cpumask(*mm_cpumask(mm));
itc = ia64_get_itc() - itc;
__get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
__get_cpu_var(ptcstats).shub_ipi_flushes++;
}
/**
* sn2_global_tlb_purge - globally purge translation cache of virtual address range
* @mm: mm_struct containing virtual address range
* @start: start of virtual address range
* @end: end of virtual address range
* @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
*
* Purges the translation caches of all processors of the given virtual address
* range.
*
* Note:
* - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
* - cpu_vm_mask is converted into a nodemask of the nodes containing the
* cpus in cpu_vm_mask.
* - if only one bit is set in cpu_vm_mask & it is the current cpu & the
* process is purging its own virtual address range, then only the
* local TLB needs to be flushed. This flushing can be done using
* ptc.l. This is the common case & avoids the global spinlock.
* - if multiple cpus have loaded the context, then flushing has to be
* done with ptc.g/MMRs under protection of the global ptc_lock.
*/
void
sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long nbits)
{
int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
int mymm = (mm == current->active_mm && mm == current->mm);
int use_cpu_ptcga;
volatile unsigned long *ptc0, *ptc1;
unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
short nasids[MAX_NUMNODES], nix;
nodemask_t nodes_flushed;
int active, max_active, deadlock, flush_opt = sn2_flush_opt;
if (flush_opt > 2) {
sn2_ipi_flush_all_tlb(mm);
return;
}
nodes_clear(nodes_flushed);
i = 0;
for_each_cpu(cpu, mm_cpumask(mm)) {
cnode = cpu_to_node(cpu);
node_set(cnode, nodes_flushed);
lcpu = cpu;
i++;
}
if (i == 0)
return;
preempt_disable();
if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) {
do {
ia64_ptcl(start, nbits << 2);
start += (1UL << nbits);
} while (start < end);
ia64_srlz_i();
__get_cpu_var(ptcstats).ptc_l++;
preempt_enable();
return;
}
if (atomic_read(&mm->mm_users) == 1 && mymm) {
flush_tlb_mm(mm);
__get_cpu_var(ptcstats).change_rid++;
preempt_enable();
return;
}
if (flush_opt == 2) {
sn2_ipi_flush_all_tlb(mm);
preempt_enable();
return;
}
itc = ia64_get_itc();
nix = 0;
for_each_node_mask(cnode, nodes_flushed)
nasids[nix++] = cnodeid_to_nasid(cnode);
rr_value = (mm->context << 3) | REGION_NUMBER(start);
shub1 = is_shub1();
if (shub1) {
data0 = (1UL << SH1_PTC_0_A_SHFT) |
(nbits << SH1_PTC_0_PS_SHFT) |
(rr_value << SH1_PTC_0_RID_SHFT) |
(1UL << SH1_PTC_0_START_SHFT);
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
} else {
data0 = (1UL << SH2_PTC_A_SHFT) |
(nbits << SH2_PTC_PS_SHFT) |
(1UL << SH2_PTC_START_SHFT);
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
(rr_value << SH2_PTC_RID_SHFT));
ptc1 = NULL;
}
mynasid = get_nasid();
use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
max_active = max_active_pio(shub1);
itc = ia64_get_itc();
spin_lock_irqsave(PTC_LOCK(shub1), flags);
itc2 = ia64_get_itc();
__get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
__get_cpu_var(ptcstats).shub_ptc_flushes++;
__get_cpu_var(ptcstats).nodes_flushed += nix;
if (!mymm)
__get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;
if (use_cpu_ptcga && !mymm) {
old_rr = ia64_get_rr(start);
ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
ia64_srlz_d();
}
wait_piowc();
do {
if (shub1)
data1 = start | (1UL << SH1_PTC_1_START_SHFT);
else
data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
deadlock = 0;
active = 0;
for (ibegin = 0, i = 0; i < nix; i++) {
nasid = nasids[i];
if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
ia64_ptcga(start, nbits << 2);
ia64_srlz_i();
} else {
ptc0 = CHANGE_NASID(nasid, ptc0);
if (ptc1)
ptc1 = CHANGE_NASID(nasid, ptc1);
pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
active++;
}
if (active >= max_active || i == (nix - 1)) {
if ((deadlock = wait_piowc())) {
if (flush_opt == 1)
goto done;
sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
if (reset_max_active_on_deadlock())
max_active = 1;
}
active = 0;
ibegin = i + 1;
}
}
start += (1UL << nbits);
} while (start < end);
done:
itc2 = ia64_get_itc() - itc2;
__get_cpu_var(ptcstats).shub_itc_clocks += itc2;
if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
__get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
if (old_rr) {
ia64_set_rr(start, old_rr);
ia64_srlz_d();
}
spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
if (flush_opt == 1 && deadlock) {
__get_cpu_var(ptcstats).deadlocks++;
sn2_ipi_flush_all_tlb(mm);
}
preempt_enable();
}
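/*
 * Illustrative sketch (not part of the original file): for a purge of one
 * 16KB page (nbits = 14) the paths above are, in increasing cost,
 * a single ia64_ptcl(start, 14 << 2) when only this cpu has the mm loaded
 * and it is the current mm, a flush_tlb_mm() rid change when this cpu is
 * the mm's only user, and otherwise ptc.g/MMR writes to every nasid in
 * nodes_flushed serialized by PTC_LOCK(), with the IPI fallback selected
 * by sn2_flush_opt.
 */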
/*
* sn2_ptc_deadlock_recovery
*
* Recover from PTC deadlock conditions. Recovery requires stepping through each
* TLB flush transaction. The recovery sequence is somewhat tricky & is
* coded in assembly language.
*/
void
sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
volatile unsigned long *ptc0, unsigned long data0,
volatile unsigned long *ptc1, unsigned long data1)
{
short nasid, i;
unsigned long *piows, zeroval, n;
__get_cpu_var(ptcstats).deadlocks++;
piows = (unsigned long *) pda->pio_write_status_addr;
zeroval = pda->pio_write_status_val;
for (i=ib; i <= ie; i++) {
nasid = nasids[i];
if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
continue;
ptc0 = CHANGE_NASID(nasid, ptc0);
if (ptc1)
ptc1 = CHANGE_NASID(nasid, ptc1);
n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
__get_cpu_var(ptcstats).deadlocks2 += n;
}
}
/**
* sn_send_IPI_phys - send an IPI to a Nasid and slice
* @nasid: nasid to receive the interrupt (may be outside partition)
* @physid: physical cpuid to receive the interrupt.
* @vector: command to send
* @delivery_mode: delivery mechanism
*
* Sends an IPI (interprocessor interrupt) to the processor specified by
* @physid
*
* @delivery_mode can be one of the following
*
* %IA64_IPI_DM_INT - pend an interrupt
* %IA64_IPI_DM_PMI - pend a PMI
* %IA64_IPI_DM_NMI - pend an NMI
* %IA64_IPI_DM_INIT - pend an INIT interrupt
*/
void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
{
long val;
unsigned long flags = 0;
volatile long *p;
p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
val = (1UL << SH_IPI_INT_SEND_SHFT) |
(physid << SH_IPI_INT_PID_SHFT) |
((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
((long)vector << SH_IPI_INT_IDX_SHFT) |
(0x000feeUL << SH_IPI_INT_BASE_SHFT);
mb();
if (enable_shub_wars_1_1()) {
spin_lock_irqsave(&sn2_global_ptc_lock, flags);
}
pio_phys_write_mmr(p, val);
if (enable_shub_wars_1_1()) {
wait_piowc();
spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
}
}
EXPORT_SYMBOL(sn_send_IPI_phys);
/**
* sn2_send_IPI - send an IPI to a processor
* @cpuid: target of the IPI
* @vector: command to send
* @delivery_mode: delivery mechanism
* @redirect: redirect the IPI?
*
* Sends an IPI (InterProcessor Interrupt) to the processor specified by
* @cpuid. @vector specifies the command to send, while @delivery_mode can
* be one of the following
*
* %IA64_IPI_DM_INT - pend an interrupt
* %IA64_IPI_DM_PMI - pend a PMI
* %IA64_IPI_DM_NMI - pend an NMI
* %IA64_IPI_DM_INIT - pend an INIT interrupt
*/
void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
{
long physid;
int nasid;
physid = cpu_physical_id(cpuid);
nasid = cpuid_to_nasid(cpuid);
/* the following is used only when starting cpus at boot time */
if (unlikely(nasid == -1))
ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
}
#ifdef CONFIG_HOTPLUG_CPU
/**
* sn_cpu_disable_allowed - Determine if a CPU can be disabled.
* @cpu - CPU that is requested to be disabled.
*
* CPU disable is only allowed on SHub2 systems running with a PROM
* that supports CPU disable. It is not permitted to disable the boot processor.
*/
bool sn_cpu_disable_allowed(int cpu)
{
if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) {
if (cpu != 0)
return true;
else
printk(KERN_WARNING
"Disabling the boot processor is not allowed.\n");
} else
printk(KERN_WARNING
"CPU disable is not supported on this system.\n");
return false;
}
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PROC_FS
#define PTC_BASENAME "sgi_sn/ptc_statistics"
static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
{
if (*offset < nr_cpu_ids)
return offset;
return NULL;
}
static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
{
(*offset)++;
if (*offset < nr_cpu_ids)
return offset;
return NULL;
}
static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
{
}
static int sn2_ptc_seq_show(struct seq_file *file, void *data)
{
struct ptc_stats *stat;
int cpu;
cpu = *(loff_t *) data;
if (!cpu) {
seq_printf(file,
"# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_fluches ipi_nsec\n");
seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
}
if (cpu < nr_cpu_ids && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
stat->deadlocks,
1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
stat->shub_ptc_flushes_not_my_mm,
stat->deadlocks2,
stat->shub_ipi_flushes,
1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
}
return 0;
}
static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data)
{
int cpu;
char optstr[64];
if (count == 0 || count > sizeof(optstr))
return -EINVAL;
if (copy_from_user(optstr, user, count))
return -EFAULT;
optstr[count - 1] = '\0';
sn2_flush_opt = simple_strtoul(optstr, NULL, 0);
for_each_online_cpu(cpu)
memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats));
return count;
}
static const struct seq_operations sn2_ptc_seq_ops = {
.start = sn2_ptc_seq_start,
.next = sn2_ptc_seq_next,
.stop = sn2_ptc_seq_stop,
.show = sn2_ptc_seq_show
};
static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &sn2_ptc_seq_ops);
}
static const struct file_operations proc_sn2_ptc_operations = {
.open = sn2_ptc_proc_open,
.read = seq_read,
.write = sn2_ptc_proc_write,
.llseek = seq_lseek,
.release = seq_release,
};
static struct proc_dir_entry *proc_sn2_ptc;
static int __init sn2_ptc_init(void)
{
if (!ia64_platform_is("sn2"))
return 0;
proc_sn2_ptc = proc_create(PTC_BASENAME, 0444,
NULL, &proc_sn2_ptc_operations);
if (!proc_sn2_ptc) {
printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
return -EINVAL;
}
spin_lock_init(&sn2_global_ptc_lock);
return 0;
}
static void __exit sn2_ptc_exit(void)
{
remove_proc_entry(PTC_BASENAME, NULL);
}
module_init(sn2_ptc_init);
module_exit(sn2_ptc_exit);
#endif /* CONFIG_PROC_FS */
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_manta | arch/powerpc/platforms/pseries/hotplug-cpu.c | 4561 | 10919 | /*
* pseries CPU Hotplug infrastructure.
*
* Split out from arch/powerpc/platforms/pseries/setup.c
* arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
*
* Peter Bergner, IBM March 2001.
* Copyright (C) 2001 IBM.
* Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
* Plus various changes from other IBM teams...
*
* Copyright (C) 2006 Michael Ellerman, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h> /* for idle_task_exit */
#include <linux/cpu.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/pSeries_reconfig.h>
#include <asm/xics.h>
#include "plpar_wrappers.h"
#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
static struct rtas_args rtas_stop_self_args = {
.token = RTAS_UNKNOWN_SERVICE,
.nargs = 0,
.nret = 1,
.rets = &rtas_stop_self_args.args[0],
};
static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
CPU_STATE_OFFLINE;
static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;
static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;
static int cede_offline_enabled __read_mostly = 1;
/*
* Enable/disable cede_offline when available.
*/
static int __init setup_cede_offline(char *str)
{
if (!strcmp(str, "off"))
cede_offline_enabled = 0;
else if (!strcmp(str, "on"))
cede_offline_enabled = 1;
else
return 0;
return 1;
}
__setup("cede_offline=", setup_cede_offline);
enum cpu_state_vals get_cpu_current_state(int cpu)
{
return per_cpu(current_state, cpu);
}
void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
per_cpu(current_state, cpu) = state;
}
enum cpu_state_vals get_preferred_offline_state(int cpu)
{
return per_cpu(preferred_offline_state, cpu);
}
void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
per_cpu(preferred_offline_state, cpu) = state;
}
void set_default_offline_state(int cpu)
{
per_cpu(preferred_offline_state, cpu) = default_offline_state;
}
static void rtas_stop_self(void)
{
struct rtas_args *args = &rtas_stop_self_args;
local_irq_disable();
BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
enter_rtas(__pa(args));
panic("Alas, I survived.\n");
}
static void pseries_mach_cpu_die(void)
{
unsigned int cpu = smp_processor_id();
unsigned int hwcpu = hard_smp_processor_id();
u8 cede_latency_hint = 0;
local_irq_disable();
idle_task_exit();
xics_teardown_cpu();
if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
if (ppc_md.suspend_disable_cpu)
ppc_md.suspend_disable_cpu();
cede_latency_hint = 2;
get_lppaca()->idle = 1;
if (!get_lppaca()->shared_proc)
get_lppaca()->donate_dedicated_cpu = 1;
while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
extended_cede_processor(cede_latency_hint);
}
if (!get_lppaca()->shared_proc)
get_lppaca()->donate_dedicated_cpu = 0;
get_lppaca()->idle = 0;
if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
unregister_slb_shadow(hwcpu);
/*
* Call to start_secondary_resume() will not return.
* Kernel stack will be reset and start_secondary()
* will be called to continue the online operation.
*/
start_secondary_resume();
}
}
/* Requested state is CPU_STATE_OFFLINE at this point */
WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
unregister_slb_shadow(hwcpu);
rtas_stop_self();
/* Should never get here... */
BUG();
for(;;);
}
static int pseries_cpu_disable(void)
{
int cpu = smp_processor_id();
set_cpu_online(cpu, false);
vdso_data->processorCount--;
/*fix boot_cpuid here*/
if (cpu == boot_cpuid)
boot_cpuid = cpumask_any(cpu_online_mask);
/* FIXME: abstract this to not be platform specific later on */
xics_migrate_irqs_away();
return 0;
}
/*
* pseries_cpu_die: Wait for the cpu to die.
* @cpu: logical processor id of the CPU whose death we're awaiting.
*
* This function is called from the context of the thread which is performing
* the cpu-offline. Here we wait for long enough to allow the cpu in question
* to self-destroy so that the cpu-offline thread can send the CPU_DEAD
* notifications.
*
* OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
* self-destruct.
*/
static void pseries_cpu_die(unsigned int cpu)
{
int tries;
int cpu_status = 1;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
cpu_status = 1;
for (tries = 0; tries < 5000; tries++) {
if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
cpu_status = 0;
break;
}
msleep(1);
}
} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
for (tries = 0; tries < 25; tries++) {
cpu_status = smp_query_cpu_stopped(pcpu);
if (cpu_status == QCSS_STOPPED ||
cpu_status == QCSS_HARDWARE_ERROR)
break;
cpu_relax();
}
}
if (cpu_status != 0) {
printk("Querying DEAD? cpu %i (%i) shows %i\n",
cpu, pcpu, cpu_status);
}
/* Isolation and deallocation are definitely done by
* drslot_chrp_cpu. If they were not they would be
* done here. Change isolate state to Isolate and
* change allocation-state to Unusable.
*/
paca[cpu].cpu_start = 0;
}
/*
* Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
* here is that a cpu device node may represent up to two logical cpus
* in the SMT case. We must honor the assumption in other code that
* the logical ids for sibling SMT threads x and y are adjacent, such
* that x^1 == y and y^1 == x.
*/
static int pseries_add_processor(struct device_node *np)
{
unsigned int cpu;
cpumask_var_t candidate_mask, tmp;
int err = -ENOSPC, len, nthreads, i;
const u32 *intserv;
intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return 0;
zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
zalloc_cpumask_var(&tmp, GFP_KERNEL);
nthreads = len / sizeof(u32);
for (i = 0; i < nthreads; i++)
cpumask_set_cpu(i, tmp);
cpu_maps_update_begin();
BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
/* Get a bitmap of unoccupied slots. */
cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
if (cpumask_empty(candidate_mask)) {
/* If we get here, it most likely means that NR_CPUS is
* less than the partition's max processors setting.
*/
printk(KERN_ERR "Cannot add cpu %s; this system configuration"
" supports %d logical cpus.\n", np->full_name,
cpumask_weight(cpu_possible_mask));
goto out_unlock;
}
while (!cpumask_empty(tmp))
if (cpumask_subset(tmp, candidate_mask))
/* Found a range where we can insert the new cpu(s) */
break;
else
cpumask_shift_left(tmp, tmp, nthreads);
if (cpumask_empty(tmp)) {
printk(KERN_ERR "Unable to find space in cpu_present_mask for"
" processor %s with %d thread(s)\n", np->name,
nthreads);
goto out_unlock;
}
for_each_cpu(cpu, tmp) {
BUG_ON(cpu_present(cpu));
set_cpu_present(cpu, true);
set_hard_smp_processor_id(cpu, *intserv++);
}
err = 0;
out_unlock:
cpu_maps_update_done();
free_cpumask_var(candidate_mask);
free_cpumask_var(tmp);
return err;
}
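/*
 * Illustrative sketch (not part of the original file): with nthreads = 2
 * the candidate window above moves in steps of two, so the new threads
 * always land on an even/odd pair of logical ids, e.g. cpus 6 and 7,
 * preserving the SMT-sibling assumption that 6 ^ 1 == 7 and 7 ^ 1 == 6.
 */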
/*
* Update the present map for a cpu node which is going away, and set
* the hard id in the paca(s) to -1 to be consistent with boot time
* convention for non-present cpus.
*/
static void pseries_remove_processor(struct device_node *np)
{
unsigned int cpu;
int len, nthreads, i;
const u32 *intserv;
intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return;
nthreads = len / sizeof(u32);
cpu_maps_update_begin();
for (i = 0; i < nthreads; i++) {
for_each_present_cpu(cpu) {
if (get_hard_smp_processor_id(cpu) != intserv[i])
continue;
BUG_ON(cpu_online(cpu));
set_cpu_present(cpu, false);
set_hard_smp_processor_id(cpu, -1);
break;
}
if (cpu >= nr_cpu_ids)
printk(KERN_WARNING "Could not find cpu to remove "
"with physical id 0x%x\n", intserv[i]);
}
cpu_maps_update_done();
}
static int pseries_smp_notifier(struct notifier_block *nb,
unsigned long action, void *node)
{
int err = 0;
switch (action) {
case PSERIES_RECONFIG_ADD:
err = pseries_add_processor(node);
break;
case PSERIES_RECONFIG_REMOVE:
pseries_remove_processor(node);
break;
}
return notifier_from_errno(err);
}
static struct notifier_block pseries_smp_nb = {
.notifier_call = pseries_smp_notifier,
};
#define MAX_CEDE_LATENCY_LEVELS 4
#define CEDE_LATENCY_PARAM_LENGTH 10
#define CEDE_LATENCY_PARAM_MAX_LENGTH \
(MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
#define CEDE_LATENCY_TOKEN 45
static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];
static int parse_cede_parameters(void)
{
memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
NULL,
CEDE_LATENCY_TOKEN,
__pa(cede_parameters),
CEDE_LATENCY_PARAM_MAX_LENGTH);
}
static int __init pseries_cpu_hotplug_init(void)
{
struct device_node *np;
const char *typep;
int cpu;
int qcss_tok;
for_each_node_by_name(np, "interrupt-controller") {
typep = of_get_property(np, "compatible", NULL);
if (strstr(typep, "open-pic")) {
of_node_put(np);
printk(KERN_INFO "CPU Hotplug not supported on "
"systems using MPIC\n");
return 0;
}
}
rtas_stop_self_args.token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n");
return 0;
}
ppc_md.cpu_die = pseries_mach_cpu_die;
smp_ops->cpu_disable = pseries_cpu_disable;
smp_ops->cpu_die = pseries_cpu_die;
/* Processors can be added/removed only on LPAR */
if (firmware_has_feature(FW_FEATURE_LPAR)) {
pSeries_reconfig_notifier_register(&pseries_smp_nb);
cpu_maps_update_begin();
if (cede_offline_enabled && parse_cede_parameters() == 0) {
default_offline_state = CPU_STATE_INACTIVE;
for_each_online_cpu(cpu)
set_default_offline_state(cpu);
}
cpu_maps_update_done();
}
return 0;
}
arch_initcall(pseries_cpu_hotplug_init);
| gpl-2.0 |
MSM8226-Samsung/android_kernel_samsung_kmini3g | drivers/media/dvb/frontends/cx22700.c | 5073 | 10988 | /*
Conexant cx22700 DVB OFDM demodulator driver
Copyright (C) 2001-2002 Convergence Integrated Media GmbH
Holger Waechtler <holger@convergence.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "dvb_frontend.h"
#include "cx22700.h"
struct cx22700_state {
struct i2c_adapter* i2c;
const struct cx22700_config* config;
struct dvb_frontend frontend;
};
static int debug;
#define dprintk(args...) \
do { \
if (debug) printk(KERN_DEBUG "cx22700: " args); \
} while (0)
static u8 init_tab [] = {
0x04, 0x10,
0x05, 0x09,
0x06, 0x00,
0x08, 0x04,
0x09, 0x00,
0x0a, 0x01,
0x15, 0x40,
0x16, 0x10,
0x17, 0x87,
0x18, 0x17,
0x1a, 0x10,
0x25, 0x04,
0x2e, 0x00,
0x39, 0x00,
0x3a, 0x04,
0x45, 0x08,
0x46, 0x02,
0x47, 0x05,
};
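/* init_tab is a flat list of (register, value) pairs; cx22700_init()
below walks it two entries at a time and programs each register. */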
static int cx22700_writereg (struct cx22700_state* state, u8 reg, u8 data)
{
int ret;
u8 buf [] = { reg, data };
struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 };
dprintk ("%s\n", __func__);
ret = i2c_transfer (state->i2c, &msg, 1);
if (ret != 1)
printk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
__func__, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
static int cx22700_readreg (struct cx22700_state* state, u8 reg)
{
int ret;
u8 b0 [] = { reg };
u8 b1 [] = { 0 };
struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 },
{ .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
dprintk ("%s\n", __func__);
ret = i2c_transfer (state->i2c, msg, 2);
if (ret != 2) return -EIO;
return b1[0];
}
static int cx22700_set_inversion (struct cx22700_state* state, int inversion)
{
u8 val;
dprintk ("%s\n", __func__);
switch (inversion) {
case INVERSION_AUTO:
return -EOPNOTSUPP;
case INVERSION_ON:
val = cx22700_readreg (state, 0x09);
return cx22700_writereg (state, 0x09, val | 0x01);
case INVERSION_OFF:
val = cx22700_readreg (state, 0x09);
return cx22700_writereg (state, 0x09, val & 0xfe);
default:
return -EINVAL;
}
}
static int cx22700_set_tps(struct cx22700_state *state,
struct dtv_frontend_properties *p)
{
static const u8 qam_tab [4] = { 0, 1, 0, 2 };
static const u8 fec_tab [6] = { 0, 1, 2, 0, 3, 4 };
u8 val;
dprintk ("%s\n", __func__);
if (p->code_rate_HP < FEC_1_2 || p->code_rate_HP > FEC_7_8)
return -EINVAL;
if (p->code_rate_LP < FEC_1_2 || p->code_rate_LP > FEC_7_8)
return -EINVAL;
if (p->code_rate_HP == FEC_4_5 || p->code_rate_LP == FEC_4_5)
return -EINVAL;
if (p->guard_interval < GUARD_INTERVAL_1_32 ||
p->guard_interval > GUARD_INTERVAL_1_4)
return -EINVAL;
if (p->transmission_mode != TRANSMISSION_MODE_2K &&
p->transmission_mode != TRANSMISSION_MODE_8K)
return -EINVAL;
if (p->modulation != QPSK &&
p->modulation != QAM_16 &&
p->modulation != QAM_64)
return -EINVAL;
if (p->hierarchy < HIERARCHY_NONE ||
p->hierarchy > HIERARCHY_4)
return -EINVAL;
if (p->bandwidth_hz > 8000000 || p->bandwidth_hz < 6000000)
return -EINVAL;
if (p->bandwidth_hz == 7000000)
cx22700_writereg (state, 0x09, cx22700_readreg (state, 0x09) | 0x10);
else
cx22700_writereg (state, 0x09, cx22700_readreg (state, 0x09) & ~0x10);
val = qam_tab[p->modulation - QPSK];
val |= p->hierarchy - HIERARCHY_NONE;
cx22700_writereg (state, 0x04, val);
val = fec_tab[p->code_rate_HP - FEC_1_2] << 3;
val |= fec_tab[p->code_rate_LP - FEC_1_2];
cx22700_writereg (state, 0x05, val);
val = (p->guard_interval - GUARD_INTERVAL_1_32) << 2;
val |= p->transmission_mode - TRANSMISSION_MODE_2K;
cx22700_writereg (state, 0x06, val);
cx22700_writereg (state, 0x08, 0x04 | 0x02); /* use user tps parameters */
cx22700_writereg (state, 0x08, 0x04); /* restart acquisition */
return 0;
}
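/* Worked example of the register packing above (illustrative only,
assuming the usual Linux DVB enum ordering): for QAM_16, HIERARCHY_NONE,
code_rate_HP = FEC_3_4, code_rate_LP = FEC_1_2, GUARD_INTERVAL_1_8 and
TRANSMISSION_MODE_8K this writes 0x01 to reg 0x04 (qam_tab[1] | 0),
0x10 to reg 0x05 (fec_tab[2] << 3 | fec_tab[0]) and 0x09 to reg 0x06
(2 << 2 | 1). */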
static int cx22700_get_tps(struct cx22700_state *state,
struct dtv_frontend_properties *p)
{
static const fe_modulation_t qam_tab [3] = { QPSK, QAM_16, QAM_64 };
static const fe_code_rate_t fec_tab [5] = { FEC_1_2, FEC_2_3, FEC_3_4,
FEC_5_6, FEC_7_8 };
u8 val;
dprintk ("%s\n", __func__);
if (!(cx22700_readreg(state, 0x07) & 0x20)) /* tps valid? */
return -EAGAIN;
val = cx22700_readreg (state, 0x01);
if ((val & 0x7) > 4)
p->hierarchy = HIERARCHY_AUTO;
else
p->hierarchy = HIERARCHY_NONE + (val & 0x7);
if (((val >> 3) & 0x3) > 2)
p->modulation = QAM_AUTO;
else
p->modulation = qam_tab[(val >> 3) & 0x3];
val = cx22700_readreg (state, 0x02);
if (((val >> 3) & 0x07) > 4)
p->code_rate_HP = FEC_AUTO;
else
p->code_rate_HP = fec_tab[(val >> 3) & 0x07];
if ((val & 0x07) > 4)
p->code_rate_LP = FEC_AUTO;
else
p->code_rate_LP = fec_tab[val & 0x07];
val = cx22700_readreg (state, 0x03);
p->guard_interval = GUARD_INTERVAL_1_32 + ((val >> 6) & 0x3);
p->transmission_mode = TRANSMISSION_MODE_2K + ((val >> 5) & 0x1);
return 0;
}
static int cx22700_init (struct dvb_frontend* fe)
{ struct cx22700_state* state = fe->demodulator_priv;
int i;
dprintk("cx22700_init: init chip\n");
cx22700_writereg (state, 0x00, 0x02); /* soft reset */
cx22700_writereg (state, 0x00, 0x00);
msleep(10);
for (i=0; i<sizeof(init_tab); i+=2)
cx22700_writereg (state, init_tab[i], init_tab[i+1]);
cx22700_writereg (state, 0x00, 0x01);
return 0;
}
static int cx22700_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
struct cx22700_state* state = fe->demodulator_priv;
u16 rs_ber = (cx22700_readreg (state, 0x0d) << 9)
| (cx22700_readreg (state, 0x0e) << 1);
u8 sync = cx22700_readreg (state, 0x07);
*status = 0;
if (rs_ber < 0xff00)
*status |= FE_HAS_SIGNAL;
if (sync & 0x20)
*status |= FE_HAS_CARRIER;
if (sync & 0x10)
*status |= FE_HAS_VITERBI;
if (sync & 0x10)
*status |= FE_HAS_SYNC;
if (*status == 0x0f)
*status |= FE_HAS_LOCK;
return 0;
}
static int cx22700_read_ber(struct dvb_frontend* fe, u32* ber)
{
struct cx22700_state* state = fe->demodulator_priv;
*ber = cx22700_readreg (state, 0x0c) & 0x7f;
cx22700_writereg (state, 0x0c, 0x00);
return 0;
}
static int cx22700_read_signal_strength(struct dvb_frontend* fe, u16* signal_strength)
{
struct cx22700_state* state = fe->demodulator_priv;
u16 rs_ber = (cx22700_readreg (state, 0x0d) << 9)
| (cx22700_readreg (state, 0x0e) << 1);
*signal_strength = ~rs_ber;
return 0;
}
static int cx22700_read_snr(struct dvb_frontend* fe, u16* snr)
{
struct cx22700_state* state = fe->demodulator_priv;
u16 rs_ber = (cx22700_readreg (state, 0x0d) << 9)
| (cx22700_readreg (state, 0x0e) << 1);
*snr = ~rs_ber;
return 0;
}
static int cx22700_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
struct cx22700_state* state = fe->demodulator_priv;
*ucblocks = cx22700_readreg (state, 0x0f);
cx22700_writereg (state, 0x0f, 0x00);
return 0;
}
static int cx22700_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx22700_state* state = fe->demodulator_priv;
cx22700_writereg (state, 0x00, 0x02); /* XXX CHECKME: soft reset*/
cx22700_writereg (state, 0x00, 0x00);
if (fe->ops.tuner_ops.set_params) {
fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
cx22700_set_inversion(state, c->inversion);
cx22700_set_tps(state, c);
cx22700_writereg (state, 0x37, 0x01); /* PAL loop filter off */
cx22700_writereg (state, 0x00, 0x01); /* restart acquire */
return 0;
}
static int cx22700_get_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx22700_state* state = fe->demodulator_priv;
u8 reg09 = cx22700_readreg (state, 0x09);
c->inversion = reg09 & 0x1 ? INVERSION_ON : INVERSION_OFF;
return cx22700_get_tps(state, c);
}
static int cx22700_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
{
struct cx22700_state* state = fe->demodulator_priv;
if (enable) {
return cx22700_writereg(state, 0x0a, 0x00);
} else {
return cx22700_writereg(state, 0x0a, 0x01);
}
}
static int cx22700_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings)
{
fesettings->min_delay_ms = 150;
fesettings->step_size = 166667;
fesettings->max_drift = 166667*2;
return 0;
}
static void cx22700_release(struct dvb_frontend* fe)
{
struct cx22700_state* state = fe->demodulator_priv;
kfree(state);
}
static struct dvb_frontend_ops cx22700_ops;
struct dvb_frontend* cx22700_attach(const struct cx22700_config* config,
struct i2c_adapter* i2c)
{
struct cx22700_state* state = NULL;
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct cx22700_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
state->config = config;
state->i2c = i2c;
/* check if the demod is there */
if (cx22700_readreg(state, 0x07) < 0) goto error;
/* create dvb_frontend */
memcpy(&state->frontend.ops, &cx22700_ops, sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
return &state->frontend;
error:
kfree(state);
return NULL;
}
static struct dvb_frontend_ops cx22700_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Conexant CX22700 DVB-T",
.frequency_min = 470000000,
.frequency_max = 860000000,
.frequency_stepsize = 166667,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
FE_CAN_RECOVER
},
.release = cx22700_release,
.init = cx22700_init,
.i2c_gate_ctrl = cx22700_i2c_gate_ctrl,
.set_frontend = cx22700_set_frontend,
.get_frontend = cx22700_get_frontend,
.get_tune_settings = cx22700_get_tune_settings,
.read_status = cx22700_read_status,
.read_ber = cx22700_read_ber,
.read_signal_strength = cx22700_read_signal_strength,
.read_snr = cx22700_read_snr,
.read_ucblocks = cx22700_read_ucblocks,
};
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Conexant CX22700 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(cx22700_attach);
| gpl-2.0 |
MatthewBooth/JFLTE-GPE-Kernel | drivers/media/dvb/frontends/l64781.c | 5073 | 15803 | /*
driver for LSI L64781 COFDM demodulator
Copyright (C) 2001 Holger Waechtler for Convergence Integrated Media GmbH
Marko Kohtala <marko.kohtala@luukku.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "dvb_frontend.h"
#include "l64781.h"
struct l64781_state {
struct i2c_adapter* i2c;
const struct l64781_config* config;
struct dvb_frontend frontend;
/* private demodulator data */
unsigned int first:1;
};
#define dprintk(args...) \
do { \
if (debug) printk(KERN_DEBUG "l64781: " args); \
} while (0)
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
static int l64781_writereg (struct l64781_state* state, u8 reg, u8 data)
{
int ret;
u8 buf [] = { reg, data };
struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 };
if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1)
dprintk ("%s: write_reg error (reg == %02x) = %02x!\n",
__func__, reg, ret);
return (ret != 1) ? -1 : 0;
}
static int l64781_readreg (struct l64781_state* state, u8 reg)
{
int ret;
u8 b0 [] = { reg };
u8 b1 [] = { 0 };
struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 },
{ .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2) return ret;
return b1[0];
}
static void apply_tps (struct l64781_state* state)
{
l64781_writereg (state, 0x2a, 0x00);
l64781_writereg (state, 0x2a, 0x01);
/* This here is a little bit questionable because it enables
the automatic update of TPS registers. I think we'd need to
handle the IRQ from FE to update some other registers as
well, or at least implement some magic tuning to adjust to
the TPS received from the transmission. */
l64781_writereg (state, 0x2a, 0x02);
}
static void reset_afc (struct l64781_state* state)
{
/* Set AFC stall for the AFC_INIT_FRQ setting, TIM_STALL for
timing offset */
l64781_writereg (state, 0x07, 0x9e); /* stall AFC */
l64781_writereg (state, 0x08, 0); /* AFC INIT FREQ */
l64781_writereg (state, 0x09, 0);
l64781_writereg (state, 0x0a, 0);
l64781_writereg (state, 0x07, 0x8e);
l64781_writereg (state, 0x0e, 0); /* AGC gain to zero in beginning */
l64781_writereg (state, 0x11, 0x80); /* stall TIM */
l64781_writereg (state, 0x10, 0); /* TIM_OFFSET_LSB */
l64781_writereg (state, 0x12, 0);
l64781_writereg (state, 0x13, 0);
l64781_writereg (state, 0x11, 0x00);
}
static int reset_and_configure (struct l64781_state* state)
{
u8 buf [] = { 0x06 };
struct i2c_msg msg = { .addr = 0x00, .flags = 0, .buf = buf, .len = 1 };
// NOTE: this is correct in writing to address 0x00
return (i2c_transfer(state->i2c, &msg, 1) == 1) ? 0 : -ENODEV;
}
static int apply_frontend_param(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct l64781_state* state = fe->demodulator_priv;
/* The code rates for FEC_NONE, FEC_4_5 and FEC_6_7 are arbitrary */
static const u8 fec_tab[] = { 7, 0, 1, 2, 9, 3, 10, 4 };
/* QPSK, QAM_16, QAM_64 */
static const u8 qam_tab [] = { 2, 4, 0, 6 };
static const u8 guard_tab [] = { 1, 2, 4, 8 };
/* The Grundig 29504-401.04 Tuner comes with 18.432MHz crystal. */
static const u32 ppm = 8000;
u32 ddfs_offset_fixed;
/* u32 ddfs_offset_variable = 0x6000-((1000000UL+ppm)/ */
/* bw_tab[p->bandWidth]<<10)/15625; */
u32 init_freq;
u32 spi_bias;
u8 val0x04;
u8 val0x05;
u8 val0x06;
int bw;
switch (p->bandwidth_hz) {
case 8000000:
bw = 8;
break;
case 7000000:
bw = 7;
break;
case 6000000:
bw = 6;
break;
default:
return -EINVAL;
}
if (fe->ops.tuner_ops.set_params) {
fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
if (p->inversion != INVERSION_ON &&
p->inversion != INVERSION_OFF)
return -EINVAL;
if (p->code_rate_HP != FEC_1_2 && p->code_rate_HP != FEC_2_3 &&
p->code_rate_HP != FEC_3_4 && p->code_rate_HP != FEC_5_6 &&
p->code_rate_HP != FEC_7_8)
return -EINVAL;
if (p->hierarchy != HIERARCHY_NONE &&
(p->code_rate_LP != FEC_1_2 && p->code_rate_LP != FEC_2_3 &&
p->code_rate_LP != FEC_3_4 && p->code_rate_LP != FEC_5_6 &&
p->code_rate_LP != FEC_7_8))
return -EINVAL;
if (p->modulation != QPSK && p->modulation != QAM_16 &&
p->modulation != QAM_64)
return -EINVAL;
if (p->transmission_mode != TRANSMISSION_MODE_2K &&
p->transmission_mode != TRANSMISSION_MODE_8K)
return -EINVAL;
if (p->guard_interval < GUARD_INTERVAL_1_32 ||
p->guard_interval > GUARD_INTERVAL_1_4)
return -EINVAL;
if (p->hierarchy < HIERARCHY_NONE ||
p->hierarchy > HIERARCHY_4)
return -EINVAL;
ddfs_offset_fixed = 0x4000-(ppm<<16)/bw/1000000;
/* This works up to 20000 ppm; it overflows if ppm is too large! */
init_freq = (((8UL<<25) + (8UL<<19) / 25*ppm / (15625/25)) /
bw & 0xFFFFFF);
/* SPI bias calculation is slightly modified to fit in 32bit */
/* will work for high ppm only... */
spi_bias = 378 * (1 << 10);
spi_bias *= 16;
spi_bias *= bw;
spi_bias *= qam_tab[p->modulation];
spi_bias /= p->code_rate_HP + 1;
spi_bias /= (guard_tab[p->guard_interval] + 32);
spi_bias *= 1000;
spi_bias /= 1000 + ppm/1000;
spi_bias *= p->code_rate_HP;
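/* Note (illustrative, relies on the standard fe_code_rate_t values
FEC_1_2 = 1 ... FEC_7_8 = 7): dividing by code_rate_HP + 1 and later
multiplying by code_rate_HP scales spi_bias by the actual code rate,
e.g. FEC_3_4 gives a factor of 3/4. */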
val0x04 = (p->transmission_mode << 2) | p->guard_interval;
val0x05 = fec_tab[p->code_rate_HP];
if (p->hierarchy != HIERARCHY_NONE)
val0x05 |= (p->code_rate_LP - FEC_1_2) << 3;
val0x06 = (p->hierarchy << 2) | p->modulation;
l64781_writereg (state, 0x04, val0x04);
l64781_writereg (state, 0x05, val0x05);
l64781_writereg (state, 0x06, val0x06);
reset_afc (state);
/* Technical manual section 2.6.1, TIM_IIR_GAIN optimal values */
l64781_writereg (state, 0x15,
p->transmission_mode == TRANSMISSION_MODE_2K ? 1 : 3);
l64781_writereg (state, 0x16, init_freq & 0xff);
l64781_writereg (state, 0x17, (init_freq >> 8) & 0xff);
l64781_writereg (state, 0x18, (init_freq >> 16) & 0xff);
l64781_writereg (state, 0x1b, spi_bias & 0xff);
l64781_writereg (state, 0x1c, (spi_bias >> 8) & 0xff);
l64781_writereg (state, 0x1d, ((spi_bias >> 16) & 0x7f) |
(p->inversion == INVERSION_ON ? 0x80 : 0x00));
l64781_writereg (state, 0x22, ddfs_offset_fixed & 0xff);
l64781_writereg (state, 0x23, (ddfs_offset_fixed >> 8) & 0x3f);
l64781_readreg (state, 0x00); /* clear interrupt registers... */
l64781_readreg (state, 0x01); /* dto. */
apply_tps (state);
return 0;
}
static int get_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct l64781_state* state = fe->demodulator_priv;
int tmp;
tmp = l64781_readreg(state, 0x04);
switch(tmp & 3) {
case 0:
p->guard_interval = GUARD_INTERVAL_1_32;
break;
case 1:
p->guard_interval = GUARD_INTERVAL_1_16;
break;
case 2:
p->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
p->guard_interval = GUARD_INTERVAL_1_4;
break;
}
switch((tmp >> 2) & 3) {
case 0:
p->transmission_mode = TRANSMISSION_MODE_2K;
break;
case 1:
p->transmission_mode = TRANSMISSION_MODE_8K;
break;
default:
printk(KERN_WARNING "Unexpected value for transmission_mode\n");
}
tmp = l64781_readreg(state, 0x05);
switch(tmp & 7) {
case 0:
p->code_rate_HP = FEC_1_2;
break;
case 1:
p->code_rate_HP = FEC_2_3;
break;
case 2:
p->code_rate_HP = FEC_3_4;
break;
case 3:
p->code_rate_HP = FEC_5_6;
break;
case 4:
p->code_rate_HP = FEC_7_8;
break;
default:
printk("Unexpected value for code_rate_HP\n");
}
switch((tmp >> 3) & 7) {
case 0:
p->code_rate_LP = FEC_1_2;
break;
case 1:
p->code_rate_LP = FEC_2_3;
break;
case 2:
p->code_rate_LP = FEC_3_4;
break;
case 3:
p->code_rate_LP = FEC_5_6;
break;
case 4:
p->code_rate_LP = FEC_7_8;
break;
default:
printk("Unexpected value for code_rate_LP\n");
}
tmp = l64781_readreg(state, 0x06);
switch(tmp & 3) {
case 0:
p->modulation = QPSK;
break;
case 1:
p->modulation = QAM_16;
break;
case 2:
p->modulation = QAM_64;
break;
default:
printk(KERN_WARNING "Unexpected value for modulation\n");
}
switch((tmp >> 2) & 7) {
case 0:
p->hierarchy = HIERARCHY_NONE;
break;
case 1:
p->hierarchy = HIERARCHY_1;
break;
case 2:
p->hierarchy = HIERARCHY_2;
break;
case 3:
p->hierarchy = HIERARCHY_4;
break;
default:
printk("Unexpected value for hierarchy\n");
}
tmp = l64781_readreg (state, 0x1d);
p->inversion = (tmp & 0x80) ? INVERSION_ON : INVERSION_OFF;
tmp = (int) (l64781_readreg (state, 0x08) |
(l64781_readreg (state, 0x09) << 8) |
(l64781_readreg (state, 0x0a) << 16));
p->frequency += tmp;
return 0;
}
static int l64781_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
struct l64781_state* state = fe->demodulator_priv;
int sync = l64781_readreg (state, 0x32);
int gain = l64781_readreg (state, 0x0e);
l64781_readreg (state, 0x00); /* clear interrupt registers... */
l64781_readreg (state, 0x01); /* dto. */
*status = 0;
if (gain > 5)
*status |= FE_HAS_SIGNAL;
if (sync & 0x02) /* VCXO locked, this criteria should be ok */
*status |= FE_HAS_CARRIER;
if (sync & 0x20)
*status |= FE_HAS_VITERBI;
if (sync & 0x40)
*status |= FE_HAS_SYNC;
if (sync == 0x7f)
*status |= FE_HAS_LOCK;
return 0;
}
static int l64781_read_ber(struct dvb_frontend* fe, u32* ber)
{
struct l64781_state* state = fe->demodulator_priv;
/* XXX FIXME: set up counting period (reg 0x26...0x28)
*/
*ber = l64781_readreg (state, 0x39)
| (l64781_readreg (state, 0x3a) << 8);
return 0;
}
static int l64781_read_signal_strength(struct dvb_frontend* fe, u16* signal_strength)
{
struct l64781_state* state = fe->demodulator_priv;
u8 gain = l64781_readreg (state, 0x0e);
*signal_strength = (gain << 8) | gain;
return 0;
}
static int l64781_read_snr(struct dvb_frontend* fe, u16* snr)
{
struct l64781_state* state = fe->demodulator_priv;
u8 avg_quality = 0xff - l64781_readreg (state, 0x33);
*snr = (avg_quality << 8) | avg_quality; /* not exact, but...*/
return 0;
}
static int l64781_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
struct l64781_state* state = fe->demodulator_priv;
*ucblocks = l64781_readreg (state, 0x37)
| (l64781_readreg (state, 0x38) << 8);
return 0;
}
static int l64781_sleep(struct dvb_frontend* fe)
{
struct l64781_state* state = fe->demodulator_priv;
/* Power down */
return l64781_writereg (state, 0x3e, 0x5a);
}
static int l64781_init(struct dvb_frontend* fe)
{
struct l64781_state* state = fe->demodulator_priv;
reset_and_configure (state);
/* Power up */
l64781_writereg (state, 0x3e, 0xa5);
/* Reset hard */
l64781_writereg (state, 0x2a, 0x04);
l64781_writereg (state, 0x2a, 0x00);
/* Set tuner specific things */
/* AFC_POL, set also in reset_afc */
l64781_writereg (state, 0x07, 0x8e);
/* Use internal ADC */
l64781_writereg (state, 0x0b, 0x81);
/* AGC loop gain, and polarity is positive */
l64781_writereg (state, 0x0c, 0x84);
/* Internal ADC outputs two's complement */
l64781_writereg (state, 0x0d, 0x8c);
/* With ppm=8000, it seems the DTR_SENSITIVITY will result in
value of 2 with all possible bandwidths and guard
intervals, which is the initial value anyway. */
/*l64781_writereg (state, 0x19, 0x92);*/
/* Everything is two's complement, soft bit and CSI_OUT too */
l64781_writereg (state, 0x1e, 0x09);
/* delay a bit after first init attempt */
if (state->first) {
state->first = 0;
msleep(200);
}
return 0;
}
static int l64781_get_tune_settings(struct dvb_frontend* fe,
struct dvb_frontend_tune_settings* fesettings)
{
fesettings->min_delay_ms = 4000;
fesettings->step_size = 0;
fesettings->max_drift = 0;
return 0;
}
static void l64781_release(struct dvb_frontend* fe)
{
struct l64781_state* state = fe->demodulator_priv;
kfree(state);
}
static struct dvb_frontend_ops l64781_ops;
struct dvb_frontend* l64781_attach(const struct l64781_config* config,
struct i2c_adapter* i2c)
{
struct l64781_state* state = NULL;
int reg0x3e = -1;
u8 b0 [] = { 0x1a };
u8 b1 [] = { 0x00 };
struct i2c_msg msg [] = { { .addr = config->demod_address, .flags = 0, .buf = b0, .len = 1 },
{ .addr = config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct l64781_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
state->config = config;
state->i2c = i2c;
state->first = 1;
/**
* the L64781 won't show up before we send the reset_and_configure()
* broadcast. If nothing responds there is no L64781 on the bus...
*/
if (reset_and_configure(state) < 0) {
dprintk("No response to reset and configure broadcast...\n");
goto error;
}
/* The chip always responds to reads */
if (i2c_transfer(state->i2c, msg, 2) != 2) {
dprintk("No response to read on I2C bus\n");
goto error;
}
/* Save current register contents for bailout */
reg0x3e = l64781_readreg(state, 0x3e);
/* Reading the POWER_DOWN register always returns 0 */
if (reg0x3e != 0) {
dprintk("Device doesn't look like L64781\n");
goto error;
}
/* Turn the chip off */
l64781_writereg (state, 0x3e, 0x5a);
/* Responds to all reads with 0 */
if (l64781_readreg(state, 0x1a) != 0) {
dprintk("Read 1 returned unexpcted value\n");
goto error;
}
/* Turn the chip on */
l64781_writereg (state, 0x3e, 0xa5);
/* Responds with register default value */
if (l64781_readreg(state, 0x1a) != 0xa1) {
dprintk("Read 2 returned unexpcted value\n");
goto error;
}
/* create dvb_frontend */
memcpy(&state->frontend.ops, &l64781_ops, sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
return &state->frontend;
error:
if (reg0x3e >= 0)
l64781_writereg (state, 0x3e, reg0x3e); /* restore reg 0x3e */
kfree(state);
return NULL;
}
static struct dvb_frontend_ops l64781_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "LSI L64781 DVB-T",
/* .frequency_min = ???,*/
/* .frequency_max = ???,*/
.frequency_stepsize = 166666,
/* .frequency_tolerance = ???,*/
/* .symbol_rate_tolerance = ???,*/
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
FE_CAN_MUTE_TS
},
.release = l64781_release,
.init = l64781_init,
.sleep = l64781_sleep,
.set_frontend = apply_frontend_param,
.get_frontend = get_frontend,
.get_tune_settings = l64781_get_tune_settings,
.read_status = l64781_read_status,
.read_ber = l64781_read_ber,
.read_signal_strength = l64781_read_signal_strength,
.read_snr = l64781_read_snr,
.read_ucblocks = l64781_read_ucblocks,
};
MODULE_DESCRIPTION("LSI L64781 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler, Marko Kohtala");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(l64781_attach);
| gpl-2.0 |
ffosilva/android_kernel_sony_msm8974 | drivers/scsi/qla4xxx/ql4_attr.c | 5073 | 4233 | /*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2011 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
/* Scsi_Host attributes. */
static ssize_t
qla4xxx_fw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
if (is_qla8022(ha))
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
ha->firmware_version[0],
ha->firmware_version[1],
ha->patch_number, ha->build_number);
else
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
ha->firmware_version[0],
ha->firmware_version[1],
ha->patch_number, ha->build_number);
}
static ssize_t
qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
}
static ssize_t
qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
ha->iscsi_minor);
}
static ssize_t
qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
ha->bootload_major, ha->bootload_minor,
ha->bootload_patch, ha->bootload_build);
}
static ssize_t
qla4xxx_board_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id);
}
static ssize_t
qla4xxx_fw_state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
qla4xxx_get_firmware_state(ha);
return snprintf(buf, PAGE_SIZE, "0x%08X%8X\n", ha->firmware_state,
ha->addl_fw_state);
}
static ssize_t
qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
if (!is_qla8022(ha))
return -ENOSYS;
return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
}
static ssize_t
qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
if (!is_qla8022(ha))
return -ENOSYS;
return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
}
static ssize_t
qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
if (!is_qla8022(ha))
return -ENOSYS;
return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
}
static ssize_t
qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
}
static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, qla4xxx_board_id_show, NULL);
static DEVICE_ATTR(fw_state, S_IRUGO, qla4xxx_fw_state_show, NULL);
static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL);
static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL);
static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL);
static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL);
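/*
 * All of the attributes above are read-only (S_IRUGO) sysfs files.  Once
 * the qla4xxx_host_attrs array below is hooked up through the driver's
 * SCSI host template they would typically be readable under
 * /sys/class/scsi_host/host<N>/, e.g. fw_version (path shown for
 * illustration only).
 */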
struct device_attribute *qla4xxx_host_attrs[] = {
&dev_attr_fw_version,
&dev_attr_serial_num,
&dev_attr_iscsi_version,
&dev_attr_optrom_version,
&dev_attr_board_id,
&dev_attr_fw_state,
&dev_attr_phy_port_cnt,
&dev_attr_phy_port_num,
&dev_attr_iscsi_func_cnt,
&dev_attr_hba_model,
NULL,
};
| gpl-2.0 |
nquest/kernel_dns_s4502m | drivers/hid/hid-gaff.c | 5329 | 4868 | /*
* Force feedback support for GreenAsia (Product ID 0x12) based devices
*
* The devices are distributed under various names and the same USB device ID
* can be used in many game controllers.
*
*
* 0e8f:0012 "GreenAsia Inc. USB Joystick "
* - tested with MANTA Warior MM816 and SpeedLink Strike2 SL-6635.
*
* Copyright (c) 2008 Lukasz Lubojanski <lukasz@lubojanski.info>
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
#ifdef CONFIG_GREENASIA_FF
#include "usbhid/usbhid.h"
struct gaff_device {
struct hid_report *report;
};
static int hid_gaff_play(struct input_dev *dev, void *data,
struct ff_effect *effect)
{
struct hid_device *hid = input_get_drvdata(dev);
struct gaff_device *gaff = data;
int left, right;
left = effect->u.rumble.strong_magnitude;
right = effect->u.rumble.weak_magnitude;
dbg_hid("called with 0x%04x 0x%04x", left, right);
left = left * 0xfe / 0xffff;
right = right * 0xfe / 0xffff;
gaff->report->field[0]->value[0] = 0x51;
gaff->report->field[0]->value[1] = 0x0;
gaff->report->field[0]->value[2] = right;
gaff->report->field[0]->value[3] = 0;
gaff->report->field[0]->value[4] = left;
gaff->report->field[0]->value[5] = 0;
dbg_hid("running with 0x%02x 0x%02x", left, right);
usbhid_submit_report(hid, gaff->report, USB_DIR_OUT);
gaff->report->field[0]->value[0] = 0xfa;
gaff->report->field[0]->value[1] = 0xfe;
gaff->report->field[0]->value[2] = 0x0;
gaff->report->field[0]->value[4] = 0x0;
usbhid_submit_report(hid, gaff->report, USB_DIR_OUT);
return 0;
}
static int gaff_init(struct hid_device *hid)
{
struct gaff_device *gaff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct list_head *report_ptr = report_list;
struct input_dev *dev = hidinput->input;
int error;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
}
report_ptr = report_ptr->next;
report = list_entry(report_ptr, struct hid_report, list);
if (report->maxfield < 1) {
hid_err(hid, "no fields in the report\n");
return -ENODEV;
}
if (report->field[0]->report_count < 6) {
hid_err(hid, "not enough values in the field\n");
return -ENODEV;
}
gaff = kzalloc(sizeof(struct gaff_device), GFP_KERNEL);
if (!gaff)
return -ENOMEM;
set_bit(FF_RUMBLE, dev->ffbit);
error = input_ff_create_memless(dev, gaff, hid_gaff_play);
if (error) {
kfree(gaff);
return error;
}
gaff->report = report;
gaff->report->field[0]->value[0] = 0x51;
gaff->report->field[0]->value[1] = 0x00;
gaff->report->field[0]->value[2] = 0x00;
gaff->report->field[0]->value[3] = 0x00;
usbhid_submit_report(hid, gaff->report, USB_DIR_OUT);
gaff->report->field[0]->value[0] = 0xfa;
gaff->report->field[0]->value[1] = 0xfe;
usbhid_submit_report(hid, gaff->report, USB_DIR_OUT);
hid_info(hid, "Force Feedback for GreenAsia 0x12 devices by Lukasz Lubojanski <lukasz@lubojanski.info>\n");
return 0;
}
#else
static inline int gaff_init(struct hid_device *hdev)
{
return 0;
}
#endif
static int ga_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
dev_dbg(&hdev->dev, "Greenasia HID hardware probe...");
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
if (ret) {
hid_err(hdev, "hw start failed\n");
goto err;
}
gaff_init(hdev);
return 0;
err:
return ret;
}
static const struct hid_device_id ga_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012), },
{ }
};
MODULE_DEVICE_TABLE(hid, ga_devices);
static struct hid_driver ga_driver = {
.name = "greenasia",
.id_table = ga_devices,
.probe = ga_probe,
};
static int __init ga_init(void)
{
return hid_register_driver(&ga_driver);
}
static void __exit ga_exit(void)
{
hid_unregister_driver(&ga_driver);
}
module_init(ga_init);
module_exit(ga_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Arc-Team/android_kernel_htc_a11 | arch/powerpc/lib/rheap.c | 8145 | 16859 | /*
* A Remote Heap. Remote means that we don't touch the memory that the
* heap points to. Normal heap implementations use the memory they manage
* to place their list. We cannot do that because the memory we manage may
* have special properties, for example it is uncacheable or of a
* different endianness.
*
* Author: Pantelis Antoniou <panto@intracom.gr>
*
* 2004 (c) INTRACOM S.A. Greece. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/rheap.h>
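/*
 * Minimal usage sketch of this API (illustrative only; the numbers and
 * the owner string are made up and error handling is omitted):
 *
 *	rh_info_t *heap = rh_create(16);         // 16-byte alignment
 *	rh_attach_region(heap, 0, 0x10000);      // manage offsets 0..64K
 *	unsigned long off = rh_alloc(heap, 256, "example");
 *	rh_free(heap, off);
 *	rh_destroy(heap);
 *
 * The heap only hands out offsets and sizes; it never dereferences the
 * managed region, which is what allows it to manage uncached or
 * foreign-endian memory.
 */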
/*
* Fixup a list_head, needed when copying lists. If the pointers fall
* between s and e, apply the delta. This assumes that
* sizeof(struct list_head *) == sizeof(unsigned long *).
*/
static inline void fixup(unsigned long s, unsigned long e, int d,
struct list_head *l)
{
unsigned long *pp;
pp = (unsigned long *)&l->next;
if (*pp >= s && *pp < e)
*pp += d;
pp = (unsigned long *)&l->prev;
if (*pp >= s && *pp < e)
*pp += d;
}
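/*
 * Example of the fixup above (illustrative): if the old block array
 * occupied [0x1000, 0x1800) and was copied 0x100 bytes higher
 * (d = 0x100), a list pointer holding 0x1234 is rewritten to 0x1334,
 * while pointer values outside that range are left untouched.
 */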
/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
rh_block_t *block, *blk;
int i, new_blocks;
int delta;
unsigned long blks, blke;
if (max_blocks <= info->max_blocks)
return -EINVAL;
new_blocks = max_blocks - info->max_blocks;
block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC);
if (block == NULL)
return -ENOMEM;
if (info->max_blocks > 0) {
/* copy old block area */
memcpy(block, info->block,
sizeof(rh_block_t) * info->max_blocks);
delta = (char *)block - (char *)info->block;
/* and fixup list pointers */
blks = (unsigned long)info->block;
blke = (unsigned long)(info->block + info->max_blocks);
for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
fixup(blks, blke, delta, &blk->list);
fixup(blks, blke, delta, &info->empty_list);
fixup(blks, blke, delta, &info->free_list);
fixup(blks, blke, delta, &info->taken_list);
/* free the old allocated memory */
if ((info->flags & RHIF_STATIC_BLOCK) == 0)
kfree(info->block);
}
info->block = block;
info->empty_slots += new_blocks;
info->max_blocks = max_blocks;
info->flags &= ~RHIF_STATIC_BLOCK;
/* add all new blocks to the free list */
blk = block + info->max_blocks - new_blocks;
for (i = 0; i < new_blocks; i++, blk++)
list_add(&blk->list, &info->empty_list);
return 0;
}
/*
* Ensure at least the required number of empty slots. If this function
* causes a grow in the block area then all pointers kept to the block
* area are invalid!
*/
static int assure_empty(rh_info_t * info, int slots)
{
int max_blocks;
/* This function is not meant to be used to grow uncontrollably */
if (slots >= 4)
return -EINVAL;
/* Enough space */
if (info->empty_slots >= slots)
return 0;
/* Next 16 sized block */
max_blocks = ((info->max_blocks + slots) + 15) & ~15;
return grow(info, max_blocks);
}
static rh_block_t *get_slot(rh_info_t * info)
{
rh_block_t *blk;
/* If no more free slots, and failure to extend. */
/* XXX: You should have called assure_empty before */
if (info->empty_slots == 0) {
printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
return NULL;
}
/* Get empty slot to use */
blk = list_entry(info->empty_list.next, rh_block_t, list);
list_del_init(&blk->list);
info->empty_slots--;
/* Initialize */
blk->start = 0;
blk->size = 0;
blk->owner = NULL;
return blk;
}
static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
list_add(&blk->list, &info->empty_list);
info->empty_slots++;
}
static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
rh_block_t *blk;
rh_block_t *before;
rh_block_t *after;
rh_block_t *next;
int size;
unsigned long s, e, bs, be;
struct list_head *l;
/* We assume that they are aligned properly */
size = blkn->size;
s = blkn->start;
e = s + size;
/* Find the blocks immediately before and after the given one
* (if any) */
before = NULL;
after = NULL;
next = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
bs = blk->start;
be = bs + blk->size;
if (next == NULL && s >= bs)
next = blk;
if (be == s)
before = blk;
if (e == bs)
after = blk;
/* If both are not null, break now */
if (before != NULL && after != NULL)
break;
}
/* Now check if they are really adjacent */
if (before && s != (before->start + before->size))
before = NULL;
if (after && e != after->start)
after = NULL;
/* No coalescing; list insert and return */
if (before == NULL && after == NULL) {
if (next != NULL)
list_add(&blkn->list, &next->list);
else
list_add(&blkn->list, &info->free_list);
return;
}
/* We don't need it anymore */
release_slot(info, blkn);
/* Grow the before block */
if (before != NULL && after == NULL) {
before->size += size;
return;
}
/* Grow the after block backwards */
if (before == NULL && after != NULL) {
after->start -= size;
after->size += size;
return;
}
/* Grow the before block, and release the after block */
before->size += size + after->size;
list_del(&after->list);
release_slot(info, after);
}
static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
rh_block_t *blk;
struct list_head *l;
/* Find the block immediately before the given one (if any) */
list_for_each(l, &info->taken_list) {
blk = list_entry(l, rh_block_t, list);
if (blk->start > blkn->start) {
list_add_tail(&blkn->list, &blk->list);
return;
}
}
list_add_tail(&blkn->list, &info->taken_list);
}
/*
* Create a remote heap dynamically. Note that no memory for the blocks
* is allocated; it will be allocated upon the first allocation request.
*/
rh_info_t *rh_create(unsigned int alignment)
{
rh_info_t *info;
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
return ERR_PTR(-EINVAL);
info = kmalloc(sizeof(*info), GFP_ATOMIC);
if (info == NULL)
return ERR_PTR(-ENOMEM);
info->alignment = alignment;
/* Initially everything as empty */
info->block = NULL;
info->max_blocks = 0;
info->empty_slots = 0;
info->flags = 0;
INIT_LIST_HEAD(&info->empty_list);
INIT_LIST_HEAD(&info->free_list);
INIT_LIST_HEAD(&info->taken_list);
return info;
}
EXPORT_SYMBOL_GPL(rh_create);
/*
* Destroy a dynamically created remote heap. Deallocate only if the areas
* are not static
*/
void rh_destroy(rh_info_t * info)
{
if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
kfree(info->block);
if ((info->flags & RHIF_STATIC_INFO) == 0)
kfree(info);
}
EXPORT_SYMBOL_GPL(rh_destroy);
/*
* Initialize in place a remote heap info block. This is needed to support
* operation very early in the startup of the kernel, when it is not yet safe
* to call kmalloc.
*/
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
rh_block_t * block)
{
int i;
rh_block_t *blk;
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
return;
info->alignment = alignment;
/* Initially everything as empty */
info->block = block;
info->max_blocks = max_blocks;
info->empty_slots = max_blocks;
info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
INIT_LIST_HEAD(&info->empty_list);
INIT_LIST_HEAD(&info->free_list);
INIT_LIST_HEAD(&info->taken_list);
/* Add all new blocks to the free list */
for (i = 0, blk = block; i < max_blocks; i++, blk++)
list_add(&blk->list, &info->empty_list);
}
EXPORT_SYMBOL_GPL(rh_init);
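/*
 * Early-boot usage sketch (illustrative only; names and sizes are made
 * up): everything is caller-provided, so no allocation happens.
 *
 *	static rh_block_t example_blocks[16];
 *	static rh_info_t example_heap;
 *
 *	rh_init(&example_heap, 32, ARRAY_SIZE(example_blocks),
 *		example_blocks);
 */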
/* Attach a free memory region, coalescing regions if adjacent */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
rh_block_t *blk;
unsigned long s, e, m;
int r;
/* The region must be aligned */
s = start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
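/*
 * Rounding example (illustrative): with a 32-byte alignment, m = 31, a
 * start of 0x1005 is rounded up to 0x1020 and an end of 0x20f0 is
 * rounded down to 0x20e0, so only the fully aligned middle part of the
 * region is attached.
 */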
if (IS_ERR_VALUE(e) || (e < s))
return -ERANGE;
/* Take final values */
start = s;
size = e - s;
/* Grow the blocks, if needed */
r = assure_empty(info, 1);
if (r < 0)
return r;
blk = get_slot(info);
blk->start = start;
blk->size = size;
blk->owner = NULL;
attach_free_block(info, blk);
return 0;
}
EXPORT_SYMBOL_GPL(rh_attach_region);
/* Detach given address range, splitting the free block if needed. */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
struct list_head *l;
rh_block_t *blk, *newblk;
unsigned long s, e, m, bs, be;
/* Validate size */
if (size <= 0)
return (unsigned long) -EINVAL;
/* The region must be aligned */
s = start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
if (assure_empty(info, 1) < 0)
return (unsigned long) -ENOMEM;
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
/* The range must lie entirely inside one free block */
bs = blk->start;
be = blk->start + blk->size;
if (s >= bs && e <= be)
break;
blk = NULL;
}
if (blk == NULL)
return (unsigned long) -ENOMEM;
/* Perfect fit */
if (bs == s && be == e) {
/* Delete from free list, release slot */
list_del(&blk->list);
release_slot(info, blk);
return s;
}
/* blk still in free list, with updated start and/or size */
if (bs == s || be == e) {
if (bs == s)
blk->start += size;
blk->size -= size;
} else {
/* The front free fragment */
blk->size = s - bs;
/* the back free fragment */
newblk = get_slot(info);
newblk->start = e;
newblk->size = be - e;
list_add(&newblk->list, &blk->list);
}
return s;
}
EXPORT_SYMBOL_GPL(rh_detach_region);
/* Allocate a block of memory at the specified alignment. The value returned
* is an offset into the buffer initialized by rh_init(), or a negative number
* if there is an error.
*/
unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
struct list_head *l;
rh_block_t *blk;
rh_block_t *newblk;
unsigned long start, sp_size;
/* Validate size, and alignment must be power of two */
if (size <= 0 || (alignment & (alignment - 1)) != 0)
return (unsigned long) -EINVAL;
/* Align to configured alignment */
size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
if (assure_empty(info, 2) < 0)
return (unsigned long) -ENOMEM;
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
if (size <= blk->size) {
start = (blk->start + alignment - 1) & ~(alignment - 1);
if (start + size <= blk->start + blk->size)
break;
}
blk = NULL;
}
if (blk == NULL)
return (unsigned long) -ENOMEM;
/* Just fits */
if (blk->size == size) {
/* Move from free list to taken list */
list_del(&blk->list);
newblk = blk;
} else {
/* Fragment caused, split if needed */
/* Create block for fragment in the beginning */
sp_size = start - blk->start;
if (sp_size) {
rh_block_t *spblk;
spblk = get_slot(info);
spblk->start = blk->start;
spblk->size = sp_size;
/* add before the blk */
list_add(&spblk->list, blk->list.prev);
}
newblk = get_slot(info);
newblk->start = start;
newblk->size = size;
/* blk still in free list, with updated start and size
* for fragment in the end */
blk->start = start + size;
blk->size -= sp_size + size;
/* No fragment in the end, remove blk */
if (blk->size == 0) {
list_del(&blk->list);
release_slot(info, blk);
}
}
newblk->owner = owner;
attach_taken_block(info, newblk);
return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_align);
/* Allocate a block of memory at the default alignment. The value returned is
* an offset into the buffer initialized by rh_init(), or a negative number if
* there is an error.
*/
unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
return rh_alloc_align(info, size, info->alignment, owner);
}
EXPORT_SYMBOL_GPL(rh_alloc);
/* Allocate a block of memory at the given offset, rounded up to the default
* alignment. The value returned is an offset into the buffer initialized by
* rh_init(), or a negative number if there is an error.
*/
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
struct list_head *l;
rh_block_t *blk, *newblk1, *newblk2;
unsigned long s, e, m, bs = 0, be = 0;
/* Validate size */
if (size <= 0)
return (unsigned long) -EINVAL;
/* The region must be aligned */
s = start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
if (assure_empty(info, 2) < 0)
return (unsigned long) -ENOMEM;
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
/* The range must lie entirely inside one free block */
bs = blk->start;
be = blk->start + blk->size;
if (s >= bs && e <= be)
break;
blk = NULL;
}
if (blk == NULL)
return (unsigned long) -ENOMEM;
/* Perfect fit */
if (bs == s && be == e) {
/* Move from free list to taken list */
list_del(&blk->list);
blk->owner = owner;
start = blk->start;
attach_taken_block(info, blk);
return start;
}
/* blk still in free list, with updated start and/or size */
if (bs == s || be == e) {
if (bs == s)
blk->start += size;
blk->size -= size;
} else {
/* The front free fragment */
blk->size = s - bs;
/* The back free fragment */
newblk2 = get_slot(info);
newblk2->start = e;
newblk2->size = be - e;
list_add(&newblk2->list, &blk->list);
}
newblk1 = get_slot(info);
newblk1->start = s;
newblk1->size = e - s;
newblk1->owner = owner;
start = newblk1->start;
attach_taken_block(info, newblk1);
return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_fixed);
/* Deallocate the memory previously allocated by one of the rh_alloc functions.
* The return value is the size of the deallocated block, or a negative number
* if there is an error.
*/
int rh_free(rh_info_t * info, unsigned long start)
{
rh_block_t *blk, *blk2;
struct list_head *l;
int size;
/* Linear search for block */
blk = NULL;
list_for_each(l, &info->taken_list) {
blk2 = list_entry(l, rh_block_t, list);
if (start < blk2->start)
break;
blk = blk2;
}
if (blk == NULL || start > (blk->start + blk->size))
return -EINVAL;
/* Remove from taken list */
list_del(&blk->list);
/* Get size of freed block */
size = blk->size;
attach_free_block(info, blk);
return size;
}
EXPORT_SYMBOL_GPL(rh_free);
int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
rh_block_t *blk;
struct list_head *l;
struct list_head *h;
int nr;
switch (what) {
case RHGS_FREE:
h = &info->free_list;
break;
case RHGS_TAKEN:
h = &info->taken_list;
break;
default:
return -EINVAL;
}
/* Linear search for block */
nr = 0;
list_for_each(l, h) {
blk = list_entry(l, rh_block_t, list);
if (stats != NULL && nr < max_stats) {
stats->start = blk->start;
stats->size = blk->size;
stats->owner = blk->owner;
stats++;
}
nr++;
}
return nr;
}
EXPORT_SYMBOL_GPL(rh_get_stats);
int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
{
rh_block_t *blk, *blk2;
struct list_head *l;
int size;
/* Linear search for block */
blk = NULL;
list_for_each(l, &info->taken_list) {
blk2 = list_entry(l, rh_block_t, list);
if (start < blk2->start)
break;
blk = blk2;
}
if (blk == NULL || start > (blk->start + blk->size))
return -EINVAL;
blk->owner = owner;
size = blk->size;
return size;
}
EXPORT_SYMBOL_GPL(rh_set_owner);
void rh_dump(rh_info_t * info)
{
static rh_stats_t st[32]; /* XXX maximum 32 blocks */
int maxnr;
int i, nr;
maxnr = ARRAY_SIZE(st);
printk(KERN_INFO
"info @0x%p (%d slots empty / %d max)\n",
info, info->empty_slots, info->max_blocks);
printk(KERN_INFO " Free:\n");
nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
if (nr > maxnr)
nr = maxnr;
for (i = 0; i < nr; i++)
printk(KERN_INFO
" 0x%lx-0x%lx (%u)\n",
st[i].start, st[i].start + st[i].size,
st[i].size);
printk(KERN_INFO "\n");
printk(KERN_INFO " Taken:\n");
nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
if (nr > maxnr)
nr = maxnr;
for (i = 0; i < nr; i++)
printk(KERN_INFO
" 0x%lx-0x%lx (%u) %s\n",
st[i].start, st[i].start + st[i].size,
st[i].size, st[i].owner != NULL ? st[i].owner : "");
printk(KERN_INFO "\n");
}
EXPORT_SYMBOL_GPL(rh_dump);
void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
printk(KERN_INFO
"blk @0x%p: 0x%lx-0x%lx (%u)\n",
blk, blk->start, blk->start + blk->size, blk->size);
}
EXPORT_SYMBOL_GPL(rh_dump_blk);
| gpl-2.0 |
eiselekd/gcc | gcc/testsuite/ada/acats/tests/cxb/cxb30060.c | 210 | 4955 | /*
-- CXB30060.C
--
-- Grant of Unlimited Rights
--
-- Under contracts F33600-87-D-0337, F33600-84-D-0280, MDA903-79-C-0687,
-- F08630-91-C-0015, and DCA100-97-D-0025, the U.S. Government obtained
-- unlimited rights in the software and documentation contained herein.
-- Unlimited rights are defined in DFAR 252.227-7013(a)(19). By making
-- this public release, the Government intends to confer upon all
-- recipients unlimited rights equal to those held by the Government.
-- These rights include rights to use, duplicate, release or disclose the
-- released technical data and computer software in whole or in part, in
-- any manner and for any purpose whatsoever, and to have or permit others
-- to do so.
--
-- DISCLAIMER
--
-- ALL MATERIALS OR INFORMATION HEREIN RELEASED, MADE AVAILABLE OR
-- DISCLOSED ARE AS IS. THE GOVERNMENT MAKES NO EXPRESS OR IMPLIED
-- WARRANTY AS TO ANY MATTER WHATSOEVER, INCLUDING THE CONDITIONS OF THE
-- SOFTWARE, DOCUMENTATION OR OTHER INFORMATION RELEASED, MADE AVAILABLE
-- OR DISCLOSED, OR THE OWNERSHIP, MERCHANTABILITY, OR FITNESS FOR A
-- PARTICULAR PURPOSE OF SAID MATERIAL.
--*
--
-- FUNCTION NAME: CXB30060 ("wchar_gen")
--
-- FUNCTION DESCRIPTION:
-- This C function returns the value of type wchar_t corresponding to the
-- value of its parameter, where
-- Val 0 .. 9 ==> '0' .. '9'
-- Val 10 .. 19 ==> 'A' .. 'J'
-- Val 20 .. 29 ==> 'k' .. 't'
-- Val 30 ==> ' '
-- Val 31 ==> '.'
-- Val 32 ==> ','
--
-- INPUT:
-- This function requires that one int parameter be passed to it.
--
-- OUTPUT:
-- The function will return the appropriate value of type wchar_t.
--
-- CHANGE HISTORY:
-- 13 Sep 99 RLB Created function to replace incorrect
-- Unchecked_Conversion.
--
--!
*/
#include <stddef.h>
wchar_t CXB30060 (int val)
/* NOTE: The above function definition should be accepted by an ANSI-C */
/* compiler. Older C compilers may reject it; they may, however */
/* accept the following two lines. An implementation may comment */
/* out the above function definition and uncomment the following */
/* one. Otherwise, an implementation must provide the necessary */
/* modifications to this C code to satisfy the function */
/* requirements (see Function Description). */
/* */
/* wchar_t CXB30060 (val) */
/* int val; */
/* */
{ wchar_t return_value = ';';
switch (val)
{
case 0:
return_value = '0';
break;
case 1:
return_value = '1';
break;
case 2:
return_value = '2';
break;
case 3:
return_value = '3';
break;
case 4:
return_value = '4';
break;
case 5:
return_value = '5';
break;
case 6:
return_value = '6';
break;
case 7:
return_value = '7';
break;
case 8:
return_value = '8';
break;
case 9:
return_value = '9';
break;
case 10:
return_value = 'A';
break;
case 11:
return_value = 'B';
break;
case 12:
return_value = 'C';
break;
case 13:
return_value = 'D';
break;
case 14:
return_value = 'E';
break;
case 15:
return_value = 'F';
break;
case 16:
return_value = 'G';
break;
case 17:
return_value = 'H';
break;
case 18:
return_value = 'I';
break;
case 19:
return_value = 'J';
break;
case 20:
return_value = 'k';
break;
case 21:
return_value = 'l';
break;
case 22:
return_value = 'm';
break;
case 23:
return_value = 'n';
break;
case 24:
return_value = 'o';
break;
case 25:
return_value = 'p';
break;
case 26:
return_value = 'q';
break;
case 27:
return_value = 'r';
break;
case 28:
return_value = 's';
break;
case 29:
return_value = 't';
break;
case 30:
return_value = ' ';
break;
case 31:
return_value = '.';
break;
case 32:
return_value = ',';
break;
}
return (return_value); /* Return character value */
}
| gpl-2.0 |
hirojikmr/2015-10-28_kern_reading | drivers/gpu/drm/radeon/dce6_afmt.c | 210 | 9382 | /*
* Copyright 2013 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/hdmi.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_audio.h"
#include "sid.h"
#define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8
#define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc
u32 dce6_endpoint_rreg(struct radeon_device *rdev,
u32 block_offset, u32 reg)
{
unsigned long flags;
u32 r;
spin_lock_irqsave(&rdev->end_idx_lock, flags);
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
return r;
}
void dce6_endpoint_wreg(struct radeon_device *rdev,
u32 block_offset, u32 reg, u32 v)
{
unsigned long flags;
spin_lock_irqsave(&rdev->end_idx_lock, flags);
if (ASIC_IS_DCE8(rdev))
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
else
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
}
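/*
 * Both helpers above use the usual index/data style of indirect register
 * access: the target endpoint register is first written to
 * AZ_F0_CODEC_ENDPOINT_INDEX and the payload is then read from or
 * written to AZ_F0_CODEC_ENDPOINT_DATA.  The end_idx_lock spinlock keeps
 * the index/data pair atomic against concurrent endpoint accesses.
 */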
static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
{
int i;
u32 offset, tmp;
for (i = 0; i < rdev->audio.num_pins; i++) {
offset = rdev->audio.pin[i].offset;
tmp = RREG32_ENDPOINT(offset,
AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
rdev->audio.pin[i].connected = false;
else
rdev->audio.pin[i].connected = true;
}
}
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
{
int i;
dce6_afmt_get_connected_pins(rdev);
for (i = 0; i < rdev->audio.num_pins; i++) {
if (rdev->audio.pin[i].connected)
return &rdev->audio.pin[i];
}
DRM_ERROR("No connected audio pins found!\n");
return NULL;
}
void dce6_afmt_select_pin(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (!dig || !dig->afmt || !dig->pin)
return;
WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
AFMT_AUDIO_SRC_SELECT(dig->pin->id));
}
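/* Program the audio/video lipsync latency values reported by the
* connector, picking the interlaced or progressive set from the mode flags.
*/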
void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 tmp = 0;
if (!dig || !dig->afmt || !dig->pin)
return;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (connector->latency_present[1])
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
AUDIO_LIPSYNC(connector->audio_latency[1]);
else
tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
} else {
if (connector->latency_present[0])
tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
AUDIO_LIPSYNC(connector->audio_latency[0]);
else
tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
}
WREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 tmp;
if (!dig || !dig->afmt || !dig->pin)
return;
/* program the speaker allocation */
tmp = RREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set HDMI mode */
tmp |= HDMI_CONNECTION;
if (sad_count)
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
WREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 tmp;
if (!dig || !dig->afmt || !dig->pin)
return;
/* program the speaker allocation */
tmp = RREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set DP mode */
tmp |= DP_CONNECTION;
if (sad_count)
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
WREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
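/* Translate the EDID Short Audio Descriptors into the per-format
* AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR* registers.
*/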
void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
struct cea_sad *sads, int sad_count)
{
int i;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_device *rdev = encoder->dev->dev_private;
static const u16 eld_reg_to_type[][2] = {
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
if (!dig || !dig->afmt || !dig->pin)
return;
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
u8 stereo_freqs = 0;
int max_channels = -1;
int j;
for (j = 0; j < sad_count; j++) {
struct cea_sad *sad = &sads[j];
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
value = MAX_CHANNELS(sad->channels) |
DESCRIPTOR_BYTE_2(sad->byte2) |
SUPPORTED_FREQUENCIES(sad->freq);
max_channels = sad->channels;
}
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
stereo_freqs |= sad->freq;
else
break;
}
}
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
}
}
void dce6_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
u8 enable_mask)
{
if (!pin)
return;
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
enable_mask ? AUDIO_ENABLED : 0);
}
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto0 for HDMI */
u32 value = 0;
if (crtc)
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
WREG32(DCCG_AUDIO_DTO_SOURCE, value);
/* Express [24MHz / target pixel clock] as an exact ratio of two
* integers: DCCG_AUDIO_DTOx_PHASE is the numerator and
* DCCG_AUDIO_DTOx_MODULE is the denominator.
*/
WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
}
void dce6_dp_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto1 for DP */
u32 value = 0;
value |= DCCG_AUDIO_DTO_SEL;
if (crtc)
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
WREG32(DCCG_AUDIO_DTO_SOURCE, value);
/* Express [24MHz / target pixel clock] as an exact ratio of two
* integers: DCCG_AUDIO_DTOx_PHASE is the numerator and
* DCCG_AUDIO_DTOx_MODULE is the denominator.
*/
if (ASIC_IS_DCE8(rdev)) {
WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
} else {
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
}
}
| gpl-2.0 |
yasker/linux | drivers/firmware/psci.c | 210 | 8808 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2015 ARM Limited
*/
#define pr_fmt(fmt) "psci: " fmt
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/psci.h>
#include <linux/reboot.h>
#include <uapi/linux/psci.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
#include <asm/smp_plat.h>
/*
* While a 64-bit OS can make calls with SMC32 calling conventions, for some
* calls it is necessary to use SMC64 to pass or return 64-bit values. For such
* calls PSCI_0_2_FN_NATIVE(x) will choose the appropriate (native-width)
* function ID.
*/
#ifdef CONFIG_64BIT
#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN64_##name
#else
#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN_##name
#endif
/*
* The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
* calls to its resident CPU, so we must avoid issuing those. We never migrate
* a Trusted OS even if it claims to be capable of migration -- doing so will
* require cooperation with a Trusted OS driver.
*/
static int resident_cpu = -1;
bool psci_tos_resident_on(int cpu)
{
return cpu == resident_cpu;
}
struct psci_operations psci_ops;
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
asmlinkage psci_fn __invoke_psci_fn_hvc;
asmlinkage psci_fn __invoke_psci_fn_smc;
static psci_fn *invoke_psci_fn;
enum psci_function {
PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON,
PSCI_FN_CPU_OFF,
PSCI_FN_MIGRATE,
PSCI_FN_MAX,
};
static u32 psci_function_id[PSCI_FN_MAX];
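/* Map PSCI firmware return codes onto Linux errno values. */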
static int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
return 0;
case PSCI_RET_NOT_SUPPORTED:
return -EOPNOTSUPP;
case PSCI_RET_INVALID_PARAMS:
return -EINVAL;
case PSCI_RET_DENIED:
return -EPERM;
};
return -EINVAL;
}
static u32 psci_get_version(void)
{
return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
static int psci_cpu_suspend(u32 state, unsigned long entry_point)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
err = invoke_psci_fn(fn, state, entry_point, 0);
return psci_to_linux_errno(err);
}
static int psci_cpu_off(u32 state)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_CPU_OFF];
err = invoke_psci_fn(fn, state, 0, 0);
return psci_to_linux_errno(err);
}
static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_CPU_ON];
err = invoke_psci_fn(fn, cpuid, entry_point, 0);
return psci_to_linux_errno(err);
}
static int psci_migrate(unsigned long cpuid)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_MIGRATE];
err = invoke_psci_fn(fn, cpuid, 0, 0);
return psci_to_linux_errno(err);
}
static int psci_affinity_info(unsigned long target_affinity,
unsigned long lowest_affinity_level)
{
return invoke_psci_fn(PSCI_0_2_FN_NATIVE(AFFINITY_INFO),
target_affinity, lowest_affinity_level, 0);
}
static int psci_migrate_info_type(void)
{
return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
}
static unsigned long psci_migrate_info_up_cpu(void)
{
return invoke_psci_fn(PSCI_0_2_FN_NATIVE(MIGRATE_INFO_UP_CPU),
0, 0, 0);
}
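/* Read the DT "method" property and select the HVC or SMC conduit used
* to invoke PSCI functions.
*/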
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
pr_info("probing for conduit method from DT.\n");
if (of_property_read_string(np, "method", &method)) {
pr_warn("missing \"method\" property\n");
return -ENXIO;
}
if (!strcmp("hvc", method)) {
invoke_psci_fn = __invoke_psci_fn_hvc;
} else if (!strcmp("smc", method)) {
invoke_psci_fn = __invoke_psci_fn_smc;
} else {
pr_warn("invalid \"method\" property: %s\n", method);
return -EINVAL;
}
return 0;
}
static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
}
static void psci_sys_poweroff(void)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
/*
* Detect the presence of a resident Trusted OS which may cause CPU_OFF to
* return DENIED (which would be fatal).
*/
static void __init psci_init_migrate(void)
{
unsigned long cpuid;
int type, cpu = -1;
type = psci_ops.migrate_info_type();
if (type == PSCI_0_2_TOS_MP) {
pr_info("Trusted OS migration not required\n");
return;
}
if (type == PSCI_RET_NOT_SUPPORTED) {
pr_info("MIGRATE_INFO_TYPE not supported.\n");
return;
}
if (type != PSCI_0_2_TOS_UP_MIGRATE &&
type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
return;
}
cpuid = psci_migrate_info_up_cpu();
if (cpuid & ~MPIDR_HWID_BITMASK) {
pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
cpuid);
return;
}
cpu = get_logical_index(cpuid);
resident_cpu = cpu >= 0 ? cpu : -1;
pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
}
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_NATIVE(CPU_SUSPEND);
psci_ops.cpu_suspend = psci_cpu_suspend;
psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
psci_ops.cpu_off = psci_cpu_off;
psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_NATIVE(CPU_ON);
psci_ops.cpu_on = psci_cpu_on;
psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_NATIVE(MIGRATE);
psci_ops.migrate = psci_migrate;
psci_ops.affinity_info = psci_affinity_info;
psci_ops.migrate_info_type = psci_migrate_info_type;
arm_pm_restart = psci_sys_reset;
pm_power_off = psci_sys_poweroff;
}
/*
* Probe function for PSCI firmware versions >= 0.2
*/
static int __init psci_probe(void)
{
u32 ver = psci_get_version();
pr_info("PSCIv%d.%d detected in firmware.\n",
PSCI_VERSION_MAJOR(ver),
PSCI_VERSION_MINOR(ver));
if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
pr_err("Conflicting PSCI version detected.\n");
return -EINVAL;
}
psci_0_2_set_functions();
psci_init_migrate();
return 0;
}
typedef int (*psci_initcall_t)(const struct device_node *);
/*
* PSCI init function for PSCI versions >=0.2
*
* Probe based on PSCI PSCI_VERSION function
*/
static int __init psci_0_2_init(struct device_node *np)
{
int err;
err = get_set_conduit_method(np);
if (err)
goto out_put_node;
/*
* Starting with v0.2, the PSCI specification introduced a call
* (PSCI_VERSION) that allows probing the firmware version, so
* that PSCI function IDs and version specific initialization
* can be carried out according to the specific version reported
* by firmware
*/
err = psci_probe();
out_put_node:
of_node_put(np);
return err;
}
/*
* PSCI < v0.2 get PSCI Function IDs via DT.
*/
static int __init psci_0_1_init(struct device_node *np)
{
u32 id;
int err;
err = get_set_conduit_method(np);
if (err)
goto out_put_node;
pr_info("Using PSCI v0.1 Function IDs from DT\n");
if (!of_property_read_u32(np, "cpu_suspend", &id)) {
psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
psci_ops.cpu_suspend = psci_cpu_suspend;
}
if (!of_property_read_u32(np, "cpu_off", &id)) {
psci_function_id[PSCI_FN_CPU_OFF] = id;
psci_ops.cpu_off = psci_cpu_off;
}
if (!of_property_read_u32(np, "cpu_on", &id)) {
psci_function_id[PSCI_FN_CPU_ON] = id;
psci_ops.cpu_on = psci_cpu_on;
}
if (!of_property_read_u32(np, "migrate", &id)) {
psci_function_id[PSCI_FN_MIGRATE] = id;
psci_ops.migrate = psci_migrate;
}
out_put_node:
of_node_put(np);
return err;
}
static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
{},
};
int __init psci_dt_init(void)
{
struct device_node *np;
const struct of_device_id *matched_np;
psci_initcall_t init_fn;
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
if (!np)
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
return init_fn(np);
}
#ifdef CONFIG_ACPI
/*
* We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
* explicitly clarified in SBBR
*/
int __init psci_acpi_init(void)
{
if (!acpi_psci_present()) {
pr_info("is not implemented in ACPI.\n");
return -EOPNOTSUPP;
}
pr_info("probing for conduit method from ACPI.\n");
if (acpi_psci_use_hvc())
invoke_psci_fn = __invoke_psci_fn_hvc;
else
invoke_psci_fn = __invoke_psci_fn_smc;
return psci_probe();
}
#endif
| gpl-2.0 |
hisilicon/linaro-kernel | arch/powerpc/platforms/cell/smp.c | 466 | 4072 | /*
* SMP support for BPA machines.
*
* Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
*
* Plus various changes from other IBM teams...
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/cputhreads.h>
#include "interrupt.h"
#include <asm/udbg.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
/*
* The Primary thread of each non-boot processor was started from the OF client
* interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
*/
static cpumask_t of_spin_map;
/**
* smp_startup_cpu() - start the given cpu
*
* At boot time, there is nothing to do for primary threads which were
* started from Open Firmware. For anything else, call RTAS with the
* appropriate start location.
*
* Returns:
* 0 - failure
* 1 - success
*/
static inline int smp_startup_cpu(unsigned int lcpu)
{
int status;
unsigned long start_here = __pa((u32)*((unsigned long *)
generic_secondary_smp_init));
unsigned int pcpu;
int start_cpu;
if (cpumask_test_cpu(lcpu, &of_spin_map))
/* Already started by OF and sitting in spin loop */
return 1;
pcpu = get_hard_smp_processor_id(lcpu);
/* Fixup atomic count: it exited inside IRQ handler. */
task_thread_info(paca[lcpu].__current)->preempt_count = 0;
/*
* If the RTAS start-cpu token does not exist then presume the
* cpu is already spinning.
*/
start_cpu = rtas_token("start-cpu");
if (start_cpu == RTAS_UNKNOWN_SERVICE)
return 1;
status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu);
if (status != 0) {
printk(KERN_ERR "start-cpu failed: %i\n", status);
return 0;
}
return 1;
}
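/* Request the Cell IIC inter-processor interrupts and report the number
* of possible CPUs to the SMP core.
*/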
static int __init smp_iic_probe(void)
{
iic_request_IPIs();
return cpumask_weight(cpu_possible_mask);
}
static void smp_cell_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
iic_setup_cpu();
/*
* change default DABRX to allow user watchpoints
*/
mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
}
static int smp_cell_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
if (!smp_startup_cpu(nr))
return -ENOENT;
/*
* The processor is currently spinning, waiting for the
* cpu_start field to become non-zero. After we set cpu_start,
* the processor will continue on to secondary_start.
*/
paca[nr].cpu_start = 1;
return 0;
}
static struct smp_ops_t bpa_iic_smp_ops = {
.message_pass = iic_message_pass,
.probe = smp_iic_probe,
.kick_cpu = smp_cell_kick_cpu,
.setup_cpu = smp_cell_setup_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
};
/* This is called very early */
void __init smp_init_cell(void)
{
int i;
DBG(" -> smp_init_cell()\n");
smp_ops = &bpa_iic_smp_ops;
/* Mark threads which are still spinning in hold loops. */
if (cpu_has_feature(CPU_FTR_SMT)) {
for_each_present_cpu(i) {
if (cpu_thread_in_core(i) == 0)
cpumask_set_cpu(i, &of_spin_map);
}
} else
cpumask_copy(&of_spin_map, cpu_present_mask);
cpumask_clear_cpu(boot_cpuid, &of_spin_map);
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
smp_ops->give_timebase = rtas_give_timebase;
smp_ops->take_timebase = rtas_take_timebase;
}
DBG(" <- smp_init_cell()\n");
}
| gpl-2.0 |
venkatkamesh/lg_ally_kernel-2.6.XX | arch/x86/kernel/sys_x86_64.c | 466 | 5686 | #include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, off)
{
long error;
error = -EINVAL;
if (off & ~PAGE_MASK)
goto out;
error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
return error;
}
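/* Pick the virtual address window to search for this mapping: a 1GB
* window starting at 1GB for 64-bit MAP_32BIT requests (optionally
* randomized), otherwise TASK_UNMAPPED_BASE..TASK_SIZE.
*/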
static void find_start_end(unsigned long flags, unsigned long *begin,
unsigned long *end)
{
if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
unsigned long new_begin;
/* This is usually needed to map code in the small
model, so it needs to be in the first 31 bits. Limit
it to that. This means we need to move the
unmapped base down for this case. This can give
conflicts with the heap, but we assume that glibc
malloc knows how to fall back to mmap. Give it 1GB
of playground for now. -AK */
*begin = 0x40000000;
*end = 0x80000000;
if (current->flags & PF_RANDOMIZE) {
new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
if (new_begin)
*begin = new_begin;
}
} else {
*begin = TASK_UNMAPPED_BASE;
*end = TASK_SIZE;
}
}
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long start_addr;
unsigned long begin, end;
if (flags & MAP_FIXED)
return addr;
find_start_end(flags, &begin, &end);
if (len > end)
return -ENOMEM;
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (end - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
&& len <= mm->cached_hole_size) {
mm->cached_hole_size = 0;
mm->free_area_cache = begin;
}
addr = mm->free_area_cache;
if (addr < begin)
addr = begin;
start_addr = addr;
full_search:
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (end - len < addr) {
/*
* Start a new search - just in case we missed
* some holes.
*/
if (start_addr != begin) {
start_addr = addr = begin;
mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
}
if (!vma || addr + len <= vma->vm_start) {
/*
* Remember the place where we stopped the search:
*/
mm->free_area_cache = addr + len;
return addr;
}
if (addr + mm->cached_hole_size < vma->vm_start)
mm->cached_hole_size = vma->vm_start - addr;
addr = vma->vm_end;
}
}
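/*
* Top-down variant: search downward from mmap_base for a free hole and
* fall back to the bottom-up allocator if nothing fits.
*/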
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
if (flags & MAP_FIXED)
return addr;
/* for MAP_32BIT mappings we force the legacy mmap base */
if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
goto bottomup;
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
/* check if free_area_cache is useful for us */
if (len <= mm->cached_hole_size) {
mm->cached_hole_size = 0;
mm->free_area_cache = mm->mmap_base;
}
/* either no address requested or can't fit in requested address hole */
addr = mm->free_area_cache;
/* make sure it can fit in the remaining address space */
if (addr > len) {
vma = find_vma(mm, addr-len);
if (!vma || addr <= vma->vm_start)
/* remember the address as a hint for next time */
return mm->free_area_cache = addr-len;
}
if (mm->mmap_base < len)
goto bottomup;
addr = mm->mmap_base-len;
do {
/*
* Lookup failure means no vma is above this address,
* else if new region fits below vma->vm_start,
* return with success:
*/
vma = find_vma(mm, addr);
if (!vma || addr+len <= vma->vm_start)
/* remember the address as a hint for next time */
return mm->free_area_cache = addr;
/* remember the largest hole we saw so far */
if (addr + mm->cached_hole_size < vma->vm_start)
mm->cached_hole_size = vma->vm_start - addr;
/* try just below the current vma->vm_start */
addr = vma->vm_start-len;
} while (len < vma->vm_start);
bottomup:
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
mm->cached_hole_size = ~0UL;
mm->free_area_cache = TASK_UNMAPPED_BASE;
addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
/*
* Restore the topdown base:
*/
mm->free_area_cache = mm->mmap_base;
mm->cached_hole_size = ~0UL;
return addr;
}
SYSCALL_DEFINE1(uname, struct new_utsname __user *, name)
{
int err;
down_read(&uts_sem);
err = copy_to_user(name, utsname(), sizeof(*name));
up_read(&uts_sem);
if (personality(current->personality) == PER_LINUX32)
err |= copy_to_user(&name->machine, "i686", 5);
return err ? -EFAULT : 0;
}
| gpl-2.0 |
schnitzeltony/linux | arch/sparc/kernel/pmc.c | 1234 | 2054 | /* pmc - Driver implementation for power management functions
* of Power Management Controller (PMC) on SPARCstation-Voyager.
*
* Copyright (c) 2002 Eric Brower (ebrower@usa.net)
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/auxio.h>
#include <asm/processor.h>
/* Debug
*
* #define PMC_DEBUG_LED
* #define PMC_NO_IDLE
*/
#define PMC_OBPNAME "SUNW,pmc"
#define PMC_DEVNAME "pmc"
#define PMC_IDLE_REG 0x00
#define PMC_IDLE_ON 0x01
static u8 __iomem *regs;
#define pmc_readb(offs) (sbus_readb(regs+offs))
#define pmc_writeb(val, offs) (sbus_writeb(val, regs+offs))
/*
* CPU idle callback function
* See .../arch/sparc/kernel/process.c
*/
static void pmc_swift_idle(void)
{
#ifdef PMC_DEBUG_LED
set_auxio(0x00, AUXIO_LED);
#endif
pmc_writeb(pmc_readb(PMC_IDLE_REG) | PMC_IDLE_ON, PMC_IDLE_REG);
#ifdef PMC_DEBUG_LED
set_auxio(AUXIO_LED, 0x00);
#endif
}
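/* Map the PMC registers and install the CPU idle handler. */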
static int pmc_probe(struct platform_device *op)
{
regs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]), PMC_OBPNAME);
if (!regs) {
printk(KERN_ERR "%s: unable to map registers\n", PMC_DEVNAME);
return -ENODEV;
}
#ifndef PMC_NO_IDLE
/* Assign power management IDLE handler */
sparc_idle = pmc_swift_idle;
#endif
printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
return 0;
}
static struct of_device_id pmc_match[] = {
{
.name = PMC_OBPNAME,
},
{},
};
MODULE_DEVICE_TABLE(of, pmc_match);
static struct platform_driver pmc_driver = {
.driver = {
.name = "pmc",
.of_match_table = pmc_match,
},
.probe = pmc_probe,
};
static int __init pmc_init(void)
{
return platform_driver_register(&pmc_driver);
}
/* This driver is not critical to the boot process
* and is easiest to ioremap when SBus is already
* initialized, so we install ourselves thusly:
*/
__initcall(pmc_init);
| gpl-2.0 |
vibhu0009/Kernel | drivers/scsi/qla2xxx/tcm_qla2xxx.c | 1234 | 55872 | /*******************************************************************************
* This file contains the TCM implementation using the v4 configfs fabric
* infrastructure for QLogic target mode HBAs.
*
* (c) Copyright 2010-2011 RisingTide Systems LLC.
*
* Licensed to the Linux Foundation under the General Public License (GPL)
* version 2.
*
* Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
*
* tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
* the TCM_FC / Open-FCoE.org fabric module.
*
* Copyright (c) 2010 Cisco Systems, Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "qla_def.h"
#include "qla_target.h"
#include "tcm_qla2xxx.h"
struct workqueue_struct *tcm_qla2xxx_free_wq;
struct workqueue_struct *tcm_qla2xxx_cmd_wq;
static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
{
return 1;
}
static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
{
return 0;
}
/*
* Parse WWN.
* If strict, we require lower-case hex and colon separators to be sure
* the name is the same as what would be generated by tcm_qla2xxx_format_wwn()
* so the name and wwn are mapped one-to-one.
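* In strict mode the expected form is e.g. "21:00:00:24:ff:31:4c:48".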
*/
static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
{
const char *cp;
char c;
u32 nibble;
u32 byte = 0;
u32 pos = 0;
u32 err;
*wwn = 0;
for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
c = *cp;
if (c == '\n' && cp[1] == '\0')
continue;
if (strict && pos++ == 2 && byte++ < 7) {
pos = 0;
if (c == ':')
continue;
err = 1;
goto fail;
}
if (c == '\0') {
err = 2;
if (strict && byte != 8)
goto fail;
return cp - name;
}
err = 3;
if (isdigit(c))
nibble = c - '0';
else if (isxdigit(c) && (islower(c) || !strict))
nibble = tolower(c) - 'a' + 10;
else
goto fail;
*wwn = (*wwn << 4) | nibble;
}
err = 4;
fail:
pr_debug("err %u len %zu pos %u byte %u\n",
err, cp - name, pos, byte);
return -1;
}
static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
{
u8 b[8];
put_unaligned_be64(wwn, b);
return snprintf(buf, len,
"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}
static char *tcm_qla2xxx_get_fabric_name(void)
{
return "qla2xxx";
}
/*
* From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
*/
static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
{
unsigned int i, j;
u8 wwn[8];
memset(wwn, 0, sizeof(wwn));
/* Validate and store the new name */
for (i = 0, j = 0; i < 16; i++) {
int value;
value = hex_to_bin(*ns++);
if (value >= 0)
j = (j << 4) | value;
else
return -EINVAL;
if (i % 2) {
wwn[i/2] = j & 0xff;
j = 0;
}
}
*nm = wwn_to_u64(wwn);
return 0;
}
/*
* This parsing logic follows drivers/scsi/scsi_transport_fc.c:
* store_fc_host_vport_create()
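* The expected input is "<16 hex digits>:<16 hex digits>" (WWPN:WWNN).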
*/
static int tcm_qla2xxx_npiv_parse_wwn(
const char *name,
size_t count,
u64 *wwpn,
u64 *wwnn)
{
unsigned int cnt = count;
int rc;
*wwpn = 0;
*wwnn = 0;
/* count may include a LF at end of string */
if (name[cnt-1] == '\n')
cnt--;
/* validate we have enough characters for WWPN */
if ((cnt != (16+1+16)) || (name[16] != ':'))
return -EINVAL;
rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
if (rc != 0)
return rc;
rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
if (rc != 0)
return rc;
return 0;
}
static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
u64 wwpn, u64 wwnn)
{
u8 b[8], b2[8];
put_unaligned_be64(wwpn, b);
put_unaligned_be64(wwnn, b2);
return snprintf(buf, len,
"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
}
static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{
return "qla2xxx_npiv";
}
static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
u8 proto_id;
switch (lport->lport_proto_id) {
case SCSI_PROTOCOL_FCP:
default:
proto_id = fc_get_fabric_proto_ident(se_tpg);
break;
}
return proto_id;
}
static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
return lport->lport_naa_name;
}
static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
return &lport->lport_npiv_name[0];
}
static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
return tpg->lport_tpgt;
}
static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
{
return 1;
}
static u32 tcm_qla2xxx_get_pr_transport_id(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
int ret = 0;
switch (lport->lport_proto_id) {
case SCSI_PROTOCOL_FCP:
default:
ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
format_code, buf);
break;
}
return ret;
}
static u32 tcm_qla2xxx_get_pr_transport_id_len(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
int ret = 0;
switch (lport->lport_proto_id) {
case SCSI_PROTOCOL_FCP:
default:
ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
format_code);
break;
}
return ret;
}
static char *tcm_qla2xxx_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
char *tid = NULL;
switch (lport->lport_proto_id) {
case SCSI_PROTOCOL_FCP:
default:
tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
port_nexus_ptr);
break;
}
return tid;
}
static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
}
static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
}
static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
}
static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
}
static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_nacl *nacl;
nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
if (!nacl) {
pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
return NULL;
}
return &nacl->se_node_acl;
}
static void tcm_qla2xxx_release_fabric_acl(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl)
{
struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
struct tcm_qla2xxx_nacl, se_node_acl);
kfree(nacl);
}
static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
return tpg->lport_tpgt;
}
static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
{
struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
struct qla_tgt_mgmt_cmd, free_work);
transport_generic_free_cmd(&mcmd->se_cmd, 0);
}
/*
* Called from qla_target_template->free_mcmd(), and will call
* tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
* release callback. qla_hw_data->hardware_lock is expected to be held
*/
static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
}
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
/*
* Called from qla_target_template->free_cmd(), and will call
* tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
* release callback. qla_hw_data->hardware_lock is expected to be held
*/
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
/*
* Called from struct target_core_fabric_ops->check_stop_free() context
*/
static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
{
return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
/*
* tcm_qla2xxx_release_cmd - callback from TCM core to release the
* underlying fabric descriptor for @se_cmd.
*/
static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd;
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
struct qla_tgt_mgmt_cmd, se_cmd);
qlt_free_mcmd(mcmd);
return;
}
cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
qlt_free_cmd(cmd);
}
static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
{
struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
struct scsi_qla_host *vha;
unsigned long flags;
BUG_ON(!sess);
vha = sess->vha;
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
target_sess_cmd_list_set_waiting(se_sess);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
return 1;
}
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
struct scsi_qla_host *vha;
unsigned long flags;
BUG_ON(!sess);
vha = sess->vha;
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
qlt_unreg_sess(sess);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
{
return 0;
}
/*
* The LIO target core uses DMA_TO_DEVICE to mean that data is going
* to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
* that data is coming from the target (eg handling a READ). However,
* this is just the opposite of what we have to tell the DMA mapping
* layer -- eg when handling a READ, the HBA will have to DMA the data
* out of memory so it can send it to the initiator, which means we
* need to use DMA_TO_DEVICE when we map the data.
*/
static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
{
if (se_cmd->se_cmd_flags & SCF_BIDI)
return DMA_BIDIRECTIONAL;
switch (se_cmd->data_direction) {
case DMA_TO_DEVICE:
return DMA_FROM_DEVICE;
case DMA_FROM_DEVICE:
return DMA_TO_DEVICE;
case DMA_NONE:
default:
return DMA_NONE;
}
}
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
/*
* qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
* the SGL mappings into PCIe memory for incoming FCP WRITE data.
*/
return qlt_rdy_to_xfer(cmd);
}
static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
{
unsigned long flags;
/*
* Check for WRITE_PENDING status to determine if we need to wait for
* CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
*/
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
3000);
return 0;
}
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
return 0;
}
static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
{
return;
}
static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
return cmd->tag;
}
static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
}
/*
* Called from process context in qla_target.c:qlt_do_work() code
*/
static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
int data_dir, int bidi)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct se_session *se_sess;
struct qla_tgt_sess *sess;
int flags = TARGET_SCF_ACK_KREF;
if (bidi)
flags |= TARGET_SCF_BIDI_OP;
sess = cmd->sess;
if (!sess) {
pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
return -EINVAL;
}
se_sess = sess->se_sess;
if (!se_sess) {
pr_err("Unable to locate active struct se_session\n");
return -EINVAL;
}
return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
cmd->unpacked_lun, data_length, fcp_task_attr,
data_dir, flags);
}
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
/*
* Ensure that the complete FCP WRITE payload has been received.
* Otherwise return an exception via CHECK_CONDITION status.
*/
if (!cmd->write_data_transferred) {
/*
* Check if se_cmd has already been aborted via LUN_RESET, and
* waiting upon completion in tcm_qla2xxx_write_pending_status()
*/
if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
complete(&cmd->se_cmd.t_transport_stop_comp);
return;
}
transport_generic_request_failure(&cmd->se_cmd,
TCM_CHECK_CONDITION_ABORT_CMD);
return;
}
return target_execute_cmd(&cmd->se_cmd);
}
/*
* Called from qla_target.c:qlt_do_ctio_completion()
*/
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
/*
* Called from qla_target.c:qlt_issue_task_mgmt()
*/
static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
uint8_t tmr_func, uint32_t tag)
{
struct qla_tgt_sess *sess = mcmd->sess;
struct se_cmd *se_cmd = &mcmd->se_cmd;
return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
}
static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
cmd->offset = 0;
/*
* Now queue completed DATA_IN the qla2xxx LLD and response ring
*/
return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
se_cmd->scsi_status);
}
static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
int xmit_type = QLA_TGT_XMIT_STATUS;
cmd->bufflen = se_cmd->data_length;
cmd->sg = NULL;
cmd->sg_cnt = 0;
cmd->offset = 0;
cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
if (se_cmd->data_direction == DMA_FROM_DEVICE) {
/*
* For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
* for qla_tgt_xmit_response LLD code
*/
if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
se_cmd->residual_count = 0;
}
se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
se_cmd->residual_count += se_cmd->data_length;
cmd->bufflen = 0;
}
/*
* Now queue status response to qla2xxx LLD code and response ring
*/
return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
}
static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
struct qla_tgt_mgmt_cmd, se_cmd);
pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
mcmd, se_tmr->function, se_tmr->response);
/*
* Do translation between TCM TM response codes and
* QLA2xxx FC TM response codes.
*/
switch (se_tmr->response) {
case TMR_FUNCTION_COMPLETE:
mcmd->fc_tm_rsp = FC_TM_SUCCESS;
break;
case TMR_TASK_DOES_NOT_EXIST:
mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
break;
case TMR_FUNCTION_REJECTED:
mcmd->fc_tm_rsp = FC_TM_REJECT;
break;
case TMR_LUN_DOES_NOT_EXIST:
default:
mcmd->fc_tm_rsp = FC_TM_FAILED;
break;
}
/*
* Queue the TM response to QLA2xxx LLD to build a
* CTIO response packet.
*/
qlt_xmit_tm_rsp(mcmd);
return 0;
}
/* Local pointer to allocated TCM configfs fabric module */
struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
*/
static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
{
struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
struct se_portal_group *se_tpg = se_nacl->se_tpg;
struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
struct tcm_qla2xxx_nacl, se_node_acl);
void *node;
pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
WARN_ON(node && (node != se_nacl));
pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
se_nacl, nacl->nport_wwnn, nacl->nport_id);
/*
* Now clear the se_nacl and session pointers from our HW lport lookup
* table mapping for this initiator's fabric S_ID and LOOP_ID entries.
*
* This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
* target_wait_for_sess_cmds() before the session waits for outstanding
* I/O to complete, to avoid a race between session shutdown execution
* and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
*/
tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}
static void tcm_qla2xxx_release_session(struct kref *kref)
{
struct se_session *se_sess = container_of(kref,
struct se_session, sess_kref);
qlt_unreg_sess(se_sess->fabric_sess_ptr);
}
static void tcm_qla2xxx_put_session(struct se_session *se_sess)
{
struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
struct qla_hw_data *ha = sess->vha->hw;
unsigned long flags;
spin_lock_irqsave(&ha->hardware_lock, flags);
kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
tcm_qla2xxx_put_session(sess->se_sess);
}
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
tcm_qla2xxx_shutdown_session(sess->se_sess);
}
static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
struct se_portal_group *se_tpg,
struct config_group *group,
const char *name)
{
struct se_node_acl *se_nacl, *se_nacl_new;
struct tcm_qla2xxx_nacl *nacl;
u64 wwnn;
u32 qla2xxx_nexus_depth;
if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
return ERR_PTR(-EINVAL);
se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
if (!se_nacl_new)
return ERR_PTR(-ENOMEM);
/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
qla2xxx_nexus_depth = 1;
/*
* se_nacl_new may be released by core_tpg_add_initiator_node_acl()
* when converting a NodeACL from demo mode -> explicit
*/
se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
name, qla2xxx_nexus_depth);
if (IS_ERR(se_nacl)) {
tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
return se_nacl;
}
/*
* Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
*/
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
nacl->nport_wwnn = wwnn;
tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
return se_nacl;
}
static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
{
struct se_portal_group *se_tpg = se_acl->se_tpg;
struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
struct tcm_qla2xxx_nacl, se_node_acl);
core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
kfree(nacl);
}
/* Start items for tcm_qla2xxx_tpg_attrib_cit */
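/*
* Each DEF_QLA_TPG_ATTRIB() invocation generates the configfs show/store
* handlers for one TPG attribute; DEF_QLA_TPG_ATTR_BOOL() supplies the
* matching 0/1 validation helper.
*/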
#define DEF_QLA_TPG_ATTRIB(name) \
\
static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
struct se_portal_group *se_tpg, \
char *page) \
{ \
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
struct tcm_qla2xxx_tpg, se_tpg); \
\
return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \
} \
\
static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
struct se_portal_group *se_tpg, \
const char *page, \
size_t count) \
{ \
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
struct tcm_qla2xxx_tpg, se_tpg); \
unsigned long val; \
int ret; \
\
ret = kstrtoul(page, 0, &val); \
if (ret < 0) { \
pr_err("kstrtoul() failed with" \
" ret: %d\n", ret); \
return -EINVAL; \
} \
ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \
\
return (!ret) ? count : -EINVAL; \
}
#define DEF_QLA_TPG_ATTR_BOOL(_name) \
\
static int tcm_qla2xxx_set_attrib_##_name( \
struct tcm_qla2xxx_tpg *tpg, \
unsigned long val) \
{ \
struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \
\
if ((val != 0) && (val != 1)) { \
pr_err("Illegal boolean value %lu\n", val); \
return -EINVAL; \
} \
\
a->_name = val; \
return 0; \
}
#define QLA_TPG_ATTR(_name, _mode) \
TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
/*
* Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
*/
DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
DEF_QLA_TPG_ATTRIB(generate_node_acls);
QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
/*
* Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
*/
DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
/*
* Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
*/
DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
/*
* Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
*/
DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
NULL,
};
/* End items for tcm_qla2xxx_tpg_attrib_cit */
static ssize_t tcm_qla2xxx_tpg_show_enable(
struct se_portal_group *se_tpg,
char *page)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
return snprintf(page, PAGE_SIZE, "%d\n",
atomic_read(&tpg->lport_tpg_enabled));
}
static ssize_t tcm_qla2xxx_tpg_store_enable(
struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
unsigned long op;
int rc;
rc = kstrtoul(page, 0, &op);
if (rc < 0) {
pr_err("kstrtoul() returned %d\n", rc);
return -EINVAL;
}
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %lu\n", op);
return -EINVAL;
}
if (op) {
atomic_set(&tpg->lport_tpg_enabled, 1);
qlt_enable_vha(vha);
} else {
if (!ha->tgt.qla_tgt) {
pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
return -ENODEV;
}
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(ha->tgt.qla_tgt);
}
return count;
}
TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
&tcm_qla2xxx_tpg_enable.attr,
NULL,
};
static struct se_portal_group *tcm_qla2xxx_make_tpg(
struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct tcm_qla2xxx_tpg *tpg;
unsigned long tpgt;
int ret;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
return ERR_PTR(-EINVAL);
if (!lport->qla_npiv_vp && (tpgt != 1)) {
pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
return ERR_PTR(-ENOSYS);
}
tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
if (!tpg) {
pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
return ERR_PTR(-ENOMEM);
}
tpg->lport = lport;
tpg->lport_tpgt = tpgt;
/*
* By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
* NodeACLs
*/
QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
}
/*
* Setup local TPG=1 pointer for non NPIV mode.
*/
if (lport->qla_npiv_vp == NULL)
lport->tpg_1 = tpg;
return &tpg->se_tpg;
}
static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
struct scsi_qla_host *vha = lport->qla_vha;
struct qla_hw_data *ha = vha->hw;
/*
* Call into qla2x_target.c LLD logic to shutdown the active
* FC Nexuses and disable target mode operation for this qla_hw_data
*/
if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
qlt_stop_phase1(ha->tgt.qla_tgt);
core_tpg_deregister(se_tpg);
/*
* Clear local TPG=1 pointer for non NPIV mode.
*/
if (lport->qla_npiv_vp == NULL)
lport->tpg_1 = NULL;
kfree(tpg);
}
static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct tcm_qla2xxx_tpg *tpg;
unsigned long tpgt;
int ret;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
return ERR_PTR(-EINVAL);
tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
if (!tpg) {
pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
return ERR_PTR(-ENOMEM);
}
tpg->lport = lport;
tpg->lport_tpgt = tpgt;
ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
}
return &tpg->se_tpg;
}
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
*/
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
scsi_qla_host_t *vha,
const uint8_t *s_id)
{
struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
struct se_node_acl *se_nacl;
struct tcm_qla2xxx_nacl *nacl;
u32 key;
lport = ha->tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
return NULL;
}
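/* Pack the 3-byte FC S_ID into a 24-bit key for the btree lookup. */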
key = (((unsigned long)s_id[0] << 16) |
((unsigned long)s_id[1] << 8) |
(unsigned long)s_id[2]);
pr_debug("find_sess_by_s_id: 0x%06x\n", key);
se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
if (!se_nacl) {
pr_debug("Unable to locate s_id: 0x%06x\n", key);
return NULL;
}
pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
se_nacl, se_nacl->initiatorname);
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
if (!nacl->qla_tgt_sess) {
pr_err("Unable to locate struct qla_tgt_sess\n");
return NULL;
}
return nacl->qla_tgt_sess;
}
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
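* Update the S_ID -> se_nacl btree mapping and the nacl/session back pointers.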
*/
static void tcm_qla2xxx_set_sess_by_s_id(
struct tcm_qla2xxx_lport *lport,
struct se_node_acl *new_se_nacl,
struct tcm_qla2xxx_nacl *nacl,
struct se_session *se_sess,
struct qla_tgt_sess *qla_tgt_sess,
uint8_t *s_id)
{
u32 key;
void *slot;
int rc;
key = (((unsigned long)s_id[0] << 16) |
((unsigned long)s_id[1] << 8) |
(unsigned long)s_id[2]);
pr_debug("set_sess_by_s_id: %06x\n", key);
slot = btree_lookup32(&lport->lport_fcport_map, key);
if (!slot) {
if (new_se_nacl) {
pr_debug("Setting up new fc_port entry to new_se_nacl\n");
nacl->nport_id = key;
rc = btree_insert32(&lport->lport_fcport_map, key,
new_se_nacl, GFP_ATOMIC);
if (rc)
printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
(int)key);
} else {
pr_debug("Wiping nonexisting fc_port entry\n");
}
qla_tgt_sess->se_sess = se_sess;
nacl->qla_tgt_sess = qla_tgt_sess;
return;
}
if (nacl->qla_tgt_sess) {
if (new_se_nacl == NULL) {
pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
btree_remove32(&lport->lport_fcport_map, key);
nacl->qla_tgt_sess = NULL;
return;
}
pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
qla_tgt_sess->se_sess = se_sess;
nacl->qla_tgt_sess = qla_tgt_sess;
return;
}
if (new_se_nacl == NULL) {
pr_debug("Clearing existing fc_port entry\n");
btree_remove32(&lport->lport_fcport_map, key);
return;
}
pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
qla_tgt_sess->se_sess = se_sess;
nacl->qla_tgt_sess = qla_tgt_sess;
pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
*/
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
scsi_qla_host_t *vha,
const uint16_t loop_id)
{
struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
struct se_node_acl *se_nacl;
struct tcm_qla2xxx_nacl *nacl;
struct tcm_qla2xxx_fc_loopid *fc_loopid;
lport = ha->tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
return NULL;
}
pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
fc_loopid = lport->lport_loopid_map + loop_id;
se_nacl = fc_loopid->se_nacl;
if (!se_nacl) {
pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
loop_id);
return NULL;
}
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
if (!nacl->qla_tgt_sess) {
pr_err("Unable to locate struct qla_tgt_sess\n");
return NULL;
}
return nacl->qla_tgt_sess;
}
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
*/
static void tcm_qla2xxx_set_sess_by_loop_id(
struct tcm_qla2xxx_lport *lport,
struct se_node_acl *new_se_nacl,
struct tcm_qla2xxx_nacl *nacl,
struct se_session *se_sess,
struct qla_tgt_sess *qla_tgt_sess,
uint16_t loop_id)
{
struct se_node_acl *saved_nacl;
struct tcm_qla2xxx_fc_loopid *fc_loopid;
pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
lport->lport_loopid_map)[loop_id];
saved_nacl = fc_loopid->se_nacl;
if (!saved_nacl) {
pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
fc_loopid->se_nacl = new_se_nacl;
if (qla_tgt_sess->se_sess != se_sess)
qla_tgt_sess->se_sess = se_sess;
if (nacl->qla_tgt_sess != qla_tgt_sess)
nacl->qla_tgt_sess = qla_tgt_sess;
return;
}
if (nacl->qla_tgt_sess) {
if (new_se_nacl == NULL) {
pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
fc_loopid->se_nacl = NULL;
nacl->qla_tgt_sess = NULL;
return;
}
pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
fc_loopid->se_nacl = new_se_nacl;
if (qla_tgt_sess->se_sess != se_sess)
qla_tgt_sess->se_sess = se_sess;
if (nacl->qla_tgt_sess != qla_tgt_sess)
nacl->qla_tgt_sess = qla_tgt_sess;
return;
}
if (new_se_nacl == NULL) {
pr_debug("Clearing fc_loopid->se_nacl\n");
fc_loopid->se_nacl = NULL;
return;
}
pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
fc_loopid->se_nacl = new_se_nacl;
if (qla_tgt_sess->se_sess != se_sess)
qla_tgt_sess->se_sess = se_sess;
if (nacl->qla_tgt_sess != qla_tgt_sess)
nacl->qla_tgt_sess = qla_tgt_sess;
pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}
/*
* Should always be called with qla_hw_data->hardware_lock held.
*/
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
{
struct se_session *se_sess = sess->se_sess;
unsigned char be_sid[3];
be_sid[0] = sess->s_id.b.domain;
be_sid[1] = sess->s_id.b.area;
be_sid[2] = sess->s_id.b.al_pa;
tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
sess, be_sid);
tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
sess, sess->loop_id);
}
static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
{
struct qla_tgt *tgt = sess->tgt;
struct qla_hw_data *ha = tgt->ha;
struct se_session *se_sess;
struct se_node_acl *se_nacl;
struct tcm_qla2xxx_lport *lport;
struct tcm_qla2xxx_nacl *nacl;
BUG_ON(in_interrupt());
se_sess = sess->se_sess;
if (!se_sess) {
pr_err("struct qla_tgt_sess->se_sess is NULL\n");
dump_stack();
return;
}
se_nacl = se_sess->se_node_acl;
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
lport = ha->tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
return;
}
target_wait_for_sess_cmds(se_sess);
transport_deregister_session_configfs(sess->se_sess);
transport_deregister_session(sess->se_sess);
}
/*
* Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
* to locate struct se_node_acl
*/
static int tcm_qla2xxx_check_initiator_node_acl(
scsi_qla_host_t *vha,
unsigned char *fc_wwpn,
void *qla_tgt_sess,
uint8_t *s_id,
uint16_t loop_id)
{
struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
struct tcm_qla2xxx_tpg *tpg;
struct tcm_qla2xxx_nacl *nacl;
struct se_portal_group *se_tpg;
struct se_node_acl *se_nacl;
struct se_session *se_sess;
struct qla_tgt_sess *sess = qla_tgt_sess;
unsigned char port_name[36];
unsigned long flags;
lport = ha->tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
return -EINVAL;
}
/*
* Locate the TPG=1 reference..
*/
tpg = lport->tpg_1;
if (!tpg) {
pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
return -EINVAL;
}
se_tpg = &tpg->se_tpg;
se_sess = transport_init_session();
if (IS_ERR(se_sess)) {
pr_err("Unable to initialize struct se_session\n");
return PTR_ERR(se_sess);
}
/*
* Format the FCP Initiator port_name into colon-separated values to
* match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
*/
memset(&port_name, 0, 36);
snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
/*
* Locate our struct se_node_acl either from an explicit NodeACL created
* via ConfigFS, or via running in TPG demo mode.
*/
se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
port_name);
if (!se_sess->se_node_acl) {
transport_free_session(se_sess);
return -EINVAL;
}
se_nacl = se_sess->se_node_acl;
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
/*
* And now setup the new se_nacl and session pointers into our HW lport
* mappings for fabric S_ID and LOOP_ID.
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
qla_tgt_sess, s_id);
tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
qla_tgt_sess, loop_id);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
* Finally register the new FC Nexus with TCM
*/
__transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
return 0;
}
static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
uint16_t loop_id, bool conf_compl_supported)
{
struct qla_tgt *tgt = sess->tgt;
struct qla_hw_data *ha = tgt->ha;
struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr;
struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
struct tcm_qla2xxx_nacl, se_node_acl);
u32 key;
if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
sess,
sess->port_name[0], sess->port_name[1],
sess->port_name[2], sess->port_name[3],
sess->port_name[4], sess->port_name[5],
sess->port_name[6], sess->port_name[7],
sess->loop_id, loop_id,
sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
s_id.b.domain, s_id.b.area, s_id.b.al_pa);
if (sess->loop_id != loop_id) {
/*
* Because we can shuffle loop IDs around and we
* update different sessions non-atomically, we might
* have overwritten this session's old loop ID
* already, and we might end up overwriting some other
* session that will be updated later. So we have to
* be extra careful and we can't warn about those things...
*/
if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;
lport->lport_loopid_map[loop_id].se_nacl = se_nacl;
sess->loop_id = loop_id;
}
if (sess->s_id.b24 != s_id.b24) {
key = (((u32) sess->s_id.b.domain << 16) |
((u32) sess->s_id.b.area << 8) |
((u32) sess->s_id.b.al_pa));
if (btree_lookup32(&lport->lport_fcport_map, key))
WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
"Found wrong se_nacl when updating s_id %x:%x:%x\n",
sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
else
WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
key = (((u32) s_id.b.domain << 16) |
((u32) s_id.b.area << 8) |
((u32) s_id.b.al_pa));
if (btree_lookup32(&lport->lport_fcport_map, key)) {
WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
s_id.b.domain, s_id.b.area, s_id.b.al_pa);
btree_update32(&lport->lport_fcport_map, key, se_nacl);
} else {
btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
}
sess->s_id = s_id;
nacl->nport_id = key;
}
sess->conf_compl_supported = conf_compl_supported;
}
/*
* Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
*/
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
.handle_tmr = tcm_qla2xxx_handle_tmr,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
.free_session = tcm_qla2xxx_free_session,
.update_sess = tcm_qla2xxx_update_sess,
.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
.put_sess = tcm_qla2xxx_put_sess,
.shutdown_sess = tcm_qla2xxx_shutdown_sess,
};
static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
{
int rc;
rc = btree_init32(&lport->lport_fcport_map);
if (rc) {
pr_err("Unable to initialize lport->lport_fcport_map btree\n");
return rc;
}
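/*
 * FC loop IDs are 16-bit values, so reserve one lookup slot for each of
 * the 65536 possible IDs; the resulting table is large, hence vmalloc
 * rather than kmalloc.
 */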
lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
65536);
if (!lport->lport_loopid_map) {
pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
btree_destroy32(&lport->lport_fcport_map);
return -ENOMEM;
}
memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
* 65536);
pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
return 0;
}
static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
/*
* Setup local pointer to vha, NPIV VP pointer (if present) and
* vha->tcm_lport pointer
*/
lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
lport->qla_vha = vha;
return 0;
}
static struct se_wwn *tcm_qla2xxx_make_lport(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct tcm_qla2xxx_lport *lport;
u64 wwpn;
int ret = -ENODEV;
if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL);
lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
if (!lport) {
pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
return ERR_PTR(-ENOMEM);
}
lport->lport_wwpn = wwpn;
tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
wwpn);
sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);
ret = tcm_qla2xxx_init_lport(lport);
if (ret != 0)
goto out;
ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
tcm_qla2xxx_lport_register_cb, lport);
if (ret != 0)
goto out_lport;
return &lport->lport_wwn;
out_lport:
vfree(lport->lport_loopid_map);
btree_destroy32(&lport->lport_fcport_map);
out:
kfree(lport);
return ERR_PTR(ret);
}
static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
struct qla_hw_data *ha = vha->hw;
struct se_node_acl *node;
u32 key = 0;
/*
* Call into qla2x_target.c LLD logic to complete the
* shutdown of struct qla_tgt after the call to
* qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
*/
if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
qlt_stop_phase2(ha->tgt.qla_tgt);
qlt_lport_deregister(vha);
vfree(lport->lport_loopid_map);
btree_for_each_safe32(&lport->lport_fcport_map, key, node)
btree_remove32(&lport->lport_fcport_map, key);
btree_destroy32(&lport->lport_fcport_map);
kfree(lport);
}
static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct tcm_qla2xxx_lport *lport;
u64 npiv_wwpn, npiv_wwnn;
int ret;
if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
&npiv_wwpn, &npiv_wwnn) < 0)
return ERR_PTR(-EINVAL);
lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
if (!lport) {
pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
return ERR_PTR(-ENOMEM);
}
lport->lport_npiv_wwpn = npiv_wwpn;
lport->lport_npiv_wwnn = npiv_wwnn;
tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
/* FIXME: tcm_qla2xxx_npiv_make_lport */
ret = -ENOSYS;
if (ret != 0)
goto out;
return &lport->lport_wwn;
out:
kfree(lport);
return ERR_PTR(ret);
}
static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
struct Scsi_Host *sh = vha->host;
/*
* Notify libfc that we want to release the lport->npiv_vport
*/
fc_vport_terminate(lport->npiv_vport);
scsi_host_put(sh);
kfree(lport);
}
static ssize_t tcm_qla2xxx_wwn_show_attr_version(
struct target_fabric_configfs *tf,
char *page)
{
return sprintf(page,
"TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
utsname()->machine);
}
TF_WWN_ATTR_RO(tcm_qla2xxx, version);
static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
&tcm_qla2xxx_wwn_version.attr,
NULL,
};
static struct target_core_fabric_ops tcm_qla2xxx_ops = {
.get_fabric_name = tcm_qla2xxx_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
.tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
.tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
.tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
.tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
.tpg_check_demo_mode_write_protect =
tcm_qla2xxx_check_demo_write_protect,
.tpg_check_prod_mode_write_protect =
tcm_qla2xxx_check_prod_write_protect,
.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
.put_session = tcm_qla2xxx_put_session,
.shutdown_session = tcm_qla2xxx_shutdown_session,
.close_session = tcm_qla2xxx_close_session,
.sess_get_index = tcm_qla2xxx_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = tcm_qla2xxx_write_pending,
.write_pending_status = tcm_qla2xxx_write_pending_status,
.set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
.get_task_tag = tcm_qla2xxx_get_task_tag,
.get_cmd_state = tcm_qla2xxx_get_cmd_state,
.queue_data_in = tcm_qla2xxx_queue_data_in,
.queue_status = tcm_qla2xxx_queue_status,
.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
*/
.fabric_make_wwn = tcm_qla2xxx_make_lport,
.fabric_drop_wwn = tcm_qla2xxx_drop_lport,
.fabric_make_tpg = tcm_qla2xxx_make_tpg,
.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
.fabric_post_link = NULL,
.fabric_pre_unlink = NULL,
.fabric_make_np = NULL,
.fabric_drop_np = NULL,
.fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
.fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
};
static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
.tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
.tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
.tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
.tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
.tpg_check_demo_mode = tcm_qla2xxx_check_false,
.tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
.release_cmd = tcm_qla2xxx_release_cmd,
.put_session = tcm_qla2xxx_put_session,
.shutdown_session = tcm_qla2xxx_shutdown_session,
.close_session = tcm_qla2xxx_close_session,
.sess_get_index = tcm_qla2xxx_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = tcm_qla2xxx_write_pending,
.write_pending_status = tcm_qla2xxx_write_pending_status,
.set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
.get_task_tag = tcm_qla2xxx_get_task_tag,
.get_cmd_state = tcm_qla2xxx_get_cmd_state,
.queue_data_in = tcm_qla2xxx_queue_data_in,
.queue_status = tcm_qla2xxx_queue_status,
.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
*/
.fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
.fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
.fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
.fabric_post_link = NULL,
.fabric_pre_unlink = NULL,
.fabric_make_np = NULL,
.fabric_drop_np = NULL,
.fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
.fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
};
static int tcm_qla2xxx_register_configfs(void)
{
struct target_fabric_configfs *fabric, *npiv_fabric;
int ret;
pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
utsname()->machine);
/*
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
if (IS_ERR(fabric)) {
pr_err("target_fabric_configfs_init() failed\n");
return PTR_ERR(fabric);
}
/*
* Setup fabric->tf_ops from our local tcm_qla2xxx_ops
*/
fabric->tf_ops = tcm_qla2xxx_ops;
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
tcm_qla2xxx_tpg_attrib_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* Register the fabric for use within TCM
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
return ret;
}
/*
* Setup our local pointer to *fabric
*/
tcm_qla2xxx_fabric_configfs = fabric;
pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
/*
* Register the top level struct config_item_type for NPIV with TCM core
*/
npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
if (IS_ERR(npiv_fabric)) {
pr_err("target_fabric_configfs_init() failed\n");
ret = PTR_ERR(npiv_fabric);
goto out_fabric;
}
/*
* Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
*/
npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
/*
* Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
*/
TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* Register the npiv_fabric for use within TCM
*/
ret = target_fabric_configfs_register(npiv_fabric);
if (ret < 0) {
pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
goto out_fabric;
}
/*
* Setup our local pointer to *npiv_fabric
*/
tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
WQ_MEM_RECLAIM, 0);
if (!tcm_qla2xxx_free_wq) {
ret = -ENOMEM;
goto out_fabric_npiv;
}
tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
if (!tcm_qla2xxx_cmd_wq) {
ret = -ENOMEM;
goto out_free_wq;
}
return 0;
out_free_wq:
destroy_workqueue(tcm_qla2xxx_free_wq);
out_fabric_npiv:
target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
out_fabric:
target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
return ret;
}
static void tcm_qla2xxx_deregister_configfs(void)
{
destroy_workqueue(tcm_qla2xxx_cmd_wq);
destroy_workqueue(tcm_qla2xxx_free_wq);
target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
tcm_qla2xxx_fabric_configfs = NULL;
pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
tcm_qla2xxx_npiv_fabric_configfs = NULL;
pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
}
static int __init tcm_qla2xxx_init(void)
{
int ret;
ret = tcm_qla2xxx_register_configfs();
if (ret < 0)
return ret;
return 0;
}
static void __exit tcm_qla2xxx_exit(void)
{
tcm_qla2xxx_deregister_configfs();
}
MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
MODULE_LICENSE("GPL");
module_init(tcm_qla2xxx_init);
module_exit(tcm_qla2xxx_exit);
| gpl-2.0 |
Motorhead1991/android_kernel_samsung_exynos7420 | arch/m68k/mm/hwtest.c | 1490 | 2661 | /* Tests for presence or absence of hardware registers.
* This code was originally in atari/config.c, but I noticed
* that it was also in drivers/nubus/nubus.c and I wanted to
* use it in hp300/config.c, so it seemed sensible to pull it
* out into its own file.
*
* The test is for use in cases where reading a hardware register
* that isn't present would cause a bus error. We set up a
* temporary handler so that this doesn't kill the kernel.
*
* There is a test-by-reading and a test-by-writing; I present
* them here complete with the comments from the original atari
* config.c...
* -- PMM <pmaydell@chiark.greenend.org.uk>, 05/1998
*/
/* This function tests for the presence of an address, especially a
* hardware register address. It is called very early in the kernel
* initialization process, when the VBR register isn't set up yet. On
* an Atari, it still points to address 0, which is unmapped. So a bus
* error would cause another bus error while fetching the exception
* vector, and the CPU would do nothing at all. So we needed to set up
* a temporary VBR and a vector table for the duration of the test.
*/
#include <linux/module.h>
int hwreg_present( volatile void *regp )
{
int ret = 0;
unsigned long flags;
long save_sp, save_vbr;
long tmp_vectors[3];
local_irq_save(flags);
__asm__ __volatile__
( "movec %/vbr,%2\n\t"
"movel #Lberr1,%4@(8)\n\t"
"movec %4,%/vbr\n\t"
"movel %/sp,%1\n\t"
"moveq #0,%0\n\t"
"tstb %3@\n\t"
"nop\n\t"
"moveq #1,%0\n"
"Lberr1:\n\t"
"movel %1,%/sp\n\t"
"movec %2,%/vbr"
: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
: "a" (regp), "a" (tmp_vectors)
);
local_irq_restore(flags);
return( ret );
}
EXPORT_SYMBOL(hwreg_present);
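/*
 * Illustrative use from platform probe code (names are examples only):
 *
 * if (hwreg_present((void *)reg_base))
 *         register_the_device();
 *
 * where reg_base is a hypothetical register address to probe.
 */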
/* Basically the same, but writes a value into a word register, protected
* by a bus error handler. Returns 1 if successful, 0 otherwise.
*/
int hwreg_write( volatile void *regp, unsigned short val )
{
int ret;
unsigned long flags;
long save_sp, save_vbr;
long tmp_vectors[3];
local_irq_save(flags);
__asm__ __volatile__
( "movec %/vbr,%2\n\t"
"movel #Lberr2,%4@(8)\n\t"
"movec %4,%/vbr\n\t"
"movel %/sp,%1\n\t"
"moveq #0,%0\n\t"
"movew %5,%3@\n\t"
"nop \n\t" /* If this nop isn't present, 'ret' may already be
* loaded with 1 at the time the bus error
* happens! */
"moveq #1,%0\n"
"Lberr2:\n\t"
"movel %1,%/sp\n\t"
"movec %2,%/vbr"
: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
: "a" (regp), "a" (tmp_vectors), "g" (val)
);
local_irq_restore(flags);
return( ret );
}
EXPORT_SYMBOL(hwreg_write);
| gpl-2.0 |
EnJens/android-tegra-nv-2.6.39 | arch/arm/mm/alignment.c | 1490 | 25261 | /*
* linux/arch/arm/mm/alignment.c
*
* Copyright (C) 1995 Linus Torvalds
* Modifications for ARM processor (c) 1995-2001 Russell King
* Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
* - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
* Copyright (C) 1996, Cygnus Software Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include "fault.h"
/*
* 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
* /proc/sys/debug/alignment, modified and integrated into
* Linux 2.1 by Russell King
*
* Speed optimisations and better fault handling by Russell King.
*
* *** NOTE ***
* This code is not portable to processors with late data abort handling.
*/
#define CODING_BITS(i) (i & 0x0e000000)
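/*
 * CODING_BITS() extracts instruction bits [27:25], which select the
 * instruction class dispatched on in do_alignment() below:
 * 0x00000000 = extra load/store (LDRH/STRH/LDRD/STRD/SWP),
 * 0x04000000 = LDR/STR immediate, 0x06000000 = LDR/STR register,
 * 0x08000000 = LDM/STM (or a converted Thumb-2 32-bit form).
 */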
#define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */
#define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */
#define LDST_U_BIT(i) (i & (1 << 23)) /* Add offset */
#define LDST_W_BIT(i) (i & (1 << 21)) /* Writeback */
#define LDST_L_BIT(i) (i & (1 << 20)) /* Load */
#define LDST_P_EQ_U(i) ((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)
#define LDSTHD_I_BIT(i) (i & (1 << 22)) /* double/half-word immed */
#define LDM_S_BIT(i) (i & (1 << 22)) /* write CPSR from SPSR */
#define RN_BITS(i) ((i >> 16) & 15) /* Rn */
#define RD_BITS(i) ((i >> 12) & 15) /* Rd */
#define RM_BITS(i) (i & 15) /* Rm */
#define REGMASK_BITS(i) (i & 0xffff)
#define OFFSET_BITS(i) (i & 0x0fff)
#define IS_SHIFT(i) (i & 0x0ff0)
#define SHIFT_BITS(i) ((i >> 7) & 0x1f)
#define SHIFT_TYPE(i) (i & 0x60)
#define SHIFT_LSL 0x00
#define SHIFT_LSR 0x20
#define SHIFT_ASR 0x40
#define SHIFT_RORRRX 0x60
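/*
 * SHIFT_TYPE() isolates bits [6:5] of the instruction: 00 = LSL,
 * 01 = LSR, 10 = ASR, 11 = ROR (treated as RRX when the shift amount
 * is zero).
 */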
#define BAD_INSTR 0xdeadc0de
/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either f800h,e800h,f800h */
#define IS_T32(hi16) \
(((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
static unsigned long ai_user;
static unsigned long ai_sys;
static unsigned long ai_skipped;
static unsigned long ai_half;
static unsigned long ai_word;
static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
core_param(alignment, ai_usermode, int, 0600);
#define UM_WARN (1 << 0)
#define UM_FIXUP (1 << 1)
#define UM_SIGNAL (1 << 2)
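/*
 * ai_usermode is a bitmask of the UM_* flags above; the usermode_action[]
 * strings below index it directly, e.g. 3 == UM_WARN | UM_FIXUP ==
 * "fixup+warn".
 */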
#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
"ignored",
"warn",
"fixup",
"fixup+warn",
"signal",
"signal+warn"
};
static int alignment_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "User:\t\t%lu\n", ai_user);
seq_printf(m, "System:\t\t%lu\n", ai_sys);
seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
seq_printf(m, "Half:\t\t%lu\n", ai_half);
seq_printf(m, "Word:\t\t%lu\n", ai_word);
if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
usermode_action[ai_usermode]);
return 0;
}
static int alignment_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, alignment_proc_show, NULL);
}
static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
char mode;
if (count > 0) {
if (get_user(mode, buffer))
return -EFAULT;
if (mode >= '0' && mode <= '5')
ai_usermode = mode - '0';
}
return count;
}
static const struct file_operations alignment_proc_fops = {
.open = alignment_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = alignment_proc_write,
};
#endif /* CONFIG_PROC_FS */
union offset_union {
unsigned long un;
signed long sn;
};
#define TYPE_ERROR 0
#define TYPE_FAULT 1
#define TYPE_LDST 2
#define TYPE_DONE 3
#ifdef __ARMEB__
#define BE 1
#define FIRST_BYTE_16 "mov %1, %1, ror #8\n"
#define FIRST_BYTE_32 "mov %1, %1, ror #24\n"
#define NEXT_BYTE "ror #24"
#else
#define BE 0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#define NEXT_BYTE "lsr #8"
#endif
#define __get8_unaligned_check(ins,val,addr,err) \
__asm__( \
ARM( "1: "ins" %1, [%2], #1\n" ) \
THUMB( "1: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, #1\n" \
" b 2b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .popsection\n" \
: "=r" (err), "=&r" (val), "=r" (addr) \
: "0" (err), "2" (addr))
#define __get16_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v, a = addr; \
__get8_unaligned_check(ins,v,a,err); \
val = v << ((BE) ? 8 : 0); \
__get8_unaligned_check(ins,v,a,err); \
val |= v << ((BE) ? 0 : 8); \
if (err) \
goto fault; \
} while (0)
#define get16_unaligned_check(val,addr) \
__get16_unaligned_check("ldrb",val,addr)
#define get16t_unaligned_check(val,addr) \
__get16_unaligned_check("ldrbt",val,addr)
#define __get32_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v, a = addr; \
__get8_unaligned_check(ins,v,a,err); \
val = v << ((BE) ? 24 : 0); \
__get8_unaligned_check(ins,v,a,err); \
val |= v << ((BE) ? 16 : 8); \
__get8_unaligned_check(ins,v,a,err); \
val |= v << ((BE) ? 8 : 16); \
__get8_unaligned_check(ins,v,a,err); \
val |= v << ((BE) ? 0 : 24); \
if (err) \
goto fault; \
} while (0)
#define get32_unaligned_check(val,addr) \
__get32_unaligned_check("ldrb",val,addr)
#define get32t_unaligned_check(val,addr) \
__get32_unaligned_check("ldrbt",val,addr)
#define __put16_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( FIRST_BYTE_16 \
ARM( "1: "ins" %1, [%2], #1\n" ) \
THUMB( "1: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
"2: "ins" %1, [%2]\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, #1\n" \
" b 3b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
" .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
goto fault; \
} while (0)
#define put16_unaligned_check(val,addr) \
__put16_unaligned_check("strb",val,addr)
#define put16t_unaligned_check(val,addr) \
__put16_unaligned_check("strbt",val,addr)
#define __put32_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( FIRST_BYTE_32 \
ARM( "1: "ins" %1, [%2], #1\n" ) \
THUMB( "1: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
ARM( "2: "ins" %1, [%2], #1\n" ) \
THUMB( "2: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
ARM( "3: "ins" %1, [%2], #1\n" ) \
THUMB( "3: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
"4: "ins" %1, [%2]\n" \
"5:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"6: mov %0, #1\n" \
" b 5b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 6b\n" \
" .long 2b, 6b\n" \
" .long 3b, 6b\n" \
" .long 4b, 6b\n" \
" .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
goto fault; \
} while (0)
#define put32_unaligned_check(val,addr) \
__put32_unaligned_check("strb", val, addr)
#define put32t_unaligned_check(val,addr) \
__put32_unaligned_check("strbt", val, addr)
static void
do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs *regs, union offset_union offset)
{
if (!LDST_U_BIT(instr))
offset.un = -offset.un;
if (!LDST_P_BIT(instr))
addr += offset.un;
if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
regs->uregs[RN_BITS(instr)] = addr;
}
static int
do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
ai_half += 1;
if (user_mode(regs))
goto user;
if (LDST_L_BIT(instr)) {
unsigned long val;
get16_unaligned_check(val, addr);
/* signed half-word? */
if (instr & 0x40)
val = (signed long)((signed short) val);
regs->uregs[rd] = val;
} else
put16_unaligned_check(regs->uregs[rd], addr);
return TYPE_LDST;
user:
if (LDST_L_BIT(instr)) {
unsigned long val;
get16t_unaligned_check(val, addr);
/* signed half-word? */
if (instr & 0x40)
val = (signed long)((signed short) val);
regs->uregs[rd] = val;
} else
put16t_unaligned_check(regs->uregs[rd], addr);
return TYPE_LDST;
fault:
return TYPE_FAULT;
}
static int
do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
unsigned int rd2;
int load;
if ((instr & 0xfe000000) == 0xe8000000) {
/* ARMv7 Thumb-2 32-bit LDRD/STRD */
rd2 = (instr >> 8) & 0xf;
load = !!(LDST_L_BIT(instr));
} else if (((rd & 1) == 1) || (rd == 14))
goto bad;
else {
load = ((instr & 0xf0) == 0xd0);
rd2 = rd + 1;
}
ai_dword += 1;
if (user_mode(regs))
goto user;
if (load) {
unsigned long val;
get32_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32_unaligned_check(val, addr + 4);
regs->uregs[rd2] = val;
} else {
put32_unaligned_check(regs->uregs[rd], addr);
put32_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
user:
if (load) {
unsigned long val;
get32t_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32t_unaligned_check(val, addr + 4);
regs->uregs[rd2] = val;
} else {
put32t_unaligned_check(regs->uregs[rd], addr);
put32t_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
bad:
return TYPE_ERROR;
fault:
return TYPE_FAULT;
}
static int
do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
ai_word += 1;
if ((!LDST_P_BIT(instr) && LDST_W_BIT(instr)) || user_mode(regs))
goto trans;
if (LDST_L_BIT(instr)) {
unsigned int val;
get32_unaligned_check(val, addr);
regs->uregs[rd] = val;
} else
put32_unaligned_check(regs->uregs[rd], addr);
return TYPE_LDST;
trans:
if (LDST_L_BIT(instr)) {
unsigned int val;
get32t_unaligned_check(val, addr);
regs->uregs[rd] = val;
} else
put32t_unaligned_check(regs->uregs[rd], addr);
return TYPE_LDST;
fault:
return TYPE_FAULT;
}
/*
* LDM/STM alignment handler.
*
* There are 4 variants of this instruction:
*
* B = rn pointer before instruction, A = rn pointer after instruction
* ------ increasing address ----->
* | | r0 | r1 | ... | rx | |
* PU = 01 B A
* PU = 11 B A
* PU = 00 A B
* PU = 10 A B
*/
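/*
 * Worked example (illustrative): LDMIA r0, {r1-r3} has PU = 01, so with
 * r0 = 0x1000 the three words are transferred at 0x1000, 0x1004 and
 * 0x1008, and the writeback value (if the W bit is set) would be 0x100c.
 */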
static int
do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *regs)
{
unsigned int rd, rn, correction, nr_regs, regbits;
unsigned long eaddr, newaddr;
if (LDM_S_BIT(instr))
goto bad;
correction = 4; /* processor implementation defined */
regs->ARM_pc += correction;
ai_multi += 1;
/* count the number of registers in the mask to be transferred */
nr_regs = hweight16(REGMASK_BITS(instr)) * 4;
rn = RN_BITS(instr);
newaddr = eaddr = regs->uregs[rn];
if (!LDST_U_BIT(instr))
nr_regs = -nr_regs;
newaddr += nr_regs;
if (!LDST_U_BIT(instr))
eaddr = newaddr;
if (LDST_P_EQ_U(instr)) /* U = P */
eaddr += 4;
/*
* For alignment faults on the ARM922T/ARM920T the MMU makes
* the FSR (and hence addr) equal to the updated base address
* of the multiple access rather than the restored value.
* Switch this message off if we've got an ARM92[02], otherwise
* [ls]dm alignment faults are noisy!
*/
#if !(defined CONFIG_CPU_ARM922T) && !(defined CONFIG_CPU_ARM920T)
/*
* This is a "hint" - we already have eaddr worked out by the
* processor for us.
*/
if (addr != eaddr) {
printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
"addr = %08lx, eaddr = %08lx\n",
instruction_pointer(regs), instr, addr, eaddr);
show_regs(regs);
}
#endif
if (user_mode(regs)) {
for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
regbits >>= 1, rd += 1)
if (regbits & 1) {
if (LDST_L_BIT(instr)) {
unsigned int val;
get32t_unaligned_check(val, eaddr);
regs->uregs[rd] = val;
} else
put32t_unaligned_check(regs->uregs[rd], eaddr);
eaddr += 4;
}
} else {
for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
regbits >>= 1, rd += 1)
if (regbits & 1) {
if (LDST_L_BIT(instr)) {
unsigned int val;
get32_unaligned_check(val, eaddr);
regs->uregs[rd] = val;
} else
put32_unaligned_check(regs->uregs[rd], eaddr);
eaddr += 4;
}
}
if (LDST_W_BIT(instr))
regs->uregs[rn] = newaddr;
if (!LDST_L_BIT(instr) || !(REGMASK_BITS(instr) & (1 << 15)))
regs->ARM_pc -= correction;
return TYPE_DONE;
fault:
regs->ARM_pc -= correction;
return TYPE_FAULT;
bad:
printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
return TYPE_ERROR;
}
/*
* Convert Thumb ld/st instruction forms to equivalent ARM instructions so
* we can reuse ARM userland alignment fault fixups for Thumb.
*
* This implementation was initially based on the algorithm found in
* gdb/sim/arm/thumbemu.c. It is basically just a code reduction of same
* to convert only Thumb ld/st instruction forms to equivalent ARM forms.
*
* NOTES:
* 1. Comments below refer to ARM ARM DDI0100E Thumb Instruction sections.
* 2. If for some reason we're passed a non-ld/st Thumb instruction to
* decode, we return 0xdeadc0de. This should never happen under normal
* circumstances but if it does, we've got other problems to deal with
* elsewhere and we obviously can't fix those problems here.
*/
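/*
 * For instance, the 16-bit Thumb instruction 0x6868 (LDR r0, [r5, #4])
 * is converted to the equivalent ARM instruction 0xe5950004.
 */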
static unsigned long
thumb2arm(u16 tinstr)
{
u32 L = (tinstr & (1<<11)) >> 11;
switch ((tinstr & 0xf800) >> 11) {
/* 6.5.1 Format 1: */
case 0x6000 >> 11: /* 7.1.52 STR(1) */
case 0x6800 >> 11: /* 7.1.26 LDR(1) */
case 0x7000 >> 11: /* 7.1.55 STRB(1) */
case 0x7800 >> 11: /* 7.1.30 LDRB(1) */
return 0xe5800000 |
((tinstr & (1<<12)) << (22-12)) | /* fixup */
(L<<20) | /* L==1? */
((tinstr & (7<<0)) << (12-0)) | /* Rd */
((tinstr & (7<<3)) << (16-3)) | /* Rn */
((tinstr & (31<<6)) >> /* immed_5 */
(6 - ((tinstr & (1<<12)) ? 0 : 2)));
case 0x8000 >> 11: /* 7.1.57 STRH(1) */
case 0x8800 >> 11: /* 7.1.32 LDRH(1) */
return 0xe1c000b0 |
(L<<20) | /* L==1? */
((tinstr & (7<<0)) << (12-0)) | /* Rd */
((tinstr & (7<<3)) << (16-3)) | /* Rn */
((tinstr & (7<<6)) >> (6-1)) | /* immed_5[2:0] */
((tinstr & (3<<9)) >> (9-8)); /* immed_5[4:3] */
/* 6.5.1 Format 2: */
case 0x5000 >> 11:
case 0x5800 >> 11:
{
static const u32 subset[8] = {
0xe7800000, /* 7.1.53 STR(2) */
0xe18000b0, /* 7.1.58 STRH(2) */
0xe7c00000, /* 7.1.56 STRB(2) */
0xe19000d0, /* 7.1.34 LDRSB */
0xe7900000, /* 7.1.27 LDR(2) */
0xe19000b0, /* 7.1.33 LDRH(2) */
0xe7d00000, /* 7.1.31 LDRB(2) */
0xe19000f0 /* 7.1.35 LDRSH */
};
return subset[(tinstr & (7<<9)) >> 9] |
((tinstr & (7<<0)) << (12-0)) | /* Rd */
((tinstr & (7<<3)) << (16-3)) | /* Rn */
((tinstr & (7<<6)) >> (6-0)); /* Rm */
}
/* 6.5.1 Format 3: */
case 0x4800 >> 11: /* 7.1.28 LDR(3) */
/* NOTE: This case is not technically possible. We're
* loading 32-bit memory data via PC relative
* addressing mode. So we can and should eliminate
* this case. But I'll leave it here for now.
*/
return 0xe59f0000 |
((tinstr & (7<<8)) << (12-8)) | /* Rd */
((tinstr & 255) << (2-0)); /* immed_8 */
/* 6.5.1 Format 4: */
case 0x9000 >> 11: /* 7.1.54 STR(3) */
case 0x9800 >> 11: /* 7.1.29 LDR(4) */
return 0xe58d0000 |
(L<<20) | /* L==1? */
((tinstr & (7<<8)) << (12-8)) | /* Rd */
((tinstr & 255) << 2); /* immed_8 */
/* 6.6.1 Format 1: */
case 0xc000 >> 11: /* 7.1.51 STMIA */
case 0xc800 >> 11: /* 7.1.25 LDMIA */
{
u32 Rn = (tinstr & (7<<8)) >> 8;
u32 W = ((L<<Rn) & (tinstr&255)) ? 0 : 1<<21;
return 0xe8800000 | W | (L<<20) | (Rn<<16) |
(tinstr&255);
}
/* 6.6.1 Format 2: */
case 0xb000 >> 11: /* 7.1.48 PUSH */
case 0xb800 >> 11: /* 7.1.47 POP */
if ((tinstr & (3 << 9)) == 0x0400) {
static const u32 subset[4] = {
0xe92d0000, /* STMDB sp!,{registers} */
0xe92d4000, /* STMDB sp!,{registers,lr} */
0xe8bd0000, /* LDMIA sp!,{registers} */
0xe8bd8000 /* LDMIA sp!,{registers,pc} */
};
return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
(tinstr & 255); /* register_list */
}
/* Else fall through for illegal instruction case */
default:
return BAD_INSTR;
}
}
/*
* Convert Thumb-2 32 bit LDM, STM, LDRD, STRD to equivalent instruction
* handlable by ARM alignment handler, also find the corresponding handler,
* so that we can reuse ARM userland alignment fault fixups for Thumb.
*
* @pinstr: original Thumb-2 instruction; returns new handlable instruction
* @regs: register context.
* @poffset: return offset from faulted addr for later writeback
*
* NOTES:
* 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
* 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
*/
static void *
do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
union offset_union *poffset)
{
unsigned long instr = *pinstr;
u16 tinst1 = (instr >> 16) & 0xffff;
u16 tinst2 = instr & 0xffff;
poffset->un = 0;
switch (tinst1 & 0xffe0) {
/* A6.3.5 Load/Store multiple */
case 0xe880: /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
case 0xe8a0: /* ...above writeback version */
case 0xe900: /* STMDB/STMFD, LDMDB/LDMEA */
case 0xe920: /* ...above writeback version */
/* no need offset decision since handler calculates it */
return do_alignment_ldmstm;
case 0xf840: /* POP/PUSH T3 (single register) */
if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
u32 L = !!(LDST_L_BIT(instr));
const u32 subset[2] = {
0xe92d0000, /* STMDB sp!,{registers} */
0xe8bd0000, /* LDMIA sp!,{registers} */
};
*pinstr = subset[L] | (1<<RD_BITS(instr));
return do_alignment_ldmstm;
}
/* Else fall through for illegal instruction case */
break;
/* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
case 0xe860:
case 0xe960:
case 0xe8e0:
case 0xe9e0:
poffset->un = (tinst2 & 0xff) << 2;
case 0xe940:
case 0xe9c0:
return do_alignment_ldrdstrd;
/*
* No need to handle load/store instructions up to word size
* since ARMv6 and later CPUs can perform unaligned accesses.
*/
default:
break;
}
return NULL;
}
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
union offset_union offset;
unsigned long instr = 0, instrptr;
int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
unsigned int type;
mm_segment_t fs;
unsigned int fault;
u16 tinstr = 0;
int isize = 4;
int thumb2_32b = 0;
instrptr = instruction_pointer(regs);
fs = get_fs();
set_fs(KERNEL_DS);
if (thumb_mode(regs)) {
fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2 = 0;
fault = __get_user(tinst2, (u16 *)(instrptr+2));
instr = (tinstr << 16) | tinst2;
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
}
} else
fault = __get_user(instr, (u32 *)instrptr);
set_fs(fs);
if (fault) {
type = TYPE_FAULT;
goto bad_or_fault;
}
if (user_mode(regs))
goto user;
ai_sys += 1;
fixup:
regs->ARM_pc += isize;
switch (CODING_BITS(instr)) {
case 0x00000000: /* 3.13.4 load/store instruction extensions */
if (LDSTHD_I_BIT(instr))
offset.un = (instr & 0xf00) >> 4 | (instr & 15);
else
offset.un = regs->uregs[RM_BITS(instr)];
if ((instr & 0x000000f0) == 0x000000b0 || /* LDRH, STRH */
(instr & 0x001000f0) == 0x001000f0) /* LDRSH */
handler = do_alignment_ldrhstrh;
else if ((instr & 0x001000f0) == 0x000000d0 || /* LDRD */
(instr & 0x001000f0) == 0x000000f0) /* STRD */
handler = do_alignment_ldrdstrd;
else if ((instr & 0x01f00ff0) == 0x01000090) /* SWP */
goto swp;
else
goto bad;
break;
case 0x04000000: /* ldr or str immediate */
offset.un = OFFSET_BITS(instr);
handler = do_alignment_ldrstr;
break;
case 0x06000000: /* ldr or str register */
offset.un = regs->uregs[RM_BITS(instr)];
if (IS_SHIFT(instr)) {
unsigned int shiftval = SHIFT_BITS(instr);
switch(SHIFT_TYPE(instr)) {
case SHIFT_LSL:
offset.un <<= shiftval;
break;
case SHIFT_LSR:
offset.un >>= shiftval;
break;
case SHIFT_ASR:
offset.sn >>= shiftval;
break;
case SHIFT_RORRRX:
if (shiftval == 0) {
offset.un >>= 1;
if (regs->ARM_cpsr & PSR_C_BIT)
offset.un |= 1 << 31;
} else
offset.un = offset.un >> shiftval |
offset.un << (32 - shiftval);
break;
}
}
handler = do_alignment_ldrstr;
break;
case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */
if (thumb2_32b)
handler = do_alignment_t32_to_handler(&instr, regs, &offset);
else
handler = do_alignment_ldmstm;
break;
default:
goto bad;
}
if (!handler)
goto bad;
type = handler(addr, instr, regs);
if (type == TYPE_ERROR || type == TYPE_FAULT) {
regs->ARM_pc -= isize;
goto bad_or_fault;
}
if (type == TYPE_LDST)
do_alignment_finish_ldst(addr, instr, regs, offset);
return 0;
bad_or_fault:
if (type == TYPE_ERROR)
goto bad;
/*
* We got a fault - fix it up, or die.
*/
do_bad_area(addr, fsr, regs);
return 0;
swp:
printk(KERN_ERR "Alignment trap: not handling swp instruction\n");
bad:
/*
* Oops, we didn't handle the instruction.
*/
printk(KERN_ERR "Alignment trap: not handling instruction "
"%0*lx at [<%08lx>]\n",
isize << 1,
isize == 2 ? tinstr : instr, instrptr);
ai_skipped += 1;
return 1;
user:
ai_user += 1;
if (ai_usermode & UM_WARN)
printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
"Address=0x%08lx FSR 0x%03x\n", current->comm,
task_pid_nr(current), instrptr,
isize << 1,
isize == 2 ? tinstr : instr,
addr, fsr);
if (ai_usermode & UM_FIXUP)
goto fixup;
if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
else {
/*
* We're about to disable the alignment trap and return to
* user space. But if an interrupt occurs before actually
* reaching user space, then the IRQ vector entry code will
* notice that we were still in kernel space and therefore
* the alignment trap won't be re-enabled in that case as it
* is presumed to be always on from kernel space.
* Let's prevent that race by disabling interrupts here (they
* are disabled on the way back to user space anyway in
* entry-common.S) and disable the alignment trap only if
* there is no work pending for this thread.
*/
raw_local_irq_disable();
if (!(current_thread_info()->flags & _TIF_WORK_MASK))
set_cr(cr_no_alignment);
}
return 0;
}
/*
* This needs to be done after sysctl_init, otherwise sys/ will be
* overwritten. Actually, this shouldn't be in sys/ at all since
* it isn't a sysctl, and it doesn't contain sysctl information.
* We now locate it in /proc/cpu/alignment instead.
*/
static int __init alignment_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
&alignment_proc_fops);
if (!res)
return -ENOMEM;
#endif
/*
* ARMv6 and later CPUs can perform unaligned accesses for
* most single load and store instructions up to word size.
* LDM, STM, LDRD and STRD still need to be handled.
*
* Ignoring the alignment fault is not an option on these
* CPUs since we spin re-faulting the instruction without
* making any progress.
*/
if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
ai_usermode = UM_FIXUP;
}
hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
/*
* ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
* fault, not as alignment error.
*
* TODO: handle ARMv6K properly. Runtime check for 'K' extension is
* needed.
*/
if (cpu_architecture() <= CPU_ARCH_ARMv6) {
hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
}
return 0;
}
fs_initcall(alignment_init);
| gpl-2.0 |
dhiru1602/android_kernel_samsung_jf | drivers/base/power/sysfs.c | 1746 | 16611 | /*
* drivers/base/power/sysfs.c - sysfs entries for device PM
*/
#include <linux/device.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
/*
* control - Report/change current runtime PM setting of the device
*
* Runtime power management of a device can be blocked with the help of
* this attribute. All devices have one of the following two values for
* the power/control file:
*
* + "auto\n" to allow the device to be power managed at run time;
* + "on\n" to prevent the device from being power managed at run time;
*
* The default for all devices is "auto", which means that devices may be
* subject to automatic power management, depending on their drivers.
* Changing this attribute to "on" prevents the driver from power managing
* the device at run time. Doing that while the device is suspended causes
* it to be woken up.
*
* wakeup - Report/change current wakeup option for device
*
* Some devices support "wakeup" events, which are hardware signals
* used to activate devices from suspended or low power states. Such
* devices have one of three values for the sysfs power/wakeup file:
*
* + "enabled\n" to issue the events;
* + "disabled\n" not to do so; or
* + "\n" for temporary or permanent inability to issue wakeup.
*
* (For example, unconfigured USB devices can't issue wakeups.)
*
* Familiar examples of devices that can issue wakeup events include
* keyboards and mice (both PS2 and USB styles), power buttons, modems,
* "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
* will wake the entire system from a suspend state; others may just
* wake up the device (if the system as a whole is already active).
* Some wakeup events use normal IRQ lines; others use special
* out-of-band signaling.
*
* It is the responsibility of device drivers to enable (or disable)
* wakeup signaling as part of changing device power states, respecting
* the policy choices provided through the driver model.
*
* Devices may not be able to generate wakeup events from all power
* states. Also, the events may be ignored in some configurations;
* for example, they might need help from other devices that aren't
* active, or which may have wakeup disabled. Some drivers rely on
* wakeup events internally (unless they are disabled), keeping
* their hardware in low power modes whenever they're unused. This
* saves runtime power, without requiring system-wide sleep states.
*
* async - Report/change current async suspend setting for the device
*
* Asynchronous suspend and resume of the device during system-wide power
* state transitions can be enabled by writing "enabled" to this file.
* Analogously, if "disabled" is written to this file, the device will be
* suspended and resumed synchronously.
*
* All devices have one of the following two values for power/async:
*
* + "enabled\n" to permit the asynchronous suspend/resume of the device;
* + "disabled\n" to forbid it;
*
* NOTE: It generally is unsafe to permit the asynchronous suspend/resume
* of a device unless it is certain that all of the PM dependencies of the
* device are known to the PM core. However, for some devices this
* attribute is set to "enabled" by bus type code or device drivers, and in
* those cases it should be safe to leave the default value.
*
* autosuspend_delay_ms - Report/change a device's autosuspend_delay value
*
* Some drivers don't want to carry out a runtime suspend as soon as a
* device becomes idle; they want it always to remain idle for some period
* of time before suspending it. This period is the autosuspend_delay
* value (expressed in milliseconds) and it can be controlled by the user.
* If the value is negative then the device will never be runtime
* suspended.
*
* NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
* value are used only if the driver calls pm_runtime_use_autosuspend().
*
* wakeup_count - Report the number of wakeup events related to the device
*/
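/*
 * Illustrative usage from user space (paths are examples only):
 *
 * echo auto > /sys/devices/.../power/control
 * echo enabled > /sys/devices/.../power/wakeup
 */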
static const char enabled[] = "enabled";
static const char disabled[] = "disabled";
const char power_group_name[] = "power";
EXPORT_SYMBOL_GPL(power_group_name);
#ifdef CONFIG_PM_RUNTIME
static const char ctrl_auto[] = "auto";
static const char ctrl_on[] = "on";
static ssize_t control_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%s\n",
dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}
static ssize_t control_store(struct device * dev, struct device_attribute *attr,
const char * buf, size_t n)
{
char *cp;
int len = n;
cp = memchr(buf, '\n', n);
if (cp)
len = cp - buf;
device_lock(dev);
if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
pm_runtime_allow(dev);
else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
pm_runtime_forbid(dev);
else
n = -EINVAL;
device_unlock(dev);
return n;
}
static DEVICE_ATTR(control, 0644, control_show, control_store);
static ssize_t rtpm_active_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
spin_lock_irq(&dev->power.lock);
update_pm_runtime_accounting(dev);
ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
spin_unlock_irq(&dev->power.lock);
return ret;
}
static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
static ssize_t rtpm_suspended_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
spin_lock_irq(&dev->power.lock);
update_pm_runtime_accounting(dev);
ret = sprintf(buf, "%i\n",
jiffies_to_msecs(dev->power.suspended_jiffies));
spin_unlock_irq(&dev->power.lock);
return ret;
}
static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
static ssize_t rtpm_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *p;
if (dev->power.runtime_error) {
p = "error\n";
} else if (dev->power.disable_depth) {
p = "unsupported\n";
} else {
switch (dev->power.runtime_status) {
case RPM_SUSPENDED:
p = "suspended\n";
break;
case RPM_SUSPENDING:
p = "suspending\n";
break;
case RPM_RESUMING:
p = "resuming\n";
break;
case RPM_ACTIVE:
p = "active\n";
break;
default:
return -EIO;
}
}
return sprintf(buf, p);
}
static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
static ssize_t autosuspend_delay_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (!dev->power.use_autosuspend)
return -EIO;
return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
}
static ssize_t autosuspend_delay_ms_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
long delay;
if (!dev->power.use_autosuspend)
return -EIO;
if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
return -EINVAL;
device_lock(dev);
pm_runtime_set_autosuspend_delay(dev, delay);
device_unlock(dev);
return n;
}
static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
autosuspend_delay_ms_store);
static ssize_t pm_qos_latency_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
}
static ssize_t pm_qos_latency_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t n)
{
s32 value;
int ret;
if (kstrtos32(buf, 0, &value))
return -EINVAL;
if (value < 0)
return -EINVAL;
ret = dev_pm_qos_update_request(dev->power.pq_req, value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
pm_qos_latency_show, pm_qos_latency_store);
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
static ssize_t
wake_show(struct device * dev, struct device_attribute *attr, char * buf)
{
return sprintf(buf, "%s\n", device_can_wakeup(dev)
? (device_may_wakeup(dev) ? enabled : disabled)
: "");
}
static ssize_t
wake_store(struct device * dev, struct device_attribute *attr,
const char * buf, size_t n)
{
char *cp;
int len = n;
if (!device_can_wakeup(dev))
return -EINVAL;
cp = memchr(buf, '\n', n);
if (cp)
len = cp - buf;
if (len == sizeof enabled - 1
&& strncmp(buf, enabled, sizeof enabled - 1) == 0)
device_set_wakeup_enable(dev, 1);
else if (len == sizeof disabled - 1
&& strncmp(buf, disabled, sizeof disabled - 1) == 0)
device_set_wakeup_enable(dev, 0);
else
return -EINVAL;
return n;
}
static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
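/*
* The wakeup_* statistics below are read from the device's wakeup source
* under dev->power.lock; if no wakeup source is registered, only an empty
* line is printed.
*/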
static ssize_t wakeup_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long count = 0;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
count = dev->power.wakeup->event_count;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
static ssize_t wakeup_active_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long count = 0;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
count = dev->power.wakeup->active_count;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
static ssize_t wakeup_hit_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long count = 0;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
count = dev->power.wakeup->hit_count;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL);
static ssize_t wakeup_active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned int active = 0;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
active = dev->power.wakeup->active;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
}
static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
static ssize_t wakeup_total_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
s64 msec = 0;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
msec = ktime_to_ms(dev->power.wakeup->total_time);
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
static ssize_t wakeup_max_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
s64 msec = 0;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
msec = ktime_to_ms(dev->power.wakeup->max_time);
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
static ssize_t wakeup_last_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
s64 msec = 0;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
msec = ktime_to_ms(dev->power.wakeup->last_time);
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_ADVANCED_DEBUG
#ifdef CONFIG_PM_RUNTIME
static ssize_t rtpm_usagecount_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static ssize_t rtpm_children_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev->power.ignore_children ?
0 : atomic_read(&dev->power.child_count));
}
static ssize_t rtpm_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (dev->power.disable_depth && !dev->power.runtime_auto)
return sprintf(buf, "disabled & forbidden\n");
else if (dev->power.disable_depth)
return sprintf(buf, "disabled\n");
else if (!dev->power.runtime_auto)
return sprintf(buf, "forbidden\n");
return sprintf(buf, "enabled\n");
}
static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
#endif
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%s\n",
device_async_suspend_enabled(dev) ? enabled : disabled);
}
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
char *cp;
int len = n;
cp = memchr(buf, '\n', n);
if (cp)
len = cp - buf;
if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0)
device_enable_async_suspend(dev);
else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0)
device_disable_async_suspend(dev);
else
return -EINVAL;
return n;
}
static DEVICE_ATTR(async, 0644, async_show, async_store);
#endif /* CONFIG_PM_ADVANCED_DEBUG */
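/*
* The attribute groups below all share the "power" sysfs directory
* (power_group_name); the optional groups are merged into it when the
* device is registered, depending on its capabilities.
*/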
static struct attribute *power_attrs[] = {
#ifdef CONFIG_PM_ADVANCED_DEBUG
#ifdef CONFIG_PM_SLEEP
&dev_attr_async.attr,
#endif
#ifdef CONFIG_PM_RUNTIME
&dev_attr_runtime_status.attr,
&dev_attr_runtime_usage.attr,
&dev_attr_runtime_active_kids.attr,
&dev_attr_runtime_enabled.attr,
#endif
#endif /* CONFIG_PM_ADVANCED_DEBUG */
NULL,
};
static struct attribute_group pm_attr_group = {
.name = power_group_name,
.attrs = power_attrs,
};
static struct attribute *wakeup_attrs[] = {
#ifdef CONFIG_PM_SLEEP
&dev_attr_wakeup.attr,
&dev_attr_wakeup_count.attr,
&dev_attr_wakeup_active_count.attr,
&dev_attr_wakeup_hit_count.attr,
&dev_attr_wakeup_active.attr,
&dev_attr_wakeup_total_time_ms.attr,
&dev_attr_wakeup_max_time_ms.attr,
&dev_attr_wakeup_last_time_ms.attr,
#endif
NULL,
};
static struct attribute_group pm_wakeup_attr_group = {
.name = power_group_name,
.attrs = wakeup_attrs,
};
static struct attribute *runtime_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
#ifndef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_runtime_status.attr,
#endif
&dev_attr_control.attr,
&dev_attr_runtime_suspended_time.attr,
&dev_attr_runtime_active_time.attr,
&dev_attr_autosuspend_delay_ms.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
static struct attribute_group pm_runtime_attr_group = {
.name = power_group_name,
.attrs = runtime_attrs,
};
static struct attribute *pm_qos_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_resume_latency_us.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
static struct attribute_group pm_qos_attr_group = {
.name = power_group_name,
.attrs = pm_qos_attrs,
};
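/*
* dpm_sysfs_add - create the power/ directory for a device and merge in
* the runtime PM and wakeup groups when they apply. On failure the base
* group is removed again so no partial directory is left behind.
*/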
int dpm_sysfs_add(struct device *dev)
{
int rc;
rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
if (rc)
return rc;
if (pm_runtime_callbacks_present(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
if (rc)
goto err_out;
}
if (device_can_wakeup(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
if (rc) {
if (pm_runtime_callbacks_present(dev))
sysfs_unmerge_group(&dev->kobj,
&pm_runtime_attr_group);
goto err_out;
}
}
return 0;
err_out:
sysfs_remove_group(&dev->kobj, &pm_attr_group);
return rc;
}
int wakeup_sysfs_add(struct device *dev)
{
return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
}
void wakeup_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
int pm_qos_sysfs_add(struct device *dev)
{
return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
}
void pm_qos_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
}
void rpm_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}
void dpm_sysfs_remove(struct device *dev)
{
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group);
}
| gpl-2.0 |
zjh3123629/linux-3.10.28 | arch/arm/mach-imx/clk-imx51-imx53.c | 2002 | 32150 | /*
* Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/err.h>
#include "crm-regs-imx5.h"
#include "clk.h"
#include "common.h"
#include "hardware.h"
/* Low-power Audio Playback Mode clock */
static const char *lp_apm_sel[] = { "osc", };
/* This is used multiple times */
static const char *standard_pll_sel[] = { "pll1_sw", "pll2_sw", "pll3_sw", "lp_apm", };
static const char *periph_apm_sel[] = { "pll1_sw", "pll3_sw", "lp_apm", };
static const char *main_bus_sel[] = { "pll2_sw", "periph_apm", };
static const char *per_lp_apm_sel[] = { "main_bus", "lp_apm", };
static const char *per_root_sel[] = { "per_podf", "ipg", };
static const char *esdhc_c_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
static const char *esdhc_d_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
static const char *ssi_apm_sels[] = { "ckih1", "lp_apm", "ckih2", };
static const char *ssi_clk_sels[] = { "pll1_sw", "pll2_sw", "pll3_sw", "ssi_apm", };
static const char *ssi3_clk_sels[] = { "ssi1_root_gate", "ssi2_root_gate", };
static const char *ssi_ext1_com_sels[] = { "ssi_ext1_podf", "ssi1_root_gate", };
static const char *ssi_ext2_com_sels[] = { "ssi_ext2_podf", "ssi2_root_gate", };
static const char *emi_slow_sel[] = { "main_bus", "ahb", };
static const char *usb_phy_sel_str[] = { "osc", "usb_phy_podf", };
static const char *mx51_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "tve_di", };
static const char *mx53_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "di_pll4_podf", "dummy", "ldb_di0_gate", };
static const char *mx53_ldb_di0_sel[] = { "pll3_sw", "pll4_sw", };
static const char *mx51_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", };
static const char *mx53_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", "ldb_di1_gate", };
static const char *mx53_ldb_di1_sel[] = { "pll3_sw", "pll4_sw", };
static const char *mx51_tve_ext_sel[] = { "osc", "ckih1", };
static const char *mx53_tve_ext_sel[] = { "pll4_sw", "ckih1", };
static const char *mx51_tve_sel[] = { "tve_pred", "tve_ext_sel", };
static const char *ipu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
static const char *gpu3d_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb" };
static const char *gpu2d_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb" };
static const char *vpu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
static const char *mx53_can_sel[] = { "ipg", "ckih1", "ckih2", "lp_apm", };
static const char *mx53_cko1_sel[] = {
"cpu_podf", "pll1_sw", "pll2_sw", "pll3_sw",
"emi_slow_podf", "pll4_sw", "nfc_podf", "dummy",
"di_pred", "dummy", "dummy", "ahb",
"ipg", "per_root", "ckil", "dummy",};
static const char *mx53_cko2_sel[] = {
"dummy"/* dptc_core */, "dummy"/* dptc_perich */,
"dummy", "esdhc_a_podf",
"usboh3_podf", "dummy"/* wrck_clk_root */,
"ecspi_podf", "dummy"/* pll1_ref_clk */,
"esdhc_b_podf", "dummy"/* ddr_clk_root */,
"dummy"/* arm_axi_clk_root */, "dummy"/* usb_phy_out */,
"vpu_sel", "ipu_sel",
"osc", "ckih1",
"dummy", "esdhc_c_sel",
"ssi1_root_podf", "ssi2_root_podf",
"dummy", "dummy",
"dummy"/* lpsr_clk_root */, "dummy"/* pgc_clk_root */,
"dummy"/* tve_out */, "usb_phy_sel",
"tve_sel", "lp_apm",
"uart_root", "dummy"/* spdif0_clk_root */,
"dummy", "dummy", };
enum imx5_clks {
dummy, ckil, osc, ckih1, ckih2, ahb, ipg, axi_a, axi_b, uart_pred,
uart_root, esdhc_a_pred, esdhc_b_pred, esdhc_c_s, esdhc_d_s,
emi_sel, emi_slow_podf, nfc_podf, ecspi_pred, ecspi_podf, usboh3_pred,
usboh3_podf, usb_phy_pred, usb_phy_podf, cpu_podf, di_pred, tve_di_unused,
tve_s, uart1_ipg_gate, uart1_per_gate, uart2_ipg_gate,
uart2_per_gate, uart3_ipg_gate, uart3_per_gate, i2c1_gate, i2c2_gate,
gpt_ipg_gate, pwm1_ipg_gate, pwm1_hf_gate, pwm2_ipg_gate, pwm2_hf_gate,
gpt_hf_gate, fec_gate, usboh3_per_gate, esdhc1_ipg_gate, esdhc2_ipg_gate,
esdhc3_ipg_gate, esdhc4_ipg_gate, ssi1_ipg_gate, ssi2_ipg_gate,
ssi3_ipg_gate, ecspi1_ipg_gate, ecspi1_per_gate, ecspi2_ipg_gate,
ecspi2_per_gate, cspi_ipg_gate, sdma_gate, emi_slow_gate, ipu_s,
ipu_gate, nfc_gate, ipu_di1_gate, vpu_s, vpu_gate,
vpu_reference_gate, uart4_ipg_gate, uart4_per_gate, uart5_ipg_gate,
uart5_per_gate, tve_gate, tve_pred, esdhc1_per_gate, esdhc2_per_gate,
esdhc3_per_gate, esdhc4_per_gate, usb_phy_gate, hsi2c_gate,
mipi_hsc1_gate, mipi_hsc2_gate, mipi_esc_gate, mipi_hsp_gate,
ldb_di1_div_3_5, ldb_di1_div, ldb_di0_div_3_5, ldb_di0_div,
ldb_di1_gate, can2_serial_gate, can2_ipg_gate, i2c3_gate, lp_apm,
periph_apm, main_bus, ahb_max, aips_tz1, aips_tz2, tmax1, tmax2,
tmax3, spba, uart_sel, esdhc_a_sel, esdhc_b_sel, esdhc_a_podf,
esdhc_b_podf, ecspi_sel, usboh3_sel, usb_phy_sel, iim_gate,
usboh3_gate, emi_fast_gate, ipu_di0_gate, gpc_dvfs, pll1_sw, pll2_sw,
pll3_sw, ipu_di0_sel, ipu_di1_sel, tve_ext_sel, mx51_mipi, pll4_sw,
ldb_di1_sel, di_pll4_podf, ldb_di0_sel, ldb_di0_gate, usb_phy1_gate,
usb_phy2_gate, per_lp_apm, per_pred1, per_pred2, per_podf, per_root,
ssi_apm, ssi1_root_sel, ssi2_root_sel, ssi3_root_sel, ssi_ext1_sel,
ssi_ext2_sel, ssi_ext1_com_sel, ssi_ext2_com_sel, ssi1_root_pred,
ssi1_root_podf, ssi2_root_pred, ssi2_root_podf, ssi_ext1_pred,
ssi_ext1_podf, ssi_ext2_pred, ssi_ext2_podf, ssi1_root_gate,
ssi2_root_gate, ssi3_root_gate, ssi_ext1_gate, ssi_ext2_gate,
epit1_ipg_gate, epit1_hf_gate, epit2_ipg_gate, epit2_hf_gate,
can_sel, can1_serial_gate, can1_ipg_gate,
owire_gate, gpu3d_s, gpu2d_s, gpu3d_gate, gpu2d_gate, garb_gate,
cko1_sel, cko1_podf, cko1,
cko2_sel, cko2_podf, cko2,
srtc_gate, pata_gate,
clk_max
};
static struct clk *clk[clk_max];
static struct clk_onecell_data clk_data;
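/*
* Register the clocks shared by i.MX51 and i.MX53: fixed input clocks,
* bus dividers and peripheral gates, plus the clkdev lookups for non-DT
* users. The SoC-specific init functions register the PLLs and the
* per-SoC muxes before calling this.
*/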
static void __init mx5_clocks_common_init(unsigned long rate_ckil,
unsigned long rate_osc, unsigned long rate_ckih1,
unsigned long rate_ckih2)
{
int i;
clk[dummy] = imx_clk_fixed("dummy", 0);
clk[ckil] = imx_clk_fixed("ckil", rate_ckil);
clk[osc] = imx_clk_fixed("osc", rate_osc);
clk[ckih1] = imx_clk_fixed("ckih1", rate_ckih1);
clk[ckih2] = imx_clk_fixed("ckih2", rate_ckih2);
clk[lp_apm] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1,
lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
clk[periph_apm] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
main_bus_sel, ARRAY_SIZE(main_bus_sel));
clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1,
per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
per_root_sel, ARRAY_SIZE(per_root_sel));
clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
clk[aips_tz1] = imx_clk_gate2("aips_tz1", "ahb", MXC_CCM_CCGR0, 24);
clk[aips_tz2] = imx_clk_gate2("aips_tz2", "ahb", MXC_CCM_CCGR0, 26);
clk[tmax1] = imx_clk_gate2("tmax1", "ahb", MXC_CCM_CCGR1, 0);
clk[tmax2] = imx_clk_gate2("tmax2", "ahb", MXC_CCM_CCGR1, 2);
clk[tmax3] = imx_clk_gate2("tmax3", "ahb", MXC_CCM_CCGR1, 4);
clk[spba] = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
clk[ipg] = imx_clk_divider("ipg", "ahb", MXC_CCM_CBCDR, 8, 2);
clk[axi_a] = imx_clk_divider("axi_a", "main_bus", MXC_CCM_CBCDR, 16, 3);
clk[axi_b] = imx_clk_divider("axi_b", "main_bus", MXC_CCM_CBCDR, 19, 3);
clk[uart_sel] = imx_clk_mux("uart_sel", MXC_CCM_CSCMR1, 24, 2,
standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
clk[uart_pred] = imx_clk_divider("uart_pred", "uart_sel", MXC_CCM_CSCDR1, 3, 3);
clk[uart_root] = imx_clk_divider("uart_root", "uart_pred", MXC_CCM_CSCDR1, 0, 3);
clk[esdhc_a_sel] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
clk[esdhc_b_sel] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
clk[esdhc_a_pred] = imx_clk_divider("esdhc_a_pred", "esdhc_a_sel", MXC_CCM_CSCDR1, 16, 3);
clk[esdhc_a_podf] = imx_clk_divider("esdhc_a_podf", "esdhc_a_pred", MXC_CCM_CSCDR1, 11, 3);
clk[esdhc_b_pred] = imx_clk_divider("esdhc_b_pred", "esdhc_b_sel", MXC_CCM_CSCDR1, 22, 3);
clk[esdhc_b_podf] = imx_clk_divider("esdhc_b_podf", "esdhc_b_pred", MXC_CCM_CSCDR1, 19, 3);
clk[esdhc_c_s] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
clk[esdhc_d_s] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
clk[emi_sel] = imx_clk_mux("emi_sel", MXC_CCM_CBCDR, 26, 1,
emi_slow_sel, ARRAY_SIZE(emi_slow_sel));
clk[emi_slow_podf] = imx_clk_divider("emi_slow_podf", "emi_sel", MXC_CCM_CBCDR, 22, 3);
clk[nfc_podf] = imx_clk_divider("nfc_podf", "emi_slow_podf", MXC_CCM_CBCDR, 13, 3);
clk[ecspi_sel] = imx_clk_mux("ecspi_sel", MXC_CCM_CSCMR1, 4, 2,
standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
clk[ecspi_pred] = imx_clk_divider("ecspi_pred", "ecspi_sel", MXC_CCM_CSCDR2, 25, 3);
clk[ecspi_podf] = imx_clk_divider("ecspi_podf", "ecspi_pred", MXC_CCM_CSCDR2, 19, 6);
clk[usboh3_sel] = imx_clk_mux("usboh3_sel", MXC_CCM_CSCMR1, 22, 2,
standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
clk[usboh3_pred] = imx_clk_divider("usboh3_pred", "usboh3_sel", MXC_CCM_CSCDR1, 8, 3);
clk[usboh3_podf] = imx_clk_divider("usboh3_podf", "usboh3_pred", MXC_CCM_CSCDR1, 6, 2);
clk[usb_phy_pred] = imx_clk_divider("usb_phy_pred", "pll3_sw", MXC_CCM_CDCDR, 3, 3);
clk[usb_phy_podf] = imx_clk_divider("usb_phy_podf", "usb_phy_pred", MXC_CCM_CDCDR, 0, 3);
clk[usb_phy_sel] = imx_clk_mux("usb_phy_sel", MXC_CCM_CSCMR1, 26, 1,
usb_phy_sel_str, ARRAY_SIZE(usb_phy_sel_str));
clk[cpu_podf] = imx_clk_divider("cpu_podf", "pll1_sw", MXC_CCM_CACRR, 0, 3);
clk[di_pred] = imx_clk_divider("di_pred", "pll3_sw", MXC_CCM_CDCDR, 6, 3);
clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", MXC_CCM_CCGR0, 30);
clk[uart1_ipg_gate] = imx_clk_gate2("uart1_ipg_gate", "ipg", MXC_CCM_CCGR1, 6);
clk[uart1_per_gate] = imx_clk_gate2("uart1_per_gate", "uart_root", MXC_CCM_CCGR1, 8);
clk[uart2_ipg_gate] = imx_clk_gate2("uart2_ipg_gate", "ipg", MXC_CCM_CCGR1, 10);
clk[uart2_per_gate] = imx_clk_gate2("uart2_per_gate", "uart_root", MXC_CCM_CCGR1, 12);
clk[uart3_ipg_gate] = imx_clk_gate2("uart3_ipg_gate", "ipg", MXC_CCM_CCGR1, 14);
clk[uart3_per_gate] = imx_clk_gate2("uart3_per_gate", "uart_root", MXC_CCM_CCGR1, 16);
clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per_root", MXC_CCM_CCGR1, 18);
clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per_root", MXC_CCM_CCGR1, 20);
clk[pwm1_ipg_gate] = imx_clk_gate2("pwm1_ipg_gate", "ipg", MXC_CCM_CCGR2, 10);
clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "per_root", MXC_CCM_CCGR2, 12);
clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "per_root", MXC_CCM_CCGR2, 16);
clk[gpt_ipg_gate] = imx_clk_gate2("gpt_ipg_gate", "ipg", MXC_CCM_CCGR2, 18);
clk[gpt_hf_gate] = imx_clk_gate2("gpt_hf_gate", "per_root", MXC_CCM_CCGR2, 20);
clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
clk[esdhc1_ipg_gate] = imx_clk_gate2("esdhc1_ipg_gate", "ipg", MXC_CCM_CCGR3, 0);
clk[esdhc2_ipg_gate] = imx_clk_gate2("esdhc2_ipg_gate", "ipg", MXC_CCM_CCGR3, 4);
clk[esdhc3_ipg_gate] = imx_clk_gate2("esdhc3_ipg_gate", "ipg", MXC_CCM_CCGR3, 8);
clk[esdhc4_ipg_gate] = imx_clk_gate2("esdhc4_ipg_gate", "ipg", MXC_CCM_CCGR3, 12);
clk[ssi1_ipg_gate] = imx_clk_gate2("ssi1_ipg_gate", "ipg", MXC_CCM_CCGR3, 16);
clk[ssi2_ipg_gate] = imx_clk_gate2("ssi2_ipg_gate", "ipg", MXC_CCM_CCGR3, 20);
clk[ssi3_ipg_gate] = imx_clk_gate2("ssi3_ipg_gate", "ipg", MXC_CCM_CCGR3, 24);
clk[ecspi1_ipg_gate] = imx_clk_gate2("ecspi1_ipg_gate", "ipg", MXC_CCM_CCGR4, 18);
clk[ecspi1_per_gate] = imx_clk_gate2("ecspi1_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 20);
clk[ecspi2_ipg_gate] = imx_clk_gate2("ecspi2_ipg_gate", "ipg", MXC_CCM_CCGR4, 22);
clk[ecspi2_per_gate] = imx_clk_gate2("ecspi2_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 24);
clk[cspi_ipg_gate] = imx_clk_gate2("cspi_ipg_gate", "ipg", MXC_CCM_CCGR4, 26);
clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ipg", MXC_CCM_CCGR4, 30);
clk[emi_fast_gate] = imx_clk_gate2("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14);
clk[emi_slow_gate] = imx_clk_gate2("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16);
clk[ipu_s] = imx_clk_mux("ipu_sel", MXC_CCM_CBCMR, 6, 2, ipu_sel, ARRAY_SIZE(ipu_sel));
clk[ipu_gate] = imx_clk_gate2("ipu_gate", "ipu_sel", MXC_CCM_CCGR5, 10);
clk[nfc_gate] = imx_clk_gate2("nfc_gate", "nfc_podf", MXC_CCM_CCGR5, 20);
clk[ipu_di0_gate] = imx_clk_gate2("ipu_di0_gate", "ipu_di0_sel", MXC_CCM_CCGR6, 10);
clk[ipu_di1_gate] = imx_clk_gate2("ipu_di1_gate", "ipu_di1_sel", MXC_CCM_CCGR6, 12);
clk[gpu3d_s] = imx_clk_mux("gpu3d_sel", MXC_CCM_CBCMR, 4, 2, gpu3d_sel, ARRAY_SIZE(gpu3d_sel));
clk[gpu2d_s] = imx_clk_mux("gpu2d_sel", MXC_CCM_CBCMR, 16, 2, gpu2d_sel, ARRAY_SIZE(gpu2d_sel));
clk[gpu3d_gate] = imx_clk_gate2("gpu3d_gate", "gpu3d_sel", MXC_CCM_CCGR5, 2);
clk[garb_gate] = imx_clk_gate2("garb_gate", "axi_a", MXC_CCM_CCGR5, 4);
clk[gpu2d_gate] = imx_clk_gate2("gpu2d_gate", "gpu2d_sel", MXC_CCM_CCGR6, 14);
clk[vpu_s] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
clk[vpu_gate] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
clk[vpu_reference_gate] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
clk[uart4_ipg_gate] = imx_clk_gate2("uart4_ipg_gate", "ipg", MXC_CCM_CCGR7, 8);
clk[uart4_per_gate] = imx_clk_gate2("uart4_per_gate", "uart_root", MXC_CCM_CCGR7, 10);
clk[uart5_ipg_gate] = imx_clk_gate2("uart5_ipg_gate", "ipg", MXC_CCM_CCGR7, 12);
clk[uart5_per_gate] = imx_clk_gate2("uart5_per_gate", "uart_root", MXC_CCM_CCGR7, 14);
clk[gpc_dvfs] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);
clk[ssi_apm] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
clk[ssi1_root_sel] = imx_clk_mux("ssi1_root_sel", MXC_CCM_CSCMR1, 14, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
clk[ssi2_root_sel] = imx_clk_mux("ssi2_root_sel", MXC_CCM_CSCMR1, 12, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
clk[ssi3_root_sel] = imx_clk_mux("ssi3_root_sel", MXC_CCM_CSCMR1, 11, 1, ssi3_clk_sels, ARRAY_SIZE(ssi3_clk_sels));
clk[ssi_ext1_sel] = imx_clk_mux("ssi_ext1_sel", MXC_CCM_CSCMR1, 28, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
clk[ssi_ext2_sel] = imx_clk_mux("ssi_ext2_sel", MXC_CCM_CSCMR1, 30, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
clk[ssi_ext1_com_sel] = imx_clk_mux("ssi_ext1_com_sel", MXC_CCM_CSCMR1, 0, 1, ssi_ext1_com_sels, ARRAY_SIZE(ssi_ext1_com_sels));
clk[ssi_ext2_com_sel] = imx_clk_mux("ssi_ext2_com_sel", MXC_CCM_CSCMR1, 1, 1, ssi_ext2_com_sels, ARRAY_SIZE(ssi_ext2_com_sels));
clk[ssi1_root_pred] = imx_clk_divider("ssi1_root_pred", "ssi1_root_sel", MXC_CCM_CS1CDR, 6, 3);
clk[ssi1_root_podf] = imx_clk_divider("ssi1_root_podf", "ssi1_root_pred", MXC_CCM_CS1CDR, 0, 6);
clk[ssi2_root_pred] = imx_clk_divider("ssi2_root_pred", "ssi2_root_sel", MXC_CCM_CS2CDR, 6, 3);
clk[ssi2_root_podf] = imx_clk_divider("ssi2_root_podf", "ssi2_root_pred", MXC_CCM_CS2CDR, 0, 6);
clk[ssi_ext1_pred] = imx_clk_divider("ssi_ext1_pred", "ssi_ext1_sel", MXC_CCM_CS1CDR, 22, 3);
clk[ssi_ext1_podf] = imx_clk_divider("ssi_ext1_podf", "ssi_ext1_pred", MXC_CCM_CS1CDR, 16, 6);
clk[ssi_ext2_pred] = imx_clk_divider("ssi_ext2_pred", "ssi_ext2_sel", MXC_CCM_CS2CDR, 22, 3);
clk[ssi_ext2_podf] = imx_clk_divider("ssi_ext2_podf", "ssi_ext2_pred", MXC_CCM_CS2CDR, 16, 6);
clk[ssi1_root_gate] = imx_clk_gate2("ssi1_root_gate", "ssi1_root_podf", MXC_CCM_CCGR3, 18);
clk[ssi2_root_gate] = imx_clk_gate2("ssi2_root_gate", "ssi2_root_podf", MXC_CCM_CCGR3, 22);
clk[ssi3_root_gate] = imx_clk_gate2("ssi3_root_gate", "ssi3_root_sel", MXC_CCM_CCGR3, 26);
clk[ssi_ext1_gate] = imx_clk_gate2("ssi_ext1_gate", "ssi_ext1_com_sel", MXC_CCM_CCGR3, 28);
clk[ssi_ext2_gate] = imx_clk_gate2("ssi_ext2_gate", "ssi_ext2_com_sel", MXC_CCM_CCGR3, 30);
clk[epit1_ipg_gate] = imx_clk_gate2("epit1_ipg_gate", "ipg", MXC_CCM_CCGR2, 2);
clk[epit1_hf_gate] = imx_clk_gate2("epit1_hf_gate", "per_root", MXC_CCM_CCGR2, 4);
clk[epit2_ipg_gate] = imx_clk_gate2("epit2_ipg_gate", "ipg", MXC_CCM_CCGR2, 6);
clk[epit2_hf_gate] = imx_clk_gate2("epit2_hf_gate", "per_root", MXC_CCM_CCGR2, 8);
clk[owire_gate] = imx_clk_gate2("owire_gate", "per_root", MXC_CCM_CCGR2, 22);
clk[srtc_gate] = imx_clk_gate2("srtc_gate", "per_root", MXC_CCM_CCGR4, 28);
clk[pata_gate] = imx_clk_gate2("pata_gate", "ipg", MXC_CCM_CCGR4, 0);
for (i = 0; i < ARRAY_SIZE(clk); i++)
if (IS_ERR(clk[i]))
pr_err("i.MX5 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
clk_register_clkdev(clk[gpt_hf_gate], "per", "imx-gpt.0");
clk_register_clkdev(clk[gpt_ipg_gate], "ipg", "imx-gpt.0");
clk_register_clkdev(clk[uart1_per_gate], "per", "imx21-uart.0");
clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
clk_register_clkdev(clk[uart2_per_gate], "per", "imx21-uart.1");
clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
clk_register_clkdev(clk[uart3_per_gate], "per", "imx21-uart.2");
clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
clk_register_clkdev(clk[uart4_per_gate], "per", "imx21-uart.3");
clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
clk_register_clkdev(clk[uart5_per_gate], "per", "imx21-uart.4");
clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
clk_register_clkdev(clk[ecspi1_per_gate], "per", "imx51-ecspi.0");
clk_register_clkdev(clk[ecspi1_ipg_gate], "ipg", "imx51-ecspi.0");
clk_register_clkdev(clk[ecspi2_per_gate], "per", "imx51-ecspi.1");
clk_register_clkdev(clk[ecspi2_ipg_gate], "ipg", "imx51-ecspi.1");
clk_register_clkdev(clk[cspi_ipg_gate], NULL, "imx35-cspi.2");
clk_register_clkdev(clk[pwm1_ipg_gate], "pwm", "mxc_pwm.0");
clk_register_clkdev(clk[pwm2_ipg_gate], "pwm", "mxc_pwm.1");
clk_register_clkdev(clk[i2c1_gate], NULL, "imx21-i2c.0");
clk_register_clkdev(clk[i2c2_gate], NULL, "imx21-i2c.1");
clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.0");
clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.0");
clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.0");
clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.1");
clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.1");
clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.1");
clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2");
clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2");
clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2");
clk_register_clkdev(clk[usboh3_per_gate], "per", "imx-udc-mx51");
clk_register_clkdev(clk[usboh3_gate], "ipg", "imx-udc-mx51");
clk_register_clkdev(clk[usboh3_gate], "ahb", "imx-udc-mx51");
clk_register_clkdev(clk[nfc_gate], NULL, "imx51-nand");
clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
clk_register_clkdev(clk[ssi_ext1_gate], "ssi_ext1", NULL);
clk_register_clkdev(clk[ssi_ext2_gate], "ssi_ext2", NULL);
clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
clk_register_clkdev(clk[cpu_podf], NULL, "cpufreq-cpu0.0");
clk_register_clkdev(clk[iim_gate], "iim", NULL);
clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
clk_register_clkdev(clk[dummy], NULL, "imx-keypad");
clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx-tve.0");
clk_register_clkdev(clk[gpc_dvfs], "gpc_dvfs", NULL);
clk_register_clkdev(clk[epit1_ipg_gate], "ipg", "imx-epit.0");
clk_register_clkdev(clk[epit1_hf_gate], "per", "imx-epit.0");
clk_register_clkdev(clk[epit2_ipg_gate], "ipg", "imx-epit.1");
clk_register_clkdev(clk[epit2_hf_gate], "per", "imx-epit.1");
/* Set SDHC parents to be PLL2 */
clk_set_parent(clk[esdhc_a_sel], clk[pll2_sw]);
clk_set_parent(clk[esdhc_b_sel], clk[pll2_sw]);
/* move usb phy clk to 24MHz */
clk_set_parent(clk[usb_phy_sel], clk[osc]);
clk_prepare_enable(clk[gpc_dvfs]);
clk_prepare_enable(clk[ahb_max]); /* esdhc3 */
clk_prepare_enable(clk[aips_tz1]);
clk_prepare_enable(clk[aips_tz2]); /* fec */
clk_prepare_enable(clk[spba]);
clk_prepare_enable(clk[emi_fast_gate]); /* fec */
clk_prepare_enable(clk[emi_slow_gate]); /* eim */
clk_prepare_enable(clk[mipi_hsc1_gate]);
clk_prepare_enable(clk[mipi_hsc2_gate]);
clk_prepare_enable(clk[mipi_esc_gate]);
clk_prepare_enable(clk[mipi_hsp_gate]);
clk_prepare_enable(clk[tmax1]);
clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */
clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
}
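/*
* i.MX51 clock init: register the DPLLs and the MX51-specific display,
* TVE, eSDHC and MIPI clocks, expose them as a DT clock provider, then
* run the common init. Also applies the CCDR[18]/CLPCR[23] setting
* explained in the comment near the end of the function.
*/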
int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
unsigned long rate_ckih1, unsigned long rate_ckih2)
{
int i;
u32 val;
struct device_node *np;
clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX51_DPLL1_BASE);
clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX51_DPLL2_BASE);
clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX51_DPLL3_BASE);
clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel));
clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel));
clk[tve_ext_sel] = imx_clk_mux_flags("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
mx51_tve_ext_sel, ARRAY_SIZE(mx51_tve_ext_sel), CLK_SET_RATE_PARENT);
clk[tve_s] = imx_clk_mux("tve_sel", MXC_CCM_CSCMR1, 7, 1,
mx51_tve_sel, ARRAY_SIZE(mx51_tve_sel));
clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30);
clk[tve_pred] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3);
clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6);
clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10);
clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
clk[usb_phy_gate] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
clk[hsi2c_gate] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
clk[mipi_hsc1_gate] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
clk[mipi_hsc2_gate] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
clk[mipi_esc_gate] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
clk[mipi_hsp_gate] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);
for (i = 0; i < ARRAY_SIZE(clk); i++)
if (IS_ERR(clk[i]))
pr_err("i.MX51 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
np = of_find_compatible_node(NULL, NULL, "fsl,imx51-ccm");
clk_data.clks = clk;
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
clk_register_clkdev(clk[hsi2c_gate], NULL, "imx21-i2c.2");
clk_register_clkdev(clk[mx51_mipi], "mipi_hsp", NULL);
clk_register_clkdev(clk[vpu_gate], NULL, "imx51-vpu.0");
clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
clk_register_clkdev(clk[usb_phy_gate], "phy", "mxc-ehci.0");
clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx51.0");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.0");
clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx51.0");
clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx51.1");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.1");
clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx51.1");
clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx51.2");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.2");
clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx51.2");
clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx51.3");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.3");
clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx51.3");
/* set the usboh3 parent to pll2_sw */
clk_set_parent(clk[usboh3_sel], clk[pll2_sw]);
/* set SDHC root clock to 166.25 MHz */
clk_set_rate(clk[esdhc_a_podf], 166250000);
clk_set_rate(clk[esdhc_b_podf], 166250000);
/* System timer */
mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), MX51_INT_GPT);
clk_prepare_enable(clk[iim_gate]);
imx_print_silicon_rev("i.MX51", mx51_revision());
clk_disable_unprepare(clk[iim_gate]);
/*
* Reference Manual says: Functionality of CCDR[18] and CLPCR[23] is no
* longer supported. Set to one for better power saving.
*
* The effect of not setting these bits is that MIPI clocks can't be
* enabled without the IPU clock being enabled as well.
*/
val = readl(MXC_CCM_CCDR);
val |= 1 << 18;
writel(val, MXC_CCM_CCDR);
val = readl(MXC_CCM_CLPCR);
val |= 1 << 23;
writel(val, MXC_CCM_CLPCR);
return 0;
}
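/*
* i.MX53 clock init: adds PLL4, the LDB display dividers, CAN, the extra
* eSDHC routing and the CKO1/CKO2 clock outputs on top of the common
* clock tree.
*/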
int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
unsigned long rate_ckih1, unsigned long rate_ckih2)
{
int i;
unsigned long r;
struct device_node *np;
clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX53_DPLL1_BASE);
clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX53_DPLL2_BASE);
clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX53_DPLL3_BASE);
clk[pll4_sw] = imx_clk_pllv2("pll4_sw", "osc", MX53_DPLL4_BASE);
clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
clk[ldb_di1_div] = imx_clk_divider_flags("ldb_di1_div", "ldb_di1_div_3_5", MXC_CCM_CSCMR2, 11, 1, 0);
clk[ldb_di1_sel] = imx_clk_mux_flags("ldb_di1_sel", MXC_CCM_CSCMR2, 9, 1,
mx53_ldb_di1_sel, ARRAY_SIZE(mx53_ldb_di1_sel), CLK_SET_RATE_PARENT);
clk[di_pll4_podf] = imx_clk_divider("di_pll4_podf", "pll4_sw", MXC_CCM_CDCDR, 16, 3);
clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
clk[ldb_di0_div] = imx_clk_divider_flags("ldb_di0_div", "ldb_di0_div_3_5", MXC_CCM_CSCMR2, 10, 1, 0);
clk[ldb_di0_sel] = imx_clk_mux_flags("ldb_di0_sel", MXC_CCM_CSCMR2, 8, 1,
mx53_ldb_di0_sel, ARRAY_SIZE(mx53_ldb_di0_sel), CLK_SET_RATE_PARENT);
clk[ldb_di0_gate] = imx_clk_gate2("ldb_di0_gate", "ldb_di0_div", MXC_CCM_CCGR6, 28);
clk[ldb_di1_gate] = imx_clk_gate2("ldb_di1_gate", "ldb_di1_div", MXC_CCM_CCGR6, 30);
clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
mx53_ipu_di0_sel, ARRAY_SIZE(mx53_ipu_di0_sel));
clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
mx53_ipu_di1_sel, ARRAY_SIZE(mx53_ipu_di1_sel));
clk[tve_ext_sel] = imx_clk_mux_flags("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel), CLK_SET_RATE_PARENT);
clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30);
clk[tve_pred] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3);
clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
clk[usb_phy1_gate] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
clk[usb_phy2_gate] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
clk[can_sel] = imx_clk_mux("can_sel", MXC_CCM_CSCMR2, 6, 2,
mx53_can_sel, ARRAY_SIZE(mx53_can_sel));
clk[can1_serial_gate] = imx_clk_gate2("can1_serial_gate", "can_sel", MXC_CCM_CCGR6, 22);
clk[can1_ipg_gate] = imx_clk_gate2("can1_ipg_gate", "ipg", MXC_CCM_CCGR6, 20);
clk[can2_serial_gate] = imx_clk_gate2("can2_serial_gate", "can_sel", MXC_CCM_CCGR4, 8);
clk[can2_ipg_gate] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 6);
clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
clk[cko1_sel] = imx_clk_mux("cko1_sel", MXC_CCM_CCOSR, 0, 4,
mx53_cko1_sel, ARRAY_SIZE(mx53_cko1_sel));
clk[cko1_podf] = imx_clk_divider("cko1_podf", "cko1_sel", MXC_CCM_CCOSR, 4, 3);
clk[cko1] = imx_clk_gate2("cko1", "cko1_podf", MXC_CCM_CCOSR, 7);
clk[cko2_sel] = imx_clk_mux("cko2_sel", MXC_CCM_CCOSR, 16, 5,
mx53_cko2_sel, ARRAY_SIZE(mx53_cko2_sel));
clk[cko2_podf] = imx_clk_divider("cko2_podf", "cko2_sel", MXC_CCM_CCOSR, 21, 3);
clk[cko2] = imx_clk_gate2("cko2", "cko2_podf", MXC_CCM_CCOSR, 24);
for (i = 0; i < ARRAY_SIZE(clk); i++)
if (IS_ERR(clk[i]))
pr_err("i.MX53 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
np = of_find_compatible_node(NULL, NULL, "fsl,imx53-ccm");
clk_data.clks = clk;
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
clk_register_clkdev(clk[vpu_gate], NULL, "imx53-vpu.0");
clk_register_clkdev(clk[i2c3_gate], NULL, "imx21-i2c.2");
clk_register_clkdev(clk[fec_gate], NULL, "imx25-fec.0");
clk_register_clkdev(clk[usb_phy1_gate], "usb_phy1", "mxc-ehci.0");
clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx53.0");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.0");
clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx53.0");
clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx53.1");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.1");
clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx53.1");
clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx53.2");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.2");
clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx53.2");
clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx53.3");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.3");
clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx53.3");
/* set SDHC root clock to 200 MHz */
clk_set_rate(clk[esdhc_a_podf], 200000000);
clk_set_rate(clk[esdhc_b_podf], 200000000);
/* System timer */
mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT);
clk_prepare_enable(clk[iim_gate]);
imx_print_silicon_rev("i.MX53", mx53_revision());
clk_disable_unprepare(clk[iim_gate]);
r = clk_round_rate(clk[usboh3_per_gate], 54000000);
clk_set_rate(clk[usboh3_per_gate], r);
return 0;
}
#ifdef CONFIG_OF
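/*
* On DT boots the four fixed input clock rates (ckil, osc, ckih1, ckih2)
* are taken from the "fixed-clock" nodes instead of being passed in by
* board code.
*/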
static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
unsigned long *ckih1, unsigned long *ckih2)
{
struct device_node *np;
/* retrieve the frequency of fixed clocks from device tree */
for_each_compatible_node(np, NULL, "fixed-clock") {
u32 rate;
if (of_property_read_u32(np, "clock-frequency", &rate))
continue;
if (of_device_is_compatible(np, "fsl,imx-ckil"))
*ckil = rate;
else if (of_device_is_compatible(np, "fsl,imx-osc"))
*osc = rate;
else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
*ckih1 = rate;
else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
*ckih2 = rate;
}
}
int __init mx51_clocks_init_dt(void)
{
unsigned long ckil, osc, ckih1, ckih2;
clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
return mx51_clocks_init(ckil, osc, ckih1, ckih2);
}
int __init mx53_clocks_init_dt(void)
{
unsigned long ckil, osc, ckih1, ckih2;
clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
return mx53_clocks_init(ckil, osc, ckih1, ckih2);
}
#endif
| gpl-2.0 |
clk_prepare_enable(clk[emi_slow_gate]); /* eim */
clk_prepare_enable(clk[mipi_hsc1_gate]);
clk_prepare_enable(clk[mipi_hsc2_gate]);
clk_prepare_enable(clk[mipi_esc_gate]);
clk_prepare_enable(clk[mipi_hsp_gate]);
clk_prepare_enable(clk[tmax1]);
clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */
clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
}
int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
unsigned long rate_ckih1, unsigned long rate_ckih2)
{
int i;
u32 val;
struct device_node *np;
clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX51_DPLL1_BASE);
clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX51_DPLL2_BASE);
clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX51_DPLL3_BASE);
clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel));
clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel));
clk[tve_ext_sel] = imx_clk_mux_flags("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
mx51_tve_ext_sel, ARRAY_SIZE(mx51_tve_ext_sel), CLK_SET_RATE_PARENT);
clk[tve_s] = imx_clk_mux("tve_sel", MXC_CCM_CSCMR1, 7, 1,
mx51_tve_sel, ARRAY_SIZE(mx51_tve_sel));
clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30);
clk[tve_pred] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3);
clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6);
clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10);
clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
clk[usb_phy_gate] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
clk[hsi2c_gate] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
clk[mipi_hsc1_gate] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
clk[mipi_hsc2_gate] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
clk[mipi_esc_gate] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
clk[mipi_hsp_gate] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);
for (i = 0; i < ARRAY_SIZE(clk); i++)
if (IS_ERR(clk[i]))
pr_err("i.MX51 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
np = of_find_compatible_node(NULL, NULL, "fsl,imx51-ccm");
clk_data.clks = clk;
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
clk_register_clkdev(clk[hsi2c_gate], NULL, "imx21-i2c.2");
clk_register_clkdev(clk[mx51_mipi], "mipi_hsp", NULL);
clk_register_clkdev(clk[vpu_gate], NULL, "imx51-vpu.0");
clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
clk_register_clkdev(clk[usb_phy_gate], "phy", "mxc-ehci.0");
clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx51.0");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.0");
clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx51.0");
clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx51.1");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.1");
clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx51.1");
clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx51.2");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.2");
clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx51.2");
clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx51.3");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.3");
clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx51.3");
/* set the usboh3 parent to pll2_sw */
clk_set_parent(clk[usboh3_sel], clk[pll2_sw]);
/* set SDHC root clock to 166.25 MHz */
clk_set_rate(clk[esdhc_a_podf], 166250000);
clk_set_rate(clk[esdhc_b_podf], 166250000);
/* System timer */
mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), MX51_INT_GPT);
clk_prepare_enable(clk[iim_gate]);
imx_print_silicon_rev("i.MX51", mx51_revision());
clk_disable_unprepare(clk[iim_gate]);
/*
* Reference Manual says: Functionality of CCDR[18] and CLPCR[23] is no
* longer supported. Set to one for better power saving.
*
* The effect of not setting these bits is that MIPI clocks can't be
* enabled without the IPU clock being enabled as well.
*/
val = readl(MXC_CCM_CCDR);
val |= 1 << 18;
writel(val, MXC_CCM_CCDR);
val = readl(MXC_CCM_CLPCR);
val |= 1 << 23;
writel(val, MXC_CCM_CLPCR);
return 0;
}
int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
unsigned long rate_ckih1, unsigned long rate_ckih2)
{
int i;
unsigned long r;
struct device_node *np;
clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX53_DPLL1_BASE);
clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX53_DPLL2_BASE);
clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX53_DPLL3_BASE);
clk[pll4_sw] = imx_clk_pllv2("pll4_sw", "osc", MX53_DPLL4_BASE);
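/*
 * Note (added, not from the original source): the fixed-factor clocks
 * below (mult 2, div 7) appear to model the LDB serializer's fixed /3.5
 * divider, and the 1-bit "ldb_diX_div" dividers that follow seem to add
 * an optional /2, giving the usual /3.5 or /7 choices for the LVDS
 * display clocks.
 */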
clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
clk[ldb_di1_div] = imx_clk_divider_flags("ldb_di1_div", "ldb_di1_div_3_5", MXC_CCM_CSCMR2, 11, 1, 0);
clk[ldb_di1_sel] = imx_clk_mux_flags("ldb_di1_sel", MXC_CCM_CSCMR2, 9, 1,
mx53_ldb_di1_sel, ARRAY_SIZE(mx53_ldb_di1_sel), CLK_SET_RATE_PARENT);
clk[di_pll4_podf] = imx_clk_divider("di_pll4_podf", "pll4_sw", MXC_CCM_CDCDR, 16, 3);
clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
clk[ldb_di0_div] = imx_clk_divider_flags("ldb_di0_div", "ldb_di0_div_3_5", MXC_CCM_CSCMR2, 10, 1, 0);
clk[ldb_di0_sel] = imx_clk_mux_flags("ldb_di0_sel", MXC_CCM_CSCMR2, 8, 1,
mx53_ldb_di0_sel, ARRAY_SIZE(mx53_ldb_di0_sel), CLK_SET_RATE_PARENT);
clk[ldb_di0_gate] = imx_clk_gate2("ldb_di0_gate", "ldb_di0_div", MXC_CCM_CCGR6, 28);
clk[ldb_di1_gate] = imx_clk_gate2("ldb_di1_gate", "ldb_di1_div", MXC_CCM_CCGR6, 30);
clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
mx53_ipu_di0_sel, ARRAY_SIZE(mx53_ipu_di0_sel));
clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
mx53_ipu_di1_sel, ARRAY_SIZE(mx53_ipu_di1_sel));
clk[tve_ext_sel] = imx_clk_mux_flags("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel), CLK_SET_RATE_PARENT);
clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30);
clk[tve_pred] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3);
clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
clk[usb_phy1_gate] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
clk[usb_phy2_gate] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
clk[can_sel] = imx_clk_mux("can_sel", MXC_CCM_CSCMR2, 6, 2,
mx53_can_sel, ARRAY_SIZE(mx53_can_sel));
clk[can1_serial_gate] = imx_clk_gate2("can1_serial_gate", "can_sel", MXC_CCM_CCGR6, 22);
clk[can1_ipg_gate] = imx_clk_gate2("can1_ipg_gate", "ipg", MXC_CCM_CCGR6, 20);
clk[can2_serial_gate] = imx_clk_gate2("can2_serial_gate", "can_sel", MXC_CCM_CCGR4, 8);
clk[can2_ipg_gate] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 6);
clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
clk[cko1_sel] = imx_clk_mux("cko1_sel", MXC_CCM_CCOSR, 0, 4,
mx53_cko1_sel, ARRAY_SIZE(mx53_cko1_sel));
clk[cko1_podf] = imx_clk_divider("cko1_podf", "cko1_sel", MXC_CCM_CCOSR, 4, 3);
clk[cko1] = imx_clk_gate2("cko1", "cko1_podf", MXC_CCM_CCOSR, 7);
clk[cko2_sel] = imx_clk_mux("cko2_sel", MXC_CCM_CCOSR, 16, 5,
mx53_cko2_sel, ARRAY_SIZE(mx53_cko2_sel));
clk[cko2_podf] = imx_clk_divider("cko2_podf", "cko2_sel", MXC_CCM_CCOSR, 21, 3);
clk[cko2] = imx_clk_gate2("cko2", "cko2_podf", MXC_CCM_CCOSR, 24);
for (i = 0; i < ARRAY_SIZE(clk); i++)
if (IS_ERR(clk[i]))
pr_err("i.MX53 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
np = of_find_compatible_node(NULL, NULL, "fsl,imx53-ccm");
clk_data.clks = clk;
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
clk_register_clkdev(clk[vpu_gate], NULL, "imx53-vpu.0");
clk_register_clkdev(clk[i2c3_gate], NULL, "imx21-i2c.2");
clk_register_clkdev(clk[fec_gate], NULL, "imx25-fec.0");
clk_register_clkdev(clk[usb_phy1_gate], "usb_phy1", "mxc-ehci.0");
clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx53.0");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.0");
clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx53.0");
clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx53.1");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.1");
clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx53.1");
clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx53.2");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.2");
clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx53.2");
clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx53.3");
clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.3");
clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx53.3");
/* set SDHC root clock to 200 MHz */
clk_set_rate(clk[esdhc_a_podf], 200000000);
clk_set_rate(clk[esdhc_b_podf], 200000000);
/* System timer */
mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT);
clk_prepare_enable(clk[iim_gate]);
imx_print_silicon_rev("i.MX53", mx53_revision());
clk_disable_unprepare(clk[iim_gate]);
r = clk_round_rate(clk[usboh3_per_gate], 54000000);
clk_set_rate(clk[usboh3_per_gate], r);
return 0;
}
#ifdef CONFIG_OF
static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
unsigned long *ckih1, unsigned long *ckih2)
{
struct device_node *np;
/* retrieve the frequency of fixed clocks from device tree */
for_each_compatible_node(np, NULL, "fixed-clock") {
u32 rate;
if (of_property_read_u32(np, "clock-frequency", &rate))
continue;
if (of_device_is_compatible(np, "fsl,imx-ckil"))
*ckil = rate;
else if (of_device_is_compatible(np, "fsl,imx-osc"))
*osc = rate;
else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
*ckih1 = rate;
else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
*ckih2 = rate;
}
}
int __init mx51_clocks_init_dt(void)
{
unsigned long ckil, osc, ckih1, ckih2;
clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
return mx51_clocks_init(ckil, osc, ckih1, ckih2);
}
int __init mx53_clocks_init_dt(void)
{
unsigned long ckil, osc, ckih1, ckih2;
clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
return mx53_clocks_init(ckil, osc, ckih1, ckih2);
}
#endif
| gpl-2.0 |
asopov/linux-tpt-2.6.39 | drivers/usb/host/fsl-mph-dr-of.c | 3026 | 8182 | /*
* Setup platform devices needed by the Freescale multi-port host
* and/or dual-role USB controller modules based on the description
* in flat device tree.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
struct fsl_usb2_dev_data {
char *dr_mode; /* controller mode */
char *drivers[3]; /* drivers to instantiate for this mode */
enum fsl_usb2_operating_modes op_mode; /* operating mode */
};
struct fsl_usb2_dev_data dr_mode_data[] __devinitdata = {
{
.dr_mode = "host",
.drivers = { "fsl-ehci", NULL, NULL, },
.op_mode = FSL_USB2_DR_HOST,
},
{
.dr_mode = "otg",
.drivers = { "fsl-usb2-otg", "fsl-ehci", "fsl-usb2-udc", },
.op_mode = FSL_USB2_DR_OTG,
},
{
.dr_mode = "peripheral",
.drivers = { "fsl-usb2-udc", NULL, NULL, },
.op_mode = FSL_USB2_DR_DEVICE,
},
};
struct fsl_usb2_dev_data * __devinit get_dr_mode_data(struct device_node *np)
{
const unsigned char *prop;
int i;
prop = of_get_property(np, "dr_mode", NULL);
if (prop) {
for (i = 0; i < ARRAY_SIZE(dr_mode_data); i++) {
if (!strcmp(prop, dr_mode_data[i].dr_mode))
return &dr_mode_data[i];
}
}
pr_warn("%s: Invalid 'dr_mode' property, fallback to host mode\n",
np->full_name);
return &dr_mode_data[0]; /* mode not specified, use host */
}
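/*
 * Illustrative (hypothetical) device tree fragment showing the properties
 * consumed by get_dr_mode_data() above and by determine_usb_phy() and the
 * probe routine below; the node name, address and reg values are made up:
 *
 *	usb@22000 {
 *		compatible = "fsl-usb2-dr";
 *		reg = <0x22000 0x1000>;
 *		dr_mode = "otg";
 *		phy_type = "ulpi";
 *	};
 */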
static enum fsl_usb2_phy_modes __devinit determine_usb_phy(const char *phy_type)
{
if (!phy_type)
return FSL_USB2_PHY_NONE;
if (!strcasecmp(phy_type, "ulpi"))
return FSL_USB2_PHY_ULPI;
if (!strcasecmp(phy_type, "utmi"))
return FSL_USB2_PHY_UTMI;
if (!strcasecmp(phy_type, "utmi_wide"))
return FSL_USB2_PHY_UTMI_WIDE;
if (!strcasecmp(phy_type, "serial"))
return FSL_USB2_PHY_SERIAL;
return FSL_USB2_PHY_NONE;
}
struct platform_device * __devinit fsl_usb2_device_register(
struct platform_device *ofdev,
struct fsl_usb2_platform_data *pdata,
const char *name, int id)
{
struct platform_device *pdev;
const struct resource *res = ofdev->resource;
unsigned int num = ofdev->num_resources;
int retval;
pdev = platform_device_alloc(name, id);
if (!pdev) {
retval = -ENOMEM;
goto error;
}
pdev->dev.parent = &ofdev->dev;
pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
pdev->dev.dma_mask = &pdev->archdata.dma_mask;
*pdev->dev.dma_mask = *ofdev->dev.dma_mask;
retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
if (retval)
goto error;
if (num) {
retval = platform_device_add_resources(pdev, res, num);
if (retval)
goto error;
}
retval = platform_device_add(pdev);
if (retval)
goto error;
return pdev;
error:
platform_device_put(pdev);
return ERR_PTR(retval);
}
static const struct of_device_id fsl_usb2_mph_dr_of_match[];
static int __devinit fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct platform_device *usb_dev;
struct fsl_usb2_platform_data data, *pdata;
struct fsl_usb2_dev_data *dev_data;
const struct of_device_id *match;
const unsigned char *prop;
static unsigned int idx;
int i;
if (!of_device_is_available(np))
return -ENODEV;
match = of_match_device(fsl_usb2_mph_dr_of_match, &ofdev->dev);
if (!match)
return -ENODEV;
pdata = &data;
if (match->data)
memcpy(pdata, match->data, sizeof(data));
else
memset(pdata, 0, sizeof(data));
dev_data = get_dr_mode_data(np);
if (of_device_is_compatible(np, "fsl-usb2-mph")) {
if (of_get_property(np, "port0", NULL))
pdata->port_enables |= FSL_USB2_PORT0_ENABLED;
if (of_get_property(np, "port1", NULL))
pdata->port_enables |= FSL_USB2_PORT1_ENABLED;
pdata->operating_mode = FSL_USB2_MPH_HOST;
} else {
if (of_get_property(np, "fsl,invert-drvvbus", NULL))
pdata->invert_drvvbus = 1;
if (of_get_property(np, "fsl,invert-pwr-fault", NULL))
pdata->invert_pwr_fault = 1;
/* setup mode selected in the device tree */
pdata->operating_mode = dev_data->op_mode;
}
prop = of_get_property(np, "phy_type", NULL);
pdata->phy_mode = determine_usb_phy(prop);
for (i = 0; i < ARRAY_SIZE(dev_data->drivers); i++) {
if (!dev_data->drivers[i])
continue;
usb_dev = fsl_usb2_device_register(ofdev, pdata,
dev_data->drivers[i], idx);
if (IS_ERR(usb_dev)) {
dev_err(&ofdev->dev, "Can't register usb device\n");
return PTR_ERR(usb_dev);
}
}
idx++;
return 0;
}
static int __devexit __unregister_subdev(struct device *dev, void *d)
{
platform_device_unregister(to_platform_device(dev));
return 0;
}
static int __devexit fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
{
device_for_each_child(&ofdev->dev, NULL, __unregister_subdev);
return 0;
}
#ifdef CONFIG_PPC_MPC512x
#define USBGENCTRL 0x200 /* NOTE: big endian */
#define GC_WU_INT_CLR (1 << 5) /* Wakeup int clear */
#define GC_ULPI_SEL (1 << 4) /* ULPI i/f select (usb0 only) */
#define GC_PPP (1 << 3) /* Inv. Port Power Polarity */
#define GC_PFP (1 << 2) /* Inv. Power Fault Polarity */
#define GC_WU_ULPI_EN (1 << 1) /* Wakeup on ULPI event */
#define GC_WU_IE (1 << 1) /* Wakeup interrupt enable */
#define ISIPHYCTRL 0x204 /* NOTE: big endian */
#define PHYCTRL_PHYE (1 << 4) /* On-chip UTMI PHY enable */
#define PHYCTRL_BSENH (1 << 3) /* Bit Stuff Enable High */
#define PHYCTRL_BSEN (1 << 2) /* Bit Stuff Enable */
#define PHYCTRL_LSFE (1 << 1) /* Line State Filter Enable */
#define PHYCTRL_PXE (1 << 0) /* PHY oscillator enable */
int fsl_usb2_mpc5121_init(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
struct clk *clk;
char clk_name[10];
int base, clk_num;
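/*
 * Descriptive note (added here, not in the original source): the two
 * MPC5121 USB controllers sit at fixed MMIO offsets, so bits 12-15 of
 * the resource start address are enough to tell them apart and pick the
 * matching "usbN_clk" below.
 */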
base = pdev->resource->start & 0xf000;
if (base == 0x3000)
clk_num = 1;
else if (base == 0x4000)
clk_num = 2;
else
return -ENODEV;
snprintf(clk_name, sizeof(clk_name), "usb%d_clk", clk_num);
clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
return PTR_ERR(clk);
}
clk_enable(clk);
pdata->clk = clk;
if (pdata->phy_mode == FSL_USB2_PHY_UTMI_WIDE) {
u32 reg = 0;
if (pdata->invert_drvvbus)
reg |= GC_PPP;
if (pdata->invert_pwr_fault)
reg |= GC_PFP;
out_be32(pdata->regs + ISIPHYCTRL, PHYCTRL_PHYE | PHYCTRL_PXE);
out_be32(pdata->regs + USBGENCTRL, reg);
}
return 0;
}
static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
pdata->regs = NULL;
if (pdata->clk) {
clk_disable(pdata->clk);
clk_put(pdata->clk);
}
}
static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
.big_endian_desc = 1,
.big_endian_mmio = 1,
.es = 1,
.have_sysif_regs = 0,
.le_setup_buf = 1,
.init = fsl_usb2_mpc5121_init,
.exit = fsl_usb2_mpc5121_exit,
};
#endif /* CONFIG_PPC_MPC512x */
static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
.have_sysif_regs = 1,
};
static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
{ .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
{ .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
#ifdef CONFIG_PPC_MPC512x
{ .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
#endif
{},
};
static struct platform_driver fsl_usb2_mph_dr_driver = {
.driver = {
.name = "fsl-usb2-mph-dr",
.owner = THIS_MODULE,
.of_match_table = fsl_usb2_mph_dr_of_match,
},
.probe = fsl_usb2_mph_dr_of_probe,
.remove = __devexit_p(fsl_usb2_mph_dr_of_remove),
};
static int __init fsl_usb2_mph_dr_init(void)
{
return platform_driver_register(&fsl_usb2_mph_dr_driver);
}
module_init(fsl_usb2_mph_dr_init);
static void __exit fsl_usb2_mph_dr_exit(void)
{
platform_driver_unregister(&fsl_usb2_mph_dr_driver);
}
module_exit(fsl_usb2_mph_dr_exit);
MODULE_DESCRIPTION("FSL MPH DR OF devices driver");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
shimabde/android_kernel_samsung_galaxys2plus-common | net/x25/x25_proc.c | 4050 | 6463 | /*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.4 with seq_file support
*
* This module:
* This module is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* History
* 2002/10/06 Arnaldo Carvalho de Melo seq_file support
*/
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/x25.h>
#ifdef CONFIG_PROC_FS
static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_route_list_lock)
{
read_lock_bh(&x25_route_list_lock);
return seq_list_start_head(&x25_route_list, *pos);
}
static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_list_next(v, &x25_route_list, pos);
}
static void x25_seq_route_stop(struct seq_file *seq, void *v)
__releases(x25_route_list_lock)
{
read_unlock_bh(&x25_route_list_lock);
}
static int x25_seq_route_show(struct seq_file *seq, void *v)
{
struct x25_route *rt = list_entry(v, struct x25_route, node);
if (v == &x25_route_list) {
seq_puts(seq, "Address Digits Device\n");
goto out;
}
rt = v;
seq_printf(seq, "%-15s %-6d %-5s\n",
rt->address.x25_addr, rt->sigdigits,
rt->dev ? rt->dev->name : "???");
out:
return 0;
}
static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_list_lock)
{
read_lock_bh(&x25_list_lock);
return seq_hlist_start_head(&x25_list, *pos);
}
static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &x25_list, pos);
}
static void x25_seq_socket_stop(struct seq_file *seq, void *v)
__releases(x25_list_lock)
{
read_unlock_bh(&x25_list_lock);
}
static int x25_seq_socket_show(struct seq_file *seq, void *v)
{
struct sock *s;
struct x25_sock *x25;
struct net_device *dev;
const char *devname;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "dest_addr src_addr dev lci st vs vr "
"va t t2 t21 t22 t23 Snd-Q Rcv-Q inode\n");
goto out;
}
s = sk_entry(v);
x25 = x25_sk(s);
if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL)
devname = "???";
else
devname = x25->neighbour->dev->name;
seq_printf(seq, "%-10s %-10s %-5s %3.3X %d %d %d %d %3lu %3lu "
"%3lu %3lu %3lu %5d %5d %ld\n",
!x25->dest_addr.x25_addr[0] ? "*" : x25->dest_addr.x25_addr,
!x25->source_addr.x25_addr[0] ? "*" : x25->source_addr.x25_addr,
devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr,
x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ,
x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ,
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
out:
return 0;
}
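/*
 * Illustrative output of /proc/net/x25/socket as produced by the show
 * routine above; the addresses, timers and counters here are hypothetical:
 *
 * dest_addr  src_addr   dev   lci st vs vr va   t  t2 t21 t22 t23 Snd-Q Rcv-Q inode
 * 2345678901 *          eth0  001  3  1  1  1   0   3 200 180 180     0     0 12345
 */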
static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_forward_list_lock)
{
read_lock_bh(&x25_forward_list_lock);
return seq_list_start_head(&x25_forward_list, *pos);
}
static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_list_next(v, &x25_forward_list, pos);
}
static void x25_seq_forward_stop(struct seq_file *seq, void *v)
__releases(x25_forward_list_lock)
{
read_unlock_bh(&x25_forward_list_lock);
}
static int x25_seq_forward_show(struct seq_file *seq, void *v)
{
struct x25_forward *f = list_entry(v, struct x25_forward, node);
if (v == &x25_forward_list) {
seq_printf(seq, "lci dev1 dev2\n");
goto out;
}
f = v;
seq_printf(seq, "%d %-10s %-10s\n",
f->lci, f->dev1->name, f->dev2->name);
out:
return 0;
}
static const struct seq_operations x25_seq_route_ops = {
.start = x25_seq_route_start,
.next = x25_seq_route_next,
.stop = x25_seq_route_stop,
.show = x25_seq_route_show,
};
static const struct seq_operations x25_seq_socket_ops = {
.start = x25_seq_socket_start,
.next = x25_seq_socket_next,
.stop = x25_seq_socket_stop,
.show = x25_seq_socket_show,
};
static const struct seq_operations x25_seq_forward_ops = {
.start = x25_seq_forward_start,
.next = x25_seq_forward_next,
.stop = x25_seq_forward_stop,
.show = x25_seq_forward_show,
};
static int x25_seq_socket_open(struct inode *inode, struct file *file)
{
return seq_open(file, &x25_seq_socket_ops);
}
static int x25_seq_route_open(struct inode *inode, struct file *file)
{
return seq_open(file, &x25_seq_route_ops);
}
static int x25_seq_forward_open(struct inode *inode, struct file *file)
{
return seq_open(file, &x25_seq_forward_ops);
}
static const struct file_operations x25_seq_socket_fops = {
.owner = THIS_MODULE,
.open = x25_seq_socket_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations x25_seq_route_fops = {
.owner = THIS_MODULE,
.open = x25_seq_route_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations x25_seq_forward_fops = {
.owner = THIS_MODULE,
.open = x25_seq_forward_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static struct proc_dir_entry *x25_proc_dir;
int __init x25_proc_init(void)
{
struct proc_dir_entry *p;
int rc = -ENOMEM;
x25_proc_dir = proc_mkdir("x25", init_net.proc_net);
if (!x25_proc_dir)
goto out;
p = proc_create("route", S_IRUGO, x25_proc_dir, &x25_seq_route_fops);
if (!p)
goto out_route;
p = proc_create("socket", S_IRUGO, x25_proc_dir, &x25_seq_socket_fops);
if (!p)
goto out_socket;
p = proc_create("forward", S_IRUGO, x25_proc_dir,
&x25_seq_forward_fops);
if (!p)
goto out_forward;
rc = 0;
out:
return rc;
out_forward:
remove_proc_entry("socket", x25_proc_dir);
out_socket:
remove_proc_entry("route", x25_proc_dir);
out_route:
remove_proc_entry("x25", init_net.proc_net);
goto out;
}
void __exit x25_proc_exit(void)
{
remove_proc_entry("forward", x25_proc_dir);
remove_proc_entry("route", x25_proc_dir);
remove_proc_entry("socket", x25_proc_dir);
remove_proc_entry("x25", init_net.proc_net);
}
#else /* CONFIG_PROC_FS */
int __init x25_proc_init(void)
{
return 0;
}
void __exit x25_proc_exit(void)
{
}
#endif /* CONFIG_PROC_FS */
| gpl-2.0 |
qdk0901/kernel-exynos5410 | drivers/net/wireless/rtlwifi/rtl8192de/dm.c | 4818 | 47624 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "dm.h"
#include "fw.h"
#define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb
struct dig_t de_digtable;
static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
0x7f8001fe, /* 0, +6.0dB */
0x788001e2, /* 1, +5.5dB */
0x71c001c7, /* 2, +5.0dB */
0x6b8001ae, /* 3, +4.5dB */
0x65400195, /* 4, +4.0dB */
0x5fc0017f, /* 5, +3.5dB */
0x5a400169, /* 6, +3.0dB */
0x55400155, /* 7, +2.5dB */
0x50800142, /* 8, +2.0dB */
0x4c000130, /* 9, +1.5dB */
0x47c0011f, /* 10, +1.0dB */
0x43c0010f, /* 11, +0.5dB */
0x40000100, /* 12, +0dB */
0x3c8000f2, /* 13, -0.5dB */
0x390000e4, /* 14, -1.0dB */
0x35c000d7, /* 15, -1.5dB */
0x32c000cb, /* 16, -2.0dB */
0x300000c0, /* 17, -2.5dB */
0x2d4000b5, /* 18, -3.0dB */
0x2ac000ab, /* 19, -3.5dB */
0x288000a2, /* 20, -4.0dB */
0x26000098, /* 21, -4.5dB */
0x24000090, /* 22, -5.0dB */
0x22000088, /* 23, -5.5dB */
0x20000080, /* 24, -6.0dB */
0x1e400079, /* 25, -6.5dB */
0x1c800072, /* 26, -7.0dB */
0x1b00006c, /* 27, -7.5dB */
0x19800066, /* 28, -8.0dB */
0x18000060, /* 29, -8.5dB */
0x16c0005b, /* 30, -9.0dB */
0x15800056, /* 31, -9.5dB */
0x14400051, /* 32, -10.0dB */
0x1300004c, /* 33, -10.5dB */
0x12000048, /* 34, -11.0dB */
0x11000044, /* 35, -11.5dB */
0x10000040, /* 36, -12.0dB */
0x0f00003c, /* 37, -12.5dB */
0x0e400039, /* 38, -13.0dB */
0x0d800036, /* 39, -13.5dB */
0x0cc00033, /* 40, -14.0dB */
0x0c000030, /* 41, -14.5dB */
0x0b40002d, /* 42, -15.0dB */
};
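/*
 * Note (added, hedged): each ofdmswing_table entry packs the OFDM TX IQ
 * scaling coefficients for one 0.5 dB step. The power-tracking code below
 * compares the "D" element (BMASKOFDM_D) of the current register value
 * against these entries to locate the active index, then moves up or down
 * the table as the thermal reading drifts from the EEPROM calibration
 * value.
 */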
static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
{0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
{0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
{0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
{0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
{0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
{0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
{0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
{0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
{0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
{0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
{0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
{0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
{0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
{0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
{0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
{0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
{0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
{0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
{0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
{0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
{0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
};
static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
{0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
{0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
{0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
{0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
{0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
{0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
{0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
{0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
{0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
{0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
{0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
{0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
{0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
{0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
{0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
{0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
{0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
{0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
{0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
{0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
};
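/*
 * Note (added, hedged): each row above holds the eight CCK TX filter
 * coefficients for one 0.5 dB attenuation step; the channel-14 variant
 * zeroes the tail coefficients, presumably to meet the tighter spectral
 * mask on that channel. rtl92d_bandtype_2_4G() below matches the current
 * register contents against bytes [2..5] of these rows to recover the
 * active index.
 */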
static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
{
de_digtable.dig_enable_flag = true;
de_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
de_digtable.cur_igvalue = 0x20;
de_digtable.pre_igvalue = 0x0;
de_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
de_digtable.presta_connectstate = DIG_STA_DISCONNECT;
de_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
de_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
de_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
de_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
de_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
de_digtable.rx_gain_range_max = DM_DIG_FA_UPPER;
de_digtable.rx_gain_range_min = DM_DIG_FA_LOWER;
de_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
de_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
de_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
de_digtable.pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
de_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
de_digtable.large_fa_hit = 0;
de_digtable.recover_cnt = 0;
de_digtable.forbidden_igi = DM_DIG_FA_LOWER;
}
static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
u32 ret_value;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
unsigned long flag = 0;
/* hold ofdm counter */
rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */
ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, BMASKDWORD);
falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, BMASKDWORD);
falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, BMASKDWORD);
falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, BMASKDWORD);
falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
falsealm_cnt->cnt_rate_illegal +
falsealm_cnt->cnt_crc8_fail +
falsealm_cnt->cnt_mcs_fail +
falsealm_cnt->cnt_fast_fsync_fail +
falsealm_cnt->cnt_sb_search_fail;
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
/* hold cck counter */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, BMASKBYTE0);
falsealm_cnt->cnt_cck_fail = ret_value;
ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, BMASKBYTE3);
falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
} else {
falsealm_cnt->cnt_cck_fail = 0;
}
/* reset false alarm counter registers */
falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail +
falsealm_cnt->cnt_sb_search_fail +
falsealm_cnt->cnt_parity_fail +
falsealm_cnt->cnt_rate_illegal +
falsealm_cnt->cnt_crc8_fail +
falsealm_cnt->cnt_mcs_fail +
falsealm_cnt->cnt_cck_fail;
rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
/* update ofdm counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
/* update page C counter */
rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0);
/* update page D counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0);
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
/* reset cck counter */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
/* enable cck counter */
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
falsealm_cnt->cnt_fast_fsync_fail,
falsealm_cnt->cnt_sb_search_fail);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
falsealm_cnt->cnt_parity_fail,
falsealm_cnt->cnt_rate_illegal,
falsealm_cnt->cnt_crc8_fail,
falsealm_cnt->cnt_mcs_fail);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
falsealm_cnt->cnt_ofdm_fail,
falsealm_cnt->cnt_cck_fail,
falsealm_cnt->cnt_all);
}
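/*
 * Note (added): the cnt_all total computed above drives the initial-gain
 * adjustment in rtl92d_dm_dig() further down, which compares it against
 * the DM_DIG_FA_TH0/TH1/TH2 thresholds and against the 10000 "abnormal
 * false alarm" limit.
 */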
static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtlpriv);
/* Determine the minimum RSSI */
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
de_digtable.min_undecorated_pwdb_for_dm = 0;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
rtlpriv->dm.UNDEC_SM_PWDB);
} else {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"STA Default Port PWDB = 0x%x\n",
de_digtable.min_undecorated_pwdb_for_dm);
}
} else {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"AP Ext Port or disconnect PWDB = 0x%x\n",
de_digtable.min_undecorated_pwdb_for_dm);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
de_digtable.min_undecorated_pwdb_for_dm);
}
static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
unsigned long flag = 0;
if (de_digtable.cursta_connectctate == DIG_STA_CONNECT) {
if (de_digtable.pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
if (de_digtable.min_undecorated_pwdb_for_dm <= 25)
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_LOWRSSI;
else
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_HIGHRSSI;
} else {
if (de_digtable.min_undecorated_pwdb_for_dm <= 20)
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_LOWRSSI;
else
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_HIGHRSSI;
}
} else {
de_digtable.cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
}
if (de_digtable.pre_cck_pd_state != de_digtable.cur_cck_pd_state) {
if (de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
} else {
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
de_digtable.pre_cck_pd_state = de_digtable.cur_cck_pd_state;
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
de_digtable.cursta_connectctate == DIG_STA_CONNECT ?
"DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
"Low RSSI " : "High RSSI ");
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
}
void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
de_digtable.cur_igvalue, de_digtable.pre_igvalue,
de_digtable.backoff_val);
if (de_digtable.dig_enable_flag == false) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
de_digtable.pre_igvalue = 0x17;
return;
}
if (de_digtable.pre_igvalue != de_digtable.cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
de_digtable.cur_igvalue);
rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
de_digtable.cur_igvalue);
de_digtable.pre_igvalue = de_digtable.cur_igvalue;
}
}
static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
{
if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
(rtlpriv->mac80211.vendor == PEER_CISCO)) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
if (de_digtable.last_min_undecorated_pwdb_for_dm >= 50
&& de_digtable.min_undecorated_pwdb_for_dm < 50) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Early Mode Off\n");
} else if (de_digtable.last_min_undecorated_pwdb_for_dm <= 55 &&
de_digtable.min_undecorated_pwdb_for_dm > 55) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Early Mode On\n");
}
} else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
}
}
static void rtl92d_dm_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 value_igi = de_digtable.cur_igvalue;
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
if (rtlpriv->rtlhal.earlymode_enable) {
rtl92d_early_mode_enabled(rtlpriv);
de_digtable.last_min_undecorated_pwdb_for_dm =
de_digtable.min_undecorated_pwdb_for_dm;
}
if (!rtlpriv->dm.dm_initialgain_enable)
return;
/* Because we send data packets while scanning, returning here would
 * lower WEP throughput with some APs such as the gear-3700; this is a
 * difference between the mac80211 driver and the ieee80211 driver. */
/* if (rtlpriv->mac80211.act_scanning)
* return; */
/* Return early if not in STA mode */
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
return;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
/* Decide the current status and if modify initial gain or not */
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
de_digtable.cursta_connectctate = DIG_STA_CONNECT;
else
de_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
/* adjust initial gain according to false alarm counter */
if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
value_igi--;
else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1)
value_igi += 0;
else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2)
value_igi++;
else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
value_igi += 2;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
de_digtable.large_fa_hit, de_digtable.forbidden_igi);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n",
de_digtable.recover_cnt, de_digtable.rx_gain_range_min);
/* deal with abnormally large false alarm counts */
if (falsealm_cnt->cnt_all > 10000) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG(): Abnormally false alarm case\n");
de_digtable.large_fa_hit++;
if (de_digtable.forbidden_igi < de_digtable.cur_igvalue) {
de_digtable.forbidden_igi = de_digtable.cur_igvalue;
de_digtable.large_fa_hit = 1;
}
if (de_digtable.large_fa_hit >= 3) {
if ((de_digtable.forbidden_igi + 1) > DM_DIG_MAX)
de_digtable.rx_gain_range_min = DM_DIG_MAX;
else
de_digtable.rx_gain_range_min =
(de_digtable.forbidden_igi + 1);
de_digtable.recover_cnt = 3600; /* 3600=2hr */
}
} else {
/* Recovery mechanism for IGI lower bound */
if (de_digtable.recover_cnt != 0) {
de_digtable.recover_cnt--;
} else {
if (de_digtable.large_fa_hit == 0) {
if ((de_digtable.forbidden_igi - 1) <
DM_DIG_FA_LOWER) {
de_digtable.forbidden_igi =
DM_DIG_FA_LOWER;
de_digtable.rx_gain_range_min =
DM_DIG_FA_LOWER;
} else {
de_digtable.forbidden_igi--;
de_digtable.rx_gain_range_min =
(de_digtable.forbidden_igi + 1);
}
} else if (de_digtable.large_fa_hit == 3) {
de_digtable.large_fa_hit = 0;
}
}
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
de_digtable.large_fa_hit, de_digtable.forbidden_igi);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n",
de_digtable.recover_cnt, de_digtable.rx_gain_range_min);
if (value_igi > DM_DIG_MAX)
value_igi = DM_DIG_MAX;
else if (value_igi < de_digtable.rx_gain_range_min)
value_igi = de_digtable.rx_gain_range_min;
de_digtable.cur_igvalue = value_igi;
rtl92d_dm_write_dig(hw);
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
rtl92d_dm_cck_packet_detection_thresh(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
}
static void rtl92d_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dynamic_txpower_enable = true;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
}
static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
long undecorated_smoothed_pwdb;
if ((!rtlpriv->dm.dynamic_txpower_enable)
|| rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undecorated_smoothed_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"IBSS Client PWDB = 0x%lx\n",
undecorated_smoothed_pwdb);
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
undecorated_smoothed_pwdb);
}
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
undecorated_smoothed_pwdb);
}
if (rtlhal->current_bandtype == BAND_ON_5G) {
if (undecorated_smoothed_pwdb >= 0x33) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
} else if ((undecorated_smoothed_pwdb < 0x33)
&& (undecorated_smoothed_pwdb >= 0x2b)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb < 0x2b) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Normal\n");
}
} else {
if (undecorated_smoothed_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else
if ((undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3))
&& (undecorated_smoothed_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_NORMAL\n");
}
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* AP, ADHOC and MESH modes return early */
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
return;
/* Indicate Rx signal strength to FW. */
if (rtlpriv->dm.useramask) {
u32 temp = rtlpriv->dm.undecorated_smoothed_pwdb;
temp <<= 16;
temp |= 0x100;
/* fw v12 cmdid 5: use max macid; for NIC,
 * default macid is 0, max macid is 1 */
rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *) (&temp));
} else {
rtl_write_byte(rtlpriv, 0x4fe,
(u8) rtlpriv->dm.undecorated_smoothed_pwdb);
}
}
void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.current_turbo_edca = false;
rtlpriv->dm.is_any_nonbepkts = false;
rtlpriv->dm.is_cur_rdlstate = false;
}
static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
static u64 last_txok_cnt;
static u64 last_rxok_cnt;
u64 cur_txok_cnt;
u64 cur_rxok_cnt;
u32 edca_be_ul = 0x5ea42b;
u32 edca_be_dl = 0x5ea42b;
if (mac->link_state != MAC80211_LINKED) {
rtlpriv->dm.current_turbo_edca = false;
goto exit;
}
/* Enable BEQ TxOP limit configuration in wireless G-mode. */
/* To check whether we shall force turn on TXOP configuration. */
if ((!rtlpriv->dm.disable_framebursting) &&
(rtlpriv->sec.pairwise_enc_algorithm == WEP40_ENCRYPTION ||
rtlpriv->sec.pairwise_enc_algorithm == WEP104_ENCRYPTION ||
rtlpriv->sec.pairwise_enc_algorithm == TKIP_ENCRYPTION)) {
/* Force TxOP limit to 0x005e for UL. */
if (!(edca_be_ul & 0xffff0000))
edca_be_ul |= 0x005e0000;
/* Force TxOP limit to 0x005e for DL. */
if (!(edca_be_dl & 0xffff0000))
edca_be_dl |= 0x005e0000;
}
if ((!rtlpriv->dm.is_any_nonbepkts) &&
(!rtlpriv->dm.disable_framebursting)) {
cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
if (cur_rxok_cnt > 4 * cur_txok_cnt) {
if (!rtlpriv->dm.is_cur_rdlstate ||
!rtlpriv->dm.current_turbo_edca) {
rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
edca_be_dl);
rtlpriv->dm.is_cur_rdlstate = true;
}
} else {
if (rtlpriv->dm.is_cur_rdlstate ||
!rtlpriv->dm.current_turbo_edca) {
rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
edca_be_ul);
rtlpriv->dm.is_cur_rdlstate = false;
}
}
rtlpriv->dm.current_turbo_edca = true;
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
(u8 *) (&tmp));
rtlpriv->dm.current_turbo_edca = false;
}
}
exit:
rtlpriv->dm.is_any_nonbepkts = false;
last_txok_cnt = rtlpriv->stats.txbytesunicast;
last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
}
static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 index_mapping[RX_INDEX_MAPPING_NUM] = {
0x0f, 0x0f, 0x0d, 0x0c, 0x0b,
0x0a, 0x09, 0x08, 0x07, 0x06,
0x05, 0x04, 0x04, 0x03, 0x02
};
int i;
u32 u4tmp;
u4tmp = (index_mapping[(rtlpriv->efuse.eeprom_thermalmeter -
rtlpriv->dm.thermalvalue_rxgain)]) << 12;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"===> Rx Gain %x\n", u4tmp);
for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
rtl_set_rfreg(hw, i, 0x3C, BRFREGOFFSETMASK,
(rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
}
static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
u8 *cck_index_old)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int i;
unsigned long flag = 0;
long temp_cck;
/* Query CCK default setting From 0xa24 */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
BMASKDWORD) & BMASKCCK;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
for (i = 0; i < CCK_TABLE_LENGTH; i++) {
if (rtlpriv->dm.cck_inch14) {
if (!memcmp((void *)&temp_cck,
(void *)&cckswing_table_ch14[i][2], 4)) {
*cck_index_old = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
RCCK0_TXFILTER2, temp_cck,
*cck_index_old,
rtlpriv->dm.cck_inch14);
break;
}
} else {
if (!memcmp((void *) &temp_cck,
&cckswing_table_ch1ch13[i][2], 4)) {
*cck_index_old = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
RCCK0_TXFILTER2, temp_cck,
*cck_index_old,
rtlpriv->dm.cck_inch14);
break;
}
}
}
*temp_cckg = temp_cck;
}
static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index,
bool *internal_pa, u8 thermalvalue, u8 delta,
u8 rf, struct rtl_efuse *rtlefuse,
struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy,
u8 index_mapping[5][INDEX_MAPPING_NUM],
u8 index_mapping_pa[8][INDEX_MAPPING_NUM])
{
int i;
u8 index;
u8 offset = 0;
for (i = 0; i < rf; i++) {
if (rtlhal->macphymode == DUALMAC_DUALPHY &&
rtlhal->interfaceindex == 1) /* MAC 1 5G */
*internal_pa = rtlefuse->internal_pa_5g[1];
else
*internal_pa = rtlefuse->internal_pa_5g[i];
if (*internal_pa) {
if (rtlhal->interfaceindex == 1 || i == rf)
offset = 4;
else
offset = 0;
if (rtlphy->current_channel >= 100 &&
rtlphy->current_channel <= 165)
offset += 2;
} else {
if (rtlhal->interfaceindex == 1 || i == rf)
offset = 2;
else
offset = 0;
}
if (thermalvalue > rtlefuse->eeprom_thermalmeter)
offset++;
if (*internal_pa) {
if (delta > INDEX_MAPPING_NUM - 1)
index = index_mapping_pa[offset]
[INDEX_MAPPING_NUM - 1];
else
index =
index_mapping_pa[offset][delta];
} else {
if (delta > INDEX_MAPPING_NUM - 1)
index =
index_mapping[offset][INDEX_MAPPING_NUM - 1];
else
index = index_mapping[offset][delta];
}
if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
if (*internal_pa && thermalvalue > 0x12) {
ofdm_index[i] = rtlpriv->dm.ofdm_index[i] -
((delta / 2) * 3 + (delta % 2));
} else {
ofdm_index[i] -= index;
}
} else {
ofdm_index[i] += index;
}
}
}
static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain;
u8 offset, thermalvalue_avg_count = 0;
u32 thermalvalue_avg = 0;
bool internal_pa = false;
long ele_a = 0, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
u8 ofdm_index[2];
u8 cck_index = 0;
u8 ofdm_index_old[2];
u8 cck_index_old = 0;
u8 index;
int i;
bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf;
u8 indexforchannel =
rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
u8 index_mapping[5][INDEX_MAPPING_NUM] = {
/* 5G, path A/MAC 0, decrease power */
{0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
/* 5G, path A/MAC 0, increase power */
{0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
/* 5G, path B/MAC 1, decrease power */
{0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
/* 5G, path B/MAC 1, increase power */
{0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
/* 2.4G, decrease power */
{0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10},
};
u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = {
/* 5G, path A/MAC 0, ch36-64, decrease power */
{0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
/* 5G, path A/MAC 0, ch36-64, increase power */
{0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
/* 5G, path A/MAC 0, ch100-165, decrease power */
{0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15},
/* 5G, path A/MAC 0, ch100-165, increase power */
{0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
/* 5G, path B/MAC 1, ch36-64, decrease power */
{0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
/* 5G, path B/MAC 1, ch36-64, increase power */
{0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
/* 5G, path B/MAC 1, ch100-165, decrease power */
{0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14},
/* 5G, path B/MAC 1, ch100-165, increase power */
{0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
};
rtlpriv->dm.txpower_trackinginit = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
thermalvalue,
rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
rtl92d_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
if (is2t)
rf = 2;
else
rf = 1;
if (thermalvalue) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
BMASKDWORD) & BMASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
if (ele_d == (ofdmswing_table[i] & BMASKOFDM_D)) {
ofdm_index_old[0] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
ROFDM0_XATxIQIMBALANCE,
ele_d, ofdm_index_old[0]);
break;
}
}
if (is2t) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
BMASKDWORD) & BMASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
if (ele_d ==
(ofdmswing_table[i] & BMASKOFDM_D)) {
ofdm_index_old[1] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
"Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
ROFDM0_XBTxIQIMBALANCE, ele_d,
ofdm_index_old[1]);
break;
}
}
}
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old);
} else {
temp_cck = 0x090e1317;
cck_index_old = 12;
}
if (!rtlpriv->dm.thermalvalue) {
rtlpriv->dm.thermalvalue =
rtlefuse->eeprom_thermalmeter;
rtlpriv->dm.thermalvalue_lck = thermalvalue;
rtlpriv->dm.thermalvalue_iqk = thermalvalue;
rtlpriv->dm.thermalvalue_rxgain =
rtlefuse->eeprom_thermalmeter;
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
}
if (rtlhal->reloadtxpowerindex) {
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"reload ofdm index for band switch\n");
}
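/* smooth the thermal reading: keep a ring of the last AVG_THERMAL_NUM
 * samples and average over the non-zero entries */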
rtlpriv->dm.thermalvalue_avg
[rtlpriv->dm.thermalvalue_avg_index] = thermalvalue;
rtlpriv->dm.thermalvalue_avg_index++;
if (rtlpriv->dm.thermalvalue_avg_index == AVG_THERMAL_NUM)
rtlpriv->dm.thermalvalue_avg_index = 0;
for (i = 0; i < AVG_THERMAL_NUM; i++) {
if (rtlpriv->dm.thermalvalue_avg[i]) {
thermalvalue_avg +=
rtlpriv->dm.thermalvalue_avg[i];
thermalvalue_avg_count++;
}
}
if (thermalvalue_avg_count)
thermalvalue = (u8) (thermalvalue_avg /
thermalvalue_avg_count);
if (rtlhal->reloadtxpowerindex) {
delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
(thermalvalue - rtlefuse->eeprom_thermalmeter) :
(rtlefuse->eeprom_thermalmeter - thermalvalue);
rtlhal->reloadtxpowerindex = false;
rtlpriv->dm.done_txpower = false;
} else if (rtlpriv->dm.done_txpower) {
delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
(thermalvalue - rtlpriv->dm.thermalvalue) :
(rtlpriv->dm.thermalvalue - thermalvalue);
} else {
delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
(thermalvalue - rtlefuse->eeprom_thermalmeter) :
(rtlefuse->eeprom_thermalmeter - thermalvalue);
}
delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
(thermalvalue - rtlpriv->dm.thermalvalue_lck) :
(rtlpriv->dm.thermalvalue_lck - thermalvalue);
delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
(thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
delta_rxgain =
(thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ?
(thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
(rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
thermalvalue, rtlpriv->dm.thermalvalue,
rtlefuse->eeprom_thermalmeter, delta, delta_lck,
delta_iqk);
if ((delta_lck > rtlefuse->delta_lck) &&
(rtlefuse->delta_lck != 0)) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
rtl92d_phy_lc_calibrate(hw);
}
if (delta > 0 && rtlpriv->dm.txpower_track_control) {
rtlpriv->dm.done_txpower = true;
delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
(thermalvalue - rtlefuse->eeprom_thermalmeter) :
(rtlefuse->eeprom_thermalmeter - thermalvalue);
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
offset = 4;
if (delta > INDEX_MAPPING_NUM - 1)
index = index_mapping[offset]
[INDEX_MAPPING_NUM - 1];
else
index = index_mapping[offset][delta];
if (thermalvalue > rtlpriv->dm.thermalvalue) {
for (i = 0; i < rf; i++)
ofdm_index[i] -= delta;
cck_index -= delta;
} else {
for (i = 0; i < rf; i++)
ofdm_index[i] += index;
cck_index += index;
}
} else if (rtlhal->current_bandtype == BAND_ON_5G) {
rtl92d_bandtype_5G(rtlhal, ofdm_index,
&internal_pa, thermalvalue,
delta, rf, rtlefuse, rtlpriv,
rtlphy, index_mapping,
index_mapping_internal_pa);
}
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
rtlpriv->dm.ofdm_index[0],
rtlpriv->dm.ofdm_index[1],
rtlpriv->dm.cck_index);
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
rtlpriv->dm.ofdm_index[0],
rtlpriv->dm.cck_index);
}
for (i = 0; i < rf; i++) {
if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1)
ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1;
else if (ofdm_index[i] < ofdm_min_index)
ofdm_index[i] = ofdm_min_index;
}
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
if (cck_index > CCK_TABLE_SIZE - 1) {
cck_index = CCK_TABLE_SIZE - 1;
} else if (internal_pa ||
rtlhal->current_bandtype ==
BAND_ON_2_4G) {
if (ofdm_index[i] <
ofdm_min_index_internal_pa)
ofdm_index[i] =
ofdm_min_index_internal_pa;
} else if (cck_index < 0) {
cck_index = 0;
}
}
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
ofdm_index[0], ofdm_index[1],
cck_index);
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"new OFDM_A_index=0x%x,cck_index = 0x%x\n",
ofdm_index[0], cck_index);
}
ele_d = (ofdmswing_table[(u8) ofdm_index[0]] &
0xFFC00000) >> 22;
val_x = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][0];
val_y = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][1];
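/* val_x/val_y are 10-bit two's-complement IQK results; sign-extend
 * them to 32 bits before scaling by ele_d */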
if (val_x != 0) {
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
ele_a =
((val_x * ele_d) >> 8) & 0x000003FF;
/* new element C = element D x Y */
if ((val_y & 0x00000200) != 0)
val_y = val_y | 0xFFFFFC00;
ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
/* write new elements A, C, D to regC80 and
* regC94, element B is always 0 */
value32 = (ele_d << 22) | ((ele_c & 0x3F) <<
16) | ele_a;
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
BMASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
value32);
} else {
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
BMASKDWORD,
ofdmswing_table
[(u8)ofdm_index[0]]);
rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(24), 0x00);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
rtlhal->interfaceindex,
val_x, val_y, ele_a, ele_c, ele_d,
val_x, val_y);
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* Adjust CCK according to IQK result */
if (!rtlpriv->dm.cck_inch14) {
rtl_write_byte(rtlpriv, 0xa22,
cckswing_table_ch1ch13
[(u8)cck_index][0]);
rtl_write_byte(rtlpriv, 0xa23,
cckswing_table_ch1ch13
[(u8)cck_index][1]);
rtl_write_byte(rtlpriv, 0xa24,
cckswing_table_ch1ch13
[(u8)cck_index][2]);
rtl_write_byte(rtlpriv, 0xa25,
cckswing_table_ch1ch13
[(u8)cck_index][3]);
rtl_write_byte(rtlpriv, 0xa26,
cckswing_table_ch1ch13
[(u8)cck_index][4]);
rtl_write_byte(rtlpriv, 0xa27,
cckswing_table_ch1ch13
[(u8)cck_index][5]);
rtl_write_byte(rtlpriv, 0xa28,
cckswing_table_ch1ch13
[(u8)cck_index][6]);
rtl_write_byte(rtlpriv, 0xa29,
cckswing_table_ch1ch13
[(u8)cck_index][7]);
} else {
rtl_write_byte(rtlpriv, 0xa22,
cckswing_table_ch14
[(u8)cck_index][0]);
rtl_write_byte(rtlpriv, 0xa23,
cckswing_table_ch14
[(u8)cck_index][1]);
rtl_write_byte(rtlpriv, 0xa24,
cckswing_table_ch14
[(u8)cck_index][2]);
rtl_write_byte(rtlpriv, 0xa25,
cckswing_table_ch14
[(u8)cck_index][3]);
rtl_write_byte(rtlpriv, 0xa26,
cckswing_table_ch14
[(u8)cck_index][4]);
rtl_write_byte(rtlpriv, 0xa27,
cckswing_table_ch14
[(u8)cck_index][5]);
rtl_write_byte(rtlpriv, 0xa28,
cckswing_table_ch14
[(u8)cck_index][6]);
rtl_write_byte(rtlpriv, 0xa29,
cckswing_table_ch14
[(u8)cck_index][7]);
}
}
if (is2t) {
ele_d = (ofdmswing_table[(u8) ofdm_index[1]] &
0xFFC00000) >> 22;
val_x = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][4];
val_y = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][5];
if (val_x != 0) {
if ((val_x & 0x00000200) != 0)
/* consider minus */
val_x = val_x | 0xFFFFFC00;
ele_a = ((val_x * ele_d) >> 8) &
0x000003FF;
/* new element C = element D x Y */
if ((val_y & 0x00000200) != 0)
val_y =
val_y | 0xFFFFFC00;
ele_c =
((val_y *
ele_d) >> 8) & 0x00003FF;
/* write new elements A, C, D to regC88
* and regC9C, element B is always 0
*/
value32 = (ele_d << 22) |
((ele_c & 0x3F) << 16) |
ele_a;
rtl_set_bbreg(hw,
ROFDM0_XBTxIQIMBALANCE,
BMASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
BMASKH4BITS, value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), value32);
} else {
rtl_set_bbreg(hw,
ROFDM0_XBTxIQIMBALANCE,
BMASKDWORD,
ofdmswing_table
[(u8) ofdm_index[1]]);
rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
BMASKH4BITS, 0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), 0x00);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
val_x, val_y, ele_a, ele_c,
ele_d, val_x, val_y);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
rtl_get_bbreg(hw, 0xc80, BMASKDWORD),
rtl_get_bbreg(hw, 0xc94, BMASKDWORD),
rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
BRFREGOFFSETMASK));
}
if ((delta_iqk > rtlefuse->delta_iqk) &&
(rtlefuse->delta_iqk != 0)) {
rtl92d_phy_reset_iqk_result(hw);
rtlpriv->dm.thermalvalue_iqk = thermalvalue;
rtl92d_phy_iq_calibrate(hw);
}
if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G
&& thermalvalue <= rtlefuse->eeprom_thermalmeter) {
rtlpriv->dm.thermalvalue_rxgain = thermalvalue;
rtl92d_dm_rxgain_tracking_thermalmeter(hw);
}
if (rtlpriv->dm.txpower_track_control)
rtlpriv->dm.thermalvalue = thermalvalue;
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.txpower_tracking = true;
rtlpriv->dm.txpower_trackinginit = false;
rtlpriv->dm.txpower_track_control = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"pMgntInfo->txpower_tracking = %d\n",
rtlpriv->dm.txpower_tracking);
}
void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
static u8 tm_trigger;
if (!rtlpriv->dm.txpower_tracking)
return;
if (!tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
BIT(16), 0x03);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Trigger 92S Thermal Meter!!\n");
tm_trigger = 1;
return;
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Schedule TxPowerTracking direct call!!\n");
rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
tm_trigger = 0;
}
}
void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rate_adaptive *ra = &(rtlpriv->ra);
ra->ratr_state = DM_RATR_STA_INIT;
ra->pre_ratr_state = DM_RATR_STA_INIT;
if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
rtlpriv->dm.useramask = true;
else
rtlpriv->dm.useramask = false;
}
void rtl92d_dm_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
rtl92d_dm_diginit(hw);
rtl92d_dm_init_dynamic_txpower(hw);
rtl92d_dm_init_edca_turbo(hw);
rtl92d_dm_init_rate_adaptive_mask(hw);
rtl92d_dm_initialize_txpower_tracking(hw);
}
void rtl92d_dm_watchdog(struct ieee80211_hw *hw)
{
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool fw_current_inpsmode = false;
bool fwps_awake = true;
/* 1. RF is OFF. (No need to do DM.)
* 2. Fw is under power saving mode for FwLPS.
* (Prevents SW/FW I/O racing.)
* 3. IPS workitem is scheduled. (Prevents the IPS sequence
* from being swapped with DM.)
* 4. RFChangeInProgress is TRUE.
* (Prevents breakage by IPS/HW/SW RF off.) */
if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
fwps_awake) && (!ppsc->rfchange_inprogress)) {
rtl92d_dm_pwdb_monitor(hw);
rtl92d_dm_false_alarm_counter_statistics(hw);
rtl92d_dm_find_minimum_rssi(hw);
rtl92d_dm_dig(hw);
/* rtl92d_dm_dynamic_bb_powersaving(hw); */
rtl92d_dm_dynamic_txpower(hw);
/* rtl92d_dm_check_txpower_tracking_thermal_meter(hw); */
/* rtl92d_dm_refresh_rate_adaptive_mask(hw); */
/* rtl92d_dm_interrupt_migration(hw); */
rtl92d_dm_check_edca_turbo(hw);
}
}
| gpl-2.0 |
n3ocort3x/n3oMako | arch/arm/plat-mxc/cpufreq.c | 5074 | 4820 | /*
* Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
*/
/*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
/*
* A driver for the Freescale Semiconductor i.MXC CPUfreq module.
* The CPUFREQ driver is for controlling CPU frequency. It allows you to change
* the CPU clock speed on the fly.
*/
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <mach/hardware.h>
#include <mach/clock.h>
#define CLK32_FREQ 32768
#define NANOSECOND (1000 * 1000 * 1000)
struct cpu_op *(*get_cpu_op)(int *op);
static int cpu_freq_khz_min;
static int cpu_freq_khz_max;
static struct clk *cpu_clk;
static struct cpufreq_frequency_table *imx_freq_table;
static int cpu_op_nr;
static struct cpu_op *cpu_op_tbl;
static int set_cpu_freq(int freq)
{
int ret = 0;
int org_cpu_rate;
org_cpu_rate = clk_get_rate(cpu_clk);
if (org_cpu_rate == freq)
return ret;
ret = clk_set_rate(cpu_clk, freq);
if (ret != 0) {
printk(KERN_DEBUG "cannot set CPU clock rate\n");
return ret;
}
return ret;
}
static int mxc_verify_speed(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
return cpufreq_frequency_table_verify(policy, imx_freq_table);
}
static unsigned int mxc_get_speed(unsigned int cpu)
{
if (cpu)
return 0;
return clk_get_rate(cpu_clk) / 1000;
}
static int mxc_set_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
struct cpufreq_freqs freqs;
int freq_Hz;
int ret = 0;
unsigned int index;
cpufreq_frequency_table_target(policy, imx_freq_table,
target_freq, relation, &index);
freq_Hz = imx_freq_table[index].frequency * 1000;
freqs.old = clk_get_rate(cpu_clk) / 1000;
freqs.new = freq_Hz / 1000;
freqs.cpu = 0;
freqs.flags = 0;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
ret = set_cpu_freq(freq_Hz);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return ret;
}
static int mxc_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
int i;
printk(KERN_INFO "i.MXC CPU frequency driver\n");
if (policy->cpu != 0)
return -EINVAL;
if (!get_cpu_op)
return -EINVAL;
cpu_clk = clk_get(NULL, "cpu_clk");
if (IS_ERR(cpu_clk)) {
printk(KERN_ERR "%s: failed to get cpu clock\n", __func__);
return PTR_ERR(cpu_clk);
}
cpu_op_tbl = get_cpu_op(&cpu_op_nr);
cpu_freq_khz_min = cpu_op_tbl[0].cpu_rate / 1000;
cpu_freq_khz_max = cpu_op_tbl[0].cpu_rate / 1000;
imx_freq_table = kmalloc(
sizeof(struct cpufreq_frequency_table) * (cpu_op_nr + 1),
GFP_KERNEL);
if (!imx_freq_table) {
ret = -ENOMEM;
goto err1;
}
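/* build the cpufreq table from the operating points and track the
 * minimum/maximum rates; the extra entry below terminates the table */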
for (i = 0; i < cpu_op_nr; i++) {
imx_freq_table[i].index = i;
imx_freq_table[i].frequency = cpu_op_tbl[i].cpu_rate / 1000;
if ((cpu_op_tbl[i].cpu_rate / 1000) < cpu_freq_khz_min)
cpu_freq_khz_min = cpu_op_tbl[i].cpu_rate / 1000;
if ((cpu_op_tbl[i].cpu_rate / 1000) > cpu_freq_khz_max)
cpu_freq_khz_max = cpu_op_tbl[i].cpu_rate / 1000;
}
imx_freq_table[i].index = i;
imx_freq_table[i].frequency = CPUFREQ_TABLE_END;
policy->cur = clk_get_rate(cpu_clk) / 1000;
policy->min = policy->cpuinfo.min_freq = cpu_freq_khz_min;
policy->max = policy->cpuinfo.max_freq = cpu_freq_khz_max;
/* The manual states that the PLL stabilizes in two CLK32 periods */
policy->cpuinfo.transition_latency = 2 * NANOSECOND / CLK32_FREQ;
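/* i.e. 2 * 1000000000 / 32768 = 61035 ns, roughly 61 us */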
ret = cpufreq_frequency_table_cpuinfo(policy, imx_freq_table);
if (ret < 0) {
printk(KERN_ERR "%s: failed to register i.MXC CPUfreq with error code %d\n",
__func__, ret);
goto err;
}
cpufreq_frequency_table_get_attr(imx_freq_table, policy->cpu);
return 0;
err:
kfree(imx_freq_table);
err1:
clk_put(cpu_clk);
return ret;
}
static int mxc_cpufreq_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
set_cpu_freq(cpu_freq_khz_max * 1000);
clk_put(cpu_clk);
kfree(imx_freq_table);
return 0;
}
static struct cpufreq_driver mxc_driver = {
.flags = CPUFREQ_STICKY,
.verify = mxc_verify_speed,
.target = mxc_set_target,
.get = mxc_get_speed,
.init = mxc_cpufreq_init,
.exit = mxc_cpufreq_exit,
.name = "imx",
};
static int __devinit mxc_cpufreq_driver_init(void)
{
return cpufreq_register_driver(&mxc_driver);
}
static void mxc_cpufreq_driver_exit(void)
{
cpufreq_unregister_driver(&mxc_driver);
}
module_init(mxc_cpufreq_driver_init);
module_exit(mxc_cpufreq_driver_exit);
MODULE_AUTHOR("Freescale Semiconductor Inc. Yong Shen <yong.shen@linaro.org>");
MODULE_DESCRIPTION("CPUfreq driver for i.MX");
MODULE_LICENSE("GPL");
| gpl-2.0 |
boa19861105/B2-Test | sound/soc/samsung/rx1950_uda1380.c | 5074 | 7291 | /*
* rx1950.c -- ALSA Soc Audio Layer
*
* Copyright (c) 2010 Vasily Khoruzhick <anarsoul@gmail.com>
*
* Based on smdk2440.c and magician.c
*
* Authors: Graeme Gregory graeme.gregory@wolfsonmicro.com
* Philipp Zabel <philipp.zabel@gmail.com>
* Denis Grigoriev <dgreenday@gmail.com>
* Vasily Khoruzhick <anarsoul@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include <plat/regs-iis.h>
#include <asm/mach-types.h>
#include "s3c24xx-i2s.h"
static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd);
static int rx1950_startup(struct snd_pcm_substream *substream);
static int rx1950_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
static int rx1950_spk_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
static unsigned int rates[] = {
16000,
44100,
48000,
};
static struct snd_pcm_hw_constraint_list hw_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
static struct snd_soc_jack hp_jack;
static struct snd_soc_jack_pin hp_jack_pins[] = {
{
.pin = "Headphone Jack",
.mask = SND_JACK_HEADPHONE,
},
{
.pin = "Speaker",
.mask = SND_JACK_HEADPHONE,
.invert = 1,
},
};
static struct snd_soc_jack_gpio hp_jack_gpios[] = {
[0] = {
.gpio = S3C2410_GPG(12),
.name = "hp-gpio",
.report = SND_JACK_HEADPHONE,
.invert = 1,
.debounce_time = 200,
},
};
static struct snd_soc_ops rx1950_ops = {
.startup = rx1950_startup,
.hw_params = rx1950_hw_params,
};
/* s3c24xx digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link rx1950_uda1380_dai[] = {
{
.name = "uda1380",
.stream_name = "UDA1380 Duplex",
.cpu_dai_name = "s3c24xx-iis",
.codec_dai_name = "uda1380-hifi",
.init = rx1950_uda1380_init,
.platform_name = "samsung-audio",
.codec_name = "uda1380-codec.0-001a",
.ops = &rx1950_ops,
},
};
/* rx1950 machine dapm widgets */
static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Mic Jack", NULL),
SND_SOC_DAPM_SPK("Speaker", rx1950_spk_power),
};
/* rx1950 machine audio_map */
static const struct snd_soc_dapm_route audio_map[] = {
/* headphone connected to VOUTLHP, VOUTRHP */
{"Headphone Jack", NULL, "VOUTLHP"},
{"Headphone Jack", NULL, "VOUTRHP"},
/* ext speaker connected to VOUTL, VOUTR */
{"Speaker", NULL, "VOUTL"},
{"Speaker", NULL, "VOUTR"},
/* mic is connected to VINM */
{"VINM", NULL, "Mic Jack"},
};
static struct snd_soc_card rx1950_asoc = {
.name = "rx1950",
.owner = THIS_MODULE,
.dai_link = rx1950_uda1380_dai,
.num_links = ARRAY_SIZE(rx1950_uda1380_dai),
.dapm_widgets = uda1380_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(uda1380_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
static struct platform_device *s3c24xx_snd_device;
static int rx1950_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
runtime->hw.rate_min = hw_rates.list[0];
runtime->hw.rate_max = hw_rates.list[hw_rates.count - 1];
runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
return snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&hw_rates);
}
static int rx1950_spk_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
if (SND_SOC_DAPM_EVENT_ON(event))
gpio_set_value(S3C2410_GPA(1), 1);
else
gpio_set_value(S3C2410_GPA(1), 0);
return 0;
}
static int rx1950_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int div;
int ret;
unsigned int rate = params_rate(params);
int clk_source, fs_mode;
switch (rate) {
case 16000:
case 48000:
clk_source = S3C24XX_CLKSRC_PCLK;
fs_mode = S3C2410_IISMOD_256FS;
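/* choose the prescaler so that the iis source clock divided by it is
 * as close as possible to 256*fs (rounded to nearest) */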
div = s3c24xx_i2s_get_clockrate() / (256 * rate);
if (s3c24xx_i2s_get_clockrate() % (256 * rate) > (128 * rate))
div++;
break;
case 44100:
case 88200:
clk_source = S3C24XX_CLKSRC_MPLL;
fs_mode = S3C2410_IISMOD_384FS;
div = 1;
break;
default:
printk(KERN_ERR "%s: rate %d is not supported\n",
__func__, rate);
return -EINVAL;
}
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
/* select clock source */
ret = snd_soc_dai_set_sysclk(cpu_dai, clk_source, rate,
SND_SOC_CLOCK_OUT);
if (ret < 0)
return ret;
/* set MCLK division for sample rate */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
fs_mode);
if (ret < 0)
return ret;
/* set BCLK division for sample rate */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
S3C2410_IISMOD_32FS);
if (ret < 0)
return ret;
/* set prescaler division for sample rate */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
S3C24XX_PRESCALE(div, div));
if (ret < 0)
return ret;
return 0;
}
static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
int err;
snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
snd_soc_dapm_enable_pin(dapm, "Speaker");
snd_soc_dapm_enable_pin(dapm, "Mic Jack");
snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
&hp_jack);
snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
hp_jack_pins);
snd_soc_jack_add_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
hp_jack_gpios);
return 0;
}
static int __init rx1950_init(void)
{
int ret;
if (!machine_is_rx1950())
return -ENODEV;
/* configure some gpios */
ret = gpio_request(S3C2410_GPA(1), "speaker-power");
if (ret)
goto err_gpio;
ret = gpio_direction_output(S3C2410_GPA(1), 0);
if (ret)
goto err_gpio_conf;
s3c24xx_snd_device = platform_device_alloc("soc-audio", -1);
if (!s3c24xx_snd_device) {
ret = -ENOMEM;
goto err_plat_alloc;
}
platform_set_drvdata(s3c24xx_snd_device, &rx1950_asoc);
ret = platform_device_add(s3c24xx_snd_device);
if (ret) {
platform_device_put(s3c24xx_snd_device);
goto err_plat_add;
}
return 0;
err_plat_add:
err_plat_alloc:
err_gpio_conf:
gpio_free(S3C2410_GPA(1));
err_gpio:
return ret;
}
static void __exit rx1950_exit(void)
{
platform_device_unregister(s3c24xx_snd_device);
snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
hp_jack_gpios);
gpio_free(S3C2410_GPA(1));
}
module_init(rx1950_init);
module_exit(rx1950_exit);
/* Module information */
MODULE_AUTHOR("Vasily Khoruzhick");
MODULE_DESCRIPTION("ALSA SoC RX1950");
MODULE_LICENSE("GPL");
| gpl-2.0 |
razaina/android-kernel-samsung-smdk4412 | drivers/isdn/mISDN/dsp_blowfish.c | 5074 | 23738 | /*
* Blowfish encryption/decryption for mISDN_dsp.
*
* Copyright Andreas Eversberg (jolly@eversberg.eu)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include "core.h"
#include "dsp.h"
/*
* how to encode a sample stream to 64-bit blocks that will be encrypted
*
* first of all, data is collected until a block of 9 samples is received.
* of course, a packet may have many more than 9 samples, but it may not
* be exactly a multiple of 9 samples. if there is a remainder, the next
* received data will complete the block.
*
* the block is then converted to 9 uLAW samples without the least significant
* bit. the result is a 7-bit encoded sample.
*
* the samples will be reorganised to form 8 bytes of data:
* (5(6) means: encoded sample no. 5, bit 6)
*
* 0(6) 0(5) 0(4) 0(3) 0(2) 0(1) 0(0) 1(6)
* 1(5) 1(4) 1(3) 1(2) 1(1) 1(0) 2(6) 2(5)
* 2(4) 2(3) 2(2) 2(1) 2(0) 3(6) 3(5) 3(4)
* 3(3) 3(2) 3(1) 3(0) 4(6) 4(5) 4(4) 4(3)
* 4(2) 4(1) 4(0) 5(6) 5(5) 5(4) 5(3) 5(2)
* 5(1) 5(0) 6(6) 6(5) 6(4) 6(3) 6(2) 6(1)
* 6(0) 7(6) 7(5) 7(4) 7(3) 7(2) 7(1) 7(0)
* 8(6) 8(5) 8(4) 8(3) 8(2) 8(1) 8(0)
*
* the missing bit 0 of the last byte is filled with some
* random noise, to fill all 8 bytes.
*
* the 8 bytes will be encrypted using blowfish.
*
* the result will be converted into 9 bytes. the bit 7 is used for
* checksum (CS), for sync (0, 1) and for the last bit:
* (5(6) means: crypted byte 5, bit 6)
*
* 1 0(7) 0(6) 0(5) 0(4) 0(3) 0(2) 0(1)
* 0 0(0) 1(7) 1(6) 1(5) 1(4) 1(3) 1(2)
* 0 1(1) 1(0) 2(7) 2(6) 2(5) 2(4) 2(3)
* 0 2(2) 2(1) 2(0) 3(7) 3(6) 3(5) 3(4)
* 0 3(3) 3(2) 3(1) 3(0) 4(7) 4(6) 4(5)
* CS 4(4) 4(3) 4(2) 4(1) 4(0) 5(7) 5(6)
* CS 5(5) 5(4) 5(3) 5(2) 5(1) 5(0) 6(7)
* CS 6(6) 6(5) 6(4) 6(3) 6(2) 6(1) 6(0)
* 7(7) 7(6) 7(5) 7(4) 7(3) 7(2) 7(1) 7(0)
*
* the checksum is used to detect transmission errors and frame drops.
*
* synchronisation of a received block is done by shifting the upper bit of each
* byte (bit 7) into a shift register. if the register holds the five-bit pattern
* (10000), this is used to find the sync. only if sync has been found, the
* current block of 9 received bytes is decrypted. before that the checksum
* is calculated. if it is incorrect, the block is dropped.
* this will avoid loud noise due to corrupt encrypted data.
*
* if the last block is corrupt, the current decoded block is repeated
* until a valid block has been received.
*/
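/*
 * purely illustrative sketch (not part of the original driver): the
 * 9-samples-to-8-bytes packing described above, written as a small
 * helper. the sample values are assumed to already be the 7-bit
 * law-encoded values; the caller passes the random filler bit that
 * ends up in bit 0 of the last byte.
 */
#if 0
static void bf_pack_example(const u8 s[9], u8 noise_bit, u8 out[8])
{
u64 acc = 0;
int i;
/* concatenate the nine 7-bit samples, sample 0 first (63 bits) */
for (i = 0; i < 9; i++)
acc = (acc << 7) | (s[i] & 0x7f);
/* append the noise bit to fill bit 0 of the last byte (64 bits) */
acc = (acc << 1) | (noise_bit & 1);
/* store most significant byte first, matching the table above */
for (i = 7; i >= 0; i--) {
out[i] = acc & 0xff;
acc >>= 8;
}
}
#endif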
/*
* some blowfish parts are taken from the
* crypto-api for faster implementation
*/
struct bf_ctx {
u32 p[18];
u32 s[1024];
};
static const u32 bf_pbox[16 + 2] = {
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917,
0x9216d5d9, 0x8979fb1b,
};
static const u32 bf_sbox[256 * 4] = {
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7,
0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99,
0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e,
0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee,
0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef,
0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e,
0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440,
0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce,
0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e,
0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677,
0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032,
0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88,
0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e,
0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0,
0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98,
0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88,
0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6,
0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d,
0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7,
0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba,
0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f,
0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09,
0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb,
0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279,
0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab,
0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82,
0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573,
0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0,
0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790,
0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8,
0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0,
0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7,
0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad,
0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1,
0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9,
0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477,
0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49,
0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af,
0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5,
0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41,
0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400,
0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915,
0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623,
0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266,
0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e,
0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6,
0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e,
0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1,
0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8,
0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff,
0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701,
0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7,
0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331,
0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf,
0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e,
0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87,
0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2,
0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16,
0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b,
0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509,
0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3,
0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f,
0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4,
0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960,
0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28,
0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802,
0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510,
0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf,
0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e,
0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50,
0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8,
0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281,
0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696,
0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128,
0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0,
0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0,
0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250,
0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3,
0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00,
0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061,
0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e,
0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735,
0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9,
0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340,
0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934,
0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068,
0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840,
0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45,
0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a,
0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb,
0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6,
0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42,
0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2,
0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb,
0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b,
0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33,
0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3,
0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc,
0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564,
0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b,
0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922,
0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728,
0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e,
0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37,
0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804,
0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b,
0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb,
0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d,
0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350,
0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9,
0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe,
0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d,
0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f,
0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61,
0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9,
0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2,
0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e,
0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633,
0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169,
0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52,
0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5,
0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62,
0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76,
0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24,
0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4,
0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c,
0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b,
0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe,
0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4,
0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8,
0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304,
0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22,
0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6,
0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9,
0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593,
0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51,
0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c,
0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b,
0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c,
0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd,
0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319,
0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb,
0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991,
0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32,
0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166,
0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae,
0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5,
0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47,
0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d,
0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84,
0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8,
0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd,
0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7,
0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38,
0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c,
0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525,
0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442,
0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964,
0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8,
0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d,
0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299,
0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02,
0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614,
0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a,
0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b,
0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0,
0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e,
0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9,
0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
};
/*
* Round loop unrolling macros, S is a pointer to a S-Box array
* organized as 4 unsigned longs per row.
*/
#define GET32_3(x) (((x) & 0xff))
#define GET32_2(x) (((x) >> (8)) & (0xff))
#define GET32_1(x) (((x) >> (16)) & (0xff))
#define GET32_0(x) (((x) >> (24)) & (0xff))
#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \
S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])
#define EROUND(a, b, n) do { b ^= P[n]; a ^= bf_F(b); } while (0)
#define DROUND(a, b, n) do { a ^= bf_F(b); b ^= P[n]; } while (0)
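/*
 * descriptive note (an expansion of the macros above, not original text):
 * one encryption round EROUND(a, b, n) expands to
 *   b ^= P[n];
 *   a ^= ((S[(b >> 24) & 0xff] + S[256 + ((b >> 16) & 0xff)])
 *         ^ S[512 + ((b >> 8) & 0xff)]) + S[768 + (b & 0xff)];
 * i.e. the standard Blowfish F-function of b XORed into a; DROUND is
 * the same two statements in reverse order, as needed for decryption.
 */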
/*
* encrypt isdn data frame
* every block with 9 samples is encrypted
*/
void
dsp_bf_encrypt(struct dsp *dsp, u8 *data, int len)
{
int i = 0, j = dsp->bf_crypt_pos;
u8 *bf_data_in = dsp->bf_data_in;
u8 *bf_crypt_out = dsp->bf_crypt_out;
u32 *P = dsp->bf_p;
u32 *S = dsp->bf_s;
u32 yl, yr;
u32 cs;
u8 nibble;
while (i < len) {
/* collect a block of 9 samples */
if (j < 9) {
bf_data_in[j] = *data;
*data++ = bf_crypt_out[j++];
i++;
continue;
}
j = 0;
/* transcode 9 samples xlaw to 8 bytes */
yl = dsp_audio_law2seven[bf_data_in[0]];
yl = (yl<<7) | dsp_audio_law2seven[bf_data_in[1]];
yl = (yl<<7) | dsp_audio_law2seven[bf_data_in[2]];
yl = (yl<<7) | dsp_audio_law2seven[bf_data_in[3]];
nibble = dsp_audio_law2seven[bf_data_in[4]];
yr = nibble;
yl = (yl<<4) | (nibble>>3);
yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[5]];
yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[6]];
yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[7]];
yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[8]];
yr = (yr<<1) | (bf_data_in[0] & 1);
/* fill unused bit with random noise of audio input */
/* encrypt */
EROUND(yr, yl, 0);
EROUND(yl, yr, 1);
EROUND(yr, yl, 2);
EROUND(yl, yr, 3);
EROUND(yr, yl, 4);
EROUND(yl, yr, 5);
EROUND(yr, yl, 6);
EROUND(yl, yr, 7);
EROUND(yr, yl, 8);
EROUND(yl, yr, 9);
EROUND(yr, yl, 10);
EROUND(yl, yr, 11);
EROUND(yr, yl, 12);
EROUND(yl, yr, 13);
EROUND(yr, yl, 14);
EROUND(yl, yr, 15);
yl ^= P[16];
yr ^= P[17];
/* calculate 3-bit checksum */
cs = yl ^ (yl>>3) ^ (yl>>6) ^ (yl>>9) ^ (yl>>12) ^ (yl>>15)
^ (yl>>18) ^ (yl>>21) ^ (yl>>24) ^ (yl>>27) ^ (yl>>30)
^ (yr<<2) ^ (yr>>1) ^ (yr>>4) ^ (yr>>7) ^ (yr>>10)
^ (yr>>13) ^ (yr>>16) ^ (yr>>19) ^ (yr>>22) ^ (yr>>25)
^ (yr>>28) ^ (yr>>31);
/*
* transcode 8 crypted bytes to 9 data bytes with sync
* and checksum information
*/
bf_crypt_out[0] = (yl>>25) | 0x80;
bf_crypt_out[1] = (yl>>18) & 0x7f;
bf_crypt_out[2] = (yl>>11) & 0x7f;
bf_crypt_out[3] = (yl>>4) & 0x7f;
bf_crypt_out[4] = ((yl<<3) & 0x78) | ((yr>>29) & 0x07);
bf_crypt_out[5] = ((yr>>22) & 0x7f) | ((cs<<5) & 0x80);
bf_crypt_out[6] = ((yr>>15) & 0x7f) | ((cs<<6) & 0x80);
bf_crypt_out[7] = ((yr>>8) & 0x7f) | (cs<<7);
bf_crypt_out[8] = yr;
}
/* write current count */
dsp->bf_crypt_pos = j;
}
/*
* decrypt isdn data frame
* every block with 9 bytes is decrypted
*/
void
dsp_bf_decrypt(struct dsp *dsp, u8 *data, int len)
{
int i = 0;
u8 j = dsp->bf_decrypt_in_pos;
u8 k = dsp->bf_decrypt_out_pos;
u8 *bf_crypt_inring = dsp->bf_crypt_inring;
u8 *bf_data_out = dsp->bf_data_out;
u16 sync = dsp->bf_sync;
u32 *P = dsp->bf_p;
u32 *S = dsp->bf_s;
u32 yl, yr;
u8 nibble;
u8 cs, cs0, cs1, cs2;
while (i < len) {
/*
* shift upper bit and rotate data to buffer ring
* send current decrypted data
*/
sync = (sync<<1) | ((*data)>>7);
bf_crypt_inring[j++ & 15] = *data;
*data++ = bf_data_out[k++];
i++;
if (k == 9)
k = 0; /* repeat if no sync has been found */
/* check if not in sync: bits 8..4 of the sync shift register must hold the 10000 pattern */
if ((sync&0x1f0) != 0x100)
continue;
j -= 9;
/* transcode receive data to 64 bit block of encrypted data */
yl = bf_crypt_inring[j++ & 15];
yl = (yl<<7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
yl = (yl<<7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
yl = (yl<<7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
nibble = bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
yr = nibble;
yl = (yl<<4) | (nibble>>3);
cs2 = bf_crypt_inring[j++ & 15];
yr = (yr<<7) | (cs2 & 0x7f);
cs1 = bf_crypt_inring[j++ & 15];
yr = (yr<<7) | (cs1 & 0x7f);
cs0 = bf_crypt_inring[j++ & 15];
yr = (yr<<7) | (cs0 & 0x7f);
yr = (yr<<8) | bf_crypt_inring[j++ & 15];
/* calculate 3-bit checksum */
cs = yl ^ (yl>>3) ^ (yl>>6) ^ (yl>>9) ^ (yl>>12) ^ (yl>>15)
^ (yl>>18) ^ (yl>>21) ^ (yl>>24) ^ (yl>>27) ^ (yl>>30)
^ (yr<<2) ^ (yr>>1) ^ (yr>>4) ^ (yr>>7) ^ (yr>>10)
^ (yr>>13) ^ (yr>>16) ^ (yr>>19) ^ (yr>>22) ^ (yr>>25)
^ (yr>>28) ^ (yr>>31);
/* check if frame is valid */
if ((cs&0x7) != (((cs2>>5)&4) | ((cs1>>6)&2) | (cs0 >> 7))) {
if (dsp_debug & DEBUG_DSP_BLOWFISH)
printk(KERN_DEBUG
"DSP BLOWFISH: received corrupt frame, "
"checksumme is not correct\n");
continue;
}
/* decrypt */
yr ^= P[17];
yl ^= P[16];
DROUND(yl, yr, 15);
DROUND(yr, yl, 14);
DROUND(yl, yr, 13);
DROUND(yr, yl, 12);
DROUND(yl, yr, 11);
DROUND(yr, yl, 10);
DROUND(yl, yr, 9);
DROUND(yr, yl, 8);
DROUND(yl, yr, 7);
DROUND(yr, yl, 6);
DROUND(yl, yr, 5);
DROUND(yr, yl, 4);
DROUND(yl, yr, 3);
DROUND(yr, yl, 2);
DROUND(yl, yr, 1);
DROUND(yr, yl, 0);
/* transcode 8 crypted bytes to 9 sample bytes */
bf_data_out[0] = dsp_audio_seven2law[(yl>>25) & 0x7f];
bf_data_out[1] = dsp_audio_seven2law[(yl>>18) & 0x7f];
bf_data_out[2] = dsp_audio_seven2law[(yl>>11) & 0x7f];
bf_data_out[3] = dsp_audio_seven2law[(yl>>4) & 0x7f];
bf_data_out[4] = dsp_audio_seven2law[((yl<<3) & 0x78) |
((yr>>29) & 0x07)];
bf_data_out[5] = dsp_audio_seven2law[(yr>>22) & 0x7f];
bf_data_out[6] = dsp_audio_seven2law[(yr>>15) & 0x7f];
bf_data_out[7] = dsp_audio_seven2law[(yr>>8) & 0x7f];
bf_data_out[8] = dsp_audio_seven2law[(yr>>1) & 0x7f];
k = 0; /* start with new decoded frame */
}
/* write current count and sync */
dsp->bf_decrypt_in_pos = j;
dsp->bf_decrypt_out_pos = k;
dsp->bf_sync = sync;
}
/* used to encrypt S and P boxes */
static inline void
encrypt_block(const u32 *P, const u32 *S, u32 *dst, u32 *src)
{
u32 yl = src[0];
u32 yr = src[1];
EROUND(yr, yl, 0);
EROUND(yl, yr, 1);
EROUND(yr, yl, 2);
EROUND(yl, yr, 3);
EROUND(yr, yl, 4);
EROUND(yl, yr, 5);
EROUND(yr, yl, 6);
EROUND(yl, yr, 7);
EROUND(yr, yl, 8);
EROUND(yl, yr, 9);
EROUND(yr, yl, 10);
EROUND(yl, yr, 11);
EROUND(yr, yl, 12);
EROUND(yl, yr, 13);
EROUND(yr, yl, 14);
EROUND(yl, yr, 15);
yl ^= P[16];
yr ^= P[17];
dst[0] = yr;
dst[1] = yl;
}
/*
* initialize the dsp for encryption and decryption using the same key
* Calculates the blowfish S and P boxes for encryption and decryption.
* keylen must be in the range of 4-56 bytes.
* returns 0 if ok.
*/
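/*
 * hypothetical call site, for illustration only (the error handling and
 * surrounding context are assumptions, not taken from this driver):
 *
 *	if (dsp_bf_init(dsp, key, keylen))
 *		printk(KERN_DEBUG "blowfish: keylen must be 4..56 bytes\n");
 */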
int
dsp_bf_init(struct dsp *dsp, const u8 *key, uint keylen)
{
short i, j, count;
u32 data[2], temp;
u32 *P = (u32 *)dsp->bf_p;
u32 *S = (u32 *)dsp->bf_s;
if (keylen < 4 || keylen > 56)
return 1;
/* Set dsp states */
i = 0;
while (i < 9) {
dsp->bf_crypt_out[i] = 0xff;
dsp->bf_data_out[i] = dsp_silence;
i++;
}
dsp->bf_crypt_pos = 0;
dsp->bf_decrypt_in_pos = 0;
dsp->bf_decrypt_out_pos = 0;
dsp->bf_sync = 0x1ff;
dsp->bf_enable = 1;
/* Copy the initialization s-boxes */
for (i = 0, count = 0; i < 256; i++)
for (j = 0; j < 4; j++, count++)
S[count] = bf_sbox[count];
/* Set the p-boxes */
for (i = 0; i < 16 + 2; i++)
P[i] = bf_pbox[i];
/* Actual subkey generation */
for (j = 0, i = 0; i < 16 + 2; i++) {
temp = (((u32)key[j] << 24) |
((u32)key[(j + 1) % keylen] << 16) |
((u32)key[(j + 2) % keylen] << 8) |
((u32)key[(j + 3) % keylen]));
P[i] = P[i] ^ temp;
j = (j + 4) % keylen;
}
data[0] = 0x00000000;
data[1] = 0x00000000;
for (i = 0; i < 16 + 2; i += 2) {
encrypt_block(P, S, data, data);
P[i] = data[0];
P[i + 1] = data[1];
}
for (i = 0; i < 4; i++) {
for (j = 0, count = i * 256; j < 256; j += 2, count += 2) {
encrypt_block(P, S, data, data);
S[count] = data[0];
S[count + 1] = data[1];
}
}
return 0;
}
/*
* turn encryption off
*/
void
dsp_bf_cleanup(struct dsp *dsp)
{
dsp->bf_enable = 0;
}
| gpl-2.0 |
motley-git/kernel-Nexus4 | drivers/ssb/driver_chipcommon.c | 5330 | 14836 | /*
* Sonics Silicon Backplane
* Broadcom ChipCommon core driver
*
* Copyright 2005, Broadcom Corporation
* Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/export.h>
#include <linux/pci.h>
#include "ssb_private.h"
/* Clock sources */
enum ssb_clksrc {
/* PCI clock */
SSB_CHIPCO_CLKSRC_PCI,
/* Crystal slow clock oscillator */
SSB_CHIPCO_CLKSRC_XTALOS,
/* Low power oscillator */
SSB_CHIPCO_CLKSRC_LOPWROS,
};
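/* read-modify-write helper: only the bits selected by "mask" are
 * replaced with "value"; e.g. ssb_chipco_gpio_out() below uses it to
 * update individual GPIO output bits without disturbing the others. */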
static inline u32 chipco_write32_masked(struct ssb_chipcommon *cc, u16 offset,
u32 mask, u32 value)
{
value &= mask;
value |= chipco_read32(cc, offset) & ~mask;
chipco_write32(cc, offset, value);
return value;
}
void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
enum ssb_clkmode mode)
{
struct ssb_device *ccdev = cc->dev;
struct ssb_bus *bus;
u32 tmp;
if (!ccdev)
return;
bus = ccdev->bus;
/* We support SLOW only on 6..9 */
if (ccdev->id.revision >= 10 && mode == SSB_CLKMODE_SLOW)
mode = SSB_CLKMODE_DYNAMIC;
if (cc->capabilities & SSB_CHIPCO_CAP_PMU)
return; /* PMU controls clockmode; a separate function is needed */
SSB_WARN_ON(ccdev->id.revision >= 20);
/* chipcommon cores prior to rev6 don't support dynamic clock control */
if (ccdev->id.revision < 6)
return;
/* ChipCommon cores rev10+ need testing */
if (ccdev->id.revision >= 10)
return;
if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
return;
switch (mode) {
case SSB_CLKMODE_SLOW: /* For revs 6..9 only */
tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
tmp |= SSB_CHIPCO_SLOWCLKCTL_FSLOW;
chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
break;
case SSB_CLKMODE_FAST:
if (ccdev->id.revision < 10) {
ssb_pci_xtal(bus, SSB_GPIO_XTAL, 1); /* Force crystal on */
tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW;
tmp |= SSB_CHIPCO_SLOWCLKCTL_IPLL;
chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
} else {
chipco_write32(cc, SSB_CHIPCO_SYSCLKCTL,
(chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL) |
SSB_CHIPCO_SYSCLKCTL_FORCEHT));
/* udelay(150); TODO: not available in early init */
}
break;
case SSB_CLKMODE_DYNAMIC:
if (ccdev->id.revision < 10) {
tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW;
tmp &= ~SSB_CHIPCO_SLOWCLKCTL_IPLL;
tmp &= ~SSB_CHIPCO_SLOWCLKCTL_ENXTAL;
if ((tmp & SSB_CHIPCO_SLOWCLKCTL_SRC) !=
SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL)
tmp |= SSB_CHIPCO_SLOWCLKCTL_ENXTAL;
chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
/* For dynamic control, we have to release our xtal_pu
* "force on" */
if (tmp & SSB_CHIPCO_SLOWCLKCTL_ENXTAL)
ssb_pci_xtal(bus, SSB_GPIO_XTAL, 0);
} else {
chipco_write32(cc, SSB_CHIPCO_SYSCLKCTL,
(chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL) &
~SSB_CHIPCO_SYSCLKCTL_FORCEHT));
}
break;
default:
SSB_WARN_ON(1);
}
}
/* Get the Slow Clock Source */
static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
u32 uninitialized_var(tmp);
if (cc->dev->id.revision < 6) {
if (bus->bustype == SSB_BUSTYPE_SSB ||
bus->bustype == SSB_BUSTYPE_PCMCIA)
return SSB_CHIPCO_CLKSRC_XTALOS;
if (bus->bustype == SSB_BUSTYPE_PCI) {
pci_read_config_dword(bus->host_pci, SSB_GPIO_OUT, &tmp);
if (tmp & 0x10)
return SSB_CHIPCO_CLKSRC_PCI;
return SSB_CHIPCO_CLKSRC_XTALOS;
}
}
if (cc->dev->id.revision < 10) {
tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
tmp &= 0x7;
if (tmp == 0)
return SSB_CHIPCO_CLKSRC_LOPWROS;
if (tmp == 1)
return SSB_CHIPCO_CLKSRC_XTALOS;
if (tmp == 2)
return SSB_CHIPCO_CLKSRC_PCI;
}
return SSB_CHIPCO_CLKSRC_XTALOS;
}
/* Get maximum or minimum (depending on get_max flag) slowclock frequency. */
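/* worked example (illustrative): a rev-10+ core clocked from the crystal
 * oscillator with SYSCLKCTL[31:16] == 0 gets divisor (0 + 1) * 4 = 4, so
 * the maximum limit is 20200000 / 4 = 5050000 Hz. */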
static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max)
{
int uninitialized_var(limit);
enum ssb_clksrc clocksrc;
int divisor = 1;
u32 tmp;
clocksrc = chipco_pctl_get_slowclksrc(cc);
if (cc->dev->id.revision < 6) {
switch (clocksrc) {
case SSB_CHIPCO_CLKSRC_PCI:
divisor = 64;
break;
case SSB_CHIPCO_CLKSRC_XTALOS:
divisor = 32;
break;
default:
SSB_WARN_ON(1);
}
} else if (cc->dev->id.revision < 10) {
switch (clocksrc) {
case SSB_CHIPCO_CLKSRC_LOPWROS:
break;
case SSB_CHIPCO_CLKSRC_XTALOS:
case SSB_CHIPCO_CLKSRC_PCI:
tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
divisor = (tmp >> 16) + 1;
divisor *= 4;
break;
}
} else {
tmp = chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL);
divisor = (tmp >> 16) + 1;
divisor *= 4;
}
switch (clocksrc) {
case SSB_CHIPCO_CLKSRC_LOPWROS:
if (get_max)
limit = 43000;
else
limit = 25000;
break;
case SSB_CHIPCO_CLKSRC_XTALOS:
if (get_max)
limit = 20200000;
else
limit = 19800000;
break;
case SSB_CHIPCO_CLKSRC_PCI:
if (get_max)
limit = 34000000;
else
limit = 25000000;
break;
}
limit /= divisor;
return limit;
}
static void chipco_powercontrol_init(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
if (bus->chip_id == 0x4321) {
if (bus->chip_rev == 0)
chipco_write32(cc, SSB_CHIPCO_CHIPCTL, 0x3A4);
else if (bus->chip_rev == 1)
chipco_write32(cc, SSB_CHIPCO_CHIPCTL, 0xA4);
}
if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
return;
if (cc->dev->id.revision >= 10) {
/* Set Idle Power clock rate to 1Mhz */
chipco_write32(cc, SSB_CHIPCO_SYSCLKCTL,
(chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL) &
0x0000FFFF) | 0x00040000);
} else {
int maxfreq;
maxfreq = chipco_pctl_clockfreqlimit(cc, 1);
chipco_write32(cc, SSB_CHIPCO_PLLONDELAY,
(maxfreq * 150 + 999999) / 1000000);
chipco_write32(cc, SSB_CHIPCO_FREFSELDELAY,
(maxfreq * 15 + 999999) / 1000000);
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */
static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
switch (bus->chip_id) {
case 0x4312:
case 0x4322:
case 0x4328:
return 7000;
case 0x4325:
/* TODO: */
default:
return 15000;
}
}
/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */
static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
int minfreq;
unsigned int tmp;
u32 pll_on_delay;
if (bus->bustype != SSB_BUSTYPE_PCI)
return;
if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
cc->fast_pwrup_delay = pmu_fast_powerup_delay(cc);
return;
}
if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
return;
minfreq = chipco_pctl_clockfreqlimit(cc, 0);
pll_on_delay = chipco_read32(cc, SSB_CHIPCO_PLLONDELAY);
tmp = (((pll_on_delay + 2) * 1000000) + (minfreq - 1)) / minfreq;
SSB_WARN_ON(tmp & ~0xFFFF);
cc->fast_pwrup_delay = tmp;
}
void ssb_chipcommon_init(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return; /* We don't have a ChipCommon */
if (cc->dev->id.revision >= 11)
cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
if (cc->dev->id.revision >= 20) {
chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0);
chipco_write32(cc, SSB_CHIPCO_GPIOPULLDOWN, 0);
}
ssb_pmu_init(cc);
chipco_powercontrol_init(cc);
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
calc_fast_powerup_delay(cc);
}
void ssb_chipco_suspend(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return;
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_SLOW);
}
void ssb_chipco_resume(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return;
chipco_powercontrol_init(cc);
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
}
/* Get the processor clock */
void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc,
u32 *plltype, u32 *n, u32 *m)
{
*n = chipco_read32(cc, SSB_CHIPCO_CLOCK_N);
*plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT);
switch (*plltype) {
case SSB_PLLTYPE_2:
case SSB_PLLTYPE_4:
case SSB_PLLTYPE_6:
case SSB_PLLTYPE_7:
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_MIPS);
break;
case SSB_PLLTYPE_3:
/* 5350 uses m2 to control mips */
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2);
break;
default:
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB);
break;
}
}
/* Get the bus clock */
void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc,
u32 *plltype, u32 *n, u32 *m)
{
*n = chipco_read32(cc, SSB_CHIPCO_CLOCK_N);
*plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT);
switch (*plltype) {
case SSB_PLLTYPE_6: /* 100/200 or 120/240 only */
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_MIPS);
break;
case SSB_PLLTYPE_3: /* 25 MHz, 2 dividers */
if (cc->dev->bus->chip_id != 0x5365) {
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2);
break;
}
/* Fallthrough */
default:
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB);
}
}
void ssb_chipco_timing_init(struct ssb_chipcommon *cc,
unsigned long ns)
{
struct ssb_device *dev = cc->dev;
struct ssb_bus *bus = dev->bus;
u32 tmp;
/* set register for external IO to control LED. */
chipco_write32(cc, SSB_CHIPCO_PROG_CFG, 0x11);
tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; /* Waitcount-3 = 10ns */
tmp |= DIV_ROUND_UP(40, ns) << SSB_PROG_WCNT_1_SHIFT; /* Waitcount-1 = 40ns */
tmp |= DIV_ROUND_UP(240, ns); /* Waitcount-0 = 240ns */
chipco_write32(cc, SSB_CHIPCO_PROG_WAITCNT, tmp); /* 0x01020a0c for a 100 MHz clock */
/* Set timing for the flash */
tmp = DIV_ROUND_UP(10, ns) << SSB_FLASH_WCNT_3_SHIFT; /* Waitcount-3 = 10ns */
tmp |= DIV_ROUND_UP(10, ns) << SSB_FLASH_WCNT_1_SHIFT; /* Waitcount-1 = 10ns */
tmp |= DIV_ROUND_UP(120, ns); /* Waitcount-0 = 120ns */
if ((bus->chip_id == 0x5365) ||
(dev->id.revision < 9))
chipco_write32(cc, SSB_CHIPCO_FLASH_WAITCNT, tmp);
if ((bus->chip_id == 0x5365) ||
(dev->id.revision < 9) ||
((bus->chip_id == 0x5350) && (bus->chip_rev == 0)))
chipco_write32(cc, SSB_CHIPCO_PCMCIA_MEMWAIT, tmp);
if (bus->chip_id == 0x5350) {
/* Enable EXTIF */
tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; /* Waitcount-3 = 10ns */
tmp |= DIV_ROUND_UP(20, ns) << SSB_PROG_WCNT_2_SHIFT; /* Waitcount-2 = 20ns */
tmp |= DIV_ROUND_UP(100, ns) << SSB_PROG_WCNT_1_SHIFT; /* Waitcount-1 = 100ns */
tmp |= DIV_ROUND_UP(120, ns); /* Waitcount-0 = 120ns */
chipco_write32(cc, SSB_CHIPCO_PROG_WAITCNT, tmp); /* 0x01020a0c for a 100 MHz clock */
}
}
/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, u32 ticks)
{
/* instant NMI */
chipco_write32(cc, SSB_CHIPCO_WATCHDOG, ticks);
}
void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
chipco_write32_masked(cc, SSB_CHIPCO_IRQMASK, mask, value);
}
u32 ssb_chipco_irq_status(struct ssb_chipcommon *cc, u32 mask)
{
return chipco_read32(cc, SSB_CHIPCO_IRQSTAT) & mask;
}
u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask)
{
return chipco_read32(cc, SSB_CHIPCO_GPIOIN) & mask;
}
u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value);
}
u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value);
}
u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
}
EXPORT_SYMBOL(ssb_chipco_gpio_control);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value);
}
u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value);
}
#ifdef CONFIG_SSB_SERIAL
int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
struct ssb_serial_port *ports)
{
struct ssb_bus *bus = cc->dev->bus;
int nr_ports = 0;
u32 plltype;
unsigned int irq;
u32 baud_base, div;
u32 i, n;
unsigned int ccrev = cc->dev->id.revision;
plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT);
irq = ssb_mips_irq(cc->dev);
if (plltype == SSB_PLLTYPE_1) {
/* PLL clock */
baud_base = ssb_calc_clock_rate(plltype,
chipco_read32(cc, SSB_CHIPCO_CLOCK_N),
chipco_read32(cc, SSB_CHIPCO_CLOCK_M2));
div = 1;
} else {
if (ccrev == 20) {
/* BCM5354 uses constant 25MHz clock */
baud_base = 25000000;
div = 48;
/* Set the override bit so we don't divide it */
chipco_write32(cc, SSB_CHIPCO_CORECTL,
chipco_read32(cc, SSB_CHIPCO_CORECTL)
| SSB_CHIPCO_CORECTL_UARTCLK0);
} else if ((ccrev >= 11) && (ccrev != 15)) {
/* Fixed ALP clock */
baud_base = 20000000;
if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
/* FIXME: baud_base is different for devices with a PMU */
SSB_WARN_ON(1);
}
div = 1;
if (ccrev >= 21) {
/* Turn off UART clock before switching clocksource. */
chipco_write32(cc, SSB_CHIPCO_CORECTL,
chipco_read32(cc, SSB_CHIPCO_CORECTL)
& ~SSB_CHIPCO_CORECTL_UARTCLKEN);
}
/* Set the override bit so we don't divide it */
chipco_write32(cc, SSB_CHIPCO_CORECTL,
chipco_read32(cc, SSB_CHIPCO_CORECTL)
| SSB_CHIPCO_CORECTL_UARTCLK0);
if (ccrev >= 21) {
/* Re-enable the UART clock. */
chipco_write32(cc, SSB_CHIPCO_CORECTL,
chipco_read32(cc, SSB_CHIPCO_CORECTL)
| SSB_CHIPCO_CORECTL_UARTCLKEN);
}
} else if (ccrev >= 3) {
/* Internal backplane clock */
baud_base = ssb_clockspeed(bus);
div = chipco_read32(cc, SSB_CHIPCO_CLKDIV)
& SSB_CHIPCO_CLKDIV_UART;
} else {
/* Fixed internal backplane clock */
baud_base = 88000000;
div = 48;
}
/* Clock source depends on strapping if UartClkOverride is unset */
if ((ccrev > 0) &&
!(chipco_read32(cc, SSB_CHIPCO_CORECTL) & SSB_CHIPCO_CORECTL_UARTCLK0)) {
if ((cc->capabilities & SSB_CHIPCO_CAP_UARTCLK) ==
SSB_CHIPCO_CAP_UARTCLK_INT) {
/* Internal divided backplane clock */
baud_base /= div;
} else {
/* Assume external clock of 1.8432 MHz */
baud_base = 1843200;
}
}
}
/* Determine the registers of the UARTs */
n = (cc->capabilities & SSB_CHIPCO_CAP_NRUART);
for (i = 0; i < n; i++) {
void __iomem *cc_mmio;
void __iomem *uart_regs;
cc_mmio = cc->dev->bus->mmio + (cc->dev->core_index * SSB_CORE_SIZE);
uart_regs = cc_mmio + SSB_CHIPCO_UART0_DATA;
/* Offset changed after rev 0 */
if (ccrev == 0)
uart_regs += (i * 8);
else
uart_regs += (i * 256);
nr_ports++;
ports[i].regs = uart_regs;
ports[i].irq = irq;
ports[i].baud_base = baud_base;
ports[i].reg_shift = 0;
}
return nr_ports;
}
#endif /* CONFIG_SSB_SERIAL */
| gpl-2.0 |
KFire-Android/kernel_omap_bowser-common | ipc/msg.c | 7122 | 21364 | /*
* linux/ipc/msg.c
* Copyright (C) 1992 Krishna Balasubramanian
*
* Removed all the remaining kerneld mess
* Catch the -EFAULT stuff properly
* Use GFP_KERNEL for messages as in 1.2
* Fixed up the unchecked user space derefs
* Copyright (C) 1998 Alan Cox & Andi Kleen
*
* /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
*
* mostly rewritten, threaded and wake-one semantics added
* MSGMAX limit removed, sysctl's added
* (c) 1999 Manfred Spraul <manfred@colorfullife.com>
*
* support for audit of ipc object properties and permission changes
* Dustin Kirkland <dustin.kirkland@us.ibm.com>
*
* namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
*/
#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
/*
* one msg_receiver structure for each sleeping receiver:
*/
struct msg_receiver {
struct list_head r_list;
struct task_struct *r_tsk;
int r_mode;
long r_msgtype;
long r_maxsize;
struct msg_msg *volatile r_msg;
};
/* one msg_sender for each sleeping sender */
struct msg_sender {
struct list_head list;
struct task_struct *tsk;
};
#define SEARCH_ANY 1
#define SEARCH_EQUAL 2
#define SEARCH_NOTEQUAL 3
#define SEARCH_LESSEQUAL 4
#define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS])
#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif
/*
* Scale msgmni with the available lowmem size: the memory dedicated to msg
* queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
* Also take into account the number of nsproxies created so far.
* This should be done staying within the (MSGMNI, IPCMNI/nr_ipc_ns) range.
*/
void recompute_msgmni(struct ipc_namespace *ns)
{
struct sysinfo i;
unsigned long allowed;
int nb_ns;
si_meminfo(&i);
allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
/ MSGMNB;
nb_ns = atomic_read(&nr_ipc_ns);
allowed /= nb_ns;
if (allowed < MSGMNI) {
ns->msg_ctlmni = MSGMNI;
return;
}
if (allowed > IPCMNI / nb_ns) {
ns->msg_ctlmni = IPCMNI / nb_ns;
return;
}
ns->msg_ctlmni = allowed;
}
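/*
 * Editor's note, illustrative only: the clamping above means that if the
 * lowmem-based formula yields fewer queue ids than MSGMNI, msg_ctlmni is
 * raised to MSGMNI, and if it yields more than IPCMNI / nr_ipc_ns it is
 * capped at that value; only in between is the computed "allowed" count
 * used directly. For example (hypothetical numbers), allowed = 12 with
 * MSGMNI = 32 still gives the namespace 32 message queue ids.
 */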
void msg_init_ns(struct ipc_namespace *ns)
{
ns->msg_ctlmax = MSGMAX;
ns->msg_ctlmnb = MSGMNB;
recompute_msgmni(ns);
atomic_set(&ns->msg_bytes, 0);
atomic_set(&ns->msg_hdrs, 0);
ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}
#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &msg_ids(ns), freeque);
idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif
void __init msg_init(void)
{
msg_init_ns(&init_ipc_ns);
printk(KERN_INFO "msgmni has been set to %d\n",
init_ipc_ns.msg_ctlmni);
ipc_init_proc_interface("sysvipc/msg",
" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
IPC_MSG_IDS, sysvipc_msg_proc_show);
}
/*
* msg_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);
if (IS_ERR(ipcp))
return (struct msg_queue *)ipcp;
return container_of(ipcp, struct msg_queue, q_perm);
}
static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
int id)
{
struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);
if (IS_ERR(ipcp))
return (struct msg_queue *)ipcp;
return container_of(ipcp, struct msg_queue, q_perm);
}
static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
ipc_rmid(&msg_ids(ns), &s->q_perm);
}
/**
* newque - Create a new msg queue
* @ns: namespace
* @params: ptr to the structure that contains the key and msgflg
*
* Called with msg_ids.rw_mutex held (writer)
*/
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
struct msg_queue *msq;
int id, retval;
key_t key = params->key;
int msgflg = params->flg;
msq = ipc_rcu_alloc(sizeof(*msq));
if (!msq)
return -ENOMEM;
msq->q_perm.mode = msgflg & S_IRWXUGO;
msq->q_perm.key = key;
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(msq);
if (retval) {
ipc_rcu_putref(msq);
return retval;
}
/*
* ipc_addid() locks msq
*/
id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
if (id < 0) {
security_msg_queue_free(msq);
ipc_rcu_putref(msq);
return id;
}
msq->q_stime = msq->q_rtime = 0;
msq->q_ctime = get_seconds();
msq->q_cbytes = msq->q_qnum = 0;
msq->q_qbytes = ns->msg_ctlmnb;
msq->q_lspid = msq->q_lrpid = 0;
INIT_LIST_HEAD(&msq->q_messages);
INIT_LIST_HEAD(&msq->q_receivers);
INIT_LIST_HEAD(&msq->q_senders);
msg_unlock(msq);
return msq->q_perm.id;
}
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
mss->tsk = current;
current->state = TASK_INTERRUPTIBLE;
list_add_tail(&mss->list, &msq->q_senders);
}
static inline void ss_del(struct msg_sender *mss)
{
if (mss->list.next != NULL)
list_del(&mss->list);
}
static void ss_wakeup(struct list_head *h, int kill)
{
struct list_head *tmp;
tmp = h->next;
while (tmp != h) {
struct msg_sender *mss;
mss = list_entry(tmp, struct msg_sender, list);
tmp = tmp->next;
if (kill)
mss->list.next = NULL;
wake_up_process(mss->tsk);
}
}
static void expunge_all(struct msg_queue *msq, int res)
{
struct list_head *tmp;
tmp = msq->q_receivers.next;
while (tmp != &msq->q_receivers) {
struct msg_receiver *msr;
msr = list_entry(tmp, struct msg_receiver, r_list);
tmp = tmp->next;
msr->r_msg = NULL;
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = ERR_PTR(res);
}
}
/*
* freeque() wakes up waiters on the sender and receiver waiting queue,
* removes the message queue from message queue ID IDR, and cleans up all the
* messages associated with this queue.
*
* msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
* before freeque() is called. msg_ids.rw_mutex remains locked on exit.
*/
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
struct list_head *tmp;
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
expunge_all(msq, -EIDRM);
ss_wakeup(&msq->q_senders, 1);
msg_rmid(ns, msq);
msg_unlock(msq);
tmp = msq->q_messages.next;
while (tmp != &msq->q_messages) {
struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);
tmp = tmp->next;
atomic_dec(&ns->msg_hdrs);
free_msg(msg);
}
atomic_sub(msq->q_cbytes, &ns->msg_bytes);
security_msg_queue_free(msq);
ipc_rcu_putref(msq);
}
/*
* Called with msg_ids.rw_mutex and ipcp locked.
*/
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
return security_msg_queue_associate(msq, msgflg);
}
SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
struct ipc_namespace *ns;
struct ipc_ops msg_ops;
struct ipc_params msg_params;
ns = current->nsproxy->ipc_ns;
msg_ops.getnew = newque;
msg_ops.associate = msg_security;
msg_ops.more_checks = NULL;
msg_params.key = key;
msg_params.flg = msgflg;
return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
switch(version) {
case IPC_64:
return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD:
{
struct msqid_ds out;
memset(&out, 0, sizeof(out));
ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);
out.msg_stime = in->msg_stime;
out.msg_rtime = in->msg_rtime;
out.msg_ctime = in->msg_ctime;
if (in->msg_cbytes > USHRT_MAX)
out.msg_cbytes = USHRT_MAX;
else
out.msg_cbytes = in->msg_cbytes;
out.msg_lcbytes = in->msg_cbytes;
if (in->msg_qnum > USHRT_MAX)
out.msg_qnum = USHRT_MAX;
else
out.msg_qnum = in->msg_qnum;
if (in->msg_qbytes > USHRT_MAX)
out.msg_qbytes = USHRT_MAX;
else
out.msg_qbytes = in->msg_qbytes;
out.msg_lqbytes = in->msg_qbytes;
out.msg_lspid = in->msg_lspid;
out.msg_lrpid = in->msg_lrpid;
return copy_to_user(buf, &out, sizeof(out));
}
default:
return -EINVAL;
}
}
static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
switch(version) {
case IPC_64:
if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT;
return 0;
case IPC_OLD:
{
struct msqid_ds tbuf_old;
if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
return -EFAULT;
out->msg_perm.uid = tbuf_old.msg_perm.uid;
out->msg_perm.gid = tbuf_old.msg_perm.gid;
out->msg_perm.mode = tbuf_old.msg_perm.mode;
if (tbuf_old.msg_qbytes == 0)
out->msg_qbytes = tbuf_old.msg_lqbytes;
else
out->msg_qbytes = tbuf_old.msg_qbytes;
return 0;
}
default:
return -EINVAL;
}
}
/*
* This function handles some msgctl commands which require the rw_mutex
* to be held in write mode.
* NOTE: no locks must be held, the rw_mutex is taken inside this function.
*/
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
struct msqid_ds __user *buf, int version)
{
struct kern_ipc_perm *ipcp;
struct msqid64_ds uninitialized_var(msqid64);
struct msg_queue *msq;
int err;
if (cmd == IPC_SET) {
if (copy_msqid_from_user(&msqid64, buf, version))
return -EFAULT;
}
ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd,
&msqid64.msg_perm, msqid64.msg_qbytes);
if (IS_ERR(ipcp))
return PTR_ERR(ipcp);
msq = container_of(ipcp, struct msg_queue, q_perm);
err = security_msg_queue_msgctl(msq, cmd);
if (err)
goto out_unlock;
switch (cmd) {
case IPC_RMID:
freeque(ns, ipcp);
goto out_up;
case IPC_SET:
if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
!capable(CAP_SYS_RESOURCE)) {
err = -EPERM;
goto out_unlock;
}
msq->q_qbytes = msqid64.msg_qbytes;
ipc_update_perm(&msqid64.msg_perm, ipcp);
msq->q_ctime = get_seconds();
/* sleeping receivers might be excluded by
* stricter permissions.
*/
expunge_all(msq, -EAGAIN);
/* sleeping senders might be able to send
* due to a larger queue size.
*/
ss_wakeup(&msq->q_senders, 0);
break;
default:
err = -EINVAL;
}
out_unlock:
msg_unlock(msq);
out_up:
up_write(&msg_ids(ns).rw_mutex);
return err;
}
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
struct msg_queue *msq;
int err, version;
struct ipc_namespace *ns;
if (msqid < 0 || cmd < 0)
return -EINVAL;
version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
case IPC_INFO:
case MSG_INFO:
{
struct msginfo msginfo;
int max_id;
if (!buf)
return -EFAULT;
/*
* We must not return kernel stack data:
* due to padding, it is not enough
* to set all member fields.
*/
err = security_msg_queue_msgctl(NULL, cmd);
if (err)
return err;
memset(&msginfo, 0, sizeof(msginfo));
msginfo.msgmni = ns->msg_ctlmni;
msginfo.msgmax = ns->msg_ctlmax;
msginfo.msgmnb = ns->msg_ctlmnb;
msginfo.msgssz = MSGSSZ;
msginfo.msgseg = MSGSEG;
down_read(&msg_ids(ns).rw_mutex);
if (cmd == MSG_INFO) {
msginfo.msgpool = msg_ids(ns).in_use;
msginfo.msgmap = atomic_read(&ns->msg_hdrs);
msginfo.msgtql = atomic_read(&ns->msg_bytes);
} else {
msginfo.msgmap = MSGMAP;
msginfo.msgpool = MSGPOOL;
msginfo.msgtql = MSGTQL;
}
max_id = ipc_get_maxid(&msg_ids(ns));
up_read(&msg_ids(ns).rw_mutex);
if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
return -EFAULT;
return (max_id < 0) ? 0 : max_id;
}
case MSG_STAT: /* msqid is an index rather than a msg queue id */
case IPC_STAT:
{
struct msqid64_ds tbuf;
int success_return;
if (!buf)
return -EFAULT;
if (cmd == MSG_STAT) {
msq = msg_lock(ns, msqid);
if (IS_ERR(msq))
return PTR_ERR(msq);
success_return = msq->q_perm.id;
} else {
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq))
return PTR_ERR(msq);
success_return = 0;
}
err = -EACCES;
if (ipcperms(ns, &msq->q_perm, S_IRUGO))
goto out_unlock;
err = security_msg_queue_msgctl(msq, cmd);
if (err)
goto out_unlock;
memset(&tbuf, 0, sizeof(tbuf));
kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
tbuf.msg_stime = msq->q_stime;
tbuf.msg_rtime = msq->q_rtime;
tbuf.msg_ctime = msq->q_ctime;
tbuf.msg_cbytes = msq->q_cbytes;
tbuf.msg_qnum = msq->q_qnum;
tbuf.msg_qbytes = msq->q_qbytes;
tbuf.msg_lspid = msq->q_lspid;
tbuf.msg_lrpid = msq->q_lrpid;
msg_unlock(msq);
if (copy_msqid_to_user(buf, &tbuf, version))
return -EFAULT;
return success_return;
}
case IPC_SET:
case IPC_RMID:
err = msgctl_down(ns, msqid, cmd, buf, version);
return err;
default:
return -EINVAL;
}
out_unlock:
msg_unlock(msq);
return err;
}
static int testmsg(struct msg_msg *msg, long type, int mode)
{
switch(mode)
{
case SEARCH_ANY:
return 1;
case SEARCH_LESSEQUAL:
if (msg->m_type <=type)
return 1;
break;
case SEARCH_EQUAL:
if (msg->m_type == type)
return 1;
break;
case SEARCH_NOTEQUAL:
if (msg->m_type != type)
return 1;
break;
}
return 0;
}
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
struct list_head *tmp;
tmp = msq->q_receivers.next;
while (tmp != &msq->q_receivers) {
struct msg_receiver *msr;
msr = list_entry(tmp, struct msg_receiver, r_list);
tmp = tmp->next;
if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
!security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
msr->r_msgtype, msr->r_mode)) {
list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) {
msr->r_msg = NULL;
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = ERR_PTR(-E2BIG);
} else {
msr->r_msg = NULL;
msq->q_lrpid = task_pid_vnr(msr->r_tsk);
msq->q_rtime = get_seconds();
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = msg;
return 1;
}
}
}
return 0;
}
long do_msgsnd(int msqid, long mtype, void __user *mtext,
size_t msgsz, int msgflg)
{
struct msg_queue *msq;
struct msg_msg *msg;
int err;
struct ipc_namespace *ns;
ns = current->nsproxy->ipc_ns;
if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
return -EINVAL;
if (mtype < 1)
return -EINVAL;
msg = load_msg(mtext, msgsz);
if (IS_ERR(msg))
return PTR_ERR(msg);
msg->m_type = mtype;
msg->m_ts = msgsz;
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq)) {
err = PTR_ERR(msq);
goto out_free;
}
for (;;) {
struct msg_sender s;
err = -EACCES;
if (ipcperms(ns, &msq->q_perm, S_IWUGO))
goto out_unlock_free;
err = security_msg_queue_msgsnd(msq, msg, msgflg);
if (err)
goto out_unlock_free;
if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
1 + msq->q_qnum <= msq->q_qbytes) {
break;
}
/* queue full, wait: */
if (msgflg & IPC_NOWAIT) {
err = -EAGAIN;
goto out_unlock_free;
}
ss_add(msq, &s);
ipc_rcu_getref(msq);
msg_unlock(msq);
schedule();
ipc_lock_by_ptr(&msq->q_perm);
ipc_rcu_putref(msq);
if (msq->q_perm.deleted) {
err = -EIDRM;
goto out_unlock_free;
}
ss_del(&s);
if (signal_pending(current)) {
err = -ERESTARTNOHAND;
goto out_unlock_free;
}
}
msq->q_lspid = task_tgid_vnr(current);
msq->q_stime = get_seconds();
if (!pipelined_send(msq, msg)) {
/* no one is waiting for this message, enqueue it */
list_add_tail(&msg->m_list, &msq->q_messages);
msq->q_cbytes += msgsz;
msq->q_qnum++;
atomic_add(msgsz, &ns->msg_bytes);
atomic_inc(&ns->msg_hdrs);
}
err = 0;
msg = NULL;
out_unlock_free:
msg_unlock(msq);
out_free:
if (msg != NULL)
free_msg(msg);
return err;
}
SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
int, msgflg)
{
long mtype;
if (get_user(mtype, &msgp->mtype))
return -EFAULT;
return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
static inline int convert_mode(long *msgtyp, int msgflg)
{
/*
* find message of correct type.
* msgtyp = 0 => get first.
* msgtyp > 0 => get first message of matching type.
* msgtyp < 0 => get message with the lowest type that is <= abs(msgtyp).
*/
if (*msgtyp == 0)
return SEARCH_ANY;
if (*msgtyp < 0) {
*msgtyp = -*msgtyp;
return SEARCH_LESSEQUAL;
}
if (msgflg & MSG_EXCEPT)
return SEARCH_NOTEQUAL;
return SEARCH_EQUAL;
}
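/*
 * Editor's note - illustrative userspace sketch of the msgtyp semantics
 * documented above (qid and buf are hypothetical; flags as seen through
 * msgrcv(2)):
 *
 *   msgrcv(qid, &buf, len, 0, 0);            first message in the queue
 *   msgrcv(qid, &buf, len, 5, 0);            first message of type 5
 *   msgrcv(qid, &buf, len, -5, 0);           message with lowest type <= 5
 *   msgrcv(qid, &buf, len, 5, MSG_EXCEPT);   first message of type != 5
 *
 * These map to SEARCH_ANY, SEARCH_EQUAL, SEARCH_LESSEQUAL and
 * SEARCH_NOTEQUAL respectively.
 */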
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
size_t msgsz, long msgtyp, int msgflg)
{
struct msg_queue *msq;
struct msg_msg *msg;
int mode;
struct ipc_namespace *ns;
if (msqid < 0 || (long) msgsz < 0)
return -EINVAL;
mode = convert_mode(&msgtyp, msgflg);
ns = current->nsproxy->ipc_ns;
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq))
return PTR_ERR(msq);
for (;;) {
struct msg_receiver msr_d;
struct list_head *tmp;
msg = ERR_PTR(-EACCES);
if (ipcperms(ns, &msq->q_perm, S_IRUGO))
goto out_unlock;
msg = ERR_PTR(-EAGAIN);
tmp = msq->q_messages.next;
while (tmp != &msq->q_messages) {
struct msg_msg *walk_msg;
walk_msg = list_entry(tmp, struct msg_msg, m_list);
if (testmsg(walk_msg, msgtyp, mode) &&
!security_msg_queue_msgrcv(msq, walk_msg, current,
msgtyp, mode)) {
msg = walk_msg;
if (mode == SEARCH_LESSEQUAL &&
walk_msg->m_type != 1) {
msg = walk_msg;
msgtyp = walk_msg->m_type - 1;
} else {
msg = walk_msg;
break;
}
}
tmp = tmp->next;
}
if (!IS_ERR(msg)) {
/*
* Found a suitable message.
* Unlink it from the queue.
*/
if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
msg = ERR_PTR(-E2BIG);
goto out_unlock;
}
list_del(&msg->m_list);
msq->q_qnum--;
msq->q_rtime = get_seconds();
msq->q_lrpid = task_tgid_vnr(current);
msq->q_cbytes -= msg->m_ts;
atomic_sub(msg->m_ts, &ns->msg_bytes);
atomic_dec(&ns->msg_hdrs);
ss_wakeup(&msq->q_senders, 0);
msg_unlock(msq);
break;
}
/* No message waiting. Wait for a message */
if (msgflg & IPC_NOWAIT) {
msg = ERR_PTR(-ENOMSG);
goto out_unlock;
}
list_add_tail(&msr_d.r_list, &msq->q_receivers);
msr_d.r_tsk = current;
msr_d.r_msgtype = msgtyp;
msr_d.r_mode = mode;
if (msgflg & MSG_NOERROR)
msr_d.r_maxsize = INT_MAX;
else
msr_d.r_maxsize = msgsz;
msr_d.r_msg = ERR_PTR(-EAGAIN);
current->state = TASK_INTERRUPTIBLE;
msg_unlock(msq);
schedule();
/* Lockless receive, part 1:
* Disable preemption. We don't hold a reference to the queue
* and getting a reference would defeat the idea of a lockless
* operation, thus the code relies on rcu to guarantee the
* existence of msq:
* Prior to destruction, expunge_all(-EIDRM) changes r_msg.
* Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
* rcu_read_lock() prevents preemption between reading r_msg
* and the spin_lock() inside ipc_lock_by_ptr().
*/
rcu_read_lock();
/* Lockless receive, part 2:
* Wait until pipelined_send or expunge_all are outside of
* wake_up_process(). There is a race with exit(), see
* ipc/mqueue.c for the details.
*/
msg = (struct msg_msg*)msr_d.r_msg;
while (msg == NULL) {
cpu_relax();
msg = (struct msg_msg *)msr_d.r_msg;
}
/* Lockless receive, part 3:
* If there is a message or an error then accept it without
* locking.
*/
if (msg != ERR_PTR(-EAGAIN)) {
rcu_read_unlock();
break;
}
/* Lockless receive, part 3:
* Acquire the queue spinlock.
*/
ipc_lock_by_ptr(&msq->q_perm);
rcu_read_unlock();
/* Lockless receive, part 4:
* Repeat test after acquiring the spinlock.
*/
msg = (struct msg_msg*)msr_d.r_msg;
if (msg != ERR_PTR(-EAGAIN))
goto out_unlock;
list_del(&msr_d.r_list);
if (signal_pending(current)) {
msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
msg_unlock(msq);
break;
}
}
if (IS_ERR(msg))
return PTR_ERR(msg);
msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
*pmtype = msg->m_type;
if (store_msg(mtext, msg, msgsz))
msgsz = -EFAULT;
free_msg(msg);
return msgsz;
}
SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
long, msgtyp, int, msgflg)
{
long err, mtype;
err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
if (err < 0)
goto out;
if (put_user(mtype, &msgp->mtype))
err = -EFAULT;
out:
return err;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
struct msg_queue *msq = it;
return seq_printf(s,
"%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
msq->q_perm.key,
msq->q_perm.id,
msq->q_perm.mode,
msq->q_cbytes,
msq->q_qnum,
msq->q_lspid,
msq->q_lrpid,
msq->q_perm.uid,
msq->q_perm.gid,
msq->q_perm.cuid,
msq->q_perm.cgid,
msq->q_stime,
msq->q_rtime,
msq->q_ctime);
}
#endif
| gpl-2.0 |
delapuente/codeaurora_kernel_msm | fs/jffs2/compr_rtime.c | 8402 | 2895 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by Arjan van de Ven <arjanv@redhat.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*
*
* Very simple lz77-ish encoder.
*
* Theory of operation: Both encoder and decoder have a list of "last
* occurrences" for every possible source-value; after sending the
* first source-byte, the second byte indicates the "run" length of
* matches.
*
* The algorithm is intended to only send "whole bytes", no bit-messing.
*
*/
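/*
 * Editor's note - worked example of the scheme above (not part of the
 * original file): compressing the four bytes "AAAA" emits the literal 'A'
 * followed by the run length 3, i.e. the two output bytes { 'A', 3 }.
 * Decompression writes the literal 'A' and then copies three more bytes
 * starting at the recorded "last occurrence" of 'A', reproducing "AAAA".
 * Since every literal byte is followed by one run-length byte, data that
 * never repeats grows in size, which is why _compress reports failure
 * when outpos >= pos.
 */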
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/jffs2.h>
#include "compr.h"
/* _compress returns the compressed size, -1 if bigger */
static int jffs2_rtime_compress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen)
{
short positions[256];
int outpos = 0;
int pos=0;
memset(positions,0,sizeof(positions));
while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
int backpos, runlen=0;
unsigned char value;
value = data_in[pos];
cpage_out[outpos++] = data_in[pos++];
backpos = positions[value];
positions[value]=pos;
while ((backpos < pos) && (pos < (*sourcelen)) &&
(data_in[pos]==data_in[backpos++]) && (runlen<255)) {
pos++;
runlen++;
}
cpage_out[outpos++] = runlen;
}
if (outpos >= pos) {
/* We failed */
return -1;
}
/* Tell the caller how much we managed to compress, and how much space it took */
*sourcelen = pos;
*dstlen = outpos;
return 0;
}
static int jffs2_rtime_decompress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t srclen, uint32_t destlen)
{
short positions[256];
int outpos = 0;
int pos=0;
memset(positions,0,sizeof(positions));
while (outpos<destlen) {
unsigned char value;
int backoffs;
int repeat;
value = data_in[pos++];
cpage_out[outpos++] = value; /* first the verbatim copied byte */
repeat = data_in[pos++];
backoffs = positions[value];
positions[value]=outpos;
if (repeat) {
if (backoffs + repeat >= outpos) {
while(repeat) {
cpage_out[outpos++] = cpage_out[backoffs++];
repeat--;
}
} else {
memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat);
outpos+=repeat;
}
}
}
return 0;
}
static struct jffs2_compressor jffs2_rtime_comp = {
.priority = JFFS2_RTIME_PRIORITY,
.name = "rtime",
.compr = JFFS2_COMPR_RTIME,
.compress = &jffs2_rtime_compress,
.decompress = &jffs2_rtime_decompress,
#ifdef JFFS2_RTIME_DISABLED
.disabled = 1,
#else
.disabled = 0,
#endif
};
int jffs2_rtime_init(void)
{
return jffs2_register_compressor(&jffs2_rtime_comp);
}
void jffs2_rtime_exit(void)
{
jffs2_unregister_compressor(&jffs2_rtime_comp);
}
| gpl-2.0 |
leadpoizon/android_kernel_moto_shamu | drivers/ide/ide-atapi.c | 8914 | 18109 | /*
* ATAPI support.
*/
#include <linux/kernel.h>
#include <linux/cdrom.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#define DRV_NAME "ide-atapi"
#define PFX DRV_NAME ": "
#ifdef DEBUG
#define debug_log(fmt, args...) \
printk(KERN_INFO "ide: " fmt, ## args)
#else
#define debug_log(fmt, args...) do {} while (0)
#endif
#define ATAPI_MIN_CDB_BYTES 12
static inline int dev_is_idecd(ide_drive_t *drive)
{
return drive->media == ide_cdrom || drive->media == ide_optical;
}
/*
* Check whether we can support a device,
* based on the ATAPI IDENTIFY command results.
*/
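/*
 * Editor's note: as decoded below, id[ATA_ID_CONFIG] is split into gcw[0]
 * (low byte) and gcw[1] (high byte), so the checks correspond to bits
 * 15:14 (protocol), 12:8 (device type), 7 (removable), 6:5 (DRQ type) and
 * 1:0 (packet size) of that identify word. This is only a restatement of
 * the masks and shifts used in the function and assumes the little-endian
 * byte order implied by the u16 cast.
 */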
int ide_check_atapi_device(ide_drive_t *drive, const char *s)
{
u16 *id = drive->id;
u8 gcw[2], protocol, device_type, removable, drq_type, packet_size;
*((u16 *)&gcw) = id[ATA_ID_CONFIG];
protocol = (gcw[1] & 0xC0) >> 6;
device_type = gcw[1] & 0x1F;
removable = (gcw[0] & 0x80) >> 7;
drq_type = (gcw[0] & 0x60) >> 5;
packet_size = gcw[0] & 0x03;
#ifdef CONFIG_PPC
/* kludge for Apple PowerBook internal zip */
if (drive->media == ide_floppy && device_type == 5 &&
!strstr((char *)&id[ATA_ID_PROD], "CD-ROM") &&
strstr((char *)&id[ATA_ID_PROD], "ZIP"))
device_type = 0;
#endif
if (protocol != 2)
printk(KERN_ERR "%s: %s: protocol (0x%02x) is not ATAPI\n",
s, drive->name, protocol);
else if ((drive->media == ide_floppy && device_type != 0) ||
(drive->media == ide_tape && device_type != 1))
printk(KERN_ERR "%s: %s: invalid device type (0x%02x)\n",
s, drive->name, device_type);
else if (removable == 0)
printk(KERN_ERR "%s: %s: the removable flag is not set\n",
s, drive->name);
else if (drive->media == ide_floppy && drq_type == 3)
printk(KERN_ERR "%s: %s: sorry, DRQ type (0x%02x) not "
"supported\n", s, drive->name, drq_type);
else if (packet_size != 0)
printk(KERN_ERR "%s: %s: packet size (0x%02x) is not 12 "
"bytes\n", s, drive->name, packet_size);
else
return 1;
return 0;
}
EXPORT_SYMBOL_GPL(ide_check_atapi_device);
void ide_init_pc(struct ide_atapi_pc *pc)
{
memset(pc, 0, sizeof(*pc));
}
EXPORT_SYMBOL_GPL(ide_init_pc);
/*
* Add a special packet command request to the tail of the request queue,
* and wait for it to be serviced.
*/
int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
struct ide_atapi_pc *pc, void *buf, unsigned int bufflen)
{
struct request *rq;
int error;
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->special = (char *)pc;
if (buf && bufflen) {
error = blk_rq_map_kern(drive->queue, rq, buf, bufflen,
GFP_NOIO);
if (error)
goto put_req;
}
memcpy(rq->cmd, pc->c, 12);
if (drive->media == ide_tape)
rq->cmd[13] = REQ_IDETAPE_PC1;
error = blk_execute_rq(drive->queue, disk, rq, 0);
put_req:
blk_put_request(rq);
return error;
}
EXPORT_SYMBOL_GPL(ide_queue_pc_tail);
int ide_do_test_unit_ready(ide_drive_t *drive, struct gendisk *disk)
{
struct ide_atapi_pc pc;
ide_init_pc(&pc);
pc.c[0] = TEST_UNIT_READY;
return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_do_test_unit_ready);
int ide_do_start_stop(ide_drive_t *drive, struct gendisk *disk, int start)
{
struct ide_atapi_pc pc;
ide_init_pc(&pc);
pc.c[0] = START_STOP;
pc.c[4] = start;
if (drive->media == ide_tape)
pc.flags |= PC_FLAG_WAIT_FOR_DSC;
return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_do_start_stop);
int ide_set_media_lock(ide_drive_t *drive, struct gendisk *disk, int on)
{
struct ide_atapi_pc pc;
if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
return 0;
ide_init_pc(&pc);
pc.c[0] = ALLOW_MEDIUM_REMOVAL;
pc.c[4] = on;
return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_set_media_lock);
void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc)
{
ide_init_pc(pc);
pc->c[0] = REQUEST_SENSE;
if (drive->media == ide_floppy) {
pc->c[4] = 255;
pc->req_xfer = 18;
} else {
pc->c[4] = 20;
pc->req_xfer = 20;
}
}
EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
void ide_prep_sense(ide_drive_t *drive, struct request *rq)
{
struct request_sense *sense = &drive->sense_data;
struct request *sense_rq = &drive->sense_rq;
unsigned int cmd_len, sense_len;
int err;
switch (drive->media) {
case ide_floppy:
cmd_len = 255;
sense_len = 18;
break;
case ide_tape:
cmd_len = 20;
sense_len = 20;
break;
default:
cmd_len = 18;
sense_len = 18;
}
BUG_ON(sense_len > sizeof(*sense));
if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed)
return;
memset(sense, 0, sizeof(*sense));
blk_rq_init(rq->q, sense_rq);
err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
GFP_NOIO);
if (unlikely(err)) {
if (printk_ratelimit())
printk(KERN_WARNING PFX "%s: failed to map sense "
"buffer\n", drive->name);
return;
}
sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
sense_rq->cmd[4] = cmd_len;
sense_rq->cmd_type = REQ_TYPE_SENSE;
sense_rq->cmd_flags |= REQ_PREEMPT;
if (drive->media == ide_tape)
sense_rq->cmd[13] = REQ_IDETAPE_PC1;
drive->sense_rq_armed = true;
}
EXPORT_SYMBOL_GPL(ide_prep_sense);
int ide_queue_sense_rq(ide_drive_t *drive, void *special)
{
/* deferred failure from ide_prep_sense() */
if (!drive->sense_rq_armed) {
printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
drive->name);
return -ENOMEM;
}
drive->sense_rq.special = special;
drive->sense_rq_armed = false;
drive->hwif->rq = NULL;
elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
/*
* Called when an error was detected during the last packet command.
* We queue a request sense packet command at the head of the request
* queue.
*/
void ide_retry_pc(ide_drive_t *drive)
{
struct request *failed_rq = drive->hwif->rq;
struct request *sense_rq = &drive->sense_rq;
struct ide_atapi_pc *pc = &drive->request_sense_pc;
(void)ide_read_error(drive);
/* init pc from sense_rq */
ide_init_pc(pc);
memcpy(pc->c, sense_rq->cmd, 12);
if (drive->media == ide_tape)
drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
/*
* Push back the failed request and put request sense on top
* of it. The failed command will be retried after sense data
* is acquired.
*/
drive->hwif->rq = NULL;
ide_requeue_and_plug(drive, failed_rq);
if (ide_queue_sense_rq(drive, pc)) {
blk_start_request(failed_rq);
ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
}
}
EXPORT_SYMBOL_GPL(ide_retry_pc);
int ide_cd_expiry(ide_drive_t *drive)
{
struct request *rq = drive->hwif->rq;
unsigned long wait = 0;
debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]);
/*
* Some commands are *slow* and normally take a long time to complete.
* Usually we can use the ATAPI "disconnect" to bypass this, but not all
* commands/drives support that. Let ide_timer_expiry keep polling us
* for these.
*/
switch (rq->cmd[0]) {
case GPCMD_BLANK:
case GPCMD_FORMAT_UNIT:
case GPCMD_RESERVE_RZONE_TRACK:
case GPCMD_CLOSE_TRACK:
case GPCMD_FLUSH_CACHE:
wait = ATAPI_WAIT_PC;
break;
default:
if (!(rq->cmd_flags & REQ_QUIET))
printk(KERN_INFO PFX "cmd 0x%x timed out\n",
rq->cmd[0]);
wait = 0;
break;
}
return wait;
}
EXPORT_SYMBOL_GPL(ide_cd_expiry);
int ide_cd_get_xferlen(struct request *rq)
{
switch (rq->cmd_type) {
case REQ_TYPE_FS:
return 32768;
case REQ_TYPE_SENSE:
case REQ_TYPE_BLOCK_PC:
case REQ_TYPE_ATA_PC:
return blk_rq_bytes(rq);
default:
return 0;
}
}
EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
{
struct ide_taskfile tf;
drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT |
IDE_VALID_LBAM | IDE_VALID_LBAH);
*bcount = (tf.lbah << 8) | tf.lbam;
*ireason = tf.nsect & 3;
}
EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
/*
* Check the contents of the interrupt reason register and attempt to recover if
* there are problems.
*
* Returns:
* - 0 if everything's ok
* - 1 if the request has to be terminated.
*/
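/*
 * Editor's note: in the good case the function simply checks
 * ireason == (!rw << 1), i.e. a value of 2 when reading from the device
 * and 0 when writing to it, while ireason == (rw << 1) means the device
 * reports the opposite transfer direction to the one requested. This is
 * only a restatement of the comparisons in the function body below.
 */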
int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
int ireason, int rw)
{
ide_hwif_t *hwif = drive->hwif;
debug_log("ireason: 0x%x, rw: 0x%x\n", ireason, rw);
if (ireason == (!rw << 1))
return 0;
else if (ireason == (rw << 1)) {
printk(KERN_ERR PFX "%s: %s: wrong transfer direction!\n",
drive->name, __func__);
if (dev_is_idecd(drive))
ide_pad_transfer(drive, rw, len);
} else if (!rw && ireason == ATAPI_COD) {
if (dev_is_idecd(drive)) {
/*
* Some drives (ASUS) seem to tell us that status info
* is available. Just get it and ignore.
*/
(void)hwif->tp_ops->read_status(hwif);
return 0;
}
} else {
if (ireason & ATAPI_COD)
printk(KERN_ERR PFX "%s: CoD != 0 in %s\n", drive->name,
__func__);
/* drive wants a command packet, or invalid ireason... */
printk(KERN_ERR PFX "%s: %s: bad interrupt reason 0x%02x\n",
drive->name, __func__, ireason);
}
if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC)
rq->cmd_flags |= REQ_FAILED;
return 1;
}
EXPORT_SYMBOL_GPL(ide_check_ireason);
/*
* This is the usual interrupt handler which will be called during a packet
* command. We will transfer some of the data (as requested by the drive)
* and will re-point interrupt handler to us.
*/
static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
{
struct ide_atapi_pc *pc = drive->pc;
ide_hwif_t *hwif = drive->hwif;
struct ide_cmd *cmd = &hwif->cmd;
struct request *rq = hwif->rq;
const struct ide_tp_ops *tp_ops = hwif->tp_ops;
unsigned int timeout, done;
u16 bcount;
u8 stat, ireason, dsc = 0;
u8 write = !!(pc->flags & PC_FLAG_WRITING);
debug_log("Enter %s - interrupt handler\n", __func__);
timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
: WAIT_TAPE_CMD;
/* Clear the interrupt */
stat = tp_ops->read_status(hwif);
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
int rc;
drive->waiting_for_dma = 0;
rc = hwif->dma_ops->dma_end(drive);
ide_dma_unmap_sg(drive, cmd);
if (rc || (drive->media == ide_tape && (stat & ATA_ERR))) {
if (drive->media == ide_floppy)
printk(KERN_ERR PFX "%s: DMA %s error\n",
drive->name, rq_data_dir(pc->rq)
? "write" : "read");
pc->flags |= PC_FLAG_DMA_ERROR;
} else
rq->resid_len = 0;
debug_log("%s: DMA finished\n", drive->name);
}
/* No more interrupts */
if ((stat & ATA_DRQ) == 0) {
int uptodate, error;
debug_log("Packet command completed, %d bytes transferred\n",
blk_rq_bytes(rq));
pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
local_irq_enable_in_hardirq();
if (drive->media == ide_tape &&
(stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE)
stat &= ~ATA_ERR;
if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) {
/* Error detected */
debug_log("%s: I/O error\n", drive->name);
if (drive->media != ide_tape)
pc->rq->errors++;
if (rq->cmd[0] == REQUEST_SENSE) {
printk(KERN_ERR PFX "%s: I/O error in request "
"sense command\n", drive->name);
return ide_do_reset(drive);
}
debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
/* Retry operation */
ide_retry_pc(drive);
/* queued, but not started */
return ide_stopped;
}
pc->error = 0;
if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0)
dsc = 1;
/*
* ->pc_callback() might change rq->data_len for
* residual count, cache total length.
*/
done = blk_rq_bytes(rq);
/* Command finished - Call the callback function */
uptodate = drive->pc_callback(drive, dsc);
if (uptodate == 0)
drive->failed_pc = NULL;
if (rq->cmd_type == REQ_TYPE_SPECIAL) {
rq->errors = 0;
error = 0;
} else {
if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) {
if (rq->errors == 0)
rq->errors = -EIO;
}
error = uptodate ? 0 : -EIO;
}
ide_complete_rq(drive, error, blk_rq_bytes(rq));
return ide_stopped;
}
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
printk(KERN_ERR PFX "%s: The device wants to issue more "
"interrupts in DMA mode\n", drive->name);
ide_dma_off(drive);
return ide_do_reset(drive);
}
/* Get the number of bytes to transfer on this interrupt. */
ide_read_bcount_and_ireason(drive, &bcount, &ireason);
if (ide_check_ireason(drive, rq, bcount, ireason, write))
return ide_do_reset(drive);
done = min_t(unsigned int, bcount, cmd->nleft);
ide_pio_bytes(drive, cmd, write, done);
/* Update transferred byte count */
rq->resid_len -= done;
bcount -= done;
if (bcount)
ide_pad_transfer(drive, write, bcount);
debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
rq->cmd[0], done, bcount, rq->resid_len);
/* And set the interrupt handler again */
ide_set_handler(drive, ide_pc_intr, timeout);
return ide_started;
}
static void ide_init_packet_cmd(struct ide_cmd *cmd, u8 valid_tf,
u16 bcount, u8 dma)
{
cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO;
cmd->valid.out.tf = IDE_VALID_LBAH | IDE_VALID_LBAM |
IDE_VALID_FEATURE | valid_tf;
cmd->tf.command = ATA_CMD_PACKET;
cmd->tf.feature = dma; /* Use PIO/DMA */
cmd->tf.lbam = bcount & 0xff;
cmd->tf.lbah = (bcount >> 8) & 0xff;
}
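/*
 * Editor's note, illustrative only: the byte count is split across the LBA
 * mid/high taskfile registers, so e.g. the 63 KiB limit used by
 * ide_issue_pc (bcount = 63 * 1024 = 0xFC00) ends up as tf.lbam = 0x00 and
 * tf.lbah = 0xFC.
 */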
static u8 ide_read_ireason(ide_drive_t *drive)
{
struct ide_taskfile tf;
drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT);
return tf.nsect & 3;
}
static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason)
{
int retries = 100;
while (retries-- && ((ireason & ATAPI_COD) == 0 ||
(ireason & ATAPI_IO))) {
printk(KERN_ERR PFX "%s: (IO,CoD != (0,1) while issuing "
"a packet command, retrying\n", drive->name);
udelay(100);
ireason = ide_read_ireason(drive);
if (retries == 0) {
printk(KERN_ERR PFX "%s: (IO,CoD != (0,1) while issuing"
" a packet command, ignoring\n",
drive->name);
ireason |= ATAPI_COD;
ireason &= ~ATAPI_IO;
}
}
return ireason;
}
static int ide_delayed_transfer_pc(ide_drive_t *drive)
{
/* Send the actual packet */
drive->hwif->tp_ops->output_data(drive, NULL, drive->pc->c, 12);
/* Timeout for the packet command */
return WAIT_FLOPPY_CMD;
}
static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
{
struct ide_atapi_pc *uninitialized_var(pc);
ide_hwif_t *hwif = drive->hwif;
struct request *rq = hwif->rq;
ide_expiry_t *expiry;
unsigned int timeout;
int cmd_len;
ide_startstop_t startstop;
u8 ireason;
if (ide_wait_stat(&startstop, drive, ATA_DRQ, ATA_BUSY, WAIT_READY)) {
printk(KERN_ERR PFX "%s: Strange, packet command initiated yet "
"DRQ isn't asserted\n", drive->name);
return startstop;
}
if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
if (drive->dma)
drive->waiting_for_dma = 1;
}
if (dev_is_idecd(drive)) {
/* ATAPI commands get padded out to 12 bytes minimum */
cmd_len = COMMAND_SIZE(rq->cmd[0]);
if (cmd_len < ATAPI_MIN_CDB_BYTES)
cmd_len = ATAPI_MIN_CDB_BYTES;
timeout = rq->timeout;
expiry = ide_cd_expiry;
} else {
pc = drive->pc;
cmd_len = ATAPI_MIN_CDB_BYTES;
/*
* If necessary schedule the packet transfer to occur 'timeout'
* milliseconds later in ide_delayed_transfer_pc() after the
* device says it's ready for a packet.
*/
if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
timeout = drive->pc_delay;
expiry = &ide_delayed_transfer_pc;
} else {
timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
: WAIT_TAPE_CMD;
expiry = NULL;
}
ireason = ide_read_ireason(drive);
if (drive->media == ide_tape)
ireason = ide_wait_ireason(drive, ireason);
if ((ireason & ATAPI_COD) == 0 || (ireason & ATAPI_IO)) {
printk(KERN_ERR PFX "%s: (IO,CoD) != (0,1) while "
"issuing a packet command\n", drive->name);
return ide_do_reset(drive);
}
}
hwif->expiry = expiry;
/* Set the interrupt routine */
ide_set_handler(drive,
(dev_is_idecd(drive) ? drive->irq_handler
: ide_pc_intr),
timeout);
/* Send the actual packet */
if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
/* Begin DMA, if necessary */
if (dev_is_idecd(drive)) {
if (drive->dma)
hwif->dma_ops->dma_start(drive);
} else {
if (pc->flags & PC_FLAG_DMA_OK) {
pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
hwif->dma_ops->dma_start(drive);
}
}
return ide_started;
}
ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
{
struct ide_atapi_pc *pc;
ide_hwif_t *hwif = drive->hwif;
ide_expiry_t *expiry = NULL;
struct request *rq = hwif->rq;
unsigned int timeout, bytes;
u16 bcount;
u8 valid_tf;
u8 drq_int = !!(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT);
if (dev_is_idecd(drive)) {
valid_tf = IDE_VALID_NSECT | IDE_VALID_LBAL;
bcount = ide_cd_get_xferlen(rq);
expiry = ide_cd_expiry;
timeout = ATAPI_WAIT_PC;
if (drive->dma)
drive->dma = !ide_dma_prepare(drive, cmd);
} else {
pc = drive->pc;
valid_tf = IDE_VALID_DEVICE;
bytes = blk_rq_bytes(rq);
bcount = ((drive->media == ide_tape) ? bytes
: min_t(unsigned int,
bytes, 63 * 1024));
/* We haven't transferred any data yet */
rq->resid_len = bcount;
if (pc->flags & PC_FLAG_DMA_ERROR) {
pc->flags &= ~PC_FLAG_DMA_ERROR;
ide_dma_off(drive);
}
if (pc->flags & PC_FLAG_DMA_OK)
drive->dma = !ide_dma_prepare(drive, cmd);
if (!drive->dma)
pc->flags &= ~PC_FLAG_DMA_OK;
timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
: WAIT_TAPE_CMD;
}
ide_init_packet_cmd(cmd, valid_tf, bcount, drive->dma);
(void)do_rw_taskfile(drive, cmd);
if (drq_int) {
if (drive->dma)
drive->waiting_for_dma = 0;
hwif->expiry = expiry;
}
ide_execute_command(drive, cmd, ide_transfer_pc, timeout);
return drq_int ? ide_started : ide_transfer_pc(drive);
}
EXPORT_SYMBOL_GPL(ide_issue_pc);
| gpl-2.0 |
bobcmartin/linux4sam | drivers/pci/of.c | 9426 | 1561 | /*
* PCI <-> OF mapping helpers
*
* Copyright 2011 IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include "pci.h"
void pci_set_of_node(struct pci_dev *dev)
{
if (!dev->bus->dev.of_node)
return;
dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
dev->devfn);
}
void pci_release_of_node(struct pci_dev *dev)
{
of_node_put(dev->dev.of_node);
dev->dev.of_node = NULL;
}
void pci_set_bus_of_node(struct pci_bus *bus)
{
if (bus->self == NULL)
bus->dev.of_node = pcibios_get_phb_of_node(bus);
else
bus->dev.of_node = of_node_get(bus->self->dev.of_node);
}
void pci_release_bus_of_node(struct pci_bus *bus)
{
of_node_put(bus->dev.of_node);
bus->dev.of_node = NULL;
}
struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
{
/* This should only be called for PHBs */
if (WARN_ON(bus->self || bus->parent))
return NULL;
/* Look for a node pointer in either the intermediary device we
* create above the root bus or its own parent. Normally only
* the latter is populated.
*/
if (bus->bridge->of_node)
return of_node_get(bus->bridge->of_node);
if (bus->bridge->parent && bus->bridge->parent->of_node)
return of_node_get(bus->bridge->parent->of_node);
return NULL;
}
| gpl-2.0 |
bgn9000/Shun-Andromeda | drivers/video/n411.c | 14290 | 4883 | /*
* linux/drivers/video/n411.c -- Platform device for N411 EPD kit
*
* Copyright (C) 2008, Jaya Kumar
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
*
* This driver is written to be used with the Hecuba display controller
* board, and tested with the EInk 800x600 display in 1 bit mode.
* The interface between Hecuba and the host is TTL-based GPIO. The
* GPIO requirements are 8 writable data lines and 6 lines for control.
* Only 4 of the control lines are actually used here; all 6 are requested
* for future use.
* The driver requires the IO addresses for data and control GPIO at
* load time. It is also possible to use this display with a standard
* PC parallel port.
*
* General notes:
* - User must set dio_addr=0xIOADDR cio_addr=0xIOADDR c2io_addr=0xIOADDR
*
*/
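/*
 * Editor's note - illustrative usage (the addresses are the example values
 * from the module parameter descriptions at the bottom of this file;
 * substitute the GPIO/parallel port addresses of the actual board):
 *
 *   modprobe n411 dio_addr=0x480 cio_addr=0x400 c2io_addr=0x408
 */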
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/irq.h>
#include <video/hecubafb.h>
static unsigned long dio_addr;
static unsigned long cio_addr;
static unsigned long c2io_addr;
static unsigned long splashval;
static unsigned int nosplash;
static unsigned char ctl;
static void n411_set_ctl(struct hecubafb_par *par, unsigned char bit, unsigned
char state)
{
switch (bit) {
case HCB_CD_BIT:
if (state)
ctl &= ~(HCB_CD_BIT);
else
ctl |= HCB_CD_BIT;
break;
case HCB_DS_BIT:
if (state)
ctl &= ~(HCB_DS_BIT);
else
ctl |= HCB_DS_BIT;
break;
}
outb(ctl, cio_addr);
}
static unsigned char n411_get_ctl(struct hecubafb_par *par)
{
return inb(c2io_addr);
}
static void n411_set_data(struct hecubafb_par *par, unsigned char value)
{
outb(value, dio_addr);
}
static void n411_wait_for_ack(struct hecubafb_par *par, int clear)
{
int timeout;
unsigned char tmp;
timeout = 500;
do {
tmp = n411_get_ctl(par);
if ((tmp & HCB_ACK_BIT) && (!clear))
return;
else if (!(tmp & HCB_ACK_BIT) && (clear))
return;
udelay(1);
} while (timeout--);
printk(KERN_ERR "timed out waiting for ack\n");
}
static int n411_init_control(struct hecubafb_par *par)
{
unsigned char tmp;
/* for init, we want the following setup to be set:
WUP = lo
ACK = hi
DS = hi
RW = hi
CD = lo
*/
/* write WUP to lo, DS to hi, RW to hi, CD to lo */
ctl = HCB_WUP_BIT | HCB_RW_BIT | HCB_CD_BIT ;
n411_set_ctl(par, HCB_DS_BIT, 1);
/* check ACK is not lo */
tmp = n411_get_ctl(par);
if (tmp & HCB_ACK_BIT) {
printk(KERN_ERR "Fail because ACK is already low\n");
return -ENXIO;
}
return 0;
}
static int n411_init_board(struct hecubafb_par *par)
{
int retval;
retval = n411_init_control(par);
if (retval)
return retval;
par->send_command(par, APOLLO_INIT_DISPLAY);
par->send_data(par, 0x81);
/* have to wait while display resets */
udelay(1000);
/* if we were told to splash the screen, we just clear it */
if (!nosplash) {
par->send_command(par, APOLLO_ERASE_DISPLAY);
par->send_data(par, splashval);
}
return 0;
}
static struct hecuba_board n411_board = {
.owner = THIS_MODULE,
.init = n411_init_board,
.set_ctl = n411_set_ctl,
.set_data = n411_set_data,
.wait_for_ack = n411_wait_for_ack,
};
static struct platform_device *n411_device;
static int __init n411_init(void)
{
int ret;
if (!dio_addr || !cio_addr || !c2io_addr) {
printk(KERN_WARNING "no IO addresses supplied\n");
return -EINVAL;
}
/* request our platform independent driver */
request_module("hecubafb");
n411_device = platform_device_alloc("hecubafb", -1);
if (!n411_device)
return -ENOMEM;
platform_device_add_data(n411_device, &n411_board, sizeof(n411_board));
/* this _add binds hecubafb to n411. hecubafb refcounts n411 */
ret = platform_device_add(n411_device);
if (ret)
platform_device_put(n411_device);
return ret;
}
static void __exit n411_exit(void)
{
platform_device_unregister(n411_device);
}
module_init(n411_init);
module_exit(n411_exit);
module_param(nosplash, uint, 0);
MODULE_PARM_DESC(nosplash, "Disable doing the splash screen");
module_param(dio_addr, ulong, 0);
MODULE_PARM_DESC(dio_addr, "IO address for data, eg: 0x480");
module_param(cio_addr, ulong, 0);
MODULE_PARM_DESC(cio_addr, "IO address for control, eg: 0x400");
module_param(c2io_addr, ulong, 0);
MODULE_PARM_DESC(c2io_addr, "IO address for secondary control, eg: 0x408");
module_param(splashval, ulong, 0);
MODULE_PARM_DESC(splashval, "Splash pattern: 0x00 is black, 0x01 is white");
MODULE_DESCRIPTION("board driver for n411 hecuba/apollo epd kit");
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
| gpl-2.0 |
sudosurootdev/gcc | gcc/testsuite/gcc.dg/vmx/gcc-bug-6.c | 211 | 1296 | /* { dg-do compile } */
#include <altivec.h>
void valuen014(vector float p1, vector float p2, vector float p3,
vector float p4, vector float p5, vector float p6,
vector float p7, vector float p8, vector float p9,
vector float p10, vector float p11, vector float p12,
vector float px, vector float py, vector float pz,
int p13)
{
}
void f()
{
valuen014(((vector float) {1.83e+09, 5.73e+08, -2.96e+08, -7.46e+08}),
((vector float) {-2.01e+09, 9.89e+08, -1.92e+09, 2.09e+09}),
((vector float) {1.95e+09, -2.41e+08, 2.67e+08, 1.67e+09}),
((vector float) {-2.12e+09, 8.18e+08, 9.47e+08, -1.25e+09}),
((vector float) {-9.47e+08, -9.3e+08, -1.65e+09, 1.64e+09}),
((vector float) {-7.99e+07, 4.86e+08, -3.4e+06, 3.11e+08}),
((vector float) {1.78e+09, 1.22e+09, -1.27e+09, -3.11e+08}),
((vector float) {1.41e+09, -5.38e+07, -2.08e+09, 1.54e+09}),
((vector float) {3.1e+08, -1.49e+09, 5.38e+08, -1.3e+09}),
((vector float) {9.66e+08, 5.5e+08, 1.75e+08, -8.22e+07}),
((vector float) {-1.72e+08, -2.06e+09, 1.14e+09, -4.64e+08}),
((vector float) {-1.25e+09, 8.12e+07, -2.02e+09, 4.71e+08}),
((vector float){1,1,1,1}),
((vector float){2,2,2,2}),
((vector float){3,3,3,3}),
962425441);
}
| gpl-2.0 |
Chadder43/Asus-T200TA-Linux | drivers/iommu/intel_irq_remapping.c | 211 | 28096 | #include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>
#include "irq_remapping.h"
struct ioapic_scope {
struct intel_iommu *iommu;
unsigned int id;
unsigned int bus; /* PCI bus number */
unsigned int devfn; /* PCI devfn number */
};
struct hpet_scope {
struct intel_iommu *iommu;
u8 id;
unsigned int bus;
unsigned int devfn;
};
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
/*
* Lock ordering:
* ->dmar_global_lock
* ->irq_2_ir_lock
* ->qi->q_lock
* ->iommu->register_lock
* Note:
* intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
* in a single-threaded environment with interrupts disabled, so there is no
* need to take the dmar_global_lock.
*/
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static int __init parse_ioapics_under_ir(void);
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
struct irq_cfg *cfg = irq_get_chip_data(irq);
return cfg ? &cfg->irq_2_iommu : NULL;
}
static int get_irte(int irq, struct irte *entry)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
unsigned long flags;
int index;
if (!entry || !irq_iommu)
return -1;
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
if (unlikely(!irq_iommu->iommu)) {
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
}
index = irq_iommu->irte_index + irq_iommu->sub_handle;
*entry = *(irq_iommu->iommu->ir_table->base + index);
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct irq_cfg *cfg = irq_get_chip_data(irq);
unsigned int mask = 0;
unsigned long flags;
int index;
if (!count || !irq_iommu)
return -1;
if (count > 1) {
count = __roundup_pow_of_two(count);
mask = ilog2(count);
}
if (mask > ecap_max_handle_mask(iommu->ecap)) {
printk(KERN_ERR
"Requested mask %x exceeds the max invalidation handle"
" mask value %Lx\n", mask,
ecap_max_handle_mask(iommu->ecap));
return -1;
}
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
index = bitmap_find_free_region(table->bitmap,
INTR_REMAP_TABLE_ENTRIES, mask);
if (index < 0) {
pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
} else {
cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
}
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
}
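/*
 * A rough worked example of the allocation above: a request for 5
 * consecutive IRTEs is rounded up to 8 by __roundup_pow_of_two(), the
 * mask becomes ilog2(8) = 3, and bitmap_find_free_region() then reserves
 * an 8-entry, naturally aligned region of the remap table, whose start
 * index is returned to the caller.
 */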
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
struct qi_desc desc;
desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
| QI_IEC_SELECTIVE;
desc.high = 0;
return qi_submit_sync(&desc, iommu);
}
static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
unsigned long flags;
int index;
if (!irq_iommu)
return -1;
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
*sub_handle = irq_iommu->sub_handle;
index = irq_iommu->irte_index;
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
}
static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct irq_cfg *cfg = irq_get_chip_data(irq);
unsigned long flags;
if (!irq_iommu)
return -1;
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = subhandle;
irq_iommu->irte_mask = 0;
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
static int modify_irte(int irq, struct irte *irte_modified)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct intel_iommu *iommu;
unsigned long flags;
struct irte *irte;
int rc, index;
if (!irq_iommu)
return -1;
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
iommu = irq_iommu->iommu;
index = irq_iommu->irte_index + irq_iommu->sub_handle;
irte = &iommu->ir_table->base[index];
set_64bit(&irte->low, irte_modified->low);
set_64bit(&irte->high, irte_modified->high);
__iommu_flush_cache(iommu, irte, sizeof(*irte));
rc = qi_flush_iec(iommu, index, 0);
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
}
static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
int i;
for (i = 0; i < MAX_HPET_TBS; i++)
if (ir_hpet[i].id == hpet_id)
return ir_hpet[i].iommu;
return NULL;
}
static struct intel_iommu *map_ioapic_to_ir(int apic)
{
int i;
for (i = 0; i < MAX_IO_APICS; i++)
if (ir_ioapic[i].id == apic)
return ir_ioapic[i].iommu;
return NULL;
}
static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
struct dmar_drhd_unit *drhd;
drhd = dmar_find_matched_drhd_unit(dev);
if (!drhd)
return NULL;
return drhd->iommu;
}
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
struct irte *start, *entry, *end;
struct intel_iommu *iommu;
int index;
if (irq_iommu->sub_handle)
return 0;
iommu = irq_iommu->iommu;
index = irq_iommu->irte_index + irq_iommu->sub_handle;
start = iommu->ir_table->base + index;
end = start + (1 << irq_iommu->irte_mask);
for (entry = start; entry < end; entry++) {
set_64bit(&entry->low, 0);
set_64bit(&entry->high, 0);
}
bitmap_release_region(iommu->ir_table->bitmap, index,
irq_iommu->irte_mask);
return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
static int free_irte(int irq)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
unsigned long flags;
int rc;
if (!irq_iommu)
return -1;
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
rc = clear_entries(irq_iommu);
irq_iommu->iommu = NULL;
irq_iommu->irte_index = 0;
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = 0;
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
}
/*
* source validation type
*/
#define SVT_NO_VERIFY 0x0 /* no verification is required */
#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
/*
* source-id qualifier
*/
#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
* the third least significant bit
*/
#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
* the second and third least significant bits
*/
#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
* the three least significant bits
*/
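/*
 * A hypothetical example of the qualifiers above: with a programmed SID
 * of 0x00f8 (bus 0, device 0x1f, function 0), SQ_ALL_16 matches only
 * request-id 0x00f8, while SQ_13_IGNORE_3 compares bits 15:3 only and
 * therefore matches 0x00f8 through 0x00ff, i.e. every function of that
 * device.
 */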
/*
* set SVT, SQ and SID fields of irte to verify
* source ids of interrupt requests
*/
static void set_irte_sid(struct irte *irte, unsigned int svt,
unsigned int sq, unsigned int sid)
{
if (disable_sourceid_checking)
svt = SVT_NO_VERIFY;
irte->svt = svt;
irte->sq = sq;
irte->sid = sid;
}
static int set_ioapic_sid(struct irte *irte, int apic)
{
int i;
u16 sid = 0;
if (!irte)
return -1;
down_read(&dmar_global_lock);
for (i = 0; i < MAX_IO_APICS; i++) {
if (ir_ioapic[i].id == apic) {
sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
break;
}
}
up_read(&dmar_global_lock);
if (sid == 0) {
pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
return -1;
}
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
return 0;
}
static int set_hpet_sid(struct irte *irte, u8 id)
{
int i;
u16 sid = 0;
if (!irte)
return -1;
down_read(&dmar_global_lock);
for (i = 0; i < MAX_HPET_TBS; i++) {
if (ir_hpet[i].id == id) {
sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
break;
}
}
up_read(&dmar_global_lock);
if (sid == 0) {
pr_warning("Failed to set source-id of HPET block (%d)\n", id);
return -1;
}
/*
* Should really use SQ_ALL_16. Some platforms are broken.
* While we figure out the right quirks for these broken platforms, use
* SQ_13_IGNORE_3 for now.
*/
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
return 0;
}
struct set_msi_sid_data {
struct pci_dev *pdev;
u16 alias;
};
static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
struct set_msi_sid_data *data = opaque;
data->pdev = pdev;
data->alias = alias;
return 0;
}
static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
struct set_msi_sid_data data;
if (!irte || !dev)
return -1;
pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
/*
* DMA alias provides us with a PCI device and alias. The only case
* where it will return an alias on a different bus than the
* device is the case of a PCIe-to-PCI bridge, where the alias is for
* the subordinate bus. In this case we can only verify the bus.
*
* If the alias device is on a different bus than our source device
* then we have a topology based alias, use it.
*
* Otherwise, the alias is for a device DMA quirk and we cannot
* assume that MSI uses the same requester ID. Therefore use the
* original device.
*/
if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
PCI_DEVID(PCI_BUS_NUM(data.alias),
dev->bus->number));
else if (data.pdev->bus->number != dev->bus->number)
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
else
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
PCI_DEVID(dev->bus->number, dev->devfn));
return 0;
}
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
u64 addr;
u32 sts;
unsigned long flags;
addr = virt_to_phys((void *)iommu->ir_table->base);
raw_spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writeq(iommu->reg + DMAR_IRTA_REG,
(addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
/* Set interrupt-remapping table pointer */
writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts);
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
/*
* global invalidation of interrupt entry cache before enabling
* interrupt-remapping.
*/
qi_global_iec(iommu);
raw_spin_lock_irqsave(&iommu->register_lock, flags);
/* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE;
iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
/*
* With CFI clear in the Global Command register, we should be
* protected from dangerous (i.e. compatibility) interrupts
* regardless of x2apic status. Check just to be sure.
*/
if (sts & DMA_GSTS_CFIS)
WARN(1, KERN_WARNING
"Compatibility-format IRQs enabled despite intr remapping;\n"
"you are vulnerable to IRQ injection.\n");
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
struct ir_table *ir_table;
struct page *pages;
unsigned long *bitmap;
ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
GFP_ATOMIC);
if (!iommu->ir_table)
return -ENOMEM;
pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
INTR_REMAP_PAGE_ORDER);
if (!pages) {
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table);
return -ENOMEM;
}
bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
sizeof(long), GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
kfree(ir_table);
return -ENOMEM;
}
ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;
iommu_set_irq_remapping(iommu, mode);
return 0;
}
/*
* Disable Interrupt Remapping.
*/
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
unsigned long flags;
u32 sts;
if (!ecap_ir_support(iommu->ecap))
return;
/*
* global invalidation of interrupt entry cache before disabling
* interrupt-remapping.
*/
qi_global_iec(iommu);
raw_spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_IRES))
goto end;
iommu->gcmd &= ~DMA_GCMD_IRE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, !(sts & DMA_GSTS_IRES), sts);
end:
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int __init dmar_x2apic_optout(void)
{
struct acpi_table_dmar *dmar;
dmar = (struct acpi_table_dmar *)dmar_tbl;
if (!dmar || no_x2apic_optout)
return 0;
return dmar->flags & DMAR_X2APIC_OPT_OUT;
}
static int __init intel_irq_remapping_supported(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
if (disable_irq_remap)
return 0;
if (irq_remap_broken) {
printk(KERN_WARNING
"This system BIOS has enabled interrupt remapping\n"
"on a chipset that contains an erratum making that\n"
"feature unstable. To maintain system stability\n"
"interrupt remapping is being disabled. Please\n"
"contact your BIOS vendor for an update\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
disable_irq_remap = 1;
return 0;
}
if (!dmar_ir_support())
return 0;
for_each_iommu(iommu, drhd)
if (!ecap_ir_support(iommu->ecap))
return 0;
return 1;
}
static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
bool x2apic_present;
int setup = 0;
int eim = 0;
x2apic_present = x2apic_supported();
if (parse_ioapics_under_ir() != 1) {
printk(KERN_INFO "Not enable interrupt remapping\n");
goto error;
}
if (x2apic_present) {
pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
eim = !dmar_x2apic_optout();
if (!eim)
printk(KERN_WARNING
"Your BIOS is broken and requested that x2apic be disabled.\n"
"This will slightly decrease performance.\n"
"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
}
for_each_iommu(iommu, drhd) {
/*
* If queued invalidation is already initialized,
* don't disable it.
*/
if (iommu->qi)
continue;
/*
* Clear previous faults.
*/
dmar_fault(-1, iommu);
/*
* Disable intr remapping and queued invalidation, if already
* enabled prior to OS handover.
*/
iommu_disable_irq_remapping(iommu);
dmar_disable_qi(iommu);
}
/*
* check for the Interrupt-remapping support
*/
for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
if (eim && !ecap_eim_support(iommu->ecap)) {
printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
" ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
goto error;
}
}
/*
* Enable queued invalidation for all the DRHD's.
*/
for_each_iommu(iommu, drhd) {
int ret = dmar_enable_qi(iommu);
if (ret) {
printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
" invalidation, ecap %Lx, ret %d\n",
drhd->reg_base_addr, iommu->ecap, ret);
goto error;
}
}
/*
* Setup Interrupt-remapping for all the DRHD's now.
*/
for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
if (intel_setup_irq_remapping(iommu, eim))
goto error;
setup = 1;
}
if (!setup)
goto error;
irq_remapping_enabled = 1;
/*
* VT-d has a different layout for IO-APIC entries when
* interrupt remapping is enabled. So it needs a special routine
* to print IO-APIC entries for debugging purposes too.
*/
x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;
pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
error:
/*
* handle error condition gracefully here!
*/
if (x2apic_present)
pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
return -1;
}
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
struct intel_iommu *iommu)
{
struct acpi_dmar_pci_path *path;
u8 bus;
int count;
bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
count = (scope->length - sizeof(struct acpi_dmar_device_scope))
/ sizeof(struct acpi_dmar_pci_path);
while (--count > 0) {
/*
* Access PCI directly because the PCI
* subsystem isn't initialized yet.
*/
bus = read_pci_config_byte(bus, path->device, path->function,
PCI_SECONDARY_BUS);
path++;
}
ir_hpet[ir_hpet_num].bus = bus;
ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
ir_hpet[ir_hpet_num].iommu = iommu;
ir_hpet[ir_hpet_num].id = scope->enumeration_id;
ir_hpet_num++;
}
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
struct intel_iommu *iommu)
{
struct acpi_dmar_pci_path *path;
u8 bus;
int count;
bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
count = (scope->length - sizeof(struct acpi_dmar_device_scope))
/ sizeof(struct acpi_dmar_pci_path);
while (--count > 0) {
/*
* Access PCI directly because the PCI
* subsystem isn't initialized yet.
*/
bus = read_pci_config_byte(bus, path->device, path->function,
PCI_SECONDARY_BUS);
path++;
}
ir_ioapic[ir_ioapic_num].bus = bus;
ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
ir_ioapic[ir_ioapic_num].iommu = iommu;
ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
ir_ioapic_num++;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu)
{
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_device_scope *scope;
void *start, *end;
drhd = (struct acpi_dmar_hardware_unit *)header;
start = (void *)(drhd + 1);
end = ((void *)drhd) + header->length;
while (start < end) {
scope = start;
if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
if (ir_ioapic_num == MAX_IO_APICS) {
printk(KERN_WARNING "Exceeded Max IO APICS\n");
return -1;
}
printk(KERN_INFO "IOAPIC id %d under DRHD base "
" 0x%Lx IOMMU %d\n", scope->enumeration_id,
drhd->address, iommu->seq_id);
ir_parse_one_ioapic_scope(scope, iommu);
} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
if (ir_hpet_num == MAX_HPET_TBS) {
printk(KERN_WARNING "Exceeded Max HPET blocks\n");
return -1;
}
printk(KERN_INFO "HPET id %d under DRHD base"
" 0x%Lx\n", scope->enumeration_id,
drhd->address);
ir_parse_one_hpet_scope(scope, iommu);
}
start += scope->length;
}
return 0;
}
/*
* Finds the association between IOAPICs and their Interrupt-remapping
* hardware units.
*/
static int __init parse_ioapics_under_ir(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int ir_supported = 0;
int ioapic_idx;
for_each_iommu(iommu, drhd)
if (ecap_ir_support(iommu->ecap)) {
if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
return -1;
ir_supported = 1;
}
if (!ir_supported)
return 0;
for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
int ioapic_id = mpc_ioapic_id(ioapic_idx);
if (!map_ioapic_to_ir(ioapic_id)) {
pr_err(FW_BUG "ioapic %d has no mapping iommu, "
"interrupt remapping will be disabled\n",
ioapic_id);
return -1;
}
}
return 1;
}
static int __init ir_dev_scope_init(void)
{
int ret;
if (!irq_remapping_enabled)
return 0;
down_write(&dmar_global_lock);
ret = dmar_dev_scope_init();
up_write(&dmar_global_lock);
return ret;
}
rootfs_initcall(ir_dev_scope_init);
static void disable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
/*
* Disable Interrupt-remapping for all the DRHD's now.
*/
for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
iommu_disable_irq_remapping(iommu);
}
}
static int reenable_irq_remapping(int eim)
{
struct dmar_drhd_unit *drhd;
int setup = 0;
struct intel_iommu *iommu = NULL;
for_each_iommu(iommu, drhd)
if (iommu->qi)
dmar_reenable_qi(iommu);
/*
* Setup Interrupt-remapping for all the DRHD's now.
*/
for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
/* Set up interrupt remapping for iommu.*/
iommu_set_irq_remapping(iommu, eim);
setup = 1;
}
if (!setup)
goto error;
return 0;
error:
/*
* handle error condition gracefully here!
*/
return -1;
}
static void prepare_irte(struct irte *irte, int vector,
unsigned int dest)
{
memset(irte, 0, sizeof(*irte));
irte->present = 1;
irte->dst_mode = apic->irq_dest_mode;
/*
* Trigger mode in the IRTE will always be edge, and for IO-APIC, the
* actual level or edge trigger will be setup in the IO-APIC
* RTE. This will help simplify level triggered irq migration.
* For more details, see the comments (in io_apic.c) explaining IO-APIC
* irq migration in the presence of interrupt-remapping.
*/
irte->trigger_mode = 0;
irte->dlvry_mode = apic->irq_delivery_mode;
irte->vector = vector;
irte->dest_id = IRTE_DEST(dest);
irte->redir_hint = 1;
}
static int intel_setup_ioapic_entry(int irq,
struct IO_APIC_route_entry *route_entry,
unsigned int destination, int vector,
struct io_apic_irq_attr *attr)
{
int ioapic_id = mpc_ioapic_id(attr->ioapic);
struct intel_iommu *iommu;
struct IR_IO_APIC_route_entry *entry;
struct irte irte;
int index;
down_read(&dmar_global_lock);
iommu = map_ioapic_to_ir(ioapic_id);
if (!iommu) {
pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
index = -ENODEV;
} else {
index = alloc_irte(iommu, irq, 1);
if (index < 0) {
pr_warn("Failed to allocate IRTE for ioapic %d\n",
ioapic_id);
index = -ENOMEM;
}
}
up_read(&dmar_global_lock);
if (index < 0)
return index;
prepare_irte(&irte, vector, destination);
/* Set source-id of interrupt request */
set_ioapic_sid(&irte, ioapic_id);
modify_irte(irq, &irte);
apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
"Avail:%X Vector:%02X Dest:%08X "
"SID:%04X SQ:%X SVT:%X)\n",
attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
irte.avail, irte.vector, irte.dest_id,
irte.sid, irte.sq, irte.svt);
entry = (struct IR_IO_APIC_route_entry *)route_entry;
memset(entry, 0, sizeof(*entry));
entry->index2 = (index >> 15) & 0x1;
entry->zero = 0;
entry->format = 1;
entry->index = (index & 0x7fff);
/*
* IO-APIC RTE will be configured with virtual vector.
* irq handler will do the explicit EOI to the io-apic.
*/
entry->vector = attr->ioapic_pin;
entry->mask = 0; /* enable IRQ */
entry->trigger = attr->trigger;
entry->polarity = attr->polarity;
/* Mask level triggered irqs.
* Use IRQ_DELAYED_DISABLE for edge triggered irqs.
*/
if (attr->trigger)
entry->mask = 1;
return 0;
}
/*
* Migrate the IO-APIC irq in the presence of intr-remapping.
*
* For both level and edge triggered, irq migration is a simple atomic
* update(of vector and cpu destination) of IRTE and flush the hardware cache.
*
* For level triggered, we eliminate the io-apic RTE modification (with the
* updated vector information), by using a virtual vector (io-apic pin number).
* Real vector that is used for interrupting cpu will be coming from
* the interrupt-remapping table entry.
*
* As the migration is a simple atomic update of IRTE, the same mechanism
* is used to migrate MSI irq's in the presence of interrupt-remapping.
*/
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
struct irq_cfg *cfg = data->chip_data;
unsigned int dest, irq = data->irq;
struct irte irte;
int err;
if (!config_enabled(CONFIG_SMP))
return -EINVAL;
if (!cpumask_intersects(mask, cpu_online_mask))
return -EINVAL;
if (get_irte(irq, &irte))
return -EBUSY;
err = assign_irq_vector(irq, cfg, mask);
if (err)
return err;
err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
if (err) {
if (assign_irq_vector(irq, cfg, data->affinity))
pr_err("Failed to recover vector for irq %d\n", irq);
return err;
}
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
/*
* Atomically updates the IRTE with the new destination, vector
* and flushes the interrupt entry cache.
*/
modify_irte(irq, &irte);
/*
* After this point, all the interrupts will start arriving
* at the new destination. So, time to cleanup the previous
* vector allocation.
*/
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
cpumask_copy(data->affinity, mask);
return 0;
}
static void intel_compose_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
struct msi_msg *msg, u8 hpet_id)
{
struct irq_cfg *cfg;
struct irte irte;
u16 sub_handle = 0;
int ir_index;
cfg = irq_get_chip_data(irq);
ir_index = map_irq_to_irte_handle(irq, &sub_handle);
BUG_ON(ir_index == -1);
prepare_irte(&irte, cfg->vector, dest);
/* Set source-id of interrupt request */
if (pdev)
set_msi_sid(&irte, pdev);
else
set_hpet_sid(&irte, hpet_id);
modify_irte(irq, &irte);
msg->address_hi = MSI_ADDR_BASE_HI;
msg->data = sub_handle;
msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
MSI_ADDR_IR_SHV |
MSI_ADDR_IR_INDEX1(ir_index) |
MSI_ADDR_IR_INDEX2(ir_index);
}
/*
* Map the PCI dev to the corresponding remapping hardware unit
* and allocate 'nvec' consecutive interrupt-remapping table entries
* in it.
*/
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
struct intel_iommu *iommu;
int index;
down_read(&dmar_global_lock);
iommu = map_dev_to_ir(dev);
if (!iommu) {
printk(KERN_ERR
"Unable to map PCI %s to iommu\n", pci_name(dev));
index = -ENOENT;
} else {
index = alloc_irte(iommu, irq, nvec);
if (index < 0) {
printk(KERN_ERR
"Unable to allocate %d IRTE for PCI %s\n",
nvec, pci_name(dev));
index = -ENOSPC;
}
}
up_read(&dmar_global_lock);
return index;
}
static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
int index, int sub_handle)
{
struct intel_iommu *iommu;
int ret = -ENOENT;
down_read(&dmar_global_lock);
iommu = map_dev_to_ir(pdev);
if (iommu) {
/*
* setup the mapping between the irq and the IRTE
* base index, the sub_handle pointing to the
* appropriate interrupt remap table entry.
*/
set_irte_irq(irq, iommu, index, sub_handle);
ret = 0;
}
up_read(&dmar_global_lock);
return ret;
}
static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
{
int ret = -1;
struct intel_iommu *iommu;
int index;
down_read(&dmar_global_lock);
iommu = map_hpet_to_ir(id);
if (iommu) {
index = alloc_irte(iommu, irq, 1);
if (index >= 0)
ret = 0;
}
up_read(&dmar_global_lock);
return ret;
}
struct irq_remap_ops intel_irq_remap_ops = {
.supported = intel_irq_remapping_supported,
.prepare = dmar_table_init,
.enable = intel_enable_irq_remapping,
.disable = disable_irq_remapping,
.reenable = reenable_irq_remapping,
.enable_faulting = enable_drhd_fault_handling,
.setup_ioapic_entry = intel_setup_ioapic_entry,
.set_affinity = intel_ioapic_set_affinity,
.free_irq = free_irte,
.compose_msi_msg = intel_compose_msi_msg,
.msi_alloc_irq = intel_msi_alloc_irq,
.msi_setup_irq = intel_msi_setup_irq,
.alloc_hpet_msi = intel_alloc_hpet_msi,
};
| gpl-2.0 |
barakinflorida/tmo_tab-open | fs/xfs/xfs_btree.c | 467 | 95843 | /*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_error.h"
/*
* Cursor allocation zone.
*/
kmem_zone_t *xfs_btree_cur_zone;
/*
* Btree magic numbers.
*/
const __uint32_t xfs_magics[XFS_BTNUM_MAX] = {
XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC
};
STATIC int /* error (0 or EFSCORRUPTED) */
xfs_btree_check_lblock(
struct xfs_btree_cur *cur, /* btree cursor */
struct xfs_btree_block *block, /* btree long form block pointer */
int level, /* level of the btree block */
struct xfs_buf *bp) /* buffer for block, if any */
{
int lblock_ok; /* block passes checks */
struct xfs_mount *mp; /* file system mount point */
mp = cur->bc_mp;
lblock_ok =
be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] &&
be16_to_cpu(block->bb_level) == level &&
be16_to_cpu(block->bb_numrecs) <=
cur->bc_ops->get_maxrecs(cur, level) &&
block->bb_u.l.bb_leftsib &&
(be64_to_cpu(block->bb_u.l.bb_leftsib) == NULLDFSBNO ||
XFS_FSB_SANITY_CHECK(mp,
be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
block->bb_u.l.bb_rightsib &&
(be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO ||
XFS_FSB_SANITY_CHECK(mp,
be64_to_cpu(block->bb_u.l.bb_rightsib)));
if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
XFS_ERRTAG_BTREE_CHECK_LBLOCK,
XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
if (bp)
xfs_buftrace("LBTREE ERROR", bp);
XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW,
mp);
return XFS_ERROR(EFSCORRUPTED);
}
return 0;
}
STATIC int /* error (0 or EFSCORRUPTED) */
xfs_btree_check_sblock(
struct xfs_btree_cur *cur, /* btree cursor */
struct xfs_btree_block *block, /* btree short form block pointer */
int level, /* level of the btree block */
struct xfs_buf *bp) /* buffer containing block */
{
struct xfs_buf *agbp; /* buffer for ag. freespace struct */
struct xfs_agf *agf; /* ag. freespace structure */
xfs_agblock_t agflen; /* native ag. freespace length */
int sblock_ok; /* block passes checks */
agbp = cur->bc_private.a.agbp;
agf = XFS_BUF_TO_AGF(agbp);
agflen = be32_to_cpu(agf->agf_length);
sblock_ok =
be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] &&
be16_to_cpu(block->bb_level) == level &&
be16_to_cpu(block->bb_numrecs) <=
cur->bc_ops->get_maxrecs(cur, level) &&
(be32_to_cpu(block->bb_u.s.bb_leftsib) == NULLAGBLOCK ||
be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
block->bb_u.s.bb_leftsib &&
(be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK ||
be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
block->bb_u.s.bb_rightsib;
if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp,
XFS_ERRTAG_BTREE_CHECK_SBLOCK,
XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
if (bp)
xfs_buftrace("SBTREE ERROR", bp);
XFS_CORRUPTION_ERROR("xfs_btree_check_sblock",
XFS_ERRLEVEL_LOW, cur->bc_mp, block);
return XFS_ERROR(EFSCORRUPTED);
}
return 0;
}
/*
* Debug routine: check that block header is ok.
*/
int
xfs_btree_check_block(
struct xfs_btree_cur *cur, /* btree cursor */
struct xfs_btree_block *block, /* generic btree block pointer */
int level, /* level of the btree block */
struct xfs_buf *bp) /* buffer containing block, if any */
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
return xfs_btree_check_lblock(cur, block, level, bp);
else
return xfs_btree_check_sblock(cur, block, level, bp);
}
/*
* Check that (long) pointer is ok.
*/
int /* error (0 or EFSCORRUPTED) */
xfs_btree_check_lptr(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_dfsbno_t bno, /* btree block disk address */
int level) /* btree block level */
{
XFS_WANT_CORRUPTED_RETURN(
level > 0 &&
bno != NULLDFSBNO &&
XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
return 0;
}
#ifdef DEBUG
/*
* Check that (short) pointer is ok.
*/
STATIC int /* error (0 or EFSCORRUPTED) */
xfs_btree_check_sptr(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* btree block disk address */
int level) /* btree block level */
{
xfs_agblock_t agblocks = cur->bc_mp->m_sb.sb_agblocks;
XFS_WANT_CORRUPTED_RETURN(
level > 0 &&
bno != NULLAGBLOCK &&
bno != 0 &&
bno < agblocks);
return 0;
}
/*
* Check that block ptr is ok.
*/
STATIC int /* error (0 or EFSCORRUPTED) */
xfs_btree_check_ptr(
struct xfs_btree_cur *cur, /* btree cursor */
union xfs_btree_ptr *ptr, /* btree block disk address */
int index, /* offset from ptr to check */
int level) /* btree block level */
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
return xfs_btree_check_lptr(cur,
be64_to_cpu((&ptr->l)[index]), level);
} else {
return xfs_btree_check_sptr(cur,
be32_to_cpu((&ptr->s)[index]), level);
}
}
#endif
/*
* Delete the btree cursor.
*/
void
xfs_btree_del_cursor(
xfs_btree_cur_t *cur, /* btree cursor */
int error) /* del because of error */
{
int i; /* btree level */
/*
* Clear the buffer pointers, and release the buffers.
* If we're doing this in the face of an error, we
* need to make sure to inspect all of the entries
* in the bc_bufs array for buffers to be unlocked.
* This is because some of the btree code works from
* level n down to 0, and if we get an error along
* the way we won't have initialized all the entries
* down to 0.
*/
for (i = 0; i < cur->bc_nlevels; i++) {
if (cur->bc_bufs[i])
xfs_btree_setbuf(cur, i, NULL);
else if (!error)
break;
}
/*
* Can't free a bmap cursor without having dealt with the
* allocated indirect blocks' accounting.
*/
ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
cur->bc_private.b.allocated == 0);
/*
* Free the cursor.
*/
kmem_zone_free(xfs_btree_cur_zone, cur);
}
/*
* Duplicate the btree cursor.
* Allocate a new one, copy the record, re-get the buffers.
*/
int /* error */
xfs_btree_dup_cursor(
xfs_btree_cur_t *cur, /* input cursor */
xfs_btree_cur_t **ncur) /* output cursor */
{
xfs_buf_t *bp; /* btree block's buffer pointer */
int error; /* error return value */
int i; /* level number of btree block */
xfs_mount_t *mp; /* mount structure for filesystem */
xfs_btree_cur_t *new; /* new cursor value */
xfs_trans_t *tp; /* transaction pointer, can be NULL */
tp = cur->bc_tp;
mp = cur->bc_mp;
/*
* Allocate a new cursor like the old one.
*/
new = cur->bc_ops->dup_cursor(cur);
/*
* Copy the record currently in the cursor.
*/
new->bc_rec = cur->bc_rec;
/*
* For each level in the current cursor, re-get the buffer and copy the ptr value.
*/
for (i = 0; i < new->bc_nlevels; i++) {
new->bc_ptrs[i] = cur->bc_ptrs[i];
new->bc_ra[i] = cur->bc_ra[i];
if ((bp = cur->bc_bufs[i])) {
if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
XFS_BUF_ADDR(bp), mp->m_bsize, 0, &bp))) {
xfs_btree_del_cursor(new, error);
*ncur = NULL;
return error;
}
new->bc_bufs[i] = bp;
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
} else
new->bc_bufs[i] = NULL;
}
*ncur = new;
return 0;
}
/*
* XFS btree block layout and addressing:
*
* There are two types of blocks in the btree: leaf and non-leaf blocks.
*
* A leaf block starts with a header, followed by records containing
* the values. A non-leaf block also starts with the same header, and
* then first contains lookup keys followed by an equal number of pointers
* to the btree blocks one level down.
*
* +--------+-------+-------+-------+-------+-------+-------+
* Leaf: | header | rec 1 | rec 2 | rec 3 | rec 4 | rec 5 | rec N |
* +--------+-------+-------+-------+-------+-------+-------+
*
* +--------+-------+-------+-------+-------+-------+-------+
* Non-Leaf: | header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N |
* +--------+-------+-------+-------+-------+-------+-------+
*
* The header is called struct xfs_btree_block for reasons better left unknown
* and comes in different versions for short (32bit) and long (64bit) block
* pointers. The record and key structures are defined by the btree instances
* and opaque to the btree core. The block pointers are simple disk endian
* integers, available in a short (32bit) and long (64bit) variant.
*
* The helpers below calculate the offset of a given record, key or pointer
* into a btree block (xfs_btree_*_offset) or return a pointer to the given
* record, key or pointer (xfs_btree_*_addr). Note that all addressing
* inside the btree block is done using indices starting at one, not zero!
*/
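/*
 * A small worked example, assuming a short-form block with purely
 * illustrative record/key/pointer lengths of 16, 8 and 4 bytes and
 * maxrecs = 10 for the level in question:
 *
 * record 3 starts at   XFS_BTREE_SBLOCK_LEN + (3 - 1) * 16
 * key 3 starts at      XFS_BTREE_SBLOCK_LEN + (3 - 1) * 8
 * pointer 3 starts at  XFS_BTREE_SBLOCK_LEN + 10 * 8 + (3 - 1) * 4
 *
 * which is what xfs_btree_rec_offset(), xfs_btree_key_offset() and
 * xfs_btree_ptr_offset() below compute from cur->bc_ops.
 */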
/*
* Return size of the btree block header for this btree instance.
*/
static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
{
return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
XFS_BTREE_LBLOCK_LEN :
XFS_BTREE_SBLOCK_LEN;
}
/*
* Return size of btree block pointers for this btree instance.
*/
static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
{
return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
sizeof(__be64) : sizeof(__be32);
}
/*
* Calculate offset of the n-th record in a btree block.
*/
STATIC size_t
xfs_btree_rec_offset(
struct xfs_btree_cur *cur,
int n)
{
return xfs_btree_block_len(cur) +
(n - 1) * cur->bc_ops->rec_len;
}
/*
* Calculate offset of the n-th key in a btree block.
*/
STATIC size_t
xfs_btree_key_offset(
struct xfs_btree_cur *cur,
int n)
{
return xfs_btree_block_len(cur) +
(n - 1) * cur->bc_ops->key_len;
}
/*
* Calculate offset of the n-th block pointer in a btree block.
*/
STATIC size_t
xfs_btree_ptr_offset(
struct xfs_btree_cur *cur,
int n,
int level)
{
return xfs_btree_block_len(cur) +
cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
(n - 1) * xfs_btree_ptr_len(cur);
}
/*
* Return a pointer to the n-th record in the btree block.
*/
STATIC union xfs_btree_rec *
xfs_btree_rec_addr(
struct xfs_btree_cur *cur,
int n,
struct xfs_btree_block *block)
{
return (union xfs_btree_rec *)
((char *)block + xfs_btree_rec_offset(cur, n));
}
/*
* Return a pointer to the n-th key in the btree block.
*/
STATIC union xfs_btree_key *
xfs_btree_key_addr(
struct xfs_btree_cur *cur,
int n,
struct xfs_btree_block *block)
{
return (union xfs_btree_key *)
((char *)block + xfs_btree_key_offset(cur, n));
}
/*
* Return a pointer to the n-th block pointer in the btree block.
*/
STATIC union xfs_btree_ptr *
xfs_btree_ptr_addr(
struct xfs_btree_cur *cur,
int n,
struct xfs_btree_block *block)
{
int level = xfs_btree_get_level(block);
ASSERT(block->bb_level != 0);
return (union xfs_btree_ptr *)
((char *)block + xfs_btree_ptr_offset(cur, n, level));
}
/*
* Get the root block, which is stored in the inode.
*
* For now this btree implementation assumes the btree root is always
* stored in the if_broot field of an inode fork.
*/
STATIC struct xfs_btree_block *
xfs_btree_get_iroot(
struct xfs_btree_cur *cur)
{
struct xfs_ifork *ifp;
ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
return (struct xfs_btree_block *)ifp->if_broot;
}
/*
* Retrieve the block pointer from the cursor at the given level.
* This may be an inode btree root or from a buffer.
*/
STATIC struct xfs_btree_block * /* generic btree block pointer */
xfs_btree_get_block(
struct xfs_btree_cur *cur, /* btree cursor */
int level, /* level in btree */
struct xfs_buf **bpp) /* buffer containing the block */
{
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(level == cur->bc_nlevels - 1)) {
*bpp = NULL;
return xfs_btree_get_iroot(cur);
}
*bpp = cur->bc_bufs[level];
return XFS_BUF_TO_BLOCK(*bpp);
}
/*
* Get a buffer for the block, return it with no data read.
* Long-form addressing.
*/
xfs_buf_t * /* buffer for fsbno */
xfs_btree_get_bufl(
xfs_mount_t *mp, /* file system mount point */
xfs_trans_t *tp, /* transaction pointer */
xfs_fsblock_t fsbno, /* file system block number */
uint lock) /* lock flags for get_buf */
{
xfs_buf_t *bp; /* buffer pointer (return value) */
xfs_daddr_t d; /* real disk block address */
ASSERT(fsbno != NULLFSBLOCK);
d = XFS_FSB_TO_DADDR(mp, fsbno);
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
return bp;
}
/*
* Get a buffer for the block, return it with no data read.
* Short-form addressing.
*/
xfs_buf_t * /* buffer for agno/agbno */
xfs_btree_get_bufs(
xfs_mount_t *mp, /* file system mount point */
xfs_trans_t *tp, /* transaction pointer */
xfs_agnumber_t agno, /* allocation group number */
xfs_agblock_t agbno, /* allocation group block number */
uint lock) /* lock flags for get_buf */
{
xfs_buf_t *bp; /* buffer pointer (return value) */
xfs_daddr_t d; /* real disk block address */
ASSERT(agno != NULLAGNUMBER);
ASSERT(agbno != NULLAGBLOCK);
d = XFS_AGB_TO_DADDR(mp, agno, agbno);
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
return bp;
}
/*
* Check for the cursor referring to the last block at the given level.
*/
int /* 1=is last block, 0=not last block */
xfs_btree_islastblock(
xfs_btree_cur_t *cur, /* btree cursor */
int level) /* level to check */
{
struct xfs_btree_block *block; /* generic btree block pointer */
xfs_buf_t *bp; /* buffer containing block */
block = xfs_btree_get_block(cur, level, &bp);
xfs_btree_check_block(cur, block, level, bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO;
else
return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK;
}
/*
* Change the cursor to point to the first record at the given level.
* Other levels are unaffected.
*/
STATIC int /* success=1, failure=0 */
xfs_btree_firstrec(
xfs_btree_cur_t *cur, /* btree cursor */
int level) /* level to change */
{
struct xfs_btree_block *block; /* generic btree block pointer */
xfs_buf_t *bp; /* buffer containing block */
/*
* Get the block pointer for this level.
*/
block = xfs_btree_get_block(cur, level, &bp);
xfs_btree_check_block(cur, block, level, bp);
/*
* It's empty, there is no such record.
*/
if (!block->bb_numrecs)
return 0;
/*
* Set the ptr value to 1, that's the first record/key.
*/
cur->bc_ptrs[level] = 1;
return 1;
}
/*
* Change the cursor to point to the last record in the current block
* at the given level. Other levels are unaffected.
*/
STATIC int /* success=1, failure=0 */
xfs_btree_lastrec(
xfs_btree_cur_t *cur, /* btree cursor */
int level) /* level to change */
{
struct xfs_btree_block *block; /* generic btree block pointer */
xfs_buf_t *bp; /* buffer containing block */
/*
* Get the block pointer for this level.
*/
block = xfs_btree_get_block(cur, level, &bp);
xfs_btree_check_block(cur, block, level, bp);
/*
* It's empty, there is no such record.
*/
if (!block->bb_numrecs)
return 0;
/*
* Set the ptr value to numrecs, that's the last record/key.
*/
cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
return 1;
}
/*
* Compute first and last byte offsets for the fields given.
* Interprets the offsets table, which contains struct field offsets.
*/
void
xfs_btree_offsets(
__int64_t fields, /* bitmask of fields */
const short *offsets, /* table of field offsets */
int nbits, /* number of bits to inspect */
int *first, /* output: first byte offset */
int *last) /* output: last byte offset */
{
int i; /* current bit number */
__int64_t imask; /* mask for current bit number */
ASSERT(fields != 0);
/*
* Find the lowest bit, so the first byte offset.
*/
for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
if (imask & fields) {
*first = offsets[i];
break;
}
}
/*
* Find the highest bit, so the last byte offset.
*/
for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
if (imask & fields) {
*last = offsets[i + 1] - 1;
break;
}
}
}
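/*
 * A brief example of the loops above, assuming the XFS_BB_* field bits
 * follow the same order as the offsets tables passed in by callers:
 * with fields = XFS_BB_MAGIC | XFS_BB_LEVEL, the lowest set bit selects
 * *first = offsets[0] (start of bb_magic) and the highest set bit
 * selects *last = offsets[2] - 1, the byte just before bb_numrecs.
 * This is why the offsets tables carry one extra trailing entry (the
 * total block header length) as a sentinel.
 */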
/*
* Get a buffer for the block, return it read in.
* Long-form addressing.
*/
int /* error */
xfs_btree_read_bufl(
xfs_mount_t *mp, /* file system mount point */
xfs_trans_t *tp, /* transaction pointer */
xfs_fsblock_t fsbno, /* file system block number */
uint lock, /* lock flags for read_buf */
xfs_buf_t **bpp, /* buffer for fsbno */
int refval) /* ref count value for buffer */
{
xfs_buf_t *bp; /* return value */
xfs_daddr_t d; /* real disk block address */
int error;
ASSERT(fsbno != NULLFSBLOCK);
d = XFS_FSB_TO_DADDR(mp, fsbno);
if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
mp->m_bsize, lock, &bp))) {
return error;
}
ASSERT(!bp || !XFS_BUF_GETERROR(bp));
if (bp != NULL) {
XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
}
*bpp = bp;
return 0;
}
/*
* Read-ahead the block, don't wait for it, don't return a buffer.
* Long-form addressing.
*/
/* ARGSUSED */
void
xfs_btree_reada_bufl(
xfs_mount_t *mp, /* file system mount point */
xfs_fsblock_t fsbno, /* file system block number */
xfs_extlen_t count) /* count of filesystem blocks */
{
xfs_daddr_t d;
ASSERT(fsbno != NULLFSBLOCK);
d = XFS_FSB_TO_DADDR(mp, fsbno);
xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count);
}
/*
* Read-ahead the block, don't wait for it, don't return a buffer.
* Short-form addressing.
*/
/* ARGSUSED */
void
xfs_btree_reada_bufs(
xfs_mount_t *mp, /* file system mount point */
xfs_agnumber_t agno, /* allocation group number */
xfs_agblock_t agbno, /* allocation group block number */
xfs_extlen_t count) /* count of filesystem blocks */
{
xfs_daddr_t d;
ASSERT(agno != NULLAGNUMBER);
ASSERT(agbno != NULLAGBLOCK);
d = XFS_AGB_TO_DADDR(mp, agno, agbno);
xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count);
}
STATIC int
xfs_btree_readahead_lblock(
struct xfs_btree_cur *cur,
int lr,
struct xfs_btree_block *block)
{
int rval = 0;
xfs_dfsbno_t left = be64_to_cpu(block->bb_u.l.bb_leftsib);
xfs_dfsbno_t right = be64_to_cpu(block->bb_u.l.bb_rightsib);
if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) {
xfs_btree_reada_bufl(cur->bc_mp, left, 1);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLDFSBNO) {
xfs_btree_reada_bufl(cur->bc_mp, right, 1);
rval++;
}
return rval;
}
STATIC int
xfs_btree_readahead_sblock(
struct xfs_btree_cur *cur,
int lr,
struct xfs_btree_block *block)
{
int rval = 0;
xfs_agblock_t left = be32_to_cpu(block->bb_u.s.bb_leftsib);
xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib);
if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
left, 1);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
right, 1);
rval++;
}
return rval;
}
/*
* Read-ahead btree blocks, at the given level.
* Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
*/
STATIC int
xfs_btree_readahead(
struct xfs_btree_cur *cur, /* btree cursor */
int lev, /* level in btree */
int lr) /* left/right bits */
{
struct xfs_btree_block *block;
/*
* No readahead needed if we are at the root level and the
* btree root is stored in the inode.
*/
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(lev == cur->bc_nlevels - 1))
return 0;
if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
return 0;
cur->bc_ra[lev] |= lr;
block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
return xfs_btree_readahead_lblock(cur, lr, block);
return xfs_btree_readahead_sblock(cur, lr, block);
}
/*
* Set the buffer for level "lev" in the cursor to bp, releasing
* any previous buffer.
*/
void
xfs_btree_setbuf(
xfs_btree_cur_t *cur, /* btree cursor */
int lev, /* level in btree */
xfs_buf_t *bp) /* new buffer to set */
{
struct xfs_btree_block *b; /* btree block */
xfs_buf_t *obp; /* old buffer pointer */
obp = cur->bc_bufs[lev];
if (obp)
xfs_trans_brelse(cur->bc_tp, obp);
cur->bc_bufs[lev] = bp;
cur->bc_ra[lev] = 0;
if (!bp)
return;
b = XFS_BUF_TO_BLOCK(bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO)
cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO)
cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
} else {
if (be32_to_cpu(b->bb_u.s.bb_leftsib) == NULLAGBLOCK)
cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
if (be32_to_cpu(b->bb_u.s.bb_rightsib) == NULLAGBLOCK)
cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
}
}
STATIC int
xfs_btree_ptr_is_null(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
return be64_to_cpu(ptr->l) == NULLDFSBNO;
else
return be32_to_cpu(ptr->s) == NULLAGBLOCK;
}
STATIC void
xfs_btree_set_ptr_null(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
ptr->l = cpu_to_be64(NULLDFSBNO);
else
ptr->s = cpu_to_be32(NULLAGBLOCK);
}
/*
* Get/set/init sibling pointers
*/
STATIC void
xfs_btree_get_sibling(
struct xfs_btree_cur *cur,
struct xfs_btree_block *block,
union xfs_btree_ptr *ptr,
int lr)
{
ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (lr == XFS_BB_RIGHTSIB)
ptr->l = block->bb_u.l.bb_rightsib;
else
ptr->l = block->bb_u.l.bb_leftsib;
} else {
if (lr == XFS_BB_RIGHTSIB)
ptr->s = block->bb_u.s.bb_rightsib;
else
ptr->s = block->bb_u.s.bb_leftsib;
}
}
STATIC void
xfs_btree_set_sibling(
struct xfs_btree_cur *cur,
struct xfs_btree_block *block,
union xfs_btree_ptr *ptr,
int lr)
{
ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (lr == XFS_BB_RIGHTSIB)
block->bb_u.l.bb_rightsib = ptr->l;
else
block->bb_u.l.bb_leftsib = ptr->l;
} else {
if (lr == XFS_BB_RIGHTSIB)
block->bb_u.s.bb_rightsib = ptr->s;
else
block->bb_u.s.bb_leftsib = ptr->s;
}
}
STATIC void
xfs_btree_init_block(
struct xfs_btree_cur *cur,
int level,
int numrecs,
struct xfs_btree_block *new) /* new block */
{
new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
new->bb_level = cpu_to_be16(level);
new->bb_numrecs = cpu_to_be16(numrecs);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
new->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
new->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
} else {
new->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
new->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
}
}
/*
* Return true if ptr is the last record in the btree and
* we need to track updates to this record. The decision
* will be further refined in the update_lastrec method.
*/
STATIC int
xfs_btree_is_lastrec(
struct xfs_btree_cur *cur,
struct xfs_btree_block *block,
int level)
{
union xfs_btree_ptr ptr;
if (level > 0)
return 0;
if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
return 0;
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
if (!xfs_btree_ptr_is_null(cur, &ptr))
return 0;
return 1;
}
STATIC void
xfs_btree_buf_to_ptr(
struct xfs_btree_cur *cur,
struct xfs_buf *bp,
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
XFS_BUF_ADDR(bp)));
else {
ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
XFS_BUF_ADDR(bp)));
}
}
STATIC xfs_daddr_t
xfs_btree_ptr_to_daddr(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
ASSERT(be64_to_cpu(ptr->l) != NULLDFSBNO);
return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
} else {
ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
ASSERT(be32_to_cpu(ptr->s) != NULLAGBLOCK);
return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
be32_to_cpu(ptr->s));
}
}
STATIC void
xfs_btree_set_refs(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
{
switch (cur->bc_btnum) {
case XFS_BTNUM_BNO:
case XFS_BTNUM_CNT:
XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
break;
case XFS_BTNUM_INO:
XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, XFS_INO_BTREE_REF);
break;
case XFS_BTNUM_BMAP:
XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_BMAP_BTREE_REF);
break;
default:
ASSERT(0);
}
}
STATIC int
xfs_btree_get_buf_block(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr,
int flags,
struct xfs_btree_block **block,
struct xfs_buf **bpp)
{
struct xfs_mount *mp = cur->bc_mp;
xfs_daddr_t d;
/* need to sort out how callers deal with failures first */
ASSERT(!(flags & XFS_BUF_TRYLOCK));
d = xfs_btree_ptr_to_daddr(cur, ptr);
*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
mp->m_bsize, flags);
ASSERT(*bpp);
ASSERT(!XFS_BUF_GETERROR(*bpp));
*block = XFS_BUF_TO_BLOCK(*bpp);
return 0;
}
/*
* Read in the buffer at the given ptr and return the buffer and
* the block pointer within the buffer.
*/
STATIC int
xfs_btree_read_buf_block(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr,
int level,
int flags,
struct xfs_btree_block **block,
struct xfs_buf **bpp)
{
struct xfs_mount *mp = cur->bc_mp;
xfs_daddr_t d;
int error;
/* need to sort out how callers deal with failures first */
ASSERT(!(flags & XFS_BUF_TRYLOCK));
d = xfs_btree_ptr_to_daddr(cur, ptr);
error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
mp->m_bsize, flags, bpp);
if (error)
return error;
ASSERT(*bpp != NULL);
ASSERT(!XFS_BUF_GETERROR(*bpp));
xfs_btree_set_refs(cur, *bpp);
*block = XFS_BUF_TO_BLOCK(*bpp);
error = xfs_btree_check_block(cur, *block, level, *bpp);
if (error)
xfs_trans_brelse(cur->bc_tp, *bpp);
return error;
}
/*
* Copy keys from one btree block to another.
*/
STATIC void
xfs_btree_copy_keys(
struct xfs_btree_cur *cur,
union xfs_btree_key *dst_key,
union xfs_btree_key *src_key,
int numkeys)
{
ASSERT(numkeys >= 0);
memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
}
/*
* Copy records from one btree block to another.
*/
STATIC void
xfs_btree_copy_recs(
struct xfs_btree_cur *cur,
union xfs_btree_rec *dst_rec,
union xfs_btree_rec *src_rec,
int numrecs)
{
ASSERT(numrecs >= 0);
memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
}
/*
* Copy block pointers from one btree block to another.
*/
STATIC void
xfs_btree_copy_ptrs(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *dst_ptr,
union xfs_btree_ptr *src_ptr,
int numptrs)
{
ASSERT(numptrs >= 0);
memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
}
/*
* Shift keys one index left/right inside a single btree block.
*/
STATIC void
xfs_btree_shift_keys(
struct xfs_btree_cur *cur,
union xfs_btree_key *key,
int dir,
int numkeys)
{
char *dst_key;
ASSERT(numkeys >= 0);
ASSERT(dir == 1 || dir == -1);
dst_key = (char *)key + (dir * cur->bc_ops->key_len);
memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
}
/*
* Shift records one index left/right inside a single btree block.
*/
STATIC void
xfs_btree_shift_recs(
struct xfs_btree_cur *cur,
union xfs_btree_rec *rec,
int dir,
int numrecs)
{
char *dst_rec;
ASSERT(numrecs >= 0);
ASSERT(dir == 1 || dir == -1);
dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
}
/*
* Shift block pointers one index left/right inside a single btree block.
*/
STATIC void
xfs_btree_shift_ptrs(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr,
int dir,
int numptrs)
{
char *dst_ptr;
ASSERT(numptrs >= 0);
ASSERT(dir == 1 || dir == -1);
dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
}
/*
* Log key values from the btree block.
*/
STATIC void
xfs_btree_log_keys(
struct xfs_btree_cur *cur,
struct xfs_buf *bp,
int first,
int last)
{
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
if (bp) {
xfs_trans_log_buf(cur->bc_tp, bp,
xfs_btree_key_offset(cur, first),
xfs_btree_key_offset(cur, last + 1) - 1);
} else {
xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
xfs_ilog_fbroot(cur->bc_private.b.whichfork));
}
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
/*
* Log record values from the btree block.
*/
void
xfs_btree_log_recs(
struct xfs_btree_cur *cur,
struct xfs_buf *bp,
int first,
int last)
{
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
xfs_trans_log_buf(cur->bc_tp, bp,
xfs_btree_rec_offset(cur, first),
xfs_btree_rec_offset(cur, last + 1) - 1);
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
/*
* Log block pointer fields from a btree block (nonleaf).
*/
STATIC void
xfs_btree_log_ptrs(
struct xfs_btree_cur *cur, /* btree cursor */
struct xfs_buf *bp, /* buffer containing btree block */
int first, /* index of first pointer to log */
int last) /* index of last pointer to log */
{
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
if (bp) {
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
int level = xfs_btree_get_level(block);
xfs_trans_log_buf(cur->bc_tp, bp,
xfs_btree_ptr_offset(cur, first, level),
xfs_btree_ptr_offset(cur, last + 1, level) - 1);
} else {
xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
xfs_ilog_fbroot(cur->bc_private.b.whichfork));
}
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
/*
* Log fields from a btree block header.
*/
void
xfs_btree_log_block(
struct xfs_btree_cur *cur, /* btree cursor */
struct xfs_buf *bp, /* buffer containing btree block */
int fields) /* mask of fields: XFS_BB_... */
{
int first; /* first byte offset logged */
int last; /* last byte offset logged */
static const short soffsets[] = { /* table of offsets (short) */
offsetof(struct xfs_btree_block, bb_magic),
offsetof(struct xfs_btree_block, bb_level),
offsetof(struct xfs_btree_block, bb_numrecs),
offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
XFS_BTREE_SBLOCK_LEN
};
static const short loffsets[] = { /* table of offsets (long) */
offsetof(struct xfs_btree_block, bb_magic),
offsetof(struct xfs_btree_block, bb_level),
offsetof(struct xfs_btree_block, bb_numrecs),
offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
XFS_BTREE_LBLOCK_LEN
};
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGBI(cur, bp, fields);
if (bp) {
xfs_btree_offsets(fields,
(cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
loffsets : soffsets,
XFS_BB_NUM_BITS, &first, &last);
xfs_trans_log_buf(cur->bc_tp, bp, first, last);
} else {
xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
xfs_ilog_fbroot(cur->bc_private.b.whichfork));
}
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
/*
* Increment cursor by one record at the level.
* For nonzero levels the leaf-ward information is untouched.
*/
int /* error */
xfs_btree_increment(
struct xfs_btree_cur *cur,
int level,
int *stat) /* success/failure */
{
struct xfs_btree_block *block;
union xfs_btree_ptr ptr;
struct xfs_buf *bp;
int error; /* error return value */
int lev;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGI(cur, level);
ASSERT(level < cur->bc_nlevels);
/* Read-ahead to the right at this level. */
xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
/* Get a pointer to the btree block. */
block = xfs_btree_get_block(cur, level, &bp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
goto error0;
#endif
/* We're done if we remain in the block after the increment. */
if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
goto out1;
/* Fail if we just went off the right edge of the tree. */
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
if (xfs_btree_ptr_is_null(cur, &ptr))
goto out0;
XFS_BTREE_STATS_INC(cur, increment);
/*
* March up the tree incrementing pointers.
* Stop when we don't go off the right edge of a block.
*/
for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
block = xfs_btree_get_block(cur, lev, &bp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, lev, bp);
if (error)
goto error0;
#endif
if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
break;
/* Read-ahead the right block for the next loop. */
xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
}
/*
* If we went off the root then we are either seriously
* confused or have the tree root in an inode.
*/
if (lev == cur->bc_nlevels) {
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
goto out0;
ASSERT(0);
error = EFSCORRUPTED;
goto error0;
}
ASSERT(lev < cur->bc_nlevels);
/*
* Now walk back down the tree, fixing up the cursor's buffer
* pointers and key numbers.
*/
for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
union xfs_btree_ptr *ptrp;
ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
error = xfs_btree_read_buf_block(cur, ptrp, --lev,
0, &block, &bp);
if (error)
goto error0;
xfs_btree_setbuf(cur, lev, bp);
cur->bc_ptrs[lev] = 1;
}
out1:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
/*
* Decrement cursor by one record at the level.
* For nonzero levels the leaf-ward information is untouched.
*/
int /* error */
xfs_btree_decrement(
struct xfs_btree_cur *cur,
int level,
int *stat) /* success/failure */
{
struct xfs_btree_block *block;
xfs_buf_t *bp;
int error; /* error return value */
int lev;
union xfs_btree_ptr ptr;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGI(cur, level);
ASSERT(level < cur->bc_nlevels);
/* Read-ahead to the left at this level. */
xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
/* We're done if we remain in the block after the decrement. */
if (--cur->bc_ptrs[level] > 0)
goto out1;
/* Get a pointer to the btree block. */
block = xfs_btree_get_block(cur, level, &bp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
goto error0;
#endif
/* Fail if we just went off the left edge of the tree. */
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
if (xfs_btree_ptr_is_null(cur, &ptr))
goto out0;
XFS_BTREE_STATS_INC(cur, decrement);
/*
* March up the tree decrementing pointers.
* Stop when we don't go off the left edge of a block.
*/
for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
if (--cur->bc_ptrs[lev] > 0)
break;
/* Read-ahead the left block for the next loop. */
xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
}
/*
* If we went off the root then we are either seriously
* confused or have the tree root in an inode.
*/
if (lev == cur->bc_nlevels) {
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
goto out0;
ASSERT(0);
error = EFSCORRUPTED;
goto error0;
}
ASSERT(lev < cur->bc_nlevels);
/*
* Now walk back down the tree, fixing up the cursor's buffer
* pointers and key numbers.
*/
for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
union xfs_btree_ptr *ptrp;
ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
error = xfs_btree_read_buf_block(cur, ptrp, --lev,
0, &block, &bp);
if (error)
goto error0;
xfs_btree_setbuf(cur, lev, bp);
cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
}
out1:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
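/*
 * Illustrative sketch (not from the original source): step a leaf-level
 * cursor a given number of records to the right using the increment
 * routine above.  Assumes the cursor was already positioned by a lookup.
 */
STATIC int				/* error */
xfs_btree_example_step_right(
	struct xfs_btree_cur	*cur,	/* positioned leaf-level cursor */
	int			n,	/* number of records to step over */
	int			*stat)	/* 0 if we ran off the right edge */
{
	int			error;	/* error return value */

	*stat = 1;
	while (n-- > 0 && *stat) {
		/* Increment sets *stat to 0 when we fall off the tree. */
		error = xfs_btree_increment(cur, 0, stat);
		if (error)
			return error;
	}
	return 0;
}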
STATIC int
xfs_btree_lookup_get_block(
struct xfs_btree_cur *cur, /* btree cursor */
int level, /* level in the btree */
union xfs_btree_ptr *pp, /* ptr to btree block */
struct xfs_btree_block **blkp) /* return btree block */
{
struct xfs_buf *bp; /* buffer pointer for btree block */
int error = 0;
/* special case the root block if in an inode */
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(level == cur->bc_nlevels - 1)) {
*blkp = xfs_btree_get_iroot(cur);
return 0;
}
/*
* If the old buffer at this level is for the disk address we are
* looking for, re-use it.
*
* Otherwise throw it away and get a new one.
*/
bp = cur->bc_bufs[level];
if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) {
*blkp = XFS_BUF_TO_BLOCK(bp);
return 0;
}
error = xfs_btree_read_buf_block(cur, pp, level, 0, blkp, &bp);
if (error)
return error;
xfs_btree_setbuf(cur, level, bp);
return 0;
}
/*
* Get current search key. For level 0 we don't actually have a key
* structure so we make one up from the record. For all other levels
* we just return the right key.
*/
STATIC union xfs_btree_key *
xfs_lookup_get_search_key(
struct xfs_btree_cur *cur,
int level,
int keyno,
struct xfs_btree_block *block,
union xfs_btree_key *kp)
{
if (level == 0) {
cur->bc_ops->init_key_from_rec(kp,
xfs_btree_rec_addr(cur, keyno, block));
return kp;
}
return xfs_btree_key_addr(cur, keyno, block);
}
/*
* Lookup the record. The cursor is made to point to it, based on dir.
* Set *stat to 0 if no such record can be found, 1 for success.
*/
int /* error */
xfs_btree_lookup(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_lookup_t dir, /* <=, ==, or >= */
int *stat) /* success/failure */
{
struct xfs_btree_block *block; /* current btree block */
__int64_t diff; /* difference for the current key */
int error; /* error return value */
int keyno; /* current key number */
int level; /* level in the btree */
union xfs_btree_ptr *pp; /* ptr to btree block */
union xfs_btree_ptr ptr; /* ptr to btree block */
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGI(cur, dir);
XFS_BTREE_STATS_INC(cur, lookup);
block = NULL;
keyno = 0;
/* initialise start pointer from cursor */
cur->bc_ops->init_ptr_from_cur(cur, &ptr);
pp = &ptr;
/*
* Iterate over each level in the btree, starting at the root.
* For each level above the leaves, find the key we need, based
* on the lookup record, then follow the corresponding block
* pointer down to the next level.
*/
for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
/* Get the block we need to do the lookup on. */
error = xfs_btree_lookup_get_block(cur, level, pp, &block);
if (error)
goto error0;
if (diff == 0) {
/*
* If we already had a key match at a higher level, we
* know we need to use the first entry in this block.
*/
keyno = 1;
} else {
/* Otherwise search this block. Do a binary search. */
int high; /* high entry number */
int low; /* low entry number */
/* Set low and high entry numbers, 1-based. */
low = 1;
high = xfs_btree_get_numrecs(block);
if (!high) {
/* Block is empty, must be an empty leaf. */
ASSERT(level == 0 && cur->bc_nlevels == 1);
cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
}
/* Binary search the block. */
while (low <= high) {
union xfs_btree_key key;
union xfs_btree_key *kp;
XFS_BTREE_STATS_INC(cur, compare);
/* keyno is average of low and high. */
keyno = (low + high) >> 1;
/* Get current search key */
kp = xfs_lookup_get_search_key(cur, level,
keyno, block, &key);
/*
* Compute difference to get next direction:
* - less than, move right
* - greater than, move left
* - equal, we're done
*/
diff = cur->bc_ops->key_diff(cur, kp);
if (diff < 0)
low = keyno + 1;
else if (diff > 0)
high = keyno - 1;
else
break;
}
}
/*
* If there are more levels, set up for the next level
* by getting the block number and filling in the cursor.
*/
if (level > 0) {
/*
* If we moved left, need the previous key number,
* unless there isn't one.
*/
if (diff > 0 && --keyno < 1)
keyno = 1;
pp = xfs_btree_ptr_addr(cur, keyno, block);
#ifdef DEBUG
error = xfs_btree_check_ptr(cur, pp, 0, level);
if (error)
goto error0;
#endif
cur->bc_ptrs[level] = keyno;
}
}
/* Done with the search. See if we need to adjust the results. */
if (dir != XFS_LOOKUP_LE && diff < 0) {
keyno++;
/*
* If ge search and we went off the end of the block, but it's
* not the last block, we're in the wrong block.
*/
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
if (dir == XFS_LOOKUP_GE &&
keyno > xfs_btree_get_numrecs(block) &&
!xfs_btree_ptr_is_null(cur, &ptr)) {
int i;
cur->bc_ptrs[0] = keyno;
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto error0;
XFS_WANT_CORRUPTED_RETURN(i == 1);
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
}
} else if (dir == XFS_LOOKUP_LE && diff > 0)
keyno--;
cur->bc_ptrs[0] = keyno;
/* Return if we succeeded or not. */
if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
*stat = 0;
else if (dir != XFS_LOOKUP_EQ || diff == 0)
*stat = 1;
else
*stat = 0;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
/*
* Update keys at all levels from here to the root along the cursor's path.
*/
STATIC int
xfs_btree_updkey(
struct xfs_btree_cur *cur,
union xfs_btree_key *keyp,
int level)
{
struct xfs_btree_block *block;
struct xfs_buf *bp;
union xfs_btree_key *kp;
int ptr;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGIK(cur, level, keyp);
ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || level >= 1);
/*
* Go up the tree from this level toward the root.
* At each level, update the key value to the value input.
* Stop when we reach a level where the cursor isn't pointing
* at the first entry in the block.
*/
for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
#ifdef DEBUG
int error;
#endif
block = xfs_btree_get_block(cur, level, &bp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
#endif
ptr = cur->bc_ptrs[level];
kp = xfs_btree_key_addr(cur, ptr, block);
xfs_btree_copy_keys(cur, kp, keyp, 1);
xfs_btree_log_keys(cur, bp, ptr, ptr);
}
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
}
/*
* Update the record referred to by cur to the value in the
* given record. This either works (return 0) or gets an
* EFSCORRUPTED error.
*/
int
xfs_btree_update(
struct xfs_btree_cur *cur,
union xfs_btree_rec *rec)
{
struct xfs_btree_block *block;
struct xfs_buf *bp;
int error;
int ptr;
union xfs_btree_rec *rp;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGR(cur, rec);
/* Pick up the current block. */
block = xfs_btree_get_block(cur, 0, &bp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, 0, bp);
if (error)
goto error0;
#endif
/* Get the address of the rec to be updated. */
ptr = cur->bc_ptrs[0];
rp = xfs_btree_rec_addr(cur, ptr, block);
/* Fill in the new contents and log them. */
xfs_btree_copy_recs(cur, rp, rec, 1);
xfs_btree_log_recs(cur, bp, ptr, ptr);
/*
* If we are tracking the last record in the tree and
* we are at the far right edge of the tree, update it.
*/
if (xfs_btree_is_lastrec(cur, block, 0)) {
cur->bc_ops->update_lastrec(cur, block, rec,
ptr, LASTREC_UPDATE);
}
/* Updating first rec in leaf. Pass new key value up to our parent. */
if (ptr == 1) {
union xfs_btree_key key;
cur->bc_ops->init_key_from_rec(&key, rec);
error = xfs_btree_updkey(cur, &key, 1);
if (error)
goto error0;
}
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
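/*
 * Illustrative sketch (not from the original source): overwrite an
 * exactly matching record.  How the lookup key in the cursor and the
 * replacement record are built is btree-type specific and assumed to
 * have been done by the caller.
 */
STATIC int				/* error */
xfs_btree_example_replace(
	struct xfs_btree_cur	*cur,	/* cursor with its lookup key primed */
	union xfs_btree_rec	*nrec,	/* replacement record contents */
	int			*stat)	/* 1 if a record was replaced */
{
	int			error;	/* error return value */

	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
	if (error || *stat == 0)
		return error;
	return xfs_btree_update(cur, nrec);
}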
/*
* Move 1 record left from cur/level if possible.
* Update cur to reflect the new path.
*/
STATIC int /* error */
xfs_btree_lshift(
struct xfs_btree_cur *cur,
int level,
int *stat) /* success/failure */
{
union xfs_btree_key key; /* btree key */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
int lrecs; /* left record count */
struct xfs_buf *rbp; /* right buffer pointer */
struct xfs_btree_block *right; /* right btree block */
int rrecs; /* right record count */
union xfs_btree_ptr lptr; /* left btree pointer */
union xfs_btree_key *rkp = NULL; /* right btree key */
union xfs_btree_ptr *rpp = NULL; /* right address pointer */
union xfs_btree_rec *rrp = NULL; /* right record pointer */
int error; /* error return value */
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGI(cur, level);
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
level == cur->bc_nlevels - 1)
goto out0;
/* Set up variables for this block as "right". */
right = xfs_btree_get_block(cur, level, &rbp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, right, level, rbp);
if (error)
goto error0;
#endif
/* If we've got no left sibling then we can't shift an entry left. */
xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
if (xfs_btree_ptr_is_null(cur, &lptr))
goto out0;
/*
* If the cursor entry is the one that would be moved, don't
* do it... it's too complicated.
*/
if (cur->bc_ptrs[level] <= 1)
goto out0;
/* Set up the left neighbor as "left". */
error = xfs_btree_read_buf_block(cur, &lptr, level, 0, &left, &lbp);
if (error)
goto error0;
/* If it's full, it can't take another entry. */
lrecs = xfs_btree_get_numrecs(left);
if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
goto out0;
rrecs = xfs_btree_get_numrecs(right);
/*
* We add one entry to the left side and remove one for the right side.
* Account for it here; the changes will be updated on disk and logged
* later.
*/
lrecs++;
rrecs--;
XFS_BTREE_STATS_INC(cur, lshift);
XFS_BTREE_STATS_ADD(cur, moves, 1);
/*
* If non-leaf, copy a key and a ptr to the left block.
* Log the changes to the left block.
*/
if (level > 0) {
/* It's a non-leaf. Move keys and pointers. */
union xfs_btree_key *lkp; /* left btree key */
union xfs_btree_ptr *lpp; /* left address pointer */
lkp = xfs_btree_key_addr(cur, lrecs, left);
rkp = xfs_btree_key_addr(cur, 1, right);
lpp = xfs_btree_ptr_addr(cur, lrecs, left);
rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
error = xfs_btree_check_ptr(cur, rpp, 0, level);
if (error)
goto error0;
#endif
xfs_btree_copy_keys(cur, lkp, rkp, 1);
xfs_btree_copy_ptrs(cur, lpp, rpp, 1);
xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
ASSERT(cur->bc_ops->keys_inorder(cur,
xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
} else {
/* It's a leaf. Move records. */
union xfs_btree_rec *lrp; /* left record pointer */
lrp = xfs_btree_rec_addr(cur, lrecs, left);
rrp = xfs_btree_rec_addr(cur, 1, right);
xfs_btree_copy_recs(cur, lrp, rrp, 1);
xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
ASSERT(cur->bc_ops->recs_inorder(cur,
xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
}
xfs_btree_set_numrecs(left, lrecs);
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
xfs_btree_set_numrecs(right, rrecs);
xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
/*
* Slide the contents of right down one entry.
*/
XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
if (level > 0) {
/* It's a nonleaf. operate on keys and ptrs */
#ifdef DEBUG
int i; /* loop index */
for (i = 0; i < rrecs; i++) {
error = xfs_btree_check_ptr(cur, rpp, i + 1, level);
if (error)
goto error0;
}
#endif
xfs_btree_shift_keys(cur,
xfs_btree_key_addr(cur, 2, right),
-1, rrecs);
xfs_btree_shift_ptrs(cur,
xfs_btree_ptr_addr(cur, 2, right),
-1, rrecs);
xfs_btree_log_keys(cur, rbp, 1, rrecs);
xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
} else {
/* It's a leaf. operate on records */
xfs_btree_shift_recs(cur,
xfs_btree_rec_addr(cur, 2, right),
-1, rrecs);
xfs_btree_log_recs(cur, rbp, 1, rrecs);
/*
* If it's the first record in the block, we'll need a key
* structure to pass up to the next level (updkey).
*/
cur->bc_ops->init_key_from_rec(&key,
xfs_btree_rec_addr(cur, 1, right));
rkp = &key;
}
/* Update the parent key values of right. */
error = xfs_btree_updkey(cur, rkp, level + 1);
if (error)
goto error0;
/* Slide the cursor value left one. */
cur->bc_ptrs[level]--;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
/*
* Move 1 record right from cur/level if possible.
* Update cur to reflect the new path.
*/
STATIC int /* error */
xfs_btree_rshift(
struct xfs_btree_cur *cur,
int level,
int *stat) /* success/failure */
{
union xfs_btree_key key; /* btree key */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
struct xfs_buf *rbp; /* right buffer pointer */
struct xfs_btree_block *right; /* right btree block */
struct xfs_btree_cur *tcur; /* temporary btree cursor */
union xfs_btree_ptr rptr; /* right block pointer */
union xfs_btree_key *rkp; /* right btree key */
int rrecs; /* right record count */
int lrecs; /* left record count */
int error; /* error return value */
int i; /* loop counter */
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGI(cur, level);
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(level == cur->bc_nlevels - 1))
goto out0;
/* Set up variables for this block as "left". */
left = xfs_btree_get_block(cur, level, &lbp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, left, level, lbp);
if (error)
goto error0;
#endif
/* If we've got no right sibling then we can't shift an entry right. */
xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
if (xfs_btree_ptr_is_null(cur, &rptr))
goto out0;
/*
* If the cursor entry is the one that would be moved, don't
* do it... it's too complicated.
*/
lrecs = xfs_btree_get_numrecs(left);
if (cur->bc_ptrs[level] >= lrecs)
goto out0;
/* Set up the right neighbor as "right". */
error = xfs_btree_read_buf_block(cur, &rptr, level, 0, &right, &rbp);
if (error)
goto error0;
/* If it's full, it can't take another entry. */
rrecs = xfs_btree_get_numrecs(right);
if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
goto out0;
XFS_BTREE_STATS_INC(cur, rshift);
XFS_BTREE_STATS_ADD(cur, moves, rrecs);
/*
* Make a hole at the start of the right neighbor block, then
* copy the last left block entry to the hole.
*/
if (level > 0) {
/* It's a nonleaf. make a hole in the keys and ptrs */
union xfs_btree_key *lkp;
union xfs_btree_ptr *lpp;
union xfs_btree_ptr *rpp;
lkp = xfs_btree_key_addr(cur, lrecs, left);
lpp = xfs_btree_ptr_addr(cur, lrecs, left);
rkp = xfs_btree_key_addr(cur, 1, right);
rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
for (i = rrecs - 1; i >= 0; i--) {
error = xfs_btree_check_ptr(cur, rpp, i, level);
if (error)
goto error0;
}
#endif
xfs_btree_shift_keys(cur, rkp, 1, rrecs);
xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);
#ifdef DEBUG
error = xfs_btree_check_ptr(cur, lpp, 0, level);
if (error)
goto error0;
#endif
/* Now put the new data in, and log it. */
xfs_btree_copy_keys(cur, rkp, lkp, 1);
xfs_btree_copy_ptrs(cur, rpp, lpp, 1);
xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
xfs_btree_key_addr(cur, 2, right)));
} else {
/* It's a leaf. make a hole in the records */
union xfs_btree_rec *lrp;
union xfs_btree_rec *rrp;
lrp = xfs_btree_rec_addr(cur, lrecs, left);
rrp = xfs_btree_rec_addr(cur, 1, right);
xfs_btree_shift_recs(cur, rrp, 1, rrecs);
/* Now put the new data in, and log it. */
xfs_btree_copy_recs(cur, rrp, lrp, 1);
xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
cur->bc_ops->init_key_from_rec(&key, rrp);
rkp = &key;
ASSERT(cur->bc_ops->recs_inorder(cur, rrp,
xfs_btree_rec_addr(cur, 2, right)));
}
/*
* Decrement and log left's numrecs, bump and log right's numrecs.
*/
xfs_btree_set_numrecs(left, --lrecs);
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
xfs_btree_set_numrecs(right, ++rrecs);
xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
/*
* Using a temporary cursor, update the parent key values of the
* block on the right.
*/
error = xfs_btree_dup_cursor(cur, &tcur);
if (error)
goto error0;
i = xfs_btree_lastrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
error = xfs_btree_increment(tcur, level, &i);
if (error)
goto error1;
error = xfs_btree_updkey(tcur, rkp, level + 1);
if (error)
goto error1;
xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
error1:
XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
return error;
}
/*
* Split cur/level block in half.
* Return new block number and the key to its first
* record (to be inserted into parent).
*/
STATIC int /* error */
xfs_btree_split(
struct xfs_btree_cur *cur,
int level,
union xfs_btree_ptr *ptrp,
union xfs_btree_key *key,
struct xfs_btree_cur **curp,
int *stat) /* success/failure */
{
union xfs_btree_ptr lptr; /* left sibling block ptr */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
union xfs_btree_ptr rptr; /* right sibling block ptr */
struct xfs_buf *rbp; /* right buffer pointer */
struct xfs_btree_block *right; /* right btree block */
union xfs_btree_ptr rrptr; /* right-right sibling ptr */
struct xfs_buf *rrbp; /* right-right buffer pointer */
struct xfs_btree_block *rrblock; /* right-right btree block */
int lrecs;
int rrecs;
int src_index;
int error; /* error return value */
#ifdef DEBUG
int i;
#endif
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key);
XFS_BTREE_STATS_INC(cur, split);
/* Set up left block (current one). */
left = xfs_btree_get_block(cur, level, &lbp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, left, level, lbp);
if (error)
goto error0;
#endif
xfs_btree_buf_to_ptr(cur, lbp, &lptr);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, 1, stat);
if (error)
goto error0;
if (*stat == 0)
goto out0;
XFS_BTREE_STATS_INC(cur, alloc);
/* Set up the new block as "right". */
error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp);
if (error)
goto error0;
/* Fill in the btree header for the new right block. */
xfs_btree_init_block(cur, xfs_btree_get_level(left), 0, right);
/*
* Split the entries between the old and the new block evenly.
* If there's an odd number of entries now, arrange the split so that
* each block has the same number of entries after the new record is
* inserted.
*/
lrecs = xfs_btree_get_numrecs(left);
rrecs = lrecs / 2;
if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
rrecs++;
src_index = (lrecs - rrecs + 1);
XFS_BTREE_STATS_ADD(cur, moves, rrecs);
/*
* Copy btree block entries from the left block over to the
* new block, the right. Update the right block and log the
* changes.
*/
if (level > 0) {
/* It's a non-leaf. Move keys and pointers. */
union xfs_btree_key *lkp; /* left btree key */
union xfs_btree_ptr *lpp; /* left address pointer */
union xfs_btree_key *rkp; /* right btree key */
union xfs_btree_ptr *rpp; /* right address pointer */
lkp = xfs_btree_key_addr(cur, src_index, left);
lpp = xfs_btree_ptr_addr(cur, src_index, left);
rkp = xfs_btree_key_addr(cur, 1, right);
rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
for (i = src_index; i < rrecs; i++) {
error = xfs_btree_check_ptr(cur, lpp, i, level);
if (error)
goto error0;
}
#endif
xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
xfs_btree_log_keys(cur, rbp, 1, rrecs);
xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
/* Grab the keys to the entries moved to the right block */
xfs_btree_copy_keys(cur, key, rkp, 1);
} else {
/* It's a leaf. Move records. */
union xfs_btree_rec *lrp; /* left record pointer */
union xfs_btree_rec *rrp; /* right record pointer */
lrp = xfs_btree_rec_addr(cur, src_index, left);
rrp = xfs_btree_rec_addr(cur, 1, right);
xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
xfs_btree_log_recs(cur, rbp, 1, rrecs);
cur->bc_ops->init_key_from_rec(key,
xfs_btree_rec_addr(cur, 1, right));
}
/*
* Find the left block number by looking in the buffer.
* Adjust numrecs, sibling pointers.
*/
xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
lrecs -= rrecs;
xfs_btree_set_numrecs(left, lrecs);
xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
/*
* If there's a block to the new block's right, make that block
* point back to right instead of to left.
*/
if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
error = xfs_btree_read_buf_block(cur, &rrptr, level,
0, &rrblock, &rrbp);
if (error)
goto error0;
xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
}
/*
* If the cursor is really in the right block, move it there.
* If it's just pointing past the last entry in left, then we'll
* insert there, so don't change anything in that case.
*/
if (cur->bc_ptrs[level] > lrecs + 1) {
xfs_btree_setbuf(cur, level, rbp);
cur->bc_ptrs[level] -= lrecs;
}
/*
* If there are more levels, we'll need another cursor which refers
* the right block, no matter where this cursor was.
*/
if (level + 1 < cur->bc_nlevels) {
error = xfs_btree_dup_cursor(cur, curp);
if (error)
goto error0;
(*curp)->bc_ptrs[level + 1]++;
}
*ptrp = rptr;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
/*
* Copy the old inode root contents into a real block and make the
* broot point to it.
*/
int /* error */
xfs_btree_new_iroot(
struct xfs_btree_cur *cur, /* btree cursor */
int *logflags, /* logging flags for inode */
int *stat) /* return status - 0 fail */
{
struct xfs_buf *cbp; /* buffer for cblock */
struct xfs_btree_block *block; /* btree block */
struct xfs_btree_block *cblock; /* child btree block */
union xfs_btree_key *ckp; /* child key pointer */
union xfs_btree_ptr *cpp; /* child ptr pointer */
union xfs_btree_key *kp; /* pointer to btree key */
union xfs_btree_ptr *pp; /* pointer to block addr */
union xfs_btree_ptr nptr; /* new block addr */
int level; /* btree level */
int error; /* error return code */
#ifdef DEBUG
int i; /* loop counter */
#endif
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_STATS_INC(cur, newroot);
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
level = cur->bc_nlevels - 1;
block = xfs_btree_get_iroot(cur);
pp = xfs_btree_ptr_addr(cur, 1, block);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, pp, &nptr, 1, stat);
if (error)
goto error0;
if (*stat == 0) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
}
XFS_BTREE_STATS_INC(cur, alloc);
/* Copy the root into a real block. */
error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp);
if (error)
goto error0;
memcpy(cblock, block, xfs_btree_block_len(cur));
be16_add_cpu(&block->bb_level, 1);
xfs_btree_set_numrecs(block, 1);
cur->bc_nlevels++;
cur->bc_ptrs[level + 1] = 1;
kp = xfs_btree_key_addr(cur, 1, block);
ckp = xfs_btree_key_addr(cur, 1, cblock);
xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));
cpp = xfs_btree_ptr_addr(cur, 1, cblock);
#ifdef DEBUG
for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
error = xfs_btree_check_ptr(cur, pp, i, level);
if (error)
goto error0;
}
#endif
xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));
#ifdef DEBUG
error = xfs_btree_check_ptr(cur, &nptr, 0, level);
if (error)
goto error0;
#endif
xfs_btree_copy_ptrs(cur, pp, &nptr, 1);
xfs_iroot_realloc(cur->bc_private.b.ip,
1 - xfs_btree_get_numrecs(cblock),
cur->bc_private.b.whichfork);
xfs_btree_setbuf(cur, level, cbp);
/*
* Do all this logging at the end so that
* the root is at the right level.
*/
xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
*logflags |=
XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
*stat = 1;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
/*
* Allocate a new root block, fill it in.
*/
STATIC int /* error */
xfs_btree_new_root(
struct xfs_btree_cur *cur, /* btree cursor */
int *stat) /* success/failure */
{
struct xfs_btree_block *block; /* one half of the old root block */
struct xfs_buf *bp; /* buffer containing block */
int error; /* error return value */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
struct xfs_buf *nbp; /* new (root) buffer */
struct xfs_btree_block *new; /* new (root) btree block */
int nptr; /* new value for key index, 1 or 2 */
struct xfs_buf *rbp; /* right buffer pointer */
struct xfs_btree_block *right; /* right btree block */
union xfs_btree_ptr rptr;
union xfs_btree_ptr lptr;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_STATS_INC(cur, newroot);
/* initialise our start point from the cursor */
cur->bc_ops->init_ptr_from_cur(cur, &rptr);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, 1, stat);
if (error)
goto error0;
if (*stat == 0)
goto out0;
XFS_BTREE_STATS_INC(cur, alloc);
/* Set up the new block. */
error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp);
if (error)
goto error0;
/* Set the root in the holding structure increasing the level by 1. */
cur->bc_ops->set_root(cur, &lptr, 1);
/*
* At the previous root level there are now two blocks: the old root,
* and the new block generated when it was split. We don't know which
* one the cursor is pointing at, so we set up variables "left" and
* "right" for each case.
*/
block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
if (error)
goto error0;
#endif
xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
if (!xfs_btree_ptr_is_null(cur, &rptr)) {
/* Our block is left, pick up the right block. */
lbp = bp;
xfs_btree_buf_to_ptr(cur, lbp, &lptr);
left = block;
error = xfs_btree_read_buf_block(cur, &rptr,
cur->bc_nlevels - 1, 0, &right, &rbp);
if (error)
goto error0;
bp = rbp;
nptr = 1;
} else {
/* Our block is right, pick up the left block. */
rbp = bp;
xfs_btree_buf_to_ptr(cur, rbp, &rptr);
right = block;
xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
error = xfs_btree_read_buf_block(cur, &lptr,
cur->bc_nlevels - 1, 0, &left, &lbp);
if (error)
goto error0;
bp = lbp;
nptr = 2;
}
/* Fill in the new block's btree header and log it. */
xfs_btree_init_block(cur, cur->bc_nlevels, 2, new);
xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
!xfs_btree_ptr_is_null(cur, &rptr));
/* Fill in the key data in the new root. */
if (xfs_btree_get_level(left) > 0) {
xfs_btree_copy_keys(cur,
xfs_btree_key_addr(cur, 1, new),
xfs_btree_key_addr(cur, 1, left), 1);
xfs_btree_copy_keys(cur,
xfs_btree_key_addr(cur, 2, new),
xfs_btree_key_addr(cur, 1, right), 1);
} else {
cur->bc_ops->init_key_from_rec(
xfs_btree_key_addr(cur, 1, new),
xfs_btree_rec_addr(cur, 1, left));
cur->bc_ops->init_key_from_rec(
xfs_btree_key_addr(cur, 2, new),
xfs_btree_rec_addr(cur, 1, right));
}
xfs_btree_log_keys(cur, nbp, 1, 2);
/* Fill in the pointer data in the new root. */
xfs_btree_copy_ptrs(cur,
xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
xfs_btree_copy_ptrs(cur,
xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
xfs_btree_log_ptrs(cur, nbp, 1, 2);
/* Fix up the cursor. */
xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
cur->bc_ptrs[cur->bc_nlevels] = nptr;
cur->bc_nlevels++;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
}
STATIC int
xfs_btree_make_block_unfull(
struct xfs_btree_cur *cur, /* btree cursor */
int level, /* btree level */
int numrecs,/* # of recs in block */
int *oindex,/* old tree index */
int *index, /* new tree index */
union xfs_btree_ptr *nptr, /* new btree ptr */
struct xfs_btree_cur **ncur, /* new btree cursor */
union xfs_btree_rec *nrec, /* new record */
int *stat)
{
union xfs_btree_key key; /* new btree key value */
int error = 0;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
level == cur->bc_nlevels - 1) {
struct xfs_inode *ip = cur->bc_private.b.ip;
if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
/* A root block that can be made bigger. */
xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
} else {
/* A root block that needs replacing */
int logflags = 0;
error = xfs_btree_new_iroot(cur, &logflags, stat);
if (error || *stat == 0)
return error;
xfs_trans_log_inode(cur->bc_tp, ip, logflags);
}
return 0;
}
/* First, try shifting an entry to the right neighbor. */
error = xfs_btree_rshift(cur, level, stat);
if (error || *stat)
return error;
/* Next, try shifting an entry to the left neighbor. */
error = xfs_btree_lshift(cur, level, stat);
if (error)
return error;
if (*stat) {
*oindex = *index = cur->bc_ptrs[level];
return 0;
}
/*
* Next, try splitting the current block in half.
*
* If this works we have to re-set our variables because we
* could be in a different block now.
*/
error = xfs_btree_split(cur, level, nptr, &key, ncur, stat);
if (error || *stat == 0)
return error;
*index = cur->bc_ptrs[level];
cur->bc_ops->init_rec_from_key(&key, nrec);
return 0;
}
/*
* Insert one record/level. Return information to the caller
* allowing the next level up to proceed if necessary.
*/
STATIC int
xfs_btree_insrec(
struct xfs_btree_cur *cur, /* btree cursor */
int level, /* level to insert record at */
union xfs_btree_ptr *ptrp, /* i/o: block number inserted */
union xfs_btree_rec *recp, /* i/o: record data inserted */
struct xfs_btree_cur **curp, /* output: new cursor replacing cur */
int *stat) /* success/failure */
{
struct xfs_btree_block *block; /* btree block */
struct xfs_buf *bp; /* buffer for block */
union xfs_btree_key key; /* btree key */
union xfs_btree_ptr nptr; /* new block ptr */
struct xfs_btree_cur *ncur; /* new btree cursor */
union xfs_btree_rec nrec; /* new record to be inserted */
int optr; /* old key/record index */
int ptr; /* key/record index */
int numrecs;/* number of records */
int error; /* error return value */
#ifdef DEBUG
int i;
#endif
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, recp);
ncur = NULL;
/*
* If we have an external root pointer, and we've made it to the
* root level, allocate a new root block and we're done.
*/
if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(level >= cur->bc_nlevels)) {
error = xfs_btree_new_root(cur, stat);
xfs_btree_set_ptr_null(cur, ptrp);
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return error;
}
/* If we're off the left edge, return failure. */
ptr = cur->bc_ptrs[level];
if (ptr == 0) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
}
/* Make a key out of the record data to be inserted, and save it. */
cur->bc_ops->init_key_from_rec(&key, recp);
optr = ptr;
XFS_BTREE_STATS_INC(cur, insrec);
/* Get pointers to the btree buffer and block. */
block = xfs_btree_get_block(cur, level, &bp);
numrecs = xfs_btree_get_numrecs(block);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
goto error0;
/* Check that the new entry is being inserted in the right place. */
if (ptr <= numrecs) {
if (level == 0) {
ASSERT(cur->bc_ops->recs_inorder(cur, recp,
xfs_btree_rec_addr(cur, ptr, block)));
} else {
ASSERT(cur->bc_ops->keys_inorder(cur, &key,
xfs_btree_key_addr(cur, ptr, block)));
}
}
#endif
/*
* If the block is full, we can't insert the new entry until we
* make the block un-full.
*/
xfs_btree_set_ptr_null(cur, &nptr);
if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
error = xfs_btree_make_block_unfull(cur, level, numrecs,
&optr, &ptr, &nptr, &ncur, &nrec, stat);
if (error || *stat == 0)
goto error0;
}
/*
* The current block may have changed if the block was
* previously full and we have just made space in it.
*/
block = xfs_btree_get_block(cur, level, &bp);
numrecs = xfs_btree_get_numrecs(block);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
return error;
#endif
/*
* At this point we know there's room for our new entry in the block
* we're pointing at.
*/
XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);
if (level > 0) {
/* It's a nonleaf. make a hole in the keys and ptrs */
union xfs_btree_key *kp;
union xfs_btree_ptr *pp;
kp = xfs_btree_key_addr(cur, ptr, block);
pp = xfs_btree_ptr_addr(cur, ptr, block);
#ifdef DEBUG
for (i = numrecs - ptr; i >= 0; i--) {
error = xfs_btree_check_ptr(cur, pp, i, level);
if (error)
return error;
}
#endif
xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);
#ifdef DEBUG
error = xfs_btree_check_ptr(cur, ptrp, 0, level);
if (error)
goto error0;
#endif
/* Now put the new data in, bump numrecs and log it. */
xfs_btree_copy_keys(cur, kp, &key, 1);
xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
numrecs++;
xfs_btree_set_numrecs(block, numrecs);
xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
xfs_btree_log_keys(cur, bp, ptr, numrecs);
#ifdef DEBUG
if (ptr < numrecs) {
ASSERT(cur->bc_ops->keys_inorder(cur, kp,
xfs_btree_key_addr(cur, ptr + 1, block)));
}
#endif
} else {
/* It's a leaf. make a hole in the records */
union xfs_btree_rec *rp;
rp = xfs_btree_rec_addr(cur, ptr, block);
xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
/* Now put the new data in, bump numrecs and log it. */
xfs_btree_copy_recs(cur, rp, recp, 1);
xfs_btree_set_numrecs(block, ++numrecs);
xfs_btree_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
if (ptr < numrecs) {
ASSERT(cur->bc_ops->recs_inorder(cur, rp,
xfs_btree_rec_addr(cur, ptr + 1, block)));
}
#endif
}
/* Log the new number of records in the btree header. */
xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
/* If we inserted at the start of a block, update the parents' keys. */
if (optr == 1) {
error = xfs_btree_updkey(cur, &key, level + 1);
if (error)
goto error0;
}
/*
* If we are tracking the last record in the tree and
* we are at the far right edge of the tree, update it.
*/
if (xfs_btree_is_lastrec(cur, block, level)) {
cur->bc_ops->update_lastrec(cur, block, recp,
ptr, LASTREC_INSREC);
}
/*
* Return the new block number, if any.
* If there is one, give back a record value and a cursor too.
*/
*ptrp = nptr;
if (!xfs_btree_ptr_is_null(cur, &nptr)) {
*recp = nrec;
*curp = ncur;
}
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
/*
* Insert the record at the point referenced by cur.
*
* A multi-level split of the tree on insert will invalidate the original
* cursor. All callers of this function should assume that the cursor is
* no longer valid and revalidate it.
*/
int
xfs_btree_insert(
struct xfs_btree_cur *cur,
int *stat)
{
int error; /* error return value */
int i; /* result value, 0 for failure */
int level; /* current level number in btree */
union xfs_btree_ptr nptr; /* new block number (split result) */
struct xfs_btree_cur *ncur; /* new cursor (split result) */
struct xfs_btree_cur *pcur; /* previous level's cursor */
union xfs_btree_rec rec; /* record to insert */
level = 0;
ncur = NULL;
pcur = cur;
xfs_btree_set_ptr_null(cur, &nptr);
cur->bc_ops->init_rec_from_cur(cur, &rec);
/*
* Loop going up the tree, starting at the leaf level.
* Stop when we don't get a split block, that must mean that
* the insert is finished with this level.
*/
do {
/*
* Insert nrec/nptr into this level of the tree.
* Note if we fail, nptr will be null.
*/
error = xfs_btree_insrec(pcur, level, &nptr, &rec, &ncur, &i);
if (error) {
if (pcur != cur)
xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
goto error0;
}
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
level++;
/*
* See if the cursor we just used is trash.
* Can't trash the caller's cursor, but otherwise we should
* if ncur is a new cursor or we're about to be done.
*/
if (pcur != cur &&
(ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
/* Save the state from the cursor before we trash it */
if (cur->bc_ops->update_cursor)
cur->bc_ops->update_cursor(pcur, cur);
cur->bc_nlevels = pcur->bc_nlevels;
xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
}
/* If we got a new cursor, switch to it. */
if (ncur) {
pcur = ncur;
ncur = NULL;
}
} while (!xfs_btree_ptr_is_null(cur, &nptr));
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = i;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
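/*
 * Illustrative sketch (not from the original source): insert the record
 * the btree-type code has primed in the cursor, then re-look it up,
 * since the comment above warns that a multi-level split invalidates
 * the original cursor path.
 */
STATIC int				/* error */
xfs_btree_example_insert(
	struct xfs_btree_cur	*cur,	/* cursor primed with the new record */
	int			*stat)	/* 1 if the record was inserted */
{
	int			error;	/* error return value */

	error = xfs_btree_insert(cur, stat);
	if (error || *stat == 0)
		return error;
	/* Revalidate the path; it may be stale after a split. */
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}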
/*
* Try to merge a non-leaf block back into the inode root.
*
* Note: the killroot name comes from the fact that we're effectively
* killing the old root block. But because we can't just delete the
* inode we have to copy the single block it was pointing to into the
* inode.
*/
STATIC int
xfs_btree_kill_iroot(
struct xfs_btree_cur *cur)
{
int whichfork = cur->bc_private.b.whichfork;
struct xfs_inode *ip = cur->bc_private.b.ip;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_btree_block *block;
struct xfs_btree_block *cblock;
union xfs_btree_key *kp;
union xfs_btree_key *ckp;
union xfs_btree_ptr *pp;
union xfs_btree_ptr *cpp;
struct xfs_buf *cbp;
int level;
int index;
int numrecs;
#ifdef DEBUG
union xfs_btree_ptr ptr;
int i;
#endif
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
ASSERT(cur->bc_nlevels > 1);
/*
* Don't deal with the case where the root block needs to become a leaf.
* We're just going to turn the thing back into extents anyway.
*/
level = cur->bc_nlevels - 1;
if (level == 1)
goto out0;
/*
* Give up if the root has multiple children.
*/
block = xfs_btree_get_iroot(cur);
if (xfs_btree_get_numrecs(block) != 1)
goto out0;
cblock = xfs_btree_get_block(cur, level - 1, &cbp);
numrecs = xfs_btree_get_numrecs(cblock);
/*
* Only do this if the next level will fit: its data must be
* copied up into the inode, so instead of freeing the root we
* free the next level down.
*/
if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
goto out0;
XFS_BTREE_STATS_INC(cur, killroot);
#ifdef DEBUG
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
#endif
index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
if (index) {
xfs_iroot_realloc(cur->bc_private.b.ip, index,
cur->bc_private.b.whichfork);
block = ifp->if_broot;
}
be16_add_cpu(&block->bb_numrecs, index);
ASSERT(block->bb_numrecs == cblock->bb_numrecs);
kp = xfs_btree_key_addr(cur, 1, block);
ckp = xfs_btree_key_addr(cur, 1, cblock);
xfs_btree_copy_keys(cur, kp, ckp, numrecs);
pp = xfs_btree_ptr_addr(cur, 1, block);
cpp = xfs_btree_ptr_addr(cur, 1, cblock);
#ifdef DEBUG
for (i = 0; i < numrecs; i++) {
int error;
error = xfs_btree_check_ptr(cur, cpp, i, level - 1);
if (error) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
}
#endif
xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
cur->bc_ops->free_block(cur, cbp);
XFS_BTREE_STATS_INC(cur, free);
cur->bc_bufs[level - 1] = NULL;
be16_add_cpu(&block->bb_level, -1);
xfs_trans_log_inode(cur->bc_tp, ip,
XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
cur->bc_nlevels--;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
}
STATIC int
xfs_btree_dec_cursor(
struct xfs_btree_cur *cur,
int level,
int *stat)
{
int error;
int i;
if (level > 0) {
error = xfs_btree_decrement(cur, level, &i);
if (error)
return error;
}
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
}
/*
* Single level of the btree record deletion routine.
* Delete record pointed to by cur/level.
* Remove the record from its block then rebalance the tree.
* Return 0 for error, 1 for done, 2 to go on to the next level.
*/
STATIC int /* error */
xfs_btree_delrec(
struct xfs_btree_cur *cur, /* btree cursor */
int level, /* level removing record from */
int *stat) /* fail/done/go-on */
{
struct xfs_btree_block *block; /* btree block */
union xfs_btree_ptr cptr; /* current block ptr */
struct xfs_buf *bp; /* buffer for block */
int error; /* error return value */
int i; /* loop counter */
union xfs_btree_key key; /* storage for keyp */
union xfs_btree_key *keyp = &key; /* passed to the next level */
union xfs_btree_ptr lptr; /* left sibling block ptr */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
int lrecs = 0; /* left record count */
int ptr; /* key/record index */
union xfs_btree_ptr rptr; /* right sibling block ptr */
struct xfs_buf *rbp; /* right buffer pointer */
struct xfs_btree_block *right; /* right btree block */
struct xfs_btree_block *rrblock; /* right-right btree block */
struct xfs_buf *rrbp; /* right-right buffer pointer */
int rrecs = 0; /* right record count */
struct xfs_btree_cur *tcur; /* temporary btree cursor */
int numrecs; /* temporary numrec count */
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGI(cur, level);
tcur = NULL;
/* Get the index of the entry being deleted, check for nothing there. */
ptr = cur->bc_ptrs[level];
if (ptr == 0) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
}
/* Get the buffer & block containing the record or key/ptr. */
block = xfs_btree_get_block(cur, level, &bp);
numrecs = xfs_btree_get_numrecs(block);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
goto error0;
#endif
/* Fail if we're off the end of the block. */
if (ptr > numrecs) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
}
XFS_BTREE_STATS_INC(cur, delrec);
XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);
/* Excise the entries being deleted. */
if (level > 0) {
/* It's a nonleaf. operate on keys and ptrs */
union xfs_btree_key *lkp;
union xfs_btree_ptr *lpp;
lkp = xfs_btree_key_addr(cur, ptr + 1, block);
lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);
#ifdef DEBUG
for (i = 0; i < numrecs - ptr; i++) {
error = xfs_btree_check_ptr(cur, lpp, i, level);
if (error)
goto error0;
}
#endif
if (ptr < numrecs) {
xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
}
/*
* If it's the first record in the block, we'll need to pass a
* key up to the next level (updkey).
*/
if (ptr == 1)
keyp = xfs_btree_key_addr(cur, 1, block);
} else {
/* It's a leaf. operate on records */
if (ptr < numrecs) {
xfs_btree_shift_recs(cur,
xfs_btree_rec_addr(cur, ptr + 1, block),
-1, numrecs - ptr);
xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
}
/*
* If it's the first record in the block, we'll need a key
* structure to pass up to the next level (updkey).
*/
if (ptr == 1) {
cur->bc_ops->init_key_from_rec(&key,
xfs_btree_rec_addr(cur, 1, block));
keyp = &key;
}
}
/*
* Decrement and log the number of entries in the block.
*/
xfs_btree_set_numrecs(block, --numrecs);
xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
/*
* If we are tracking the last record in the tree and
* we are at the far right edge of the tree, update it.
*/
if (xfs_btree_is_lastrec(cur, block, level)) {
cur->bc_ops->update_lastrec(cur, block, NULL,
ptr, LASTREC_DELREC);
}
/*
* We're at the root level. First, shrink the root block in-memory.
* Try to get rid of the next level down. If we can't then there's
* nothing left to do.
*/
if (level == cur->bc_nlevels - 1) {
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
xfs_iroot_realloc(cur->bc_private.b.ip, -1,
cur->bc_private.b.whichfork);
error = xfs_btree_kill_iroot(cur);
if (error)
goto error0;
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
*stat = 1;
return 0;
}
/*
* If this is the root level, and there's only one entry left,
* and it's NOT the leaf level, then we can get rid of this
* level.
*/
if (numrecs == 1 && level > 0) {
union xfs_btree_ptr *pp;
/*
* pp is still set to the first pointer in the block.
* Make it the new root of the btree.
*/
pp = xfs_btree_ptr_addr(cur, 1, block);
error = cur->bc_ops->kill_root(cur, bp, level, pp);
if (error)
goto error0;
} else if (level > 0) {
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
}
*stat = 1;
return 0;
}
/*
* If we deleted the leftmost entry in the block, update the
* key values above us in the tree.
*/
if (ptr == 1) {
error = xfs_btree_updkey(cur, keyp, level + 1);
if (error)
goto error0;
}
/*
* If the number of records remaining in the block is at least
* the minimum, we're done.
*/
if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
return 0;
}
/*
* Otherwise, we have to move some records around to keep the
* tree balanced. Look at the left and right sibling blocks to
* see if we can re-balance by moving only one record.
*/
xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
/*
* One child of root, need to get a chance to copy its contents
* into the root and delete it. Can't go up to next level,
* there's nothing to delete there.
*/
if (xfs_btree_ptr_is_null(cur, &rptr) &&
xfs_btree_ptr_is_null(cur, &lptr) &&
level == cur->bc_nlevels - 2) {
error = xfs_btree_kill_iroot(cur);
if (!error)
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
return 0;
}
}
ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
!xfs_btree_ptr_is_null(cur, &lptr));
/*
* Duplicate the cursor so our btree manipulations here won't
* disrupt the next level up.
*/
error = xfs_btree_dup_cursor(cur, &tcur);
if (error)
goto error0;
/*
* If there's a right sibling, see if it's ok to shift an entry
* out of it.
*/
if (!xfs_btree_ptr_is_null(cur, &rptr)) {
/*
* Move the temp cursor to the last entry in the next block.
* Actually any entry but the first would suffice.
*/
i = xfs_btree_lastrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
error = xfs_btree_increment(tcur, level, &i);
if (error)
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
i = xfs_btree_lastrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/* Grab a pointer to the block. */
right = xfs_btree_get_block(tcur, level, &rbp);
#ifdef DEBUG
error = xfs_btree_check_block(tcur, right, level, rbp);
if (error)
goto error0;
#endif
/* Grab the current block number, for future use. */
xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);
/*
* If right block is full enough so that removing one entry
* won't make it too empty, and left-shifting an entry out
* of right to us works, we're done.
*/
if (xfs_btree_get_numrecs(right) - 1 >=
cur->bc_ops->get_minrecs(tcur, level)) {
error = xfs_btree_lshift(tcur, level, &i);
if (error)
goto error0;
if (i) {
ASSERT(xfs_btree_get_numrecs(block) >=
cur->bc_ops->get_minrecs(tcur, level));
xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
tcur = NULL;
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
return 0;
}
}
/*
* Otherwise, grab the number of records in right for
* future reference, and fix up the temp cursor to point
* to our block again (last record).
*/
rrecs = xfs_btree_get_numrecs(right);
if (!xfs_btree_ptr_is_null(cur, &lptr)) {
i = xfs_btree_firstrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
error = xfs_btree_decrement(tcur, level, &i);
if (error)
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
}
}
/*
* If there's a left sibling, see if it's ok to shift an entry
* out of it.
*/
if (!xfs_btree_ptr_is_null(cur, &lptr)) {
/*
* Move the temp cursor to the first entry in the
* previous block.
*/
i = xfs_btree_firstrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
error = xfs_btree_decrement(tcur, level, &i);
if (error)
goto error0;
i = xfs_btree_firstrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/* Grab a pointer to the block. */
left = xfs_btree_get_block(tcur, level, &lbp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, left, level, lbp);
if (error)
goto error0;
#endif
/* Grab the current block number, for future use. */
xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);
/*
* If left block is full enough so that removing one entry
* won't make it too empty, and right-shifting an entry out
* of left to us works, we're done.
*/
if (xfs_btree_get_numrecs(left) - 1 >=
cur->bc_ops->get_minrecs(tcur, level)) {
error = xfs_btree_rshift(tcur, level, &i);
if (error)
goto error0;
if (i) {
ASSERT(xfs_btree_get_numrecs(block) >=
cur->bc_ops->get_minrecs(tcur, level));
xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
tcur = NULL;
if (level == 0)
cur->bc_ptrs[0]++;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
}
}
/*
* Otherwise, grab the number of records in left for
* future reference.
*/
lrecs = xfs_btree_get_numrecs(left);
}
/* Delete the temp cursor, we're done with it. */
xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
tcur = NULL;
/* If here, we need to do a join to keep the tree balanced. */
ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));
if (!xfs_btree_ptr_is_null(cur, &lptr) &&
lrecs + xfs_btree_get_numrecs(block) <=
cur->bc_ops->get_maxrecs(cur, level)) {
/*
* Set "right" to be the starting block,
* "left" to be the left neighbor.
*/
rptr = cptr;
right = block;
rbp = bp;
error = xfs_btree_read_buf_block(cur, &lptr, level,
0, &left, &lbp);
if (error)
goto error0;
/*
* If that won't work, see if we can join with the right neighbor block.
*/
} else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
rrecs + xfs_btree_get_numrecs(block) <=
cur->bc_ops->get_maxrecs(cur, level)) {
/*
* Set "left" to be the starting block,
* "right" to be the right neighbor.
*/
lptr = cptr;
left = block;
lbp = bp;
error = xfs_btree_read_buf_block(cur, &rptr, level,
0, &right, &rbp);
if (error)
goto error0;
/*
* Otherwise, we can't fix the imbalance.
* Just return. This is probably a logic error, but it's not fatal.
*/
} else {
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
return 0;
}
rrecs = xfs_btree_get_numrecs(right);
lrecs = xfs_btree_get_numrecs(left);
/*
* We're now going to join "left" and "right" by moving all the stuff
* in "right" to "left" and deleting "right".
*/
XFS_BTREE_STATS_ADD(cur, moves, rrecs);
if (level > 0) {
/* It's a non-leaf. Move keys and pointers. */
union xfs_btree_key *lkp; /* left btree key */
union xfs_btree_ptr *lpp; /* left address pointer */
union xfs_btree_key *rkp; /* right btree key */
union xfs_btree_ptr *rpp; /* right address pointer */
lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
rkp = xfs_btree_key_addr(cur, 1, right);
rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
for (i = 1; i < rrecs; i++) {
error = xfs_btree_check_ptr(cur, rpp, i, level);
if (error)
goto error0;
}
#endif
xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);
xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
} else {
/* It's a leaf. Move records. */
union xfs_btree_rec *lrp; /* left record pointer */
union xfs_btree_rec *rrp; /* right record pointer */
lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
rrp = xfs_btree_rec_addr(cur, 1, right);
xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
}
XFS_BTREE_STATS_INC(cur, join);
/*
* Fix up the number of records and right block pointer in the
* surviving block, and log it.
*/
xfs_btree_set_numrecs(left, lrecs + rrecs);
xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB);
xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
/* If there is a right sibling, point it to the remaining block. */
xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
if (!xfs_btree_ptr_is_null(cur, &cptr)) {
error = xfs_btree_read_buf_block(cur, &cptr, level,
0, &rrblock, &rrbp);
if (error)
goto error0;
xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
}
/* Free the deleted block. */
error = cur->bc_ops->free_block(cur, rbp);
if (error)
goto error0;
XFS_BTREE_STATS_INC(cur, free);
/*
* If we joined with the left neighbor, set the buffer in the
* cursor to the left block, and fix up the index.
*/
if (bp != lbp) {
cur->bc_bufs[level] = lbp;
cur->bc_ptrs[level] += lrecs;
cur->bc_ra[level] = 0;
}
/*
* If we joined with the right neighbor and there's a level above
* us, increment the cursor at that level.
*/
else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
(level + 1 < cur->bc_nlevels)) {
error = xfs_btree_increment(cur, level + 1, &i);
if (error)
goto error0;
}
/*
* Readjust the ptr at this level if it's not a leaf, since it's
* still pointing at the deletion point, which makes the cursor
* inconsistent. If this makes the ptr 0, the caller fixes it up.
* We can't use decrement because it would change the next level up.
*/
if (level > 0)
cur->bc_ptrs[level]--;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
/* Return value means the next level up has something to do. */
*stat = 2;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
if (tcur)
xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
return error;
}
/*
* Delete the record pointed to by cur.
* The cursor refers to the place where the record was (could be inserted)
* when the operation returns.
*/
int /* error */
xfs_btree_delete(
struct xfs_btree_cur *cur,
int *stat) /* success/failure */
{
int error; /* error return value */
int level;
int i;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
/*
* Go up the tree, starting at leaf level.
*
* If 2 is returned then a join was done; go to the next level.
* Otherwise we are done.
*/
for (level = 0, i = 2; i == 2; level++) {
error = xfs_btree_delrec(cur, level, &i);
if (error)
goto error0;
}
if (i == 0) {
for (level = 1; level < cur->bc_nlevels; level++) {
if (cur->bc_ptrs[level] == 0) {
error = xfs_btree_decrement(cur, level, &i);
if (error)
goto error0;
break;
}
}
}
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = i;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
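/*
 * Illustrative sketch (not from the original source): delete the record
 * that exactly matches the cursor's primed lookup key, if one exists.
 */
STATIC int				/* error */
xfs_btree_example_remove(
	struct xfs_btree_cur	*cur,	/* cursor with its lookup key primed */
	int			*stat)	/* 1 if a matching record was removed */
{
	int			error;	/* error return value */

	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
	if (error || *stat == 0)
		return error;
	return xfs_btree_delete(cur, stat);
}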
/*
* Get the data from the pointed-to record.
*/
int /* error */
xfs_btree_get_rec(
struct xfs_btree_cur *cur, /* btree cursor */
union xfs_btree_rec **recp, /* output: btree record */
int *stat) /* output: success/failure */
{
struct xfs_btree_block *block; /* btree block */
struct xfs_buf *bp; /* buffer pointer */
int ptr; /* record number */
#ifdef DEBUG
int error; /* error return value */
#endif
ptr = cur->bc_ptrs[0];
block = xfs_btree_get_block(cur, 0, &bp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, 0, bp);
if (error)
return error;
#endif
/*
* Off the right end or left end, return failure.
*/
if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
*stat = 0;
return 0;
}
/*
* Point to the record and extract its data.
*/
*recp = xfs_btree_rec_addr(cur, ptr, block);
*stat = 1;
return 0;
}
| gpl-2.0 |
johnjelinek/photon4g.motorola | drivers/net/wireless/iwmc3200wifi/main.c | 467 | 18987 | /*
* Intel Wireless Multicomm 3200 WiFi driver
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <ilw@linux.intel.com>
* Samuel Ortiz <samuel.ortiz@intel.com>
* Zhu Yi <yi.zhu@intel.com>
*
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>
#include <linux/wireless.h>
#include "iwm.h"
#include "debug.h"
#include "bus.h"
#include "umac.h"
#include "commands.h"
#include "hal.h"
#include "fw.h"
#include "rx.h"
static struct iwm_conf def_iwm_conf = {
.sdio_ior_timeout = 5000,
.calib_map = BIT(CALIB_CFG_DC_IDX) |
BIT(CALIB_CFG_LO_IDX) |
BIT(CALIB_CFG_TX_IQ_IDX) |
BIT(CALIB_CFG_RX_IQ_IDX) |
BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
.expected_calib_map = BIT(PHY_CALIBRATE_DC_CMD) |
BIT(PHY_CALIBRATE_LO_CMD) |
BIT(PHY_CALIBRATE_TX_IQ_CMD) |
BIT(PHY_CALIBRATE_RX_IQ_CMD) |
BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
.reset_on_fatal_err = 1,
.auto_connect = 1,
.wimax_not_present = 0,
.enable_qos = 1,
.mode = UMAC_MODE_BSS,
/* UMAC configuration */
.power_index = 0,
.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
.rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
.cts_to_self = 0,
.assoc_timeout = 2,
.roam_timeout = 10,
.wireless_mode = WIRELESS_MODE_11A | WIRELESS_MODE_11G,
.coexist_mode = COEX_MODE_CM,
/* IBSS */
.ibss_band = UMAC_BAND_2GHZ,
.ibss_channel = 1,
.mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
};
static int modparam_reset;
module_param_named(reset, modparam_reset, bool, 0644);
MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
int iwm_mode_to_nl80211_iftype(int mode)
{
switch (mode) {
case UMAC_MODE_BSS:
return NL80211_IFTYPE_STATION;
case UMAC_MODE_IBSS:
return NL80211_IFTYPE_ADHOC;
default:
return NL80211_IFTYPE_UNSPECIFIED;
}
return 0;
}
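/* Delayed work handler: ask the UMAC for a fresh statistics report. */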
static void iwm_statistics_request(struct work_struct *work)
{
struct iwm_priv *iwm =
container_of(work, struct iwm_priv, stats_request.work);
iwm_send_umac_stats_req(iwm, 0);
}
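/*
 * Delayed work handler: invalidate the active MLME profile, clear the
 * association state and report the disconnection to cfg80211.
 */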
static void iwm_disconnect_work(struct work_struct *work)
{
struct iwm_priv *iwm =
container_of(work, struct iwm_priv, disconnect.work);
if (iwm->umac_profile_active)
iwm_invalidate_mlme_profile(iwm);
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
iwm->umac_profile_active = 0;
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
iwm_link_off(iwm);
wake_up_interruptible(&iwm->mlme_queue);
cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL);
}
static int __iwm_up(struct iwm_priv *iwm);
static int __iwm_down(struct iwm_priv *iwm);
static void iwm_reset_worker(struct work_struct *work)
{
struct iwm_priv *iwm;
struct iwm_umac_profile *profile = NULL;
int uninitialized_var(ret), retry = 0;
iwm = container_of(work, struct iwm_priv, reset_worker);
/*
* XXX: The iwm->mutex is introduced purely for this reset work,
* because the other users for iwm_up and iwm_down are only netdev
* ndo_open and ndo_stop which are already protected by rtnl.
* Please remove iwm->mutex together if iwm_reset_worker() is not
* required in the future.
*/
if (!mutex_trylock(&iwm->mutex)) {
IWM_WARN(iwm, "We are in the middle of interface bringing "
"UP/DOWN. Skip driver resetting.\n");
return;
}
if (iwm->umac_profile_active) {
profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
if (profile)
memcpy(profile, iwm->umac_profile, sizeof(*profile));
else
IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
}
__iwm_down(iwm);
while (retry++ < 3) {
ret = __iwm_up(iwm);
if (!ret)
break;
schedule_timeout_uninterruptible(10 * HZ);
}
if (ret) {
IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
kfree(profile);
goto out;
}
if (profile) {
IWM_DBG_MLME(iwm, DBG, "Resend UMAC profile\n");
memcpy(iwm->umac_profile, profile, sizeof(*profile));
iwm_send_mlme_profile(iwm);
kfree(profile);
} else
clear_bit(IWM_STATUS_RESETTING, &iwm->status);
out:
mutex_unlock(&iwm->mutex);
}
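/*
 * Watchdog timer callback: the UMAC has stopped responding. Trigger a
 * full driver reset if the "reset" module parameter is set.
 */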
static void iwm_watchdog(unsigned long data)
{
struct iwm_priv *iwm = (struct iwm_priv *)data;
IWM_WARN(iwm, "Watchdog expired: UMAC stalls!\n");
if (modparam_reset)
iwm_resetting(iwm);
}
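/*
 * One-time initialization of the driver private data: wait queues,
 * pending command lists, work items, per-queue workqueues, keys,
 * the watchdog timer and the last firmware error buffer.
 */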
int iwm_priv_init(struct iwm_priv *iwm)
{
int i;
char name[32];
iwm->status = 0;
INIT_LIST_HEAD(&iwm->pending_notif);
init_waitqueue_head(&iwm->notif_queue);
init_waitqueue_head(&iwm->nonwifi_queue);
init_waitqueue_head(&iwm->wifi_ntfy_queue);
init_waitqueue_head(&iwm->mlme_queue);
memcpy(&iwm->conf, &def_iwm_conf, sizeof(struct iwm_conf));
spin_lock_init(&iwm->tx_credit.lock);
INIT_LIST_HEAD(&iwm->wifi_pending_cmd);
INIT_LIST_HEAD(&iwm->nonwifi_pending_cmd);
iwm->wifi_seq_num = UMAC_WIFI_SEQ_NUM_BASE;
iwm->nonwifi_seq_num = UMAC_NONWIFI_SEQ_NUM_BASE;
spin_lock_init(&iwm->cmd_lock);
iwm->scan_id = 1;
INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request);
INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work);
INIT_WORK(&iwm->reset_worker, iwm_reset_worker);
INIT_LIST_HEAD(&iwm->bss_list);
skb_queue_head_init(&iwm->rx_list);
INIT_LIST_HEAD(&iwm->rx_tickets);
for (i = 0; i < IWM_RX_ID_HASH; i++)
INIT_LIST_HEAD(&iwm->rx_packets[i]);
INIT_WORK(&iwm->rx_worker, iwm_rx_worker);
iwm->rx_wq = create_singlethread_workqueue(KBUILD_MODNAME "_rx");
if (!iwm->rx_wq)
return -EAGAIN;
for (i = 0; i < IWM_TX_QUEUES; i++) {
INIT_WORK(&iwm->txq[i].worker, iwm_tx_worker);
snprintf(name, 32, KBUILD_MODNAME "_tx_%d", i);
iwm->txq[i].id = i;
iwm->txq[i].wq = create_singlethread_workqueue(name);
if (!iwm->txq[i].wq)
return -EAGAIN;
skb_queue_head_init(&iwm->txq[i].queue);
}
for (i = 0; i < IWM_NUM_KEYS; i++)
memset(&iwm->keys[i], 0, sizeof(struct iwm_key));
iwm->default_key = -1;
init_timer(&iwm->watchdog);
iwm->watchdog.function = iwm_watchdog;
iwm->watchdog.data = (unsigned long)iwm;
mutex_init(&iwm->mutex);
iwm->last_fw_err = kzalloc(sizeof(struct iwm_fw_error_hdr),
GFP_KERNEL);
if (iwm->last_fw_err == NULL)
return -ENOMEM;
return 0;
}
void iwm_priv_deinit(struct iwm_priv *iwm)
{
int i;
for (i = 0; i < IWM_TX_QUEUES; i++)
destroy_workqueue(iwm->txq[i].wq);
destroy_workqueue(iwm->rx_wq);
kfree(iwm->last_fw_err);
}
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
* the firmware.
*/
void iwm_reset(struct iwm_priv *iwm)
{
struct iwm_notif *notif, *next;
if (test_bit(IWM_STATUS_READY, &iwm->status))
iwm_target_reset(iwm);
if (test_bit(IWM_STATUS_RESETTING, &iwm->status)) {
iwm->status = 0;
set_bit(IWM_STATUS_RESETTING, &iwm->status);
} else
iwm->status = 0;
iwm->scan_id = 1;
list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) {
list_del(&notif->pending);
kfree(notif->buf);
kfree(notif);
}
iwm_cmd_flush(iwm);
flush_workqueue(iwm->rx_wq);
iwm_link_off(iwm);
}
void iwm_resetting(struct iwm_priv *iwm)
{
set_bit(IWM_STATUS_RESETTING, &iwm->status);
schedule_work(&iwm->reset_worker);
}
/*
* Notification code:
*
* We're faced with the following issue: Any host command can
* have an answer or not, and if there's an answer to expect,
* it can be treated synchronously or asynchronously.
* To handle the synchronous answer case, we implemented
* our notification mechanism.
* When a code path needs to wait for a command response
* synchronously, it calls notif_handle(), which waits for the
* right notification to show up, and then processes it. Before
* starting to wait, it registers as a waiter for this specific
* answer (by toggling a bit in one of the handler_map arrays), so that
* the rx code knows that it needs to send a notification to the
* waiting processes. It does so by calling iwm_notif_send(),
* which adds the notification to the pending notifications list,
* and then wakes the waiting processes up.
*/
int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size)
{
struct iwm_notif *notif;
notif = kzalloc(sizeof(struct iwm_notif), GFP_KERNEL);
if (!notif) {
IWM_ERR(iwm, "Couldn't alloc memory for notification\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&notif->pending);
notif->cmd = cmd;
notif->cmd_id = cmd_id;
notif->src = source;
notif->buf = kzalloc(buf_size, GFP_KERNEL);
if (!notif->buf) {
IWM_ERR(iwm, "Couldn't alloc notification buffer\n");
kfree(notif);
return -ENOMEM;
}
notif->buf_size = buf_size;
memcpy(notif->buf, buf, buf_size);
list_add_tail(&notif->pending, &iwm->pending_notif);
wake_up_interruptible(&iwm->notif_queue);
return 0;
}
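/*
 * Find a pending notification matching (cmd, source), unlink it from
 * the pending list and return it, or NULL if none is queued.
 */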
static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd,
u8 source)
{
struct iwm_notif *notif, *next;
list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) {
if ((notif->cmd_id == cmd) && (notif->src == source)) {
list_del(&notif->pending);
return notif;
}
}
return NULL;
}
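/*
 * Register as a waiter for (cmd, source) in the relevant handler map,
 * then sleep until the matching notification is queued or the timeout
 * expires.
 */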
static struct iwm_notif *iwm_notif_wait(struct iwm_priv *iwm, u32 cmd,
u8 source, long timeout)
{
int ret;
struct iwm_notif *notif;
unsigned long *map = NULL;
switch (source) {
case IWM_SRC_LMAC:
map = &iwm->lmac_handler_map[0];
break;
case IWM_SRC_UMAC:
map = &iwm->umac_handler_map[0];
break;
case IWM_SRC_UDMA:
map = &iwm->udma_handler_map[0];
break;
}
set_bit(cmd, map);
ret = wait_event_interruptible_timeout(iwm->notif_queue,
((notif = iwm_notif_find(iwm, cmd, source)) != NULL),
timeout);
clear_bit(cmd, map);
if (!ret)
return NULL;
return notif;
}
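/*
 * Synchronously wait for a notification and run the RX response handler
 * on it. Returns -ETIME if nothing showed up within the timeout.
 */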
int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout)
{
int ret;
struct iwm_notif *notif;
notif = iwm_notif_wait(iwm, cmd, source, timeout);
if (!notif)
return -ETIME;
ret = iwm_rx_handle_resp(iwm, notif->buf, notif->buf_size, notif->cmd);
kfree(notif->buf);
kfree(notif);
return ret;
}
static int iwm_config_boot_params(struct iwm_priv *iwm)
{
struct iwm_udma_nonwifi_cmd target_cmd;
int ret;
/* check Wimax is off and config debug monitor */
if (iwm->conf.wimax_not_present) {
u32 data1 = 0x1f;
u32 addr1 = 0x606BE258;
u32 data2_set = 0x0;
u32 data2_clr = 0x1;
u32 addr2 = 0x606BE100;
u32 data3 = 0x1;
u32 addr3 = 0x606BEC00;
target_cmd.resp = 0;
target_cmd.handle_by_hw = 0;
target_cmd.eop = 1;
target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
target_cmd.addr = cpu_to_le32(addr1);
target_cmd.op1_sz = cpu_to_le32(sizeof(u32));
target_cmd.op2 = 0;
ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1);
if (ret < 0) {
IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
return ret;
}
target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE;
target_cmd.addr = cpu_to_le32(addr2);
target_cmd.op1_sz = cpu_to_le32(data2_set);
target_cmd.op2 = cpu_to_le32(data2_clr);
ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1);
if (ret < 0) {
IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
return ret;
}
target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
target_cmd.addr = cpu_to_le32(addr3);
target_cmd.op1_sz = cpu_to_le32(sizeof(u32));
target_cmd.op2 = 0;
ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data3);
if (ret < 0) {
IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
return ret;
}
}
return 0;
}
void iwm_init_default_profile(struct iwm_priv *iwm,
struct iwm_umac_profile *profile)
{
memset(profile, 0, sizeof(struct iwm_umac_profile));
profile->sec.auth_type = UMAC_AUTH_TYPE_OPEN;
profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
profile->sec.ucast_cipher = UMAC_CIPHER_TYPE_NONE;
profile->sec.mcast_cipher = UMAC_CIPHER_TYPE_NONE;
if (iwm->conf.enable_qos)
profile->flags |= cpu_to_le16(UMAC_PROFILE_QOS_ALLOWED);
profile->wireless_mode = iwm->conf.wireless_mode;
profile->mode = cpu_to_le32(iwm->conf.mode);
profile->ibss.atim = 0;
profile->ibss.beacon_interval = 100;
profile->ibss.join_only = 0;
profile->ibss.band = iwm->conf.ibss_band;
profile->ibss.channel = iwm->conf.ibss_channel;
}
void iwm_link_on(struct iwm_priv *iwm)
{
netif_carrier_on(iwm_to_ndev(iwm));
netif_tx_wake_all_queues(iwm_to_ndev(iwm));
iwm_send_umac_stats_req(iwm, 0);
}
void iwm_link_off(struct iwm_priv *iwm)
{
struct iw_statistics *wstats = &iwm->wstats;
int i;
netif_tx_stop_all_queues(iwm_to_ndev(iwm));
netif_carrier_off(iwm_to_ndev(iwm));
for (i = 0; i < IWM_TX_QUEUES; i++) {
skb_queue_purge(&iwm->txq[i].queue);
iwm->txq[i].concat_count = 0;
iwm->txq[i].concat_ptr = iwm->txq[i].concat_buf;
flush_workqueue(iwm->txq[i].wq);
}
iwm_rx_free(iwm);
cancel_delayed_work_sync(&iwm->stats_request);
memset(wstats, 0, sizeof(struct iw_statistics));
wstats->qual.updated = IW_QUAL_ALL_INVALID;
kfree(iwm->req_ie);
iwm->req_ie = NULL;
iwm->req_ie_len = 0;
kfree(iwm->resp_ie);
iwm->resp_ie = NULL;
iwm->resp_ie_len = 0;
del_timer_sync(&iwm->watchdog);
}
static void iwm_bss_list_clean(struct iwm_priv *iwm)
{
struct iwm_bss_info *bss, *next;
list_for_each_entry_safe(bss, next, &iwm->bss_list, node) {
list_del(&bss->node);
kfree(bss->bss);
kfree(bss);
}
}
static int iwm_channels_init(struct iwm_priv *iwm)
{
int ret;
ret = iwm_send_umac_channel_list(iwm);
if (ret) {
IWM_ERR(iwm, "Send channel list failed\n");
return ret;
}
ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST,
IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
if (ret) {
IWM_ERR(iwm, "Didn't get a channel list notification\n");
return ret;
}
return 0;
}
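/*
 * Bring the interface up: enable the bus, perform the reboot/ACK barker
 * handshake with the device, configure the static boot parameters, read
 * the MAC address, load the firmware, enable the wifi core and fetch the
 * channel list.
 */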
static int __iwm_up(struct iwm_priv *iwm)
{
int ret;
struct iwm_notif *notif_reboot, *notif_ack = NULL;
ret = iwm_bus_enable(iwm);
if (ret) {
IWM_ERR(iwm, "Couldn't enable function\n");
return ret;
}
iwm_rx_setup_handlers(iwm);
/* Wait for initial BARKER_REBOOT from hardware */
notif_reboot = iwm_notif_wait(iwm, IWM_BARKER_REBOOT_NOTIFICATION,
IWM_SRC_UDMA, 2 * HZ);
if (!notif_reboot) {
IWM_ERR(iwm, "Wait for REBOOT_BARKER timeout\n");
goto err_disable;
}
/* We send the barker back */
ret = iwm_bus_send_chunk(iwm, notif_reboot->buf, 16);
if (ret) {
IWM_ERR(iwm, "REBOOT barker response failed\n");
kfree(notif_reboot);
goto err_disable;
}
kfree(notif_reboot->buf);
kfree(notif_reboot);
/* Wait for ACK_BARKER from hardware */
notif_ack = iwm_notif_wait(iwm, IWM_ACK_BARKER_NOTIFICATION,
IWM_SRC_UDMA, 2 * HZ);
if (!notif_ack) {
IWM_ERR(iwm, "Wait for ACK_BARKER timeout\n");
goto err_disable;
}
kfree(notif_ack->buf);
kfree(notif_ack);
/* We start to config static boot parameters */
ret = iwm_config_boot_params(iwm);
if (ret) {
IWM_ERR(iwm, "Config boot parameters failed\n");
goto err_disable;
}
ret = iwm_read_mac(iwm, iwm_to_ndev(iwm)->dev_addr);
if (ret) {
IWM_ERR(iwm, "MAC reading failed\n");
goto err_disable;
}
/* We can load the FWs */
ret = iwm_load_fw(iwm);
if (ret) {
IWM_ERR(iwm, "FW loading failed\n");
goto err_disable;
}
/* We configure the UMAC and enable the wifi module */
ret = iwm_send_umac_config(iwm,
cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) |
cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_LINK_EN) |
cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_MLME_EN));
if (ret) {
IWM_ERR(iwm, "UMAC config failed\n");
goto err_fw;
}
ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS,
IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
if (ret) {
IWM_ERR(iwm, "Didn't get a wifi core status notification\n");
goto err_fw;
}
if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN |
UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) {
IWM_DBG_BOOT(iwm, DBG, "Not all cores enabled:0x%x\n",
iwm->core_enabled);
ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS,
IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
if (ret) {
IWM_ERR(iwm, "Didn't get a core status notification\n");
goto err_fw;
}
if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN |
UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) {
IWM_ERR(iwm, "Not all cores enabled: 0x%x\n",
iwm->core_enabled);
goto err_fw;
} else {
IWM_INFO(iwm, "All cores enabled\n");
}
}
ret = iwm_channels_init(iwm);
if (ret < 0) {
IWM_ERR(iwm, "Couldn't init channels\n");
goto err_fw;
}
/* Set the READY bit to indicate interface is brought up successfully */
set_bit(IWM_STATUS_READY, &iwm->status);
return 0;
err_fw:
iwm_eeprom_exit(iwm);
err_disable:
ret = iwm_bus_disable(iwm);
if (ret < 0)
IWM_ERR(iwm, "Couldn't disable function\n");
return -EIO;
}
int iwm_up(struct iwm_priv *iwm)
{
int ret;
mutex_lock(&iwm->mutex);
ret = __iwm_up(iwm);
mutex_unlock(&iwm->mutex);
return ret;
}
static int __iwm_down(struct iwm_priv *iwm)
{
int ret;
/* The interface is already down */
if (!test_bit(IWM_STATUS_READY, &iwm->status))
return 0;
if (iwm->scan_request) {
cfg80211_scan_done(iwm->scan_request, true);
iwm->scan_request = NULL;
}
clear_bit(IWM_STATUS_READY, &iwm->status);
iwm_eeprom_exit(iwm);
iwm_bss_list_clean(iwm);
iwm_init_default_profile(iwm, iwm->umac_profile);
iwm->umac_profile_active = false;
iwm->default_key = -1;
iwm->core_enabled = 0;
ret = iwm_bus_disable(iwm);
if (ret < 0) {
IWM_ERR(iwm, "Couldn't disable function\n");
return ret;
}
return 0;
}
int iwm_down(struct iwm_priv *iwm)
{
int ret;
mutex_lock(&iwm->mutex);
ret = __iwm_down(iwm);
mutex_unlock(&iwm->mutex);
return ret;
}
| gpl-2.0 |
JoeyJiao/huawei_kernel_2.6.32_9 | arch/sh/boards/mach-se/7722/irq.c | 723 | 1932 | /*
* linux/arch/sh/boards/se/7722/irq.c
*
* Copyright (C) 2007 Nobuhiro Iwamatsu
*
* Hitachi UL SolutionEngine 7722 Support.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <mach-se/mach/se7722.h>
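/*
 * The on-board FPGA exposes its interrupt sources through the IRQ01_MASK
 * register; each source is masked or unmasked by a single bit.
 */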
static void disable_se7722_irq(unsigned int irq)
{
unsigned int bit = irq - SE7722_FPGA_IRQ_BASE;
ctrl_outw(ctrl_inw(IRQ01_MASK) | 1 << bit, IRQ01_MASK);
}
static void enable_se7722_irq(unsigned int irq)
{
unsigned int bit = irq - SE7722_FPGA_IRQ_BASE;
ctrl_outw(ctrl_inw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK);
}
static struct irq_chip se7722_irq_chip __read_mostly = {
.name = "SE7722-FPGA",
.mask = disable_se7722_irq,
.unmask = enable_se7722_irq,
.mask_ack = disable_se7722_irq,
};
static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
{
unsigned short intv = ctrl_inw(IRQ01_STS);
struct irq_desc *ext_desc;
unsigned int ext_irq = SE7722_FPGA_IRQ_BASE;
intv &= (1 << SE7722_FPGA_IRQ_NR) - 1;
while (intv) {
if (intv & 1) {
ext_desc = irq_desc + ext_irq;
handle_level_irq(ext_irq, ext_desc);
}
intv >>= 1;
ext_irq++;
}
}
/*
* Initialize IRQ setting
*/
void __init init_se7722_IRQ(void)
{
int i;
ctrl_outw(0, IRQ01_MASK); /* disable all irqs */
ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */
for (i = 0; i < SE7722_FPGA_IRQ_NR; i++)
set_irq_chip_and_handler_name(SE7722_FPGA_IRQ_BASE + i,
&se7722_irq_chip,
handle_level_irq, "level");
set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux);
set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
set_irq_chained_handler(IRQ1_IRQ, se7722_irq_demux);
set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
}
| gpl-2.0 |
geiti94/NEMESIS_KERNEL_N5_NOUGAT | drivers/cpufreq/loongson2_cpufreq.c | 723 | 5967 | /*
* Cpufreq driver for the loongson-2 processors
*
* The 2E revision of the Loongson processor does not support this feature.
*
* Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, yanh@lemote.com
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/sched.h> /* set_cpus_allowed() */
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <asm/clock.h>
#include <asm/idle.h>
#include <asm/mach-loongson/loongson.h>
static uint nowait;
static struct clk *cpuclk;
static void (*saved_cpu_wait) (void);
static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
unsigned long val, void *data);
static struct notifier_block loongson2_cpufreq_notifier_block = {
.notifier_call = loongson2_cpu_freq_notifier
};
static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
unsigned long val, void *data)
{
if (val == CPUFREQ_POSTCHANGE)
current_cpu_data.udelay_val = loops_per_jiffy;
return 0;
}
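/* Report the current rate of the CPU clock as seen by the clk framework. */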
static unsigned int loongson2_cpufreq_get(unsigned int cpu)
{
return clk_get_rate(cpuclk);
}
/*
* Here we notify other drivers of the proposed change and the final change.
*/
static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int cpu = policy->cpu;
unsigned int newstate = 0;
cpumask_t cpus_allowed;
struct cpufreq_freqs freqs;
unsigned int freq;
cpus_allowed = current->cpus_allowed;
set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (cpufreq_frequency_table_target
(policy, &loongson2_clockmod_table[0], target_freq, relation,
&newstate))
return -EINVAL;
freq =
((cpu_clock_freq / 1000) *
loongson2_clockmod_table[newstate].index) / 8;
if (freq < policy->min || freq > policy->max)
return -EINVAL;
pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
freqs.old = loongson2_cpufreq_get(cpu);
freqs.new = freq;
freqs.flags = 0;
if (freqs.new == freqs.old)
return 0;
/* notifiers */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
clk_set_rate(cpuclk, freq);
/* notifiers */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
pr_debug("cpufreq: set frequency %u kHz\n", freq);
return 0;
}
static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int i;
unsigned long rate;
int ret;
cpuclk = clk_get(NULL, "cpu_clk");
if (IS_ERR(cpuclk)) {
printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
return PTR_ERR(cpuclk);
}
rate = cpu_clock_freq / 1000;
if (!rate) {
clk_put(cpuclk);
return -EINVAL;
}
/* clock table init */
for (i = 2;
(loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
ret = clk_set_rate(cpuclk, rate);
if (ret) {
clk_put(cpuclk);
return ret;
}
policy->cur = loongson2_cpufreq_get(policy->cpu);
cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
&loongson2_clockmod_table[0]);
}
static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
&loongson2_clockmod_table[0]);
}
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
clk_put(cpuclk);
return 0;
}
static struct freq_attr *loongson2_table_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver loongson2_cpufreq_driver = {
.owner = THIS_MODULE,
.name = "loongson2",
.init = loongson2_cpufreq_cpu_init,
.verify = loongson2_cpufreq_verify,
.target = loongson2_cpufreq_target,
.get = loongson2_cpufreq_get,
.exit = loongson2_cpufreq_exit,
.attr = loongson2_table_attr,
};
static struct platform_device_id platform_device_ids[] = {
{
.name = "loongson2_cpufreq",
},
{}
};
MODULE_DEVICE_TABLE(platform, platform_device_ids);
static struct platform_driver platform_driver = {
.driver = {
.name = "loongson2_cpufreq",
.owner = THIS_MODULE,
},
.id_table = platform_device_ids,
};
/*
* This is the simple version of the Loongson-2 wait. Maybe we need to do
* this in an interrupt-disabled context.
*/
static DEFINE_SPINLOCK(loongson2_wait_lock);
static void loongson2_cpu_wait(void)
{
unsigned long flags;
u32 cpu_freq;
spin_lock_irqsave(&loongson2_wait_lock, flags);
cpu_freq = LOONGSON_CHIPCFG0;
LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
local_irq_enable();
}
static int __init cpufreq_init(void)
{
int ret;
/* Register platform stuff */
ret = platform_driver_register(&platform_driver);
if (ret)
return ret;
pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
if (!ret && !nowait) {
saved_cpu_wait = cpu_wait;
cpu_wait = loongson2_cpu_wait;
}
return ret;
}
static void __exit cpufreq_exit(void)
{
if (!nowait && saved_cpu_wait)
cpu_wait = saved_cpu_wait;
cpufreq_unregister_driver(&loongson2_cpufreq_driver);
cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
platform_driver_unregister(&platform_driver);
}
module_init(cpufreq_init);
module_exit(cpufreq_exit);
module_param(nowait, uint, 0644);
MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait");
MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
MODULE_DESCRIPTION("cpufreq driver for Loongson2F");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MyAOSP/kernel_moto_wingray | arch/mips/sibyte/sb1250/smp.c | 1747 | 4862 | /*
* Copyright (C) 2001, 2002, 2003 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
static void *mailbox_set_regs[] = {
IOADDR(A_IMR_CPU0_BASE + R_IMR_MAILBOX_SET_CPU),
IOADDR(A_IMR_CPU1_BASE + R_IMR_MAILBOX_SET_CPU)
};
static void *mailbox_clear_regs[] = {
IOADDR(A_IMR_CPU0_BASE + R_IMR_MAILBOX_CLR_CPU),
IOADDR(A_IMR_CPU1_BASE + R_IMR_MAILBOX_CLR_CPU)
};
static void *mailbox_regs[] = {
IOADDR(A_IMR_CPU0_BASE + R_IMR_MAILBOX_CPU),
IOADDR(A_IMR_CPU1_BASE + R_IMR_MAILBOX_CPU)
};
/*
* SMP init and finish on secondary CPUs
*/
void __cpuinit sb1250_smp_init(void)
{
unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
STATUSF_IP1 | STATUSF_IP0;
/* Set interrupt mask, but don't enable */
change_c0_status(ST0_IM, imask);
}
/*
* These are routines for dealing with the sb1250 smp capabilities
* independent of board/firmware
*/
/*
* Simple enough; everything is set up, so just poke the appropriate mailbox
* register, and we should be set
*/
static void sb1250_send_ipi_single(int cpu, unsigned int action)
{
__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
}
static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
sb1250_send_ipi_single(i, action);
}
/*
* Code to run on secondary just after probing the CPU
*/
static void __cpuinit sb1250_init_secondary(void)
{
extern void sb1250_smp_init(void);
sb1250_smp_init();
}
/*
* Do any tidying up before marking online and running the idle
* loop
*/
static void __cpuinit sb1250_smp_finish(void)
{
extern void sb1250_clockevent_init(void);
sb1250_clockevent_init();
local_irq_enable();
}
/*
* Final cleanup after all secondaries booted
*/
static void sb1250_cpus_done(void)
{
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
*/
static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
{
int retval;
retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
__KSTK_TOS(idle),
(unsigned long)task_thread_info(idle), 0);
if (retval != 0)
printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
}
/*
* Use CFE to find out how many CPUs are available, setting up
* cpu_possible_map and the logical/physical mappings.
* XXXKW will the boot CPU ever not be physical 0?
*
* Common setup before any secondaries are started
*/
static void __init sb1250_smp_setup(void)
{
int i, num;
cpus_clear(cpu_possible_map);
cpu_set(0, cpu_possible_map);
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
for (i = 1, num = 0; i < NR_CPUS; i++) {
if (cfe_cpu_stop(i) == 0) {
cpu_set(i, cpu_possible_map);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
}
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
static void __init sb1250_prepare_cpus(unsigned int max_cpus)
{
}
struct plat_smp_ops sb_smp_ops = {
.send_ipi_single = sb1250_send_ipi_single,
.send_ipi_mask = sb1250_send_ipi_mask,
.init_secondary = sb1250_init_secondary,
.smp_finish = sb1250_smp_finish,
.cpus_done = sb1250_cpus_done,
.boot_secondary = sb1250_boot_secondary,
.smp_setup = sb1250_smp_setup,
.prepare_cpus = sb1250_prepare_cpus,
};
void sb1250_mailbox_interrupt(void)
{
int cpu = smp_processor_id();
int irq = K_INT_MBOX_0;
unsigned int action;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
/* Load the mailbox register to figure out what we're supposed to do */
action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
/* Clear the mailbox to clear the interrupt */
____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]);
/*
* Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
* interrupt will do the reschedule for us
*/
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
}
| gpl-2.0 |
Andy1911/AndyKernel_ZukZ1 | drivers/media/platform/msm/vcap/vcap_vp.c | 2003 | 25402 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <mach/camera.h>
#include <mach/clk.h>
#include <media/v4l2-event.h>
#include <media/vcap_v4l2.h>
#include <media/vcap_fmt.h>
#include "vcap_vp.h"
void config_nr_buffer(struct vcap_client_data *c_data,
struct vcap_buffer *buf)
{
struct vcap_dev *dev = c_data->dev;
int size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
writel_relaxed(buf->paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
writel_relaxed(buf->paddr + size, VCAP_VP_NR_T2_C_BASE_ADDR);
}
void config_in_buffer(struct vcap_client_data *c_data,
struct vcap_buffer *buf)
{
struct vcap_dev *dev = c_data->dev;
int size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
writel_relaxed(buf->paddr, VCAP_VP_T2_Y_BASE_ADDR);
writel_relaxed(buf->paddr + size, VCAP_VP_T2_C_BASE_ADDR);
}
void config_out_buffer(struct vcap_client_data *c_data,
struct vcap_buffer *buf)
{
struct vcap_dev *dev = c_data->dev;
int size;
size = c_data->vp_out_fmt.height * c_data->vp_out_fmt.width;
writel_relaxed(buf->paddr, VCAP_VP_OUT_Y_BASE_ADDR);
writel_relaxed(buf->paddr + size, VCAP_VP_OUT_C_BASE_ADDR);
}
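/*
 * Take the next input and output buffers off the active lists and program
 * them into the VP. Returns -EAGAIN when either list is empty and -EPERM
 * while the VP is shutting down.
 */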
int vp_setup_buffers(struct vcap_client_data *c_data)
{
struct vp_action *vp_act;
struct vcap_dev *dev;
unsigned long flags = 0;
if (!c_data->streaming)
return -ENOEXEC;
dev = c_data->dev;
pr_debug("VP: Start setup buffers\n");
if (dev->vp_shutdown) {
pr_debug("%s: VP shutting down, no buf setup\n",
__func__);
return -EPERM;
}
/* No need to verify vp_client is not NULL caller does so */
vp_act = &dev->vp_client->vp_action;
spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
if (list_empty(&vp_act->in_active)) {
spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
pr_debug("%s: VP We have no more input buffers\n",
__func__);
return -EAGAIN;
}
if (list_empty(&vp_act->out_active)) {
spin_unlock_irqrestore(&dev->vp_client->cap_slock,
flags);
pr_debug("%s: VP We have no more output buffers\n",
__func__);
return -EAGAIN;
}
vp_act->bufT2 = list_entry(vp_act->in_active.next,
struct vcap_buffer, list);
list_del(&vp_act->bufT2->list);
vp_act->bufOut = list_entry(vp_act->out_active.next,
struct vcap_buffer, list);
list_del(&vp_act->bufOut->list);
spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
config_in_buffer(c_data, vp_act->bufT2);
config_out_buffer(c_data, vp_act->bufOut);
return 0;
}
static void mov_buf_to_vc(struct work_struct *work)
{
struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
struct v4l2_buffer p;
struct vb2_buffer *vb_vc;
struct vcap_buffer *buf_vc;
struct vb2_buffer *vb_vp;
struct vcap_buffer *buf_vp;
int rc;
p.memory = V4L2_MEMORY_USERPTR;
/* This loop exits when there is no more buffers left */
while (1) {
p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
if (!vp_work->cd->streaming)
return;
rc = vcvp_dqbuf(&vp_work->cd->vp_in_vidq, &p);
if (rc < 0)
return;
vb_vc = vp_work->cd->vc_vidq.bufs[p.index];
if (NULL == vb_vc) {
pr_debug("%s: buffer is NULL\n", __func__);
vcvp_qbuf(&vp_work->cd->vp_in_vidq, &p);
return;
}
buf_vc = container_of(vb_vc, struct vcap_buffer, vb);
vb_vp = vp_work->cd->vp_in_vidq.bufs[p.index];
if (NULL == vb_vp) {
pr_debug("%s: buffer is NULL\n", __func__);
vcvp_qbuf(&vp_work->cd->vp_in_vidq, &p);
return;
}
buf_vp = container_of(vb_vp, struct vcap_buffer, vb);
buf_vc->ion_handle = buf_vp->ion_handle;
buf_vc->paddr = buf_vp->paddr;
buf_vp->ion_handle = NULL;
buf_vp->paddr = 0;
p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
/* This call should not fail */
rc = vcvp_qbuf(&vp_work->cd->vc_vidq, &p);
if (rc < 0) {
pr_err("%s: qbuf to vc failed\n", __func__);
buf_vp->ion_handle = buf_vc->ion_handle;
buf_vp->paddr = buf_vc->paddr;
buf_vc->ion_handle = NULL;
buf_vc->paddr = 0;
p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
vcvp_qbuf(&vp_work->cd->vp_in_vidq, &p);
}
}
}
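/*
 * Write the user-supplied noise reduction parameters to the VP registers
 * (manual mode only) and clear the pending-update flag.
 */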
void update_nr_value(struct vcap_dev *dev)
{
struct nr_param *par;
uint32_t val = 0;
par = &dev->nr_param;
if (par->mode == NR_MANUAL) {
writel_relaxed(par->window << 24 | par->decay_ratio << 20,
VCAP_VP_NR_CONFIG);
if (par->threshold)
val = VP_NR_DYNAMIC_THRESHOLD;
writel_relaxed(val |
par->luma.max_blend_ratio << 24 |
par->luma.scale_diff_ratio << 12 |
par->luma.diff_limit_ratio << 8 |
par->luma.scale_motion_ratio << 4 |
par->luma.blend_limit_ratio << 0,
VCAP_VP_NR_LUMA_CONFIG);
writel_relaxed(val |
par->chroma.max_blend_ratio << 24 |
par->chroma.scale_diff_ratio << 12 |
par->chroma.diff_limit_ratio << 8 |
par->chroma.scale_motion_ratio << 4 |
par->chroma.blend_limit_ratio << 0,
VCAP_VP_NR_CHROMA_CONFIG);
}
dev->nr_update = false;
}
static void vp_wq_fnc(struct work_struct *work)
{
struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
struct vcap_dev *dev;
struct vp_action *vp_act;
struct timeval tv;
unsigned long flags = 0;
uint32_t irq;
int rc;
bool top_field = 0;
if (vp_work && vp_work->cd && vp_work->cd->dev)
dev = vp_work->cd->dev;
else
return;
vp_act = &dev->vp_client->vp_action;
rc = readl_relaxed(VCAP_OFFSET(0x048));
while (!(rc & 0x00000100))
rc = readl_relaxed(VCAP_OFFSET(0x048));
irq = readl_relaxed(VCAP_VP_INT_STATUS);
writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
if (dev->nr_update == true)
update_nr_value(dev);
spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
/* Queue the done buffers */
if (vp_act->vp_state == VP_NORMAL &&
vp_act->bufNR.nr_pos != TM1_BUF) {
vb2_buffer_done(&vp_act->bufTm1->vb, VB2_BUF_STATE_DONE);
if (vp_work->cd->op_mode == VC_AND_VP_VCAP_OP)
queue_work(dev->vcap_wq, &dev->vp_to_vc_work.work);
}
if (vp_act->bufT0 != NULL && vp_act->vp_state == VP_NORMAL) {
vp_act->bufOut->vb.v4l2_buf.timestamp =
vp_act->bufT0->vb.v4l2_buf.timestamp;
}
vb2_buffer_done(&vp_act->bufOut->vb, VB2_BUF_STATE_DONE);
/* Cycle to next state */
if (vp_act->vp_state != VP_NORMAL)
vp_act->vp_state++;
/* Cycle Buffers*/
if (dev->nr_param.mode) {
if (vp_act->bufNR.nr_pos == TM1_BUF)
vp_act->bufNR.nr_pos = BUF_NOT_IN_USE;
if (vp_act->bufNR.nr_pos != BUF_NOT_IN_USE)
vp_act->bufNR.nr_pos++;
vp_act->bufTm1 = vp_act->bufT0;
vp_act->bufT0 = vp_act->bufT1;
vp_act->bufT1 = vp_act->bufNRT2;
vp_act->bufNRT2 = vp_act->bufT2;
config_nr_buffer(vp_work->cd, vp_act->bufNRT2);
} else {
vp_act->bufTm1 = vp_act->bufT0;
vp_act->bufT0 = vp_act->bufT1;
vp_act->bufT1 = vp_act->bufT2;
}
rc = vp_setup_buffers(vp_work->cd);
if (rc < 0) {
/* setup_buf failed because we are waiting for buffers */
writel_relaxed(0x00000000, VCAP_VP_INTERRUPT_ENABLE);
writel_iowmb(irq, VCAP_VP_INT_CLEAR);
atomic_set(&dev->vp_enabled, 0);
if (dev->vp_shutdown)
wake_up(&dev->vp_dummy_waitq);
return;
}
/* Config VP */
if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_BOTTOM)
top_field = 1;
writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
writel_iowmb(0x00010000 | top_field, VCAP_VP_CTRL);
enable_irq(dev->vpirq->start);
do_gettimeofday(&tv);
dev->dbg_p.vp_timestamp = (uint32_t) (tv.tv_sec * VCAP_USEC +
tv.tv_usec);
writel_iowmb(irq, VCAP_VP_INT_CLEAR);
}
irqreturn_t vp_handler(struct vcap_dev *dev)
{
struct vcap_client_data *c_data;
struct vp_action *vp_act;
struct v4l2_event v4l2_evt;
uint32_t irq;
int rc;
struct timeval tv;
uint32_t new_ts;
irq = readl_relaxed(VCAP_VP_INT_STATUS);
if (dev->vp_dummy_event == true) {
writel_relaxed(irq, VCAP_VP_INT_CLEAR);
dev->vp_dummy_complete = true;
wake_up(&dev->vp_dummy_waitq);
return IRQ_HANDLED;
}
if (irq & 0x02000000) {
v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
VCAP_VP_REG_R_ERR_EVENT;
v4l2_event_queue(dev->vfd, &v4l2_evt);
}
if (irq & 0x01000000) {
v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
VCAP_VP_REG_W_ERR_EVENT;
v4l2_event_queue(dev->vfd, &v4l2_evt);
}
if (irq & 0x00020000) {
v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
VCAP_VP_IN_HEIGHT_ERR_EVENT;
v4l2_event_queue(dev->vfd, &v4l2_evt);
}
if (irq & 0x00010000) {
v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
VCAP_VP_IN_WIDTH_ERR_EVENT;
v4l2_event_queue(dev->vfd, &v4l2_evt);
}
pr_debug("%s: irq=0x%08x\n", __func__, irq);
if (!(irq & (VP_PIC_DONE | VP_MODE_CHANGE))) {
writel_relaxed(irq, VCAP_VP_INT_CLEAR);
pr_err("VP IRQ shows some error\n");
return IRQ_HANDLED;
}
if (dev->vp_client == NULL) {
writel_relaxed(irq, VCAP_VP_INT_CLEAR);
pr_err("VC: There is no active vp client\n");
return IRQ_HANDLED;
}
vp_act = &dev->vp_client->vp_action;
c_data = dev->vp_client;
if (vp_act->vp_state == VP_UNKNOWN) {
writel_relaxed(irq, VCAP_VP_INT_CLEAR);
pr_err("%s: VP is in an unknown state\n",
__func__);
return -EAGAIN;
}
do_gettimeofday(&tv);
new_ts = (uint32_t) (tv.tv_sec * VCAP_USEC +
tv.tv_usec);
if (new_ts > dev->dbg_p.vp_timestamp) {
dev->dbg_p.vp_ewma = ((new_ts - dev->dbg_p.vp_timestamp) /
10 + (dev->dbg_p.vp_ewma / 10 * 9));
}
dev->dbg_p.vp_timestamp = (uint32_t) (tv.tv_sec * VCAP_USEC +
tv.tv_usec);
INIT_WORK(&dev->vp_work.work, vp_wq_fnc);
dev->vp_work.cd = c_data;
rc = queue_work(dev->vcap_wq, &dev->vp_work.work);
disable_irq_nosync(dev->vpirq->start);
return IRQ_HANDLED;
}
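/*
 * Request a VP software reset and poll the status register until the
 * hardware reports completion, giving up after a fixed number of
 * iterations.
 */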
int vp_sw_reset(struct vcap_dev *dev)
{
int timeout;
writel_iowmb(0x00000010, VCAP_SW_RESET_REQ);
timeout = 10000;
while (1) {
if (!(readl_relaxed(VCAP_SW_RESET_STATUS) & 0x10))
break;
timeout--;
if (timeout == 0) {
/* This should not happen */
pr_err("VP is not resetting properly\n");
writel_iowmb(0x00000000, VCAP_SW_RESET_REQ);
return -EINVAL;
}
}
return 0;
}
void vp_stop_capture(struct vcap_client_data *c_data)
{
struct vcap_dev *dev = c_data->dev;
int rc;
dev->vp_shutdown = true;
flush_workqueue(dev->vcap_wq);
if (atomic_read(&dev->vp_enabled) == 1) {
rc = wait_event_interruptible_timeout(dev->vp_dummy_waitq,
!atomic_read(&dev->vp_enabled),
msecs_to_jiffies(50));
if (rc == 0 && atomic_read(&dev->vp_enabled) == 1) {
/* This should not happen, if it does hw is stuck */
disable_irq_nosync(dev->vpirq->start);
atomic_set(&dev->vp_enabled, 0);
pr_err("%s: VP Timeout and VP still running\n",
__func__);
}
}
vp_sw_reset(dev);
dev->vp_shutdown = false;
}
int config_vp_format(struct vcap_client_data *c_data)
{
struct vcap_dev *dev = c_data->dev;
int rc;
INIT_WORK(&dev->vp_to_vc_work.work, mov_buf_to_vc);
dev->vp_to_vc_work.cd = c_data;
/* SW restart VP */
rc = vp_sw_reset(dev);
if (rc < 0)
return rc;
/* Film Mode related settings */
writel_iowmb(0x00000000, VCAP_VP_FILM_PROJECTION_T0);
writel_relaxed(0x00000000, VCAP_VP_FILM_PROJECTION_T2);
writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MAX_PROJ);
writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MIN_PROJ);
writel_relaxed(0x00000000, VCAP_VP_FILM_SEQUENCE_HIST);
writel_relaxed(0x00000000, VCAP_VP_FILM_MODE_STATE);
writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
writel_relaxed(0x00000010, VCAP_VP_REDUCT_AVG_MOTION);
writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
writel_relaxed(0x40000000, VCAP_VP_NR_AVG_LUMA);
writel_relaxed(0x40000000, VCAP_VP_NR_AVG_CHROMA);
writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_LUMA);
writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_CHROMA);
writel_relaxed(0x00000000, VCAP_VP_BAL_AVG_BLEND);
writel_relaxed(0x00000000, VCAP_VP_VMOTION_HIST);
writel_relaxed(0x05047D19, VCAP_VP_FILM_ANALYSIS_CONFIG);
writel_relaxed(0x20260200, VCAP_VP_FILM_STATE_CONFIG);
writel_relaxed(0x23A60114, VCAP_VP_FVM_CONFIG);
writel_relaxed(0x03043210, VCAP_VP_FILM_ANALYSIS_CONFIG2);
writel_relaxed(0x04DB7A51, VCAP_VP_MIXED_ANALYSIS_CONFIG);
writel_relaxed(0x14224916, VCAP_VP_SPATIAL_CONFIG);
writel_relaxed(0x83270400, VCAP_VP_SPATIAL_CONFIG2);
writel_relaxed(0x0F000F92, VCAP_VP_SPATIAL_CONFIG3);
writel_relaxed(0x00000000, VCAP_VP_TEMPORAL_CONFIG);
writel_relaxed(0x00000000, VCAP_VP_PIXEL_DIFF_CONFIG);
writel_relaxed(0x0C090511, VCAP_VP_H_FREQ_CONFIG);
writel_relaxed(0x0A000000, VCAP_VP_NR_CONFIG);
writel_relaxed(0x008F4149, VCAP_VP_NR_LUMA_CONFIG);
writel_relaxed(0x008F4149, VCAP_VP_NR_CHROMA_CONFIG);
writel_relaxed(0x43C0FD0C, VCAP_VP_BAL_CONFIG);
writel_relaxed(0x00000255, VCAP_VP_BAL_MOTION_CONFIG);
writel_relaxed(0x24154252, VCAP_VP_BAL_LIGHT_COMB);
writel_relaxed(0x10024414, VCAP_VP_BAL_VMOTION_CONFIG);
writel_relaxed(0x00000002, VCAP_VP_NR_CONFIG2);
writel_relaxed((c_data->vp_out_fmt.height-1)<<16 |
(c_data->vp_out_fmt.width - 1), VCAP_VP_FRAME_SIZE);
writel_relaxed(0x00000000, VCAP_VP_SPLIT_SCRN_CTRL);
return 0;
}
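/*
 * Allocate the motion-estimation scratch buffer from ION, zero it, map it
 * through the IOMMU and program its address into the VP.
 */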
int init_motion_buf(struct vcap_client_data *c_data)
{
int rc;
struct vcap_dev *dev = c_data->dev;
struct ion_handle *handle = NULL;
unsigned long paddr, len;
void *vaddr;
size_t size = ((c_data->vp_out_fmt.width + 63) >> 6) *
((c_data->vp_out_fmt.height + 7) >> 3) * 16;
if (c_data->vp_action.motionHandle) {
pr_err("Motion buffer has already been created");
return -ENOEXEC;
}
handle = ion_alloc(dev->ion_client, size, SZ_4K,
ION_HEAP(ION_CP_MM_HEAP_ID), 0);
if (IS_ERR_OR_NULL(handle)) {
pr_err("%s: ion_alloc failed\n", __func__);
return -ENOMEM;
}
vaddr = ion_map_kernel(dev->ion_client, handle);
if (IS_ERR(vaddr)) {
pr_err("%s: Map motion buffer failed\n", __func__);
ion_free(dev->ion_client, handle);
rc = -ENOMEM;
return rc;
}
memset(vaddr, 0, size);
ion_unmap_kernel(dev->ion_client, handle);
rc = ion_map_iommu(dev->ion_client, handle,
dev->domain_num, 0, SZ_4K, 0, &paddr, &len,
0, 0);
if (rc < 0) {
pr_err("%s: map_iommu failed\n", __func__);
ion_free(dev->ion_client, handle);
return rc;
}
c_data->vp_action.motionHandle = handle;
vaddr = NULL;
writel_iowmb(paddr, VCAP_VP_MOTION_EST_ADDR);
return 0;
}
void deinit_motion_buf(struct vcap_client_data *c_data)
{
struct vcap_dev *dev = c_data->dev;
if (!c_data->vp_action.motionHandle) {
pr_err("Motion buffer has not been created");
return;
}
writel_iowmb(0x00000000, VCAP_VP_MOTION_EST_ADDR);
ion_unmap_iommu(dev->ion_client, c_data->vp_action.motionHandle,
dev->domain_num, 0);
ion_free(dev->ion_client, c_data->vp_action.motionHandle);
c_data->vp_action.motionHandle = NULL;
return;
}
int init_nr_buf(struct vcap_client_data *c_data)
{
struct vcap_dev *dev = c_data->dev;
struct ion_handle *handle = NULL;
size_t frame_size, tot_size;
unsigned long paddr, len;
int rc;
if (c_data->vp_action.bufNR.nr_handle) {
pr_err("NR buffer has already been created");
return -ENOEXEC;
}
frame_size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
tot_size = frame_size * 2;
else
tot_size = frame_size / 2 * 3;
handle = ion_alloc(dev->ion_client, tot_size, SZ_4K,
ION_HEAP(ION_CP_MM_HEAP_ID), 0);
if (IS_ERR_OR_NULL(handle)) {
pr_err("%s: ion_alloc failed\n", __func__);
return -ENOMEM;
}
rc = ion_map_iommu(dev->ion_client, handle,
dev->domain_num, 0, SZ_4K, 0, &paddr, &len,
0, 0);
if (rc < 0) {
pr_err("%s: map_iommu failed\n", __func__);
ion_free(dev->ion_client, handle);
return rc;
}
c_data->vp_action.bufNR.nr_handle = handle;
update_nr_value(dev);
c_data->vp_action.bufNR.paddr = paddr;
rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
rc |= (((c_data->vp_out_fmt.width / 16) << 20) | 0x1);
writel_relaxed(rc, VCAP_VP_NR_CONFIG2);
writel_relaxed(paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
writel_relaxed(paddr + frame_size, VCAP_VP_NR_T2_C_BASE_ADDR);
c_data->vp_action.bufNR.nr_pos = NRT2_BUF;
return 0;
}
void deinit_nr_buf(struct vcap_client_data *c_data)
{
struct vcap_dev *dev = c_data->dev;
struct nr_buffer *buf;
uint32_t rc;
if (!c_data->vp_action.bufNR.nr_handle) {
pr_err("NR buffer has not been created");
return;
}
buf = &c_data->vp_action.bufNR;
rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
/* Clear the NR enable bit and width field programmed in init_nr_buf() */
rc &= ~(0x0FF00001);
writel_relaxed(rc, VCAP_VP_NR_CONFIG2);
ion_unmap_iommu(dev->ion_client, buf->nr_handle, dev->domain_num, 0);
ion_free(dev->ion_client, buf->nr_handle);
buf->nr_handle = NULL;
buf->paddr = 0;
return;
}
int nr_s_param(struct vcap_client_data *c_data, struct nr_param *param)
{
if (param->mode != NR_MANUAL)
return 0;
/* Verify values in range */
if (param->window > VP_NR_MAX_WINDOW)
return -EINVAL;
if (param->luma.max_blend_ratio > VP_NR_MAX_RATIO)
return -EINVAL;
if (param->luma.scale_diff_ratio > VP_NR_MAX_RATIO)
return -EINVAL;
if (param->luma.diff_limit_ratio > VP_NR_MAX_RATIO)
return -EINVAL;
if (param->luma.scale_motion_ratio > VP_NR_MAX_RATIO)
return -EINVAL;
if (param->luma.blend_limit_ratio > VP_NR_MAX_RATIO)
return -EINVAL;
if (param->chroma.max_blend_ratio > VP_NR_MAX_RATIO)
return -EINVAL;
if (param->chroma.scale_diff_ratio > VP_NR_MAX_RATIO)
return -EINVAL;
if (param->chroma.diff_limit_ratio > VP_NR_MAX_RATIO)
return -EINVAL;
if (param->chroma.scale_motion_ratio > VP_NR_MAX_RATIO)
return -EINVAL;
if (param->chroma.blend_limit_ratio > VP_NR_MAX_RATIO)
return -EINVAL;
return 0;
}
void nr_g_param(struct vcap_client_data *c_data, struct nr_param *param)
{
struct vcap_dev *dev = c_data->dev;
uint32_t rc;
rc = readl_relaxed(VCAP_VP_NR_CONFIG);
param->window = BITS_VALUE(rc, 24, 4);
param->decay_ratio = BITS_VALUE(rc, 20, 3);
rc = readl_relaxed(VCAP_VP_NR_LUMA_CONFIG);
param->threshold = NR_THRESHOLD_STATIC;
if (BITS_VALUE(rc, 16, 1))
param->threshold = NR_THRESHOLD_DYNAMIC;
param->luma.max_blend_ratio = BITS_VALUE(rc, 24, 4);
param->luma.scale_diff_ratio = BITS_VALUE(rc, 12, 4);
param->luma.diff_limit_ratio = BITS_VALUE(rc, 8, 4);
param->luma.scale_motion_ratio = BITS_VALUE(rc, 4, 4);
param->luma.blend_limit_ratio = BITS_VALUE(rc, 0, 4);
rc = readl_relaxed(VCAP_VP_NR_CHROMA_CONFIG);
param->chroma.max_blend_ratio = BITS_VALUE(rc, 24, 4);
param->chroma.scale_diff_ratio = BITS_VALUE(rc, 12, 4);
param->chroma.diff_limit_ratio = BITS_VALUE(rc, 8, 4);
param->chroma.scale_motion_ratio = BITS_VALUE(rc, 4, 4);
param->chroma.blend_limit_ratio = BITS_VALUE(rc, 0, 4);
}
void s_default_nr_val(struct nr_param *param)
{
param->threshold = NR_THRESHOLD_STATIC;
param->window = 10;
param->decay_ratio = 0;
param->luma.max_blend_ratio = 0;
param->luma.scale_diff_ratio = 4;
param->luma.diff_limit_ratio = 1;
param->luma.scale_motion_ratio = 4;
param->luma.blend_limit_ratio = 9;
param->chroma.max_blend_ratio = 0;
param->chroma.scale_diff_ratio = 4;
param->chroma.diff_limit_ratio = 1;
param->chroma.scale_motion_ratio = 4;
param->chroma.blend_limit_ratio = 9;
}
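/*
 * Run the VP once over a small ION scratch buffer and wait for the
 * resulting interrupt; vp_handler() short-circuits while vp_dummy_event
 * is set, so no client buffers are touched.
 */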
int vp_dummy_event(struct vcap_client_data *c_data)
{
struct vcap_dev *dev = c_data->dev;
unsigned int width, height;
struct ion_handle *handle = NULL;
unsigned long paddr, len;
uint32_t reg;
int rc = 0;
pr_debug("%s: Start VP dummy event\n", __func__);
handle = ion_alloc(dev->ion_client, 0x1200, SZ_4K,
ION_HEAP(ION_CP_MM_HEAP_ID), 0);
if (IS_ERR_OR_NULL(handle)) {
pr_err("%s: ion_alloc failed\n", __func__);
return -ENOMEM;
}
rc = ion_map_iommu(dev->ion_client, handle,
dev->domain_num, 0, SZ_4K, 0, &paddr, &len,
0, 0);
if (rc < 0) {
pr_err("%s: map_iommu failed\n", __func__);
ion_free(dev->ion_client, handle);
return rc;
}
width = c_data->vp_out_fmt.width;
height = c_data->vp_out_fmt.height;
c_data->vp_out_fmt.width = 0x3F;
c_data->vp_out_fmt.height = 0x16;
config_vp_format(c_data);
writel_relaxed(paddr, VCAP_VP_T1_Y_BASE_ADDR);
writel_relaxed(paddr + 0x2C0, VCAP_VP_T1_C_BASE_ADDR);
writel_relaxed(paddr + 0x440, VCAP_VP_T2_Y_BASE_ADDR);
writel_relaxed(paddr + 0x700, VCAP_VP_T2_C_BASE_ADDR);
writel_relaxed(paddr + 0x880, VCAP_VP_OUT_Y_BASE_ADDR);
writel_relaxed(paddr + 0xB40, VCAP_VP_OUT_C_BASE_ADDR);
writel_iowmb(paddr + 0x1100, VCAP_VP_MOTION_EST_ADDR);
writel_relaxed(4 << 20 | 0x2 << 4, VCAP_VP_IN_CONFIG);
writel_relaxed(4 << 20 | 0x1 << 4, VCAP_VP_OUT_CONFIG);
dev->vp_dummy_event = true;
enable_irq(dev->vpirq->start);
writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
writel_iowmb(0x00000000, VCAP_VP_CTRL);
writel_iowmb(0x00010000, VCAP_VP_CTRL);
rc = wait_event_interruptible_timeout(dev->vp_dummy_waitq,
dev->vp_dummy_complete, msecs_to_jiffies(50));
if (!rc && !dev->vp_dummy_complete) {
pr_err("%s: VP dummy event timeout", __func__);
rc = -ETIME;
vp_sw_reset(dev);
dev->vp_dummy_complete = false;
}
writel_relaxed(0x00000000, VCAP_VP_INTERRUPT_ENABLE);
disable_irq(dev->vpirq->start);
dev->vp_dummy_event = false;
reg = readl_relaxed(VCAP_OFFSET(0x0D94));
writel_relaxed(reg, VCAP_OFFSET(0x0D9C));
c_data->vp_out_fmt.width = width;
c_data->vp_out_fmt.height = height;
ion_unmap_iommu(dev->ion_client, handle, dev->domain_num, 0);
ion_free(dev->ion_client, handle);
pr_debug("%s: Exit VP dummy event\n", __func__);
return rc;
}
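/*
 * Start the first VP pass: take two input buffers (T1/T2) and one output
 * buffer from the active lists, program the input/output formats and the
 * field order, enable interrupts and trigger the hardware.
 */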
int kickoff_vp(struct vcap_client_data *c_data)
{
struct vcap_dev *dev;
struct vp_action *vp_act;
struct timeval tv;
unsigned long flags = 0;
unsigned int chroma_fmt = 0;
int size;
bool top_field = 0;
if (!c_data->streaming)
return -ENOEXEC;
dev = c_data->dev;
pr_debug("Start VP Kickoff\n");
if (dev->vp_client == NULL) {
pr_err("No active vp client\n");
return -ENODEV;
}
vp_act = &dev->vp_client->vp_action;
spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
if (list_empty(&vp_act->in_active)) {
spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
pr_err("%s: VP We have no more input buffers\n",
__func__);
return -EAGAIN;
}
vp_act->bufT1 = list_entry(vp_act->in_active.next,
struct vcap_buffer, list);
list_del(&vp_act->bufT1->list);
if (list_empty(&vp_act->in_active)) {
spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
list_add(&vp_act->bufT1->list, &vp_act->in_active);
pr_err("%s: VP We have no more input buffers\n",
__func__);
return -EAGAIN;
}
vp_act->bufT2 = list_entry(vp_act->in_active.next,
struct vcap_buffer, list);
list_del(&vp_act->bufT2->list);
if (list_empty(&vp_act->out_active)) {
spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
list_add(&vp_act->bufT2->list, &vp_act->in_active);
list_add(&vp_act->bufT1->list, &vp_act->in_active);
pr_err("%s: VP We have no more output buffers\n",
__func__);
return -EAGAIN;
}
vp_act->bufOut = list_entry(vp_act->out_active.next,
struct vcap_buffer, list);
list_del(&vp_act->bufOut->list);
spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
writel_relaxed(vp_act->bufT1->paddr, VCAP_VP_T1_Y_BASE_ADDR);
writel_relaxed(vp_act->bufT1->paddr + size, VCAP_VP_T1_C_BASE_ADDR);
config_in_buffer(c_data, vp_act->bufT2);
config_out_buffer(c_data, vp_act->bufOut);
/* Config VP */
if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
chroma_fmt = 1;
writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
chroma_fmt << 11 | 0x2 << 4, VCAP_VP_IN_CONFIG);
chroma_fmt = 0;
if (c_data->vp_out_fmt.pixfmt == V4L2_PIX_FMT_NV16)
chroma_fmt = 1;
writel_relaxed((c_data->vp_out_fmt.width / 16) << 20 |
chroma_fmt << 11 | 0x1 << 4, VCAP_VP_OUT_CONFIG);
/* Enable Interrupt */
if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_BOTTOM)
top_field = 1;
vp_act->vp_state = VP_FRAME2;
writel_relaxed(0x01100001, VCAP_VP_INTERRUPT_ENABLE);
writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
writel_iowmb(0x00010000 | top_field, VCAP_VP_CTRL);
atomic_set(&c_data->dev->vp_enabled, 1);
enable_irq(dev->vpirq->start);
do_gettimeofday(&tv);
dev->dbg_p.vp_timestamp = (uint32_t) (tv.tv_sec * VCAP_USEC +
tv.tv_usec);
return 0;
}
int continue_vp(struct vcap_client_data *c_data)
{
struct vcap_dev *dev;
struct vp_action *vp_act;
struct timeval tv;
int rc;
bool top_field = 0;
pr_debug("Start VP Continue\n");
dev = c_data->dev;
if (dev->vp_client == NULL) {
pr_err("No active vp client\n");
return -ENODEV;
}
vp_act = &dev->vp_client->vp_action;
if (vp_act->vp_state == VP_UNKNOWN) {
pr_err("%s: VP is in an unknown state\n",
__func__);
return -EAGAIN;
}
rc = vp_setup_buffers(c_data);
if (rc < 0)
return rc;
if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_BOTTOM)
top_field = 1;
/* Config VP & Enable Interrupt */
writel_relaxed(0x01100001, VCAP_VP_INTERRUPT_ENABLE);
writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
writel_iowmb(0x00010000 | top_field, VCAP_VP_CTRL);
atomic_set(&c_data->dev->vp_enabled, 1);
enable_irq(dev->vpirq->start);
do_gettimeofday(&tv);
dev->dbg_p.vp_timestamp = (uint32_t) (tv.tv_sec * VCAP_USEC +
tv.tv_usec);
return 0;
}
| gpl-2.0 |
sim0629/linux-openwrt | drivers/media/pci/pt1/pt1.c | 2259 | 25790 | /*
* driver for Earthsoft PT1/PT2
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
* based on pt1dvr - http://pt1dvr.sourceforge.jp/
* by Tomoaki Ishikawa <tomy@users.sourceforge.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/ratelimit.h>
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dmxdev.h"
#include "dvb_net.h"
#include "dvb_frontend.h"
#include "va1j5jf8007t.h"
#include "va1j5jf8007s.h"
#define DRIVER_NAME "earth-pt1"
#define PT1_PAGE_SHIFT 12
#define PT1_PAGE_SIZE (1 << PT1_PAGE_SHIFT)
#define PT1_NR_UPACKETS 1024
#define PT1_NR_BUFS 511
struct pt1_buffer_page {
__le32 upackets[PT1_NR_UPACKETS];
};
struct pt1_table_page {
__le32 next_pfn;
__le32 buf_pfns[PT1_NR_BUFS];
};
struct pt1_buffer {
struct pt1_buffer_page *page;
dma_addr_t addr;
};
struct pt1_table {
struct pt1_table_page *page;
dma_addr_t addr;
struct pt1_buffer bufs[PT1_NR_BUFS];
};
#define PT1_NR_ADAPS 4
struct pt1_adapter;
struct pt1 {
struct pci_dev *pdev;
void __iomem *regs;
struct i2c_adapter i2c_adap;
int i2c_running;
struct pt1_adapter *adaps[PT1_NR_ADAPS];
struct pt1_table *tables;
struct task_struct *kthread;
int table_index;
int buf_index;
struct mutex lock;
int power;
int reset;
};
struct pt1_adapter {
struct pt1 *pt1;
int index;
u8 *buf;
int upacket_count;
int packet_count;
int st_count;
struct dvb_adapter adap;
struct dvb_demux demux;
int users;
struct dmxdev dmxdev;
struct dvb_frontend *fe;
int (*orig_set_voltage)(struct dvb_frontend *fe,
fe_sec_voltage_t voltage);
int (*orig_sleep)(struct dvb_frontend *fe);
int (*orig_init)(struct dvb_frontend *fe);
fe_sec_voltage_t voltage;
int sleep;
};
#define pt1_printk(level, pt1, format, arg...) \
dev_printk(level, &(pt1)->pdev->dev, format, ##arg)
static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
writel(data, pt1->regs + reg * 4);
}
static u32 pt1_read_reg(struct pt1 *pt1, int reg)
{
return readl(pt1->regs + reg * 4);
}
static int pt1_nr_tables = 8;
module_param_named(nr_tables, pt1_nr_tables, int, 0);
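/*
 * Illustrative note (not part of the original driver): nr_tables is a
 * load-time parameter (permission 0), e.g.
 *
 *	modprobe earth-pt1 nr_tables=16
 *
 * The default is 8; each table carries PT1_NR_BUFS buffer pages of
 * PT1_PAGE_SIZE bytes plus one table page. The module name used above is
 * an assumption for illustration only.
 */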
static void pt1_increment_table_count(struct pt1 *pt1)
{
pt1_write_reg(pt1, 0, 0x00000020);
}
static void pt1_init_table_count(struct pt1 *pt1)
{
pt1_write_reg(pt1, 0, 0x00000010);
}
static void pt1_register_tables(struct pt1 *pt1, u32 first_pfn)
{
pt1_write_reg(pt1, 5, first_pfn);
pt1_write_reg(pt1, 0, 0x0c000040);
}
static void pt1_unregister_tables(struct pt1 *pt1)
{
pt1_write_reg(pt1, 0, 0x08080000);
}
static int pt1_sync(struct pt1 *pt1)
{
int i;
for (i = 0; i < 57; i++) {
if (pt1_read_reg(pt1, 0) & 0x20000000)
return 0;
pt1_write_reg(pt1, 0, 0x00000008);
}
pt1_printk(KERN_ERR, pt1, "could not sync\n");
return -EIO;
}
static u64 pt1_identify(struct pt1 *pt1)
{
int i;
u64 id;
id = 0;
for (i = 0; i < 57; i++) {
id |= (u64)(pt1_read_reg(pt1, 0) >> 30 & 1) << i;
pt1_write_reg(pt1, 0, 0x00000008);
}
return id;
}
static int pt1_unlock(struct pt1 *pt1)
{
int i;
pt1_write_reg(pt1, 0, 0x00000008);
for (i = 0; i < 3; i++) {
if (pt1_read_reg(pt1, 0) & 0x80000000)
return 0;
schedule_timeout_uninterruptible((HZ + 999) / 1000);
}
pt1_printk(KERN_ERR, pt1, "could not unlock\n");
return -EIO;
}
static int pt1_reset_pci(struct pt1 *pt1)
{
int i;
pt1_write_reg(pt1, 0, 0x01010000);
pt1_write_reg(pt1, 0, 0x01000000);
for (i = 0; i < 10; i++) {
if (pt1_read_reg(pt1, 0) & 0x00000001)
return 0;
schedule_timeout_uninterruptible((HZ + 999) / 1000);
}
pt1_printk(KERN_ERR, pt1, "could not reset PCI\n");
return -EIO;
}
static int pt1_reset_ram(struct pt1 *pt1)
{
int i;
pt1_write_reg(pt1, 0, 0x02020000);
pt1_write_reg(pt1, 0, 0x02000000);
for (i = 0; i < 10; i++) {
if (pt1_read_reg(pt1, 0) & 0x00000002)
return 0;
schedule_timeout_uninterruptible((HZ + 999) / 1000);
}
pt1_printk(KERN_ERR, pt1, "could not reset RAM\n");
return -EIO;
}
static int pt1_do_enable_ram(struct pt1 *pt1)
{
int i, j;
u32 status;
status = pt1_read_reg(pt1, 0) & 0x00000004;
pt1_write_reg(pt1, 0, 0x00000002);
for (i = 0; i < 10; i++) {
for (j = 0; j < 1024; j++) {
if ((pt1_read_reg(pt1, 0) & 0x00000004) != status)
return 0;
}
schedule_timeout_uninterruptible((HZ + 999) / 1000);
}
pt1_printk(KERN_ERR, pt1, "could not enable RAM\n");
return -EIO;
}
static int pt1_enable_ram(struct pt1 *pt1)
{
int i, ret;
int phase;
schedule_timeout_uninterruptible((HZ + 999) / 1000);
phase = pt1->pdev->device == 0x211a ? 128 : 166;
for (i = 0; i < phase; i++) {
ret = pt1_do_enable_ram(pt1);
if (ret < 0)
return ret;
}
return 0;
}
static void pt1_disable_ram(struct pt1 *pt1)
{
pt1_write_reg(pt1, 0, 0x0b0b0000);
}
static void pt1_set_stream(struct pt1 *pt1, int index, int enabled)
{
pt1_write_reg(pt1, 2, 1 << (index + 8) | enabled << index);
}
static void pt1_init_streams(struct pt1 *pt1)
{
int i;
for (i = 0; i < PT1_NR_ADAPS; i++)
pt1_set_stream(pt1, i, 0);
}
static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
{
u32 upacket;
int i;
int index;
struct pt1_adapter *adap;
int offset;
u8 *buf;
int sc;
if (!page->upackets[PT1_NR_UPACKETS - 1])
return 0;
for (i = 0; i < PT1_NR_UPACKETS; i++) {
upacket = le32_to_cpu(page->upackets[i]);
index = (upacket >> 29) - 1;
if (index < 0 || index >= PT1_NR_ADAPS)
continue;
adap = pt1->adaps[index];
if (upacket >> 25 & 1)
adap->upacket_count = 0;
else if (!adap->upacket_count)
continue;
if (upacket >> 24 & 1)
printk_ratelimited(KERN_INFO "earth-pt1: device "
"buffer overflowing. table[%d] buf[%d]\n",
pt1->table_index, pt1->buf_index);
sc = upacket >> 26 & 0x7;
if (adap->st_count != -1 && sc != ((adap->st_count + 1) & 0x7))
printk_ratelimited(KERN_INFO "earth-pt1: data loss"
" in streamID(adapter)[%d]\n", index);
adap->st_count = sc;
buf = adap->buf;
offset = adap->packet_count * 188 + adap->upacket_count * 3;
buf[offset] = upacket >> 16;
buf[offset + 1] = upacket >> 8;
if (adap->upacket_count != 62)
buf[offset + 2] = upacket;
if (++adap->upacket_count >= 63) {
adap->upacket_count = 0;
if (++adap->packet_count >= 21) {
dvb_dmx_swfilter_packets(&adap->demux, buf, 21);
adap->packet_count = 0;
}
}
}
page->upackets[PT1_NR_UPACKETS - 1] = 0;
return 1;
}
static int pt1_thread(void *data)
{
struct pt1 *pt1;
struct pt1_buffer_page *page;
pt1 = data;
set_freezable();
while (!kthread_should_stop()) {
try_to_freeze();
page = pt1->tables[pt1->table_index].bufs[pt1->buf_index].page;
if (!pt1_filter(pt1, page)) {
schedule_timeout_interruptible((HZ + 999) / 1000);
continue;
}
if (++pt1->buf_index >= PT1_NR_BUFS) {
pt1_increment_table_count(pt1);
pt1->buf_index = 0;
if (++pt1->table_index >= pt1_nr_tables)
pt1->table_index = 0;
}
}
return 0;
}
static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
{
dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
}
static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
{
void *page;
dma_addr_t addr;
page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
GFP_KERNEL);
if (page == NULL)
return NULL;
BUG_ON(addr & (PT1_PAGE_SIZE - 1));
BUG_ON(addr >> PT1_PAGE_SHIFT >> 31 >> 1);
*addrp = addr;
*pfnp = addr >> PT1_PAGE_SHIFT;
return page;
}
static void pt1_cleanup_buffer(struct pt1 *pt1, struct pt1_buffer *buf)
{
pt1_free_page(pt1, buf->page, buf->addr);
}
static int
pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
{
struct pt1_buffer_page *page;
dma_addr_t addr;
page = pt1_alloc_page(pt1, &addr, pfnp);
if (page == NULL)
return -ENOMEM;
page->upackets[PT1_NR_UPACKETS - 1] = 0;
buf->page = page;
buf->addr = addr;
return 0;
}
static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
{
int i;
for (i = 0; i < PT1_NR_BUFS; i++)
pt1_cleanup_buffer(pt1, &table->bufs[i]);
pt1_free_page(pt1, table->page, table->addr);
}
static int
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
{
struct pt1_table_page *page;
dma_addr_t addr;
int i, ret;
u32 buf_pfn;
page = pt1_alloc_page(pt1, &addr, pfnp);
if (page == NULL)
return -ENOMEM;
for (i = 0; i < PT1_NR_BUFS; i++) {
ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
if (ret < 0)
goto err;
page->buf_pfns[i] = cpu_to_le32(buf_pfn);
}
pt1_increment_table_count(pt1);
table->page = page;
table->addr = addr;
return 0;
err:
while (i--)
pt1_cleanup_buffer(pt1, &table->bufs[i]);
pt1_free_page(pt1, page, addr);
return ret;
}
static void pt1_cleanup_tables(struct pt1 *pt1)
{
struct pt1_table *tables;
int i;
tables = pt1->tables;
pt1_unregister_tables(pt1);
for (i = 0; i < pt1_nr_tables; i++)
pt1_cleanup_table(pt1, &tables[i]);
vfree(tables);
}
static int pt1_init_tables(struct pt1 *pt1)
{
struct pt1_table *tables;
int i, ret;
u32 first_pfn, pfn;
tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
if (tables == NULL)
return -ENOMEM;
pt1_init_table_count(pt1);
i = 0;
if (pt1_nr_tables) {
ret = pt1_init_table(pt1, &tables[0], &first_pfn);
if (ret)
goto err;
i++;
}
while (i < pt1_nr_tables) {
ret = pt1_init_table(pt1, &tables[i], &pfn);
if (ret)
goto err;
tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
i++;
}
tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);
pt1_register_tables(pt1, first_pfn);
pt1->tables = tables;
return 0;
err:
while (i--)
pt1_cleanup_table(pt1, &tables[i]);
vfree(tables);
return ret;
}
static int pt1_start_polling(struct pt1 *pt1)
{
int ret = 0;
mutex_lock(&pt1->lock);
if (!pt1->kthread) {
pt1->kthread = kthread_run(pt1_thread, pt1, "earth-pt1");
if (IS_ERR(pt1->kthread)) {
ret = PTR_ERR(pt1->kthread);
pt1->kthread = NULL;
}
}
mutex_unlock(&pt1->lock);
return ret;
}
static int pt1_start_feed(struct dvb_demux_feed *feed)
{
struct pt1_adapter *adap;
adap = container_of(feed->demux, struct pt1_adapter, demux);
if (!adap->users++) {
int ret;
ret = pt1_start_polling(adap->pt1);
if (ret)
return ret;
pt1_set_stream(adap->pt1, adap->index, 1);
}
return 0;
}
static void pt1_stop_polling(struct pt1 *pt1)
{
int i, count;
mutex_lock(&pt1->lock);
for (i = 0, count = 0; i < PT1_NR_ADAPS; i++)
count += pt1->adaps[i]->users;
if (count == 0 && pt1->kthread) {
kthread_stop(pt1->kthread);
pt1->kthread = NULL;
}
mutex_unlock(&pt1->lock);
}
static int pt1_stop_feed(struct dvb_demux_feed *feed)
{
struct pt1_adapter *adap;
adap = container_of(feed->demux, struct pt1_adapter, demux);
if (!--adap->users) {
pt1_set_stream(adap->pt1, adap->index, 0);
pt1_stop_polling(adap->pt1);
}
return 0;
}
static void
pt1_update_power(struct pt1 *pt1)
{
int bits;
int i;
struct pt1_adapter *adap;
static const int sleep_bits[] = {
1 << 4,
1 << 6 | 1 << 7,
1 << 5,
1 << 6 | 1 << 8,
};
bits = pt1->power | !pt1->reset << 3;
mutex_lock(&pt1->lock);
for (i = 0; i < PT1_NR_ADAPS; i++) {
adap = pt1->adaps[i];
switch (adap->voltage) {
case SEC_VOLTAGE_13: /* actually 11V */
bits |= 1 << 1;
break;
case SEC_VOLTAGE_18: /* actually 15V */
bits |= 1 << 1 | 1 << 2;
break;
default:
break;
}
/* XXX: The bits should be changed depending on adap->sleep. */
bits |= sleep_bits[i];
}
pt1_write_reg(pt1, 1, bits);
mutex_unlock(&pt1->lock);
}
static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
struct pt1_adapter *adap;
adap = container_of(fe->dvb, struct pt1_adapter, adap);
adap->voltage = voltage;
pt1_update_power(adap->pt1);
if (adap->orig_set_voltage)
return adap->orig_set_voltage(fe, voltage);
else
return 0;
}
static int pt1_sleep(struct dvb_frontend *fe)
{
struct pt1_adapter *adap;
adap = container_of(fe->dvb, struct pt1_adapter, adap);
adap->sleep = 1;
pt1_update_power(adap->pt1);
if (adap->orig_sleep)
return adap->orig_sleep(fe);
else
return 0;
}
static int pt1_wakeup(struct dvb_frontend *fe)
{
struct pt1_adapter *adap;
adap = container_of(fe->dvb, struct pt1_adapter, adap);
adap->sleep = 0;
pt1_update_power(adap->pt1);
schedule_timeout_uninterruptible((HZ + 999) / 1000);
if (adap->orig_init)
return adap->orig_init(fe);
else
return 0;
}
static void pt1_free_adapter(struct pt1_adapter *adap)
{
adap->demux.dmx.close(&adap->demux.dmx);
dvb_dmxdev_release(&adap->dmxdev);
dvb_dmx_release(&adap->demux);
dvb_unregister_adapter(&adap->adap);
free_page((unsigned long)adap->buf);
kfree(adap);
}
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static struct pt1_adapter *
pt1_alloc_adapter(struct pt1 *pt1)
{
struct pt1_adapter *adap;
void *buf;
struct dvb_adapter *dvb_adap;
struct dvb_demux *demux;
struct dmxdev *dmxdev;
int ret;
adap = kzalloc(sizeof(struct pt1_adapter), GFP_KERNEL);
if (!adap) {
ret = -ENOMEM;
goto err;
}
adap->pt1 = pt1;
adap->voltage = SEC_VOLTAGE_OFF;
adap->sleep = 1;
buf = (u8 *)__get_free_page(GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_kfree;
}
adap->buf = buf;
adap->upacket_count = 0;
adap->packet_count = 0;
adap->st_count = -1;
dvb_adap = &adap->adap;
dvb_adap->priv = adap;
ret = dvb_register_adapter(dvb_adap, DRIVER_NAME, THIS_MODULE,
&pt1->pdev->dev, adapter_nr);
if (ret < 0)
goto err_free_page;
demux = &adap->demux;
demux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
demux->priv = adap;
demux->feednum = 256;
demux->filternum = 256;
demux->start_feed = pt1_start_feed;
demux->stop_feed = pt1_stop_feed;
demux->write_to_decoder = NULL;
ret = dvb_dmx_init(demux);
if (ret < 0)
goto err_unregister_adapter;
dmxdev = &adap->dmxdev;
dmxdev->filternum = 256;
dmxdev->demux = &demux->dmx;
dmxdev->capabilities = 0;
ret = dvb_dmxdev_init(dmxdev, dvb_adap);
if (ret < 0)
goto err_dmx_release;
return adap;
err_dmx_release:
dvb_dmx_release(demux);
err_unregister_adapter:
dvb_unregister_adapter(dvb_adap);
err_free_page:
free_page((unsigned long)buf);
err_kfree:
kfree(adap);
err:
return ERR_PTR(ret);
}
static void pt1_cleanup_adapters(struct pt1 *pt1)
{
int i;
for (i = 0; i < PT1_NR_ADAPS; i++)
pt1_free_adapter(pt1->adaps[i]);
}
static int pt1_init_adapters(struct pt1 *pt1)
{
int i;
struct pt1_adapter *adap;
int ret;
for (i = 0; i < PT1_NR_ADAPS; i++) {
adap = pt1_alloc_adapter(pt1);
if (IS_ERR(adap)) {
ret = PTR_ERR(adap);
goto err;
}
adap->index = i;
pt1->adaps[i] = adap;
}
return 0;
err:
while (i--)
pt1_free_adapter(pt1->adaps[i]);
return ret;
}
static void pt1_cleanup_frontend(struct pt1_adapter *adap)
{
dvb_unregister_frontend(adap->fe);
}
static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
{
int ret;
adap->orig_set_voltage = fe->ops.set_voltage;
adap->orig_sleep = fe->ops.sleep;
adap->orig_init = fe->ops.init;
fe->ops.set_voltage = pt1_set_voltage;
fe->ops.sleep = pt1_sleep;
fe->ops.init = pt1_wakeup;
ret = dvb_register_frontend(&adap->adap, fe);
if (ret < 0)
return ret;
adap->fe = fe;
return 0;
}
static void pt1_cleanup_frontends(struct pt1 *pt1)
{
int i;
for (i = 0; i < PT1_NR_ADAPS; i++)
pt1_cleanup_frontend(pt1->adaps[i]);
}
struct pt1_config {
struct va1j5jf8007s_config va1j5jf8007s_config;
struct va1j5jf8007t_config va1j5jf8007t_config;
};
static const struct pt1_config pt1_configs[2] = {
{
{
.demod_address = 0x1b,
.frequency = VA1J5JF8007S_20MHZ,
},
{
.demod_address = 0x1a,
.frequency = VA1J5JF8007T_20MHZ,
},
}, {
{
.demod_address = 0x19,
.frequency = VA1J5JF8007S_20MHZ,
},
{
.demod_address = 0x18,
.frequency = VA1J5JF8007T_20MHZ,
},
},
};
static const struct pt1_config pt2_configs[2] = {
{
{
.demod_address = 0x1b,
.frequency = VA1J5JF8007S_25MHZ,
},
{
.demod_address = 0x1a,
.frequency = VA1J5JF8007T_25MHZ,
},
}, {
{
.demod_address = 0x19,
.frequency = VA1J5JF8007S_25MHZ,
},
{
.demod_address = 0x18,
.frequency = VA1J5JF8007T_25MHZ,
},
},
};
static int pt1_init_frontends(struct pt1 *pt1)
{
int i, j;
struct i2c_adapter *i2c_adap;
const struct pt1_config *configs, *config;
struct dvb_frontend *fe[4];
int ret;
i = 0;
j = 0;
i2c_adap = &pt1->i2c_adap;
configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
do {
config = &configs[i / 2];
fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
i2c_adap);
if (!fe[i]) {
ret = -ENODEV; /* This does not sound nice... */
goto err;
}
i++;
fe[i] = va1j5jf8007t_attach(&config->va1j5jf8007t_config,
i2c_adap);
if (!fe[i]) {
ret = -ENODEV;
goto err;
}
i++;
ret = va1j5jf8007s_prepare(fe[i - 2]);
if (ret < 0)
goto err;
ret = va1j5jf8007t_prepare(fe[i - 1]);
if (ret < 0)
goto err;
} while (i < 4);
do {
ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
if (ret < 0)
goto err;
} while (++j < 4);
return 0;
err:
while (i-- > j)
fe[i]->ops.release(fe[i]);
while (j--)
dvb_unregister_frontend(fe[j]);
return ret;
}
static void pt1_i2c_emit(struct pt1 *pt1, int addr, int busy, int read_enable,
int clock, int data, int next_addr)
{
pt1_write_reg(pt1, 4, addr << 18 | busy << 13 | read_enable << 12 |
!clock << 11 | !data << 10 | next_addr);
}
static void pt1_i2c_write_bit(struct pt1 *pt1, int addr, int *addrp, int data)
{
pt1_i2c_emit(pt1, addr, 1, 0, 0, data, addr + 1);
pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, data, addr + 2);
pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, data, addr + 3);
*addrp = addr + 3;
}
static void pt1_i2c_read_bit(struct pt1 *pt1, int addr, int *addrp)
{
pt1_i2c_emit(pt1, addr, 1, 0, 0, 1, addr + 1);
pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 1, addr + 2);
pt1_i2c_emit(pt1, addr + 2, 1, 1, 1, 1, addr + 3);
pt1_i2c_emit(pt1, addr + 3, 1, 0, 0, 1, addr + 4);
*addrp = addr + 4;
}
static void pt1_i2c_write_byte(struct pt1 *pt1, int addr, int *addrp, int data)
{
int i;
for (i = 0; i < 8; i++)
pt1_i2c_write_bit(pt1, addr, &addr, data >> (7 - i) & 1);
pt1_i2c_write_bit(pt1, addr, &addr, 1);
*addrp = addr;
}
static void pt1_i2c_read_byte(struct pt1 *pt1, int addr, int *addrp, int last)
{
int i;
for (i = 0; i < 8; i++)
pt1_i2c_read_bit(pt1, addr, &addr);
pt1_i2c_write_bit(pt1, addr, &addr, last);
*addrp = addr;
}
static void pt1_i2c_prepare(struct pt1 *pt1, int addr, int *addrp)
{
pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, 0, addr + 3);
*addrp = addr + 3;
}
static void
pt1_i2c_write_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
int i;
pt1_i2c_prepare(pt1, addr, &addr);
pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1);
for (i = 0; i < msg->len; i++)
pt1_i2c_write_byte(pt1, addr, &addr, msg->buf[i]);
*addrp = addr;
}
static void
pt1_i2c_read_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
int i;
pt1_i2c_prepare(pt1, addr, &addr);
pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1 | 1);
for (i = 0; i < msg->len; i++)
pt1_i2c_read_byte(pt1, addr, &addr, i == msg->len - 1);
*addrp = addr;
}
static int pt1_i2c_end(struct pt1 *pt1, int addr)
{
pt1_i2c_emit(pt1, addr, 1, 0, 0, 0, addr + 1);
pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
pt1_i2c_emit(pt1, addr + 2, 1, 0, 1, 1, 0);
pt1_write_reg(pt1, 0, 0x00000004);
do {
if (signal_pending(current))
return -EINTR;
schedule_timeout_interruptible((HZ + 999) / 1000);
} while (pt1_read_reg(pt1, 0) & 0x00000080);
return 0;
}
static void pt1_i2c_begin(struct pt1 *pt1, int *addrp)
{
int addr;
addr = 0;
pt1_i2c_emit(pt1, addr, 0, 0, 1, 1, addr /* itself */);
addr = addr + 1;
if (!pt1->i2c_running) {
pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
addr = addr + 2;
pt1->i2c_running = 1;
}
*addrp = addr;
}
static int pt1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct pt1 *pt1;
int i;
struct i2c_msg *msg, *next_msg;
int addr, ret;
u16 len;
u32 word;
pt1 = i2c_get_adapdata(adap);
for (i = 0; i < num; i++) {
msg = &msgs[i];
if (msg->flags & I2C_M_RD)
return -ENOTSUPP;
if (i + 1 < num)
next_msg = &msgs[i + 1];
else
next_msg = NULL;
if (next_msg && next_msg->flags & I2C_M_RD) {
i++;
len = next_msg->len;
if (len > 4)
return -ENOTSUPP;
pt1_i2c_begin(pt1, &addr);
pt1_i2c_write_msg(pt1, addr, &addr, msg);
pt1_i2c_read_msg(pt1, addr, &addr, next_msg);
ret = pt1_i2c_end(pt1, addr);
if (ret < 0)
return ret;
word = pt1_read_reg(pt1, 2);
while (len--) {
next_msg->buf[len] = word;
word >>= 8;
}
} else {
pt1_i2c_begin(pt1, &addr);
pt1_i2c_write_msg(pt1, addr, &addr, msg);
ret = pt1_i2c_end(pt1, addr);
if (ret < 0)
return ret;
}
}
return num;
}
static u32 pt1_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C;
}
static const struct i2c_algorithm pt1_i2c_algo = {
.master_xfer = pt1_i2c_xfer,
.functionality = pt1_i2c_func,
};
static void pt1_i2c_wait(struct pt1 *pt1)
{
int i;
for (i = 0; i < 128; i++)
pt1_i2c_emit(pt1, 0, 0, 0, 1, 1, 0);
}
static void pt1_i2c_init(struct pt1 *pt1)
{
int i;
for (i = 0; i < 1024; i++)
pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}
static void pt1_remove(struct pci_dev *pdev)
{
struct pt1 *pt1;
void __iomem *regs;
pt1 = pci_get_drvdata(pdev);
regs = pt1->regs;
if (pt1->kthread)
kthread_stop(pt1->kthread);
pt1_cleanup_tables(pt1);
pt1_cleanup_frontends(pt1);
pt1_disable_ram(pt1);
pt1->power = 0;
pt1->reset = 1;
pt1_update_power(pt1);
pt1_cleanup_adapters(pt1);
i2c_del_adapter(&pt1->i2c_adap);
pci_set_drvdata(pdev, NULL);
kfree(pt1);
pci_iounmap(pdev, regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret;
void __iomem *regs;
struct pt1 *pt1;
struct i2c_adapter *i2c_adap;
ret = pci_enable_device(pdev);
if (ret < 0)
goto err;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret < 0)
goto err_pci_disable_device;
pci_set_master(pdev);
ret = pci_request_regions(pdev, DRIVER_NAME);
if (ret < 0)
goto err_pci_disable_device;
regs = pci_iomap(pdev, 0, 0);
if (!regs) {
ret = -EIO;
goto err_pci_release_regions;
}
pt1 = kzalloc(sizeof(struct pt1), GFP_KERNEL);
if (!pt1) {
ret = -ENOMEM;
goto err_pci_iounmap;
}
mutex_init(&pt1->lock);
pt1->pdev = pdev;
pt1->regs = regs;
pci_set_drvdata(pdev, pt1);
ret = pt1_init_adapters(pt1);
if (ret < 0)
goto err_kfree;
pt1->power = 0;
pt1->reset = 1;
pt1_update_power(pt1);
i2c_adap = &pt1->i2c_adap;
i2c_adap->algo = &pt1_i2c_algo;
i2c_adap->algo_data = NULL;
i2c_adap->dev.parent = &pdev->dev;
strcpy(i2c_adap->name, DRIVER_NAME);
i2c_set_adapdata(i2c_adap, pt1);
ret = i2c_add_adapter(i2c_adap);
if (ret < 0)
goto err_pt1_cleanup_adapters;
pt1_i2c_init(pt1);
pt1_i2c_wait(pt1);
ret = pt1_sync(pt1);
if (ret < 0)
goto err_i2c_del_adapter;
pt1_identify(pt1);
ret = pt1_unlock(pt1);
if (ret < 0)
goto err_i2c_del_adapter;
ret = pt1_reset_pci(pt1);
if (ret < 0)
goto err_i2c_del_adapter;
ret = pt1_reset_ram(pt1);
if (ret < 0)
goto err_i2c_del_adapter;
ret = pt1_enable_ram(pt1);
if (ret < 0)
goto err_i2c_del_adapter;
pt1_init_streams(pt1);
pt1->power = 1;
pt1_update_power(pt1);
schedule_timeout_uninterruptible((HZ + 49) / 50);
pt1->reset = 0;
pt1_update_power(pt1);
schedule_timeout_uninterruptible((HZ + 999) / 1000);
ret = pt1_init_frontends(pt1);
if (ret < 0)
goto err_pt1_disable_ram;
ret = pt1_init_tables(pt1);
if (ret < 0)
goto err_pt1_cleanup_frontends;
return 0;
err_pt1_cleanup_frontends:
pt1_cleanup_frontends(pt1);
err_pt1_disable_ram:
pt1_disable_ram(pt1);
pt1->power = 0;
pt1->reset = 1;
pt1_update_power(pt1);
err_i2c_del_adapter:
i2c_del_adapter(i2c_adap);
err_pt1_cleanup_adapters:
pt1_cleanup_adapters(pt1);
err_kfree:
pci_set_drvdata(pdev, NULL);
kfree(pt1);
err_pci_iounmap:
pci_iounmap(pdev, regs);
err_pci_release_regions:
pci_release_regions(pdev);
err_pci_disable_device:
pci_disable_device(pdev);
err:
return ret;
}
static struct pci_device_id pt1_id_table[] = {
{ PCI_DEVICE(0x10ee, 0x211a) },
{ PCI_DEVICE(0x10ee, 0x222a) },
{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);
static struct pci_driver pt1_driver = {
.name = DRIVER_NAME,
.probe = pt1_probe,
.remove = pt1_remove,
.id_table = pt1_id_table,
};
static int __init pt1_init(void)
{
return pci_register_driver(&pt1_driver);
}
static void __exit pt1_cleanup(void)
{
pci_unregister_driver(&pt1_driver);
}
module_init(pt1_init);
module_exit(pt1_cleanup);
MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Renzo-Olivares/BAMF_android_kernel_htc_msm8660 | drivers/s390/cio/css.c | 2515 | 29170 | /*
* driver for channel subsystem
*
* Copyright IBM Corp. 2002, 2010
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
int css_init_done = 0;
int max_ssid;
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
static struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
struct subchannel_id schid;
int ret;
init_subchannel_id(&schid);
ret = -ENODEV;
do {
do {
ret = fn(schid, data);
if (ret)
break;
} while (schid.sch_no++ < __MAX_SUBCHANNEL);
schid.sch_no = 0;
} while (schid.ssid++ < max_ssid);
return ret;
}
struct cb_data {
void *data;
struct idset *set;
int (*fn_known_sch)(struct subchannel *, void *);
int (*fn_unknown_sch)(struct subchannel_id, void *);
};
static int call_fn_known_sch(struct device *dev, void *data)
{
struct subchannel *sch = to_subchannel(dev);
struct cb_data *cb = data;
int rc = 0;
idset_sch_del(cb->set, sch->schid);
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
return rc;
}
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
struct cb_data *cb = data;
int rc = 0;
if (idset_sch_contains(cb->set, schid))
rc = cb->fn_unknown_sch(schid, cb->data);
return rc;
}
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
struct cb_data *cb = data;
struct subchannel *sch;
int rc = 0;
sch = get_subchannel_by_schid(schid);
if (sch) {
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
put_device(&sch->dev);
} else {
if (cb->fn_unknown_sch)
rc = cb->fn_unknown_sch(schid, cb->data);
}
return rc;
}
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data)
{
struct cb_data cb;
int rc;
cb.data = data;
cb.fn_known_sch = fn_known;
cb.fn_unknown_sch = fn_unknown;
cb.set = idset_sch_new();
if (!cb.set)
/* fall back to brute force scanning in case of oom */
return for_each_subchannel(call_fn_all_sch, &cb);
idset_fill(cb.set);
/* Process registered subchannels. */
rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
if (rc)
goto out;
/* Process unregistered subchannels. */
if (fn_unknown)
rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
idset_free(cb.set);
return rc;
}
static void css_sch_todo(struct work_struct *work);
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
struct subchannel *sch;
int ret;
sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
if (sch == NULL)
return ERR_PTR(-ENOMEM);
ret = cio_validate_subchannel (sch, schid);
if (ret < 0) {
kfree(sch);
return ERR_PTR(ret);
}
INIT_WORK(&sch->todo_work, css_sch_todo);
return sch;
}
static void
css_subchannel_release(struct device *dev)
{
struct subchannel *sch;
sch = to_subchannel(dev);
if (!cio_is_console(sch->schid)) {
/* Reset intparm to zeroes. */
sch->config.intparm = 0;
cio_commit_config(sch);
kfree(sch->lock);
kfree(sch);
}
}
static int css_sch_device_register(struct subchannel *sch)
{
int ret;
mutex_lock(&sch->reg_mutex);
dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
sch->schid.sch_no);
ret = device_register(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
}
/**
* css_sch_device_unregister - unregister a subchannel
* @sch: subchannel to be unregistered
*/
void css_sch_device_unregister(struct subchannel *sch)
{
mutex_lock(&sch->reg_mutex);
if (device_is_registered(&sch->dev))
device_unregister(&sch->dev);
mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void css_sch_todo(struct work_struct *work)
{
struct subchannel *sch;
enum sch_todo todo;
sch = container_of(work, struct subchannel, todo_work);
/* Find out todo. */
spin_lock_irq(sch->lock);
todo = sch->todo;
CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
sch->schid.sch_no, todo);
sch->todo = SCH_TODO_NOTHING;
spin_unlock_irq(sch->lock);
/* Perform todo. */
if (todo == SCH_TODO_UNREG)
css_sch_device_unregister(sch);
/* Release workqueue ref. */
put_device(&sch->dev);
}
/**
* css_sched_sch_todo - schedule a subchannel operation
* @sch: subchannel
* @todo: todo
*
* Schedule the operation identified by @todo to be performed on the slow path
* workqueue. Do nothing if another operation with higher priority is already
* scheduled. Needs to be called with subchannel lock held.
*/
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
sch->schid.ssid, sch->schid.sch_no, todo);
if (sch->todo >= todo)
return;
/* Get workqueue ref. */
if (!get_device(&sch->dev))
return;
sch->todo = todo;
if (!queue_work(cio_work_q, &sch->todo_work)) {
/* Already queued, release workqueue ref. */
put_device(&sch->dev);
}
}
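/*
 * Illustrative usage sketch (not part of the original css.c): per the
 * kernel-doc above, callers hold the subchannel lock and pass one of the
 * sch_todo values handled by css_sch_todo(), e.g.
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_UNREG);
 *	spin_unlock_irq(sch->lock);
 *
 * Anything beyond these in-file names is an assumption, not driver code.
 */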
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
int mask;
memset(ssd, 0, sizeof(struct chsc_ssd_info));
ssd->path_mask = pmcw->pim;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (pmcw->pim & mask) {
chp_id_init(&ssd->chpid[i]);
ssd->chpid[i].id = pmcw->chpid[i];
}
}
}
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
int i;
int mask;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (ssd->path_mask & mask)
if (!chp_is_registered(ssd->chpid[i]))
chp_new(ssd->chpid[i]);
}
}
void css_update_ssd_info(struct subchannel *sch)
{
int ret;
if (cio_is_console(sch->schid)) {
/* Console is initialized too early for functions requiring
* memory allocation. */
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
} else {
ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
if (ret)
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
ssd_register_chpids(&sch->ssd_info);
}
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
return sprintf(buf, "%01x\n", sch->st);
}
static DEVICE_ATTR(type, 0444, type_show, NULL);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
return sprintf(buf, "css:t%01X\n", sch->st);
}
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static struct attribute *subch_attrs[] = {
&dev_attr_type.attr,
&dev_attr_modalias.attr,
NULL,
};
static struct attribute_group subch_attr_group = {
.attrs = subch_attrs,
};
static const struct attribute_group *default_subch_attr_groups[] = {
&subch_attr_group,
NULL,
};
static int css_register_subchannel(struct subchannel *sch)
{
int ret;
/* Initialize the subchannel structure */
sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
sch->dev.groups = default_subch_attr_groups;
/*
* We don't want to generate uevents for I/O subchannels that don't
* have a working ccw device behind them since they will be
* unregistered before they can be used anyway, so we delay the add
* uevent until after device recognition was successful.
* Note that we suppress the uevent for all subchannel types;
* the subchannel driver can decide itself when it wants to inform
* userspace of its existence.
*/
dev_set_uevent_suppress(&sch->dev, 1);
css_update_ssd_info(sch);
/* make it known to the system */
ret = css_sch_device_register(sch);
if (ret) {
CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
return ret;
}
if (!sch->driver) {
/*
* No driver matched. Generate the uevent now so that
* a fitting driver module may be loaded based on the
* modalias.
*/
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
return ret;
}
int css_probe_device(struct subchannel_id schid)
{
int ret;
struct subchannel *sch;
if (cio_is_console(schid))
sch = cio_get_console_subchannel();
else {
sch = css_alloc_subchannel(schid);
if (IS_ERR(sch))
return PTR_ERR(sch);
}
ret = css_register_subchannel(sch);
if (ret) {
if (!cio_is_console(schid))
put_device(&sch->dev);
}
return ret;
}
static int
check_subchannel(struct device * dev, void * data)
{
struct subchannel *sch;
struct subchannel_id *schid = data;
sch = to_subchannel(dev);
return schid_equal(&sch->schid, schid);
}
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
struct device *dev;
dev = bus_find_device(&css_bus_type, NULL,
&schid, check_subchannel);
return dev ? to_subchannel(dev) : NULL;
}
/**
* css_sch_is_valid() - check if a subchannel is valid
* @schib: subchannel information block for the subchannel
*/
int css_sch_is_valid(struct schib *schib)
{
if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
return 0;
if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
struct schib schib;
if (!slow) {
/* Will be done on the slow path. */
return -EAGAIN;
}
if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
/* Unusable - ignore. */
return 0;
}
CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
schid.sch_no);
return css_probe_device(schid);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
int ret = 0;
if (sch->driver) {
if (sch->driver->sch_event)
ret = sch->driver->sch_event(sch, slow);
else
dev_dbg(&sch->dev,
"Got subchannel machine check but "
"no sch_event handler provided.\n");
}
if (ret != 0 && ret != -EAGAIN) {
CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
}
return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
struct subchannel *sch;
int ret;
sch = get_subchannel_by_schid(schid);
if (sch) {
ret = css_evaluate_known_subchannel(sch, slow);
put_device(&sch->dev);
} else
ret = css_evaluate_new_subchannel(schid, slow);
if (ret == -EAGAIN)
css_schedule_eval(schid);
}
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;
static int __init slow_subchannel_init(void)
{
spin_lock_init(&slow_subchannel_lock);
atomic_set(&css_eval_scheduled, 0);
init_waitqueue_head(&css_eval_wq);
slow_subchannel_set = idset_sch_new();
if (!slow_subchannel_set) {
CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
return -ENOMEM;
}
return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
int eval;
int rc;
spin_lock_irq(&slow_subchannel_lock);
eval = idset_sch_contains(slow_subchannel_set, sch->schid);
idset_sch_del(slow_subchannel_set, sch->schid);
spin_unlock_irq(&slow_subchannel_lock);
if (eval) {
rc = css_evaluate_known_subchannel(sch, 1);
if (rc == -EAGAIN)
css_schedule_eval(sch->schid);
}
return 0;
}
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
int eval;
int rc = 0;
spin_lock_irq(&slow_subchannel_lock);
eval = idset_sch_contains(slow_subchannel_set, schid);
idset_sch_del(slow_subchannel_set, schid);
spin_unlock_irq(&slow_subchannel_lock);
if (eval) {
rc = css_evaluate_new_subchannel(schid, 1);
switch (rc) {
case -EAGAIN:
css_schedule_eval(schid);
rc = 0;
break;
case -ENXIO:
case -ENOMEM:
case -EIO:
/* These should abort looping */
break;
default:
rc = 0;
}
}
return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
unsigned long flags;
CIO_TRACE_EVENT(4, "slowpath");
for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
NULL);
spin_lock_irqsave(&slow_subchannel_lock, flags);
if (idset_is_empty(slow_subchannel_set)) {
atomic_set(&css_eval_scheduled, 0);
wake_up(&css_eval_wq);
}
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
{
unsigned long flags;
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid);
atomic_set(&css_eval_scheduled, 1);
queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
void css_schedule_eval_all(void)
{
unsigned long flags;
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set);
atomic_set(&css_eval_scheduled, 1);
queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static int __unset_registered(struct device *dev, void *data)
{
struct idset *set = data;
struct subchannel *sch = to_subchannel(dev);
idset_sch_del(set, sch->schid);
return 0;
}
static void css_schedule_eval_all_unreg(void)
{
unsigned long flags;
struct idset *unreg_set;
/* Find unregistered subchannels. */
unreg_set = idset_sch_new();
if (!unreg_set) {
/* Fallback. */
css_schedule_eval_all();
return;
}
idset_fill(unreg_set);
bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
/* Apply to slow_subchannel_set. */
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_add_set(slow_subchannel_set, unreg_set);
atomic_set(&css_eval_scheduled, 1);
queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
idset_free(unreg_set);
}
void css_wait_for_slow_path(void)
{
flush_workqueue(cio_work_q);
}
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
* Called from the machine check handler for subchannel report words.
*/
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
struct subchannel_id mchk_schid;
struct subchannel *sch;
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
if (crw1)
CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
crw1->anc, crw1->erc, crw1->rsid);
init_subchannel_id(&mchk_schid);
mchk_schid.sch_no = crw0->rsid;
if (crw1)
mchk_schid.ssid = (crw1->rsid >> 4) & 3;
if (crw0->erc == CRW_ERC_PMOD) {
sch = get_subchannel_by_schid(mchk_schid);
if (sch) {
css_update_ssd_info(sch);
put_device(&sch->dev);
}
}
/*
* Since we are always presented with IPI in the CRW, we have to
* use stsch() to find out if the subchannel in question has come
* or gone.
*/
css_evaluate_subchannel(mchk_schid, 0);
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
struct cpuid cpu_id;
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
} else {
#ifdef CONFIG_SMP
css->global_pgid.pgid_high.cpu_addr = stap();
#else
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
get_cpu_id(&cpu_id);
css->global_pgid.cpu_id = cpu_id.ident;
css->global_pgid.cpu_model = cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
static void
channel_subsystem_release(struct device *dev)
{
struct channel_subsystem *css;
css = to_css(dev);
mutex_destroy(&css->mutex);
if (css->pseudo_subchannel) {
/* Implies that it has been generated but never registered. */
css_subchannel_release(&css->pseudo_subchannel->dev);
css->pseudo_subchannel = NULL;
}
kfree(css);
}
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_subsystem *css = to_css(dev);
int ret;
if (!css)
return 0;
mutex_lock(&css->mutex);
ret = sprintf(buf, "%x\n", css->cm_enabled);
mutex_unlock(&css->mutex);
return ret;
}
static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct channel_subsystem *css = to_css(dev);
int ret;
unsigned long val;
ret = strict_strtoul(buf, 16, &val);
if (ret)
return ret;
mutex_lock(&css->mutex);
switch (val) {
case 0:
ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
break;
case 1:
ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
break;
default:
ret = -EINVAL;
}
mutex_unlock(&css->mutex);
return ret < 0 ? ret : count;
}
static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
static int __init setup_css(int nr)
{
u32 tod_high;
int ret;
struct channel_subsystem *css;
css = channel_subsystems[nr];
memset(css, 0, sizeof(struct channel_subsystem));
css->pseudo_subchannel =
kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
if (!css->pseudo_subchannel)
return -ENOMEM;
css->pseudo_subchannel->dev.parent = &css->device;
css->pseudo_subchannel->dev.release = css_subchannel_release;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
mutex_init(&css->pseudo_subchannel->reg_mutex);
ret = cio_create_sch_lock(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
return ret;
}
mutex_init(&css->mutex);
css->valid = 1;
css->cssid = nr;
dev_set_name(&css->device, "css%x", nr);
css->device.release = channel_subsystem_release;
tod_high = (u32) (get_clock() >> 32);
css_generate_pgid(css, tod_high);
return 0;
}
static int css_reboot_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
int ret, i;
ret = NOTIFY_DONE;
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = channel_subsystems[i];
mutex_lock(&css->mutex);
if (css->cm_enabled)
if (chsc_secm(css, 0))
ret = NOTIFY_BAD;
mutex_unlock(&css->mutex);
}
return ret;
}
static struct notifier_block css_reboot_notifier = {
.notifier_call = css_reboot_event,
};
/*
* Since the css devices are neither on a bus nor have a class
* nor have a special device type, we cannot stop/restart channel
* path measurements via the normal suspend/resume callbacks, but have
* to use notifiers.
*/
static int css_power_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
int ret, i;
switch (event) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
ret = NOTIFY_DONE;
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = channel_subsystems[i];
mutex_lock(&css->mutex);
if (!css->cm_enabled) {
mutex_unlock(&css->mutex);
continue;
}
if (__chsc_do_secm(css, 0))
ret = NOTIFY_BAD;
mutex_unlock(&css->mutex);
}
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
ret = NOTIFY_DONE;
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = channel_subsystems[i];
mutex_lock(&css->mutex);
if (!css->cm_enabled) {
mutex_unlock(&css->mutex);
continue;
}
if (__chsc_do_secm(css, 1))
ret = NOTIFY_BAD;
mutex_unlock(&css->mutex);
}
/* search for subchannels, which appeared during hibernation */
css_schedule_reprobe();
break;
default:
ret = NOTIFY_DONE;
}
return ret;
}
static struct notifier_block css_power_notifier = {
.notifier_call = css_power_event,
};
/*
* Now that the driver core is running, we can setup our channel subsystem.
* The struct subchannel's are created during probing (except for the
* static console subchannel).
*/
static int __init css_bus_init(void)
{
int ret, i;
ret = chsc_init();
if (ret)
return ret;
chsc_determine_css_characteristics();
/* Try to enable MSS. */
ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
if (ret)
max_ssid = 0;
else /* Success. */
max_ssid = __MAX_SSID;
ret = slow_subchannel_init();
if (ret)
goto out;
ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
if (ret)
goto out;
if ((ret = bus_register(&css_bus_type)))
goto out;
/* Setup css structure. */
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
if (!css) {
ret = -ENOMEM;
goto out_unregister;
}
channel_subsystems[i] = css;
ret = setup_css(i);
if (ret) {
kfree(channel_subsystems[i]);
goto out_unregister;
}
ret = device_register(&css->device);
if (ret) {
put_device(&css->device);
goto out_unregister;
}
if (css_chsc_characteristics.secm) {
ret = device_create_file(&css->device,
&dev_attr_cm_enable);
if (ret)
goto out_device;
}
ret = device_register(&css->pseudo_subchannel->dev);
if (ret) {
put_device(&css->pseudo_subchannel->dev);
goto out_file;
}
}
ret = register_reboot_notifier(&css_reboot_notifier);
if (ret)
goto out_unregister;
ret = register_pm_notifier(&css_power_notifier);
if (ret) {
unregister_reboot_notifier(&css_reboot_notifier);
goto out_unregister;
}
css_init_done = 1;
/* Enable default isc for I/O subchannels. */
isc_register(IO_SCH_ISC);
return 0;
out_file:
if (css_chsc_characteristics.secm)
device_remove_file(&channel_subsystems[i]->device,
&dev_attr_cm_enable);
out_device:
device_unregister(&channel_subsystems[i]->device);
out_unregister:
while (i > 0) {
struct channel_subsystem *css;
i--;
css = channel_subsystems[i];
device_unregister(&css->pseudo_subchannel->dev);
css->pseudo_subchannel = NULL;
if (css_chsc_characteristics.secm)
device_remove_file(&css->device,
&dev_attr_cm_enable);
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
out:
crw_unregister_handler(CRW_RSC_SCH);
idset_free(slow_subchannel_set);
chsc_init_cleanup();
pr_alert("The CSS device driver initialization failed with "
"errno=%d\n", ret);
return ret;
}
static void __init css_bus_cleanup(void)
{
struct channel_subsystem *css;
int i;
for (i = 0; i <= __MAX_CSSID; i++) {
css = channel_subsystems[i];
device_unregister(&css->pseudo_subchannel->dev);
css->pseudo_subchannel = NULL;
if (css_chsc_characteristics.secm)
device_remove_file(&css->device, &dev_attr_cm_enable);
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
crw_unregister_handler(CRW_RSC_SCH);
idset_free(slow_subchannel_set);
chsc_init_cleanup();
isc_unregister(IO_SCH_ISC);
}
static int __init channel_subsystem_init(void)
{
int ret;
ret = css_bus_init();
if (ret)
return ret;
cio_work_q = create_singlethread_workqueue("cio");
if (!cio_work_q) {
ret = -ENOMEM;
goto out_bus;
}
ret = io_subchannel_init();
if (ret)
goto out_wq;
return ret;
out_wq:
destroy_workqueue(cio_work_q);
out_bus:
css_bus_cleanup();
return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
struct css_driver *cssdrv = to_cssdriver(drv);
if (cssdrv->settle)
return cssdrv->settle();
return 0;
}
int css_complete_work(void)
{
int ret;
/* Wait for the evaluation of subchannels to finish. */
ret = wait_event_interruptible(css_eval_wq,
atomic_read(&css_eval_scheduled) == 0);
if (ret)
return -EINTR;
flush_workqueue(cio_work_q);
/* Wait for the subchannel type specific initialization to finish */
return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
/*
* Wait for the initialization of devices to finish, to make sure we are
* done with our setup if the search for the root device starts.
*/
static int __init channel_subsystem_init_sync(void)
{
/* Start initial subchannel evaluation. */
css_schedule_eval_all();
css_complete_work();
return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
void channel_subsystem_reinit(void)
{
struct channel_path *chp;
struct chp_id chpid;
chsc_enable_facility(CHSC_SDA_OC_MSS);
chp_id_for_each(&chpid) {
chp = chpid_to_chp(chpid);
if (!chp)
continue;
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
}
}
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int ret;
/* Handle pending CRW's. */
crw_wait_for_channel_report();
ret = css_complete_work();
return ret ? ret : count;
}
static const struct file_operations cio_settle_proc_fops = {
.open = nonseekable_open,
.write = cio_settle_write,
.llseek = no_llseek,
};
static int __init cio_settle_init(void)
{
struct proc_dir_entry *entry;
entry = proc_create("cio_settle", S_IWUSR, NULL,
&cio_settle_proc_fops);
if (!entry)
return -ENOMEM;
return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
int sch_is_pseudo_sch(struct subchannel *sch)
{
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *driver = to_cssdriver(drv);
struct css_device_id *id;
for (id = driver->subchannel_type; id->match_flags; id++) {
if (sch->st == id->type)
return 1;
}
return 0;
}
static int css_probe(struct device *dev)
{
struct subchannel *sch;
int ret;
sch = to_subchannel(dev);
sch->driver = to_cssdriver(dev->driver);
ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
if (ret)
sch->driver = NULL;
return ret;
}
static int css_remove(struct device *dev)
{
struct subchannel *sch;
int ret;
sch = to_subchannel(dev);
ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
sch->driver = NULL;
return ret;
}
static void css_shutdown(struct device *dev)
{
struct subchannel *sch;
sch = to_subchannel(dev);
if (sch->driver && sch->driver->shutdown)
sch->driver->shutdown(sch);
}
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct subchannel *sch = to_subchannel(dev);
int ret;
ret = add_uevent_var(env, "ST=%01X", sch->st);
if (ret)
return ret;
ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
return ret;
}
static int css_pm_prepare(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
if (mutex_is_locked(&sch->reg_mutex))
return -EAGAIN;
if (!sch->dev.driver)
return 0;
drv = to_cssdriver(sch->dev.driver);
/* Notify drivers that they may not register children. */
return drv->prepare ? drv->prepare(sch) : 0;
}
static void css_pm_complete(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
if (!sch->dev.driver)
return;
drv = to_cssdriver(sch->dev.driver);
if (drv->complete)
drv->complete(sch);
}
static int css_pm_freeze(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
if (!sch->dev.driver)
return 0;
drv = to_cssdriver(sch->dev.driver);
return drv->freeze ? drv->freeze(sch) : 0;
}
static int css_pm_thaw(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
if (!sch->dev.driver)
return 0;
drv = to_cssdriver(sch->dev.driver);
return drv->thaw ? drv->thaw(sch) : 0;
}
static int css_pm_restore(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
css_update_ssd_info(sch);
if (!sch->dev.driver)
return 0;
drv = to_cssdriver(sch->dev.driver);
return drv->restore ? drv->restore(sch) : 0;
}
static const struct dev_pm_ops css_pm_ops = {
.prepare = css_pm_prepare,
.complete = css_pm_complete,
.freeze = css_pm_freeze,
.thaw = css_pm_thaw,
.restore = css_pm_restore,
};
static struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
.remove = css_remove,
.shutdown = css_shutdown,
.uevent = css_uevent,
.pm = &css_pm_ops,
};
/**
* css_driver_register - register a css driver
* @cdrv: css driver to register
*
* This is mainly a wrapper around driver_register that sets name
* and bus_type in the embedded struct device_driver correctly.
*/
int css_driver_register(struct css_driver *cdrv)
{
cdrv->drv.bus = &css_bus_type;
return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
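/*
 * Illustrative usage sketch (not part of the original file): a subchannel
 * driver fills in a struct css_driver and registers it; the callback names
 * mirror those dispatched elsewhere in this file, while the "my_" prefixed
 * identifiers are hypothetical.
 *
 *	static struct css_driver my_css_driver = {
 *		.drv = {
 *			.name  = "my_subchannel",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = my_subchannel_ids,
 *		.probe           = my_probe,
 *		.remove          = my_remove,
 *		.sch_event       = my_sch_event,
 *	};
 *
 *	ret = css_driver_register(&my_css_driver);
 *	...
 *	css_driver_unregister(&my_css_driver);
 */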
/**
* css_driver_unregister - unregister a css driver
* @cdrv: css driver to unregister
*
* This is a wrapper around driver_unregister.
*/
void css_driver_unregister(struct css_driver *cdrv)
{
driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
MODULE_LICENSE("GPL");
| gpl-2.0 |
RealVNC/android-kernel-omap | drivers/isdn/hisax/arcofi.c | 3027 | 3675 | /* $Id: arcofi.c,v 1.14.2.3 2004/01/13 14:31:24 keil Exp $
*
* Control of the ARCOFI 2165
*
* Author Karsten Keil
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/sched.h>
#include "hisax.h"
#include "isdnl1.h"
#include "isac.h"
#include "arcofi.h"
#define ARCOFI_TIMER_VALUE 20
static void
add_arcofi_timer(struct IsdnCardState *cs) {
if (test_and_set_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
del_timer(&cs->dc.isac.arcofitimer);
}
init_timer(&cs->dc.isac.arcofitimer);
cs->dc.isac.arcofitimer.expires = jiffies + ((ARCOFI_TIMER_VALUE * HZ)/1000);
add_timer(&cs->dc.isac.arcofitimer);
}
static void
send_arcofi(struct IsdnCardState *cs) {
add_arcofi_timer(cs);
cs->dc.isac.mon_txp = 0;
cs->dc.isac.mon_txc = cs->dc.isac.arcofi_list->len;
memcpy(cs->dc.isac.mon_tx, cs->dc.isac.arcofi_list->msg, cs->dc.isac.mon_txc);
switch(cs->dc.isac.arcofi_bc) {
case 0: break;
case 1: cs->dc.isac.mon_tx[1] |= 0x40;
break;
default: break;
}
cs->dc.isac.mocr &= 0x0f;
cs->dc.isac.mocr |= 0xa0;
cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
(void) cs->readisac(cs, ISAC_MOSR);
cs->writeisac(cs, ISAC_MOX1, cs->dc.isac.mon_tx[cs->dc.isac.mon_txp++]);
cs->dc.isac.mocr |= 0x10;
cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
}
int
arcofi_fsm(struct IsdnCardState *cs, int event, void *data) {
if (cs->debug & L1_DEB_MONITOR) {
debugl1(cs, "arcofi state %d event %d", cs->dc.isac.arcofi_state, event);
}
if (event == ARCOFI_TIMEOUT) {
cs->dc.isac.arcofi_state = ARCOFI_NOP;
test_and_set_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags);
wake_up(&cs->dc.isac.arcofi_wait);
return(1);
}
switch (cs->dc.isac.arcofi_state) {
case ARCOFI_NOP:
if (event == ARCOFI_START) {
cs->dc.isac.arcofi_list = data;
cs->dc.isac.arcofi_state = ARCOFI_TRANSMIT;
send_arcofi(cs);
}
break;
case ARCOFI_TRANSMIT:
if (event == ARCOFI_TX_END) {
if (cs->dc.isac.arcofi_list->receive) {
add_arcofi_timer(cs);
cs->dc.isac.arcofi_state = ARCOFI_RECEIVE;
} else {
if (cs->dc.isac.arcofi_list->next) {
cs->dc.isac.arcofi_list =
cs->dc.isac.arcofi_list->next;
send_arcofi(cs);
} else {
if (test_and_clear_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
del_timer(&cs->dc.isac.arcofitimer);
}
cs->dc.isac.arcofi_state = ARCOFI_NOP;
wake_up(&cs->dc.isac.arcofi_wait);
}
}
}
break;
case ARCOFI_RECEIVE:
if (event == ARCOFI_RX_END) {
if (cs->dc.isac.arcofi_list->next) {
cs->dc.isac.arcofi_list =
cs->dc.isac.arcofi_list->next;
cs->dc.isac.arcofi_state = ARCOFI_TRANSMIT;
send_arcofi(cs);
} else {
if (test_and_clear_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
del_timer(&cs->dc.isac.arcofitimer);
}
cs->dc.isac.arcofi_state = ARCOFI_NOP;
wake_up(&cs->dc.isac.arcofi_wait);
}
}
break;
default:
debugl1(cs, "Arcofi unknown state %x", cs->dc.isac.arcofi_state);
return(2);
}
return(0);
}
static void
arcofi_timer(struct IsdnCardState *cs) {
arcofi_fsm(cs, ARCOFI_TIMEOUT, NULL);
}
void
clear_arcofi(struct IsdnCardState *cs) {
if (test_and_clear_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
del_timer(&cs->dc.isac.arcofitimer);
}
}
void
init_arcofi(struct IsdnCardState *cs) {
cs->dc.isac.arcofitimer.function = (void *) arcofi_timer;
cs->dc.isac.arcofitimer.data = (long) cs;
init_timer(&cs->dc.isac.arcofitimer);
init_waitqueue_head(&cs->dc.isac.arcofi_wait);
test_and_set_bit(HW_ARCOFI, &cs->HW_Flags);
}
| gpl-2.0 |
crazyquark/linux | drivers/isdn/hardware/eicon/istream.c | 3283 | 6603 |
/*
*
Copyright (c) Eicon Networks, 2002.
*
This source file is supplied for the use with
Eicon Networks range of DIVA Server Adapters.
*
Eicon File Revision : 2.1
*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
*
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
*
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "platform.h"
#if defined(DIVA_ISTREAM) /* { */
#include "pc.h"
#include "pr_pc.h"
#include "di_defs.h"
#include "divasync.h"
#include "di.h"
#if !defined USE_EXTENDED_DEBUGS
#include "dimaint.h"
#else
#define dprintf
#endif
#include "dfifo.h"
int diva_istream_write (void* context,
int Id,
void* data,
int length,
int final,
byte usr1,
byte usr2);
int diva_istream_read (void* context,
int Id,
void* data,
int max_length,
int* final,
byte* usr1,
byte* usr2);
/* -------------------------------------------------------------------
Provides the iStream interface to the client
------------------------------------------------------------------- */
void diva_xdi_provide_istream_info (ADAPTER* a,
diva_xdi_stream_interface_t* pi) {
pi->provided_service = 0;
}
/* ------------------------------------------------------------------
Writes the data from the caller's buffer to the card's
stream interface.
If synchronous service was requested, the function
returns the amount of data written to the stream.
'final' indicates that the piece of data to be written is the
final part of a frame (needed only for structured data transfer)
return 0 if a zero length packet was written
return -1 if the stream is full
------------------------------------------------------------------ */
int diva_istream_write (void* context,
int Id,
void* data,
int length,
int final,
byte usr1,
byte usr2) {
ADAPTER* a = (ADAPTER*)context;
int written = 0, to_write = -1;
char tmp[4];
byte* data_ptr = (byte*)data;
for (;;) {
a->ram_in_dw (a,
#ifdef PLATFORM_GT_32BIT
ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id]),
#else
(void*)(a->tx_stream[Id] + a->tx_pos[Id]),
#endif
(dword*)&tmp[0],
1);
if (tmp[0] & DIVA_DFIFO_READY) { /* No free blocks more */
if (to_write < 0)
return (-1); /* was not able to write */
break; /* only part of message was written */
}
to_write = min(length, DIVA_DFIFO_DATA_SZ);
if (to_write) {
a->ram_out_buffer (a,
#ifdef PLATFORM_GT_32BIT
ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id]+4),
#else
(void*)(a->tx_stream[Id] + a->tx_pos[Id] + 4),
#endif
data_ptr,
(word)to_write);
length -= to_write;
written += to_write;
data_ptr += to_write;
}
tmp[1] = (char)to_write;
tmp[0] = (tmp[0] & DIVA_DFIFO_WRAP) |
DIVA_DFIFO_READY |
((!length && final) ? DIVA_DFIFO_LAST : 0);
if (tmp[0] & DIVA_DFIFO_LAST) {
tmp[2] = usr1;
tmp[3] = usr2;
}
a->ram_out_dw (a,
#ifdef PLATFORM_GT_32BIT
ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id]),
#else
(void*)(a->tx_stream[Id] + a->tx_pos[Id]),
#endif
(dword*)&tmp[0],
1);
if (tmp[0] & DIVA_DFIFO_WRAP) {
a->tx_pos[Id] = 0;
} else {
a->tx_pos[Id] += DIVA_DFIFO_STEP;
}
if (!length) {
break;
}
}
return (written);
}
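/* -------------------------------------------------------------------
   Usage sketch (illustrative only, not part of this driver). A caller
   queueing a complete frame would typically loop until all bytes have
   been accepted, treating -1 as "stream full, retry later" and a
   non-negative return as the number of bytes taken. The names frame,
   frame_len and retry_later below are placeholders:
   int sent = 0, rc;
   while (sent < frame_len) {
   rc = diva_istream_write(a, Id, frame + sent, frame_len - sent,
   1, 0, 0);
   if (rc < 0) { retry_later(); continue; }
   sent += rc;
   }
   Note that usr1/usr2 are only stored in the descriptor of the block
   that carries DIVA_DFIFO_LAST, i.e. the final block of the frame.
   ------------------------------------------------------------------- */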
/* -------------------------------------------------------------------
In case of SYNCHRONOUS service:
Does write data from the stream into the caller's buffer.
Does return the amount of data written to the buffer
Final flag is set on return if the last part of a structured frame
was received
return 0 if a zero length packet was received
return -1 if stream is empty
return -2 if the read buffer does not provide sufficient space
to accommodate the entire segment
max_length should be at least 68 bytes
------------------------------------------------------------------- */
int diva_istream_read (void* context,
int Id,
void* data,
int max_length,
int* final,
byte* usr1,
byte* usr2) {
ADAPTER* a = (ADAPTER*)context;
int read = 0, to_read = -1;
char tmp[4];
byte* data_ptr = (byte*)data;
*final = 0;
for (;;) {
a->ram_in_dw (a,
#ifdef PLATFORM_GT_32BIT
ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id]),
#else
(void*)(a->rx_stream[Id] + a->rx_pos[Id]),
#endif
(dword*)&tmp[0],
1);
if (tmp[1] > max_length) {
if (to_read < 0)
return (-2); /* was not able to read */
break;
}
if (!(tmp[0] & DIVA_DFIFO_READY)) {
if (to_read < 0)
return (-1); /* was not able to read */
break;
}
to_read = min(max_length, (int)tmp[1]);
if (to_read) {
a->ram_in_buffer(a,
#ifdef PLATFORM_GT_32BIT
ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id] + 4),
#else
(void*)(a->rx_stream[Id] + a->rx_pos[Id] + 4),
#endif
data_ptr,
(word)to_read);
max_length -= to_read;
read += to_read;
data_ptr += to_read;
}
if (tmp[0] & DIVA_DFIFO_LAST) {
*final = 1;
}
tmp[0] &= DIVA_DFIFO_WRAP;
a->ram_out_dw(a,
#ifdef PLATFORM_GT_32BIT
ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id]),
#else
(void*)(a->rx_stream[Id] + a->rx_pos[Id]),
#endif
(dword*)&tmp[0],
1);
if (tmp[0] & DIVA_DFIFO_WRAP) {
a->rx_pos[Id] = 0;
} else {
a->rx_pos[Id] += DIVA_DFIFO_STEP;
}
if (*final) {
if (usr1)
*usr1 = tmp[2];
if (usr2)
*usr2 = tmp[3];
break;
}
}
return (read);
}
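/* -------------------------------------------------------------------
   Reader-side notes (informational): -1 means no block is ready in
   the stream, -2 means the next ready block is larger than the
   remaining caller buffer (hence the "at least 68 bytes" advice
   above, presumably DIVA_DFIFO_DATA_SZ of payload plus the 4 byte
   block descriptor). usr1/usr2 are only filled in once the block
   flagged DIVA_DFIFO_LAST has been consumed, i.e. when *final is set.
   ------------------------------------------------------------------- */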
/* ---------------------------------------------------------------------
Does check if one of streams had caused interrupt and does
wake up corresponding application
--------------------------------------------------------------------- */
void pr_stream (ADAPTER * a) {
}
#endif /* } */
| gpl-2.0 |
spica234/HP-Krnl-2.6.32.9 | arch/powerpc/platforms/cell/beat.c | 4563 | 5680 | /*
* Simple routines for Celleb/Beat
*
* (C) Copyright 2006-2007 TOSHIBA CORPORATION
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/rtc.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/reboot.h>
#include <asm/hvconsole.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include "beat_wrapper.h"
#include "beat.h"
#include "beat_interrupt.h"
static int beat_pm_poweroff_flag;
void beat_restart(char *cmd)
{
beat_shutdown_logical_partition(!beat_pm_poweroff_flag);
}
void beat_power_off(void)
{
beat_shutdown_logical_partition(0);
}
u64 beat_halt_code = 0x1000000000000000UL;
EXPORT_SYMBOL(beat_halt_code);
void beat_halt(void)
{
beat_shutdown_logical_partition(beat_halt_code);
}
int beat_set_rtc_time(struct rtc_time *rtc_time)
{
u64 tim;
tim = mktime(rtc_time->tm_year+1900,
rtc_time->tm_mon+1, rtc_time->tm_mday,
rtc_time->tm_hour, rtc_time->tm_min, rtc_time->tm_sec);
if (beat_rtc_write(tim))
return -1;
return 0;
}
void beat_get_rtc_time(struct rtc_time *rtc_time)
{
u64 tim;
if (beat_rtc_read(&tim))
tim = 0;
to_tm(tim, rtc_time);
rtc_time->tm_year -= 1900;
rtc_time->tm_mon -= 1;
}
#define BEAT_NVRAM_SIZE 4096
ssize_t beat_nvram_read(char *buf, size_t count, loff_t *index)
{
unsigned int i;
unsigned long len;
char *p = buf;
if (*index >= BEAT_NVRAM_SIZE)
return -ENODEV;
i = *index;
if (i + count > BEAT_NVRAM_SIZE)
count = BEAT_NVRAM_SIZE - i;
for (; count != 0; count -= len) {
len = count;
if (len > BEAT_NVRW_CNT)
len = BEAT_NVRW_CNT;
if (beat_eeprom_read(i, len, p))
return -EIO;
p += len;
i += len;
}
*index = i;
return p - buf;
}
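/*
* Worked example (informational): with BEAT_NVRAM_SIZE = 4096, a
* request for 100 bytes at *index = 4090 is clamped to count = 6;
* the transfer then proceeds in pieces of at most BEAT_NVRW_CNT
* bytes per beat_eeprom_read() call, *index advances to 4096 and
* the number of bytes actually copied (6) is returned.
*/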
ssize_t beat_nvram_write(char *buf, size_t count, loff_t *index)
{
unsigned int i;
unsigned long len;
char *p = buf;
if (*index >= BEAT_NVRAM_SIZE)
return -ENODEV;
i = *index;
if (i + count > BEAT_NVRAM_SIZE)
count = BEAT_NVRAM_SIZE - i;
for (; count != 0; count -= len) {
len = count;
if (len > BEAT_NVRW_CNT)
len = BEAT_NVRW_CNT;
if (beat_eeprom_write(i, len, p))
return -EIO;
p += len;
i += len;
}
*index = i;
return p - buf;
}
ssize_t beat_nvram_get_size(void)
{
return BEAT_NVRAM_SIZE;
}
int beat_set_xdabr(unsigned long dabr)
{
if (beat_set_dabr(dabr, DABRX_KERNEL | DABRX_USER))
return -1;
return 0;
}
int64_t beat_get_term_char(u64 vterm, u64 *len, u64 *t1, u64 *t2)
{
u64 db[2];
s64 ret;
ret = beat_get_characters_from_console(vterm, len, (u8 *)db);
if (ret == 0) {
*t1 = db[0];
*t2 = db[1];
}
return ret;
}
EXPORT_SYMBOL(beat_get_term_char);
int64_t beat_put_term_char(u64 vterm, u64 len, u64 t1, u64 t2)
{
u64 db[2];
db[0] = t1;
db[1] = t2;
return beat_put_characters_to_console(vterm, len, (u8 *)db);
}
EXPORT_SYMBOL(beat_put_term_char);
void beat_power_save(void)
{
beat_pause(0);
}
#ifdef CONFIG_KEXEC
void beat_kexec_cpu_down(int crash, int secondary)
{
beatic_deinit_IRQ();
}
#endif
static irqreturn_t beat_power_event(int virq, void *arg)
{
printk(KERN_DEBUG "Beat: power button pressed\n");
beat_pm_poweroff_flag = 1;
ctrl_alt_del();
return IRQ_HANDLED;
}
static irqreturn_t beat_reset_event(int virq, void *arg)
{
printk(KERN_DEBUG "Beat: reset button pressed\n");
beat_pm_poweroff_flag = 0;
ctrl_alt_del();
return IRQ_HANDLED;
}
static struct beat_event_list {
const char *typecode;
irq_handler_t handler;
unsigned int virq;
} beat_event_list[] = {
{ "power", beat_power_event, 0 },
{ "reset", beat_reset_event, 0 },
};
static int __init beat_register_event(void)
{
u64 path[4], data[2];
int rc, i;
unsigned int virq;
for (i = 0; i < ARRAY_SIZE(beat_event_list); i++) {
struct beat_event_list *ev = &beat_event_list[i];
if (beat_construct_event_receive_port(data) != 0) {
printk(KERN_ERR "Beat: "
"cannot construct event receive port for %s\n",
ev->typecode);
return -EINVAL;
}
virq = irq_create_mapping(NULL, data[0]);
if (virq == NO_IRQ) {
printk(KERN_ERR "Beat: failed to get virtual IRQ"
" for event receive port for %s\n",
ev->typecode);
beat_destruct_event_receive_port(data[0]);
return -EIO;
}
ev->virq = virq;
rc = request_irq(virq, ev->handler, IRQF_DISABLED,
ev->typecode, NULL);
if (rc != 0) {
printk(KERN_ERR "Beat: failed to request virtual IRQ"
" for event receive port for %s\n",
ev->typecode);
beat_destruct_event_receive_port(data[0]);
return rc;
}
path[0] = 0x1000000065780000ul; /* 1,ex */
path[1] = 0x627574746f6e0000ul; /* button */
path[2] = 0;
strncpy((char *)&path[2], ev->typecode, 8);
path[3] = 0;
data[1] = 0;
beat_create_repository_node(path, data);
}
return 0;
}
static int __init beat_event_init(void)
{
if (!firmware_has_feature(FW_FEATURE_BEAT))
return -EINVAL;
beat_pm_poweroff_flag = 0;
return beat_register_event();
}
device_initcall(beat_event_init);
| gpl-2.0 |
TheEdge-/Leaping_kernel | net/ipv4/udp_diag.c | 4819 | 5316 | /*
* udp_diag.c Module for monitoring UDP transport protocol sockets.
*
* Authors: Pavel Emelyanov, <xemul@parallels.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/udp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <linux/sock_diag.h>
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
struct netlink_callback *cb, struct inet_diag_req_v2 *req,
struct nlattr *bc)
{
if (!inet_diag_bc_sk(bc, sk))
return 0;
return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
{
int err = -EINVAL;
struct sock *sk;
struct sk_buff *rep;
if (req->sdiag_family == AF_INET)
sk = __udp4_lib_lookup(&init_net,
req->id.idiag_src[0], req->id.idiag_sport,
req->id.idiag_dst[0], req->id.idiag_dport,
req->id.idiag_if, tbl);
#if IS_ENABLED(CONFIG_IPV6)
else if (req->sdiag_family == AF_INET6)
sk = __udp6_lib_lookup(&init_net,
(struct in6_addr *)req->id.idiag_src,
req->id.idiag_sport,
(struct in6_addr *)req->id.idiag_dst,
req->id.idiag_dport,
req->id.idiag_if, tbl);
#endif
else
goto out_nosk;
err = -ENOENT;
if (sk == NULL)
goto out_nosk;
err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
if (err)
goto out;
err = -ENOMEM;
rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
sizeof(struct inet_diag_meminfo) +
64)), GFP_KERNEL);
if (!rep)
goto out;
err = inet_sk_diag_fill(sk, NULL, rep, req,
NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, nlh);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(rep);
goto out;
}
err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
err = 0;
out:
if (sk)
sock_put(sk);
out_nosk:
return err;
}
static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
struct inet_diag_req_v2 *r, struct nlattr *bc)
{
int num, s_num, slot, s_slot;
s_slot = cb->args[0];
num = s_num = cb->args[1];
for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
struct sock *sk;
struct hlist_nulls_node *node;
struct udp_hslot *hslot = &table->hash[slot];
if (hlist_nulls_empty(&hslot->head))
continue;
spin_lock_bh(&hslot->lock);
sk_nulls_for_each(sk, node, &hslot->head) {
struct inet_sock *inet = inet_sk(sk);
if (num < s_num)
goto next;
if (!(r->idiag_states & (1 << sk->sk_state)))
goto next;
if (r->sdiag_family != AF_UNSPEC &&
sk->sk_family != r->sdiag_family)
goto next;
if (r->id.idiag_sport != inet->inet_sport &&
r->id.idiag_sport)
goto next;
if (r->id.idiag_dport != inet->inet_dport &&
r->id.idiag_dport)
goto next;
if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
spin_unlock_bh(&hslot->lock);
goto done;
}
next:
num++;
}
spin_unlock_bh(&hslot->lock);
}
done:
cb->args[0] = slot;
cb->args[1] = num;
}
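/*
* Resumption note (informational): cb->args[0] and cb->args[1] act as
* a (hash slot, entry index) cursor. When sk_diag_dump() fails,
* typically because the reply skb is full, the loop stops and saves
* the cursor so the next invocation of the dump callback for this
* request continues from the same slot and position instead of
* starting over.
*/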
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct inet_diag_req_v2 *r, struct nlattr *bc)
{
udp_dump(&udp_table, skb, cb, r, bc);
}
static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
struct inet_diag_req_v2 *req)
{
return udp_dump_one(&udp_table, in_skb, nlh, req);
}
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
void *info)
{
r->idiag_rqueue = sk_rmem_alloc_get(sk);
r->idiag_wqueue = sk_wmem_alloc_get(sk);
}
static const struct inet_diag_handler udp_diag_handler = {
.dump = udp_diag_dump,
.dump_one = udp_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDP,
};
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct inet_diag_req_v2 *r, struct nlattr *bc)
{
udp_dump(&udplite_table, skb, cb, r, bc);
}
static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
struct inet_diag_req_v2 *req)
{
return udp_dump_one(&udplite_table, in_skb, nlh, req);
}
static const struct inet_diag_handler udplite_diag_handler = {
.dump = udplite_diag_dump,
.dump_one = udplite_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDPLITE,
};
static int __init udp_diag_init(void)
{
int err;
err = inet_diag_register(&udp_diag_handler);
if (err)
goto out;
err = inet_diag_register(&udplite_diag_handler);
if (err)
goto out_lite;
out:
return err;
out_lite:
inet_diag_unregister(&udp_diag_handler);
goto out;
}
static void __exit udp_diag_exit(void)
{
inet_diag_unregister(&udplite_diag_handler);
inet_diag_unregister(&udp_diag_handler);
}
module_init(udp_diag_init);
module_exit(udp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);
| gpl-2.0 |
Feche/android_kernel_motorola_olympus_oc | sound/core/seq/seq_midi_emul.c | 4819 | 19908 | /*
* GM/GS/XG midi module.
*
* Copyright (C) 1999 Steve Ratcliffe
*
* Based on awe_wave.c by Takashi Iwai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* This module is used to keep track of the current midi state.
* It can be used for drivers that are required to emulate midi when
* the hardware doesn't.
*
* It was written for an AWE64 driver, but there should be no AWE specific
* code in here. If there is it should be reported as a bug.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/seq_kernel.h>
#include <sound/seq_midi_emul.h>
#include <sound/initval.h>
#include <sound/asoundef.h>
MODULE_AUTHOR("Takashi Iwai / Steve Ratcliffe");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer MIDI emulation.");
MODULE_LICENSE("GPL");
/* Prototypes for static functions */
static void note_off(struct snd_midi_op *ops, void *drv,
struct snd_midi_channel *chan,
int note, int vel);
static void do_control(struct snd_midi_op *ops, void *private,
struct snd_midi_channel_set *chset,
struct snd_midi_channel *chan,
int control, int value);
static void rpn(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan,
struct snd_midi_channel_set *chset);
static void nrpn(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan,
struct snd_midi_channel_set *chset);
static void sysex(struct snd_midi_op *ops, void *private, unsigned char *sysex,
int len, struct snd_midi_channel_set *chset);
static void all_sounds_off(struct snd_midi_op *ops, void *private,
struct snd_midi_channel *chan);
static void all_notes_off(struct snd_midi_op *ops, void *private,
struct snd_midi_channel *chan);
static void snd_midi_reset_controllers(struct snd_midi_channel *chan);
static void reset_all_channels(struct snd_midi_channel_set *chset);
/*
* Process an event in a driver independent way. This means dealing
* with RPN, NRPN, SysEx etc that are defined for common midi applications
* such as GM, GS and XG.
* The modes that this module can run in are:
* Generic MIDI - no interpretation at all, it will just save current values
* of controllers etc.
* GM - You can use all gm_ prefixed elements of chan. Controls, RPN, NRPN,
* SysEx will be interpreted as defined in General MIDI.
* GS - You can use all gs_ prefixed elements of chan. Codes for GS will be
* interpreted.
* XG - You can use all xg_ prefixed elements of chan. Codes for XG will
* be interpreted.
*/
void
snd_midi_process_event(struct snd_midi_op *ops,
struct snd_seq_event *ev,
struct snd_midi_channel_set *chanset)
{
struct snd_midi_channel *chan;
void *drv;
int dest_channel = 0;
if (ev == NULL || chanset == NULL) {
snd_printd("ev or chanbase NULL (snd_midi_process_event)\n");
return;
}
if (chanset->channels == NULL)
return;
if (snd_seq_ev_is_channel_type(ev)) {
dest_channel = ev->data.note.channel;
if (dest_channel >= chanset->max_channels) {
snd_printd("dest channel is %d, max is %d\n",
dest_channel, chanset->max_channels);
return;
}
}
chan = chanset->channels + dest_channel;
drv = chanset->private_data;
/* EVENT_NOTE should be processed before queued */
if (ev->type == SNDRV_SEQ_EVENT_NOTE)
return;
/* Make sure that we don't have a note on that should really be
* a note off */
if (ev->type == SNDRV_SEQ_EVENT_NOTEON && ev->data.note.velocity == 0)
ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
/* Make sure the note is within array range */
if (ev->type == SNDRV_SEQ_EVENT_NOTEON ||
ev->type == SNDRV_SEQ_EVENT_NOTEOFF ||
ev->type == SNDRV_SEQ_EVENT_KEYPRESS) {
if (ev->data.note.note >= 128)
return;
}
switch (ev->type) {
case SNDRV_SEQ_EVENT_NOTEON:
if (chan->note[ev->data.note.note] & SNDRV_MIDI_NOTE_ON) {
if (ops->note_off)
ops->note_off(drv, ev->data.note.note, 0, chan);
}
chan->note[ev->data.note.note] = SNDRV_MIDI_NOTE_ON;
if (ops->note_on)
ops->note_on(drv, ev->data.note.note, ev->data.note.velocity, chan);
break;
case SNDRV_SEQ_EVENT_NOTEOFF:
if (! (chan->note[ev->data.note.note] & SNDRV_MIDI_NOTE_ON))
break;
if (ops->note_off)
note_off(ops, drv, chan, ev->data.note.note, ev->data.note.velocity);
break;
case SNDRV_SEQ_EVENT_KEYPRESS:
if (ops->key_press)
ops->key_press(drv, ev->data.note.note, ev->data.note.velocity, chan);
break;
case SNDRV_SEQ_EVENT_CONTROLLER:
do_control(ops, drv, chanset, chan,
ev->data.control.param, ev->data.control.value);
break;
case SNDRV_SEQ_EVENT_PGMCHANGE:
chan->midi_program = ev->data.control.value;
break;
case SNDRV_SEQ_EVENT_PITCHBEND:
chan->midi_pitchbend = ev->data.control.value;
if (ops->control)
ops->control(drv, MIDI_CTL_PITCHBEND, chan);
break;
case SNDRV_SEQ_EVENT_CHANPRESS:
chan->midi_pressure = ev->data.control.value;
if (ops->control)
ops->control(drv, MIDI_CTL_CHAN_PRESSURE, chan);
break;
case SNDRV_SEQ_EVENT_CONTROL14:
/* Best guess is that this is any of the 14 bit controller values */
if (ev->data.control.param < 32) {
/* set low part first */
chan->control[ev->data.control.param + 32] =
ev->data.control.value & 0x7f;
do_control(ops, drv, chanset, chan,
ev->data.control.param,
((ev->data.control.value>>7) & 0x7f));
} else
do_control(ops, drv, chanset, chan,
ev->data.control.param,
ev->data.control.value);
break;
case SNDRV_SEQ_EVENT_NONREGPARAM:
/* Break it back into its controller values */
chan->param_type = SNDRV_MIDI_PARAM_TYPE_NONREGISTERED;
chan->control[MIDI_CTL_MSB_DATA_ENTRY]
= (ev->data.control.value >> 7) & 0x7f;
chan->control[MIDI_CTL_LSB_DATA_ENTRY]
= ev->data.control.value & 0x7f;
chan->control[MIDI_CTL_NONREG_PARM_NUM_MSB]
= (ev->data.control.param >> 7) & 0x7f;
chan->control[MIDI_CTL_NONREG_PARM_NUM_LSB]
= ev->data.control.param & 0x7f;
nrpn(ops, drv, chan, chanset);
break;
case SNDRV_SEQ_EVENT_REGPARAM:
/* Break it back into its controller values */
chan->param_type = SNDRV_MIDI_PARAM_TYPE_REGISTERED;
chan->control[MIDI_CTL_MSB_DATA_ENTRY]
= (ev->data.control.value >> 7) & 0x7f;
chan->control[MIDI_CTL_LSB_DATA_ENTRY]
= ev->data.control.value & 0x7f;
chan->control[MIDI_CTL_REGIST_PARM_NUM_MSB]
= (ev->data.control.param >> 7) & 0x7f;
chan->control[MIDI_CTL_REGIST_PARM_NUM_LSB]
= ev->data.control.param & 0x7f;
rpn(ops, drv, chan, chanset);
break;
case SNDRV_SEQ_EVENT_SYSEX:
if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE) {
unsigned char sysexbuf[64];
int len;
len = snd_seq_expand_var_event(ev, sizeof(sysexbuf), sysexbuf, 1, 0);
if (len > 0)
sysex(ops, drv, sysexbuf, len, chanset);
}
break;
case SNDRV_SEQ_EVENT_SONGPOS:
case SNDRV_SEQ_EVENT_SONGSEL:
case SNDRV_SEQ_EVENT_CLOCK:
case SNDRV_SEQ_EVENT_START:
case SNDRV_SEQ_EVENT_CONTINUE:
case SNDRV_SEQ_EVENT_STOP:
case SNDRV_SEQ_EVENT_QFRAME:
case SNDRV_SEQ_EVENT_TEMPO:
case SNDRV_SEQ_EVENT_TIMESIGN:
case SNDRV_SEQ_EVENT_KEYSIGN:
goto not_yet;
case SNDRV_SEQ_EVENT_SENSING:
break;
case SNDRV_SEQ_EVENT_CLIENT_START:
case SNDRV_SEQ_EVENT_CLIENT_EXIT:
case SNDRV_SEQ_EVENT_CLIENT_CHANGE:
case SNDRV_SEQ_EVENT_PORT_START:
case SNDRV_SEQ_EVENT_PORT_EXIT:
case SNDRV_SEQ_EVENT_PORT_CHANGE:
case SNDRV_SEQ_EVENT_ECHO:
not_yet:
default:
/*snd_printd("Unimplemented event %d\n", ev->type);*/
break;
}
}
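/*
* Minimal wiring sketch (illustrative only): a driver using this
* helper fills in the callbacks it can service and forwards its
* sequencer events here. my_note_on, my_note_off and my_driver_data
* are placeholder names, not part of this module:
*
* static struct snd_midi_op my_ops = {
* .note_on = my_note_on,
* .note_off = my_note_off,
* };
* struct snd_midi_channel_set *chset = snd_midi_channel_alloc_set(16);
* chset->private_data = my_driver_data;
* ...
* snd_midi_process_event(&my_ops, ev, chset);
*/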
/*
* release note
*/
static void
note_off(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan,
int note, int vel)
{
if (chan->gm_hold) {
/* Hold this note until pedal is turned off */
chan->note[note] |= SNDRV_MIDI_NOTE_RELEASED;
} else if (chan->note[note] & SNDRV_MIDI_NOTE_SOSTENUTO) {
/* Mark this note as released; it will be turned off when sostenuto
* is turned off */
chan->note[note] |= SNDRV_MIDI_NOTE_RELEASED;
} else {
chan->note[note] = 0;
if (ops->note_off)
ops->note_off(drv, note, vel, chan);
}
}
/*
* Do all driver independent operations for this controller and pass
* events that need to take place immediately to the driver.
*/
static void
do_control(struct snd_midi_op *ops, void *drv, struct snd_midi_channel_set *chset,
struct snd_midi_channel *chan, int control, int value)
{
int i;
/* Switches */
if ((control >=64 && control <=69) || (control >= 80 && control <= 83)) {
/* These are all switches; either off or on so set to 0 or 127 */
value = (value >= 64)? 127: 0;
}
chan->control[control] = value;
switch (control) {
case MIDI_CTL_SUSTAIN:
if (value == 0) {
/* Sustain has been released, turn off held notes */
for (i = 0; i < 128; i++) {
if (chan->note[i] & SNDRV_MIDI_NOTE_RELEASED) {
chan->note[i] = SNDRV_MIDI_NOTE_OFF;
if (ops->note_off)
ops->note_off(drv, i, 0, chan);
}
}
}
break;
case MIDI_CTL_PORTAMENTO:
break;
case MIDI_CTL_SOSTENUTO:
if (value) {
/* Mark each note that is currently held down */
for (i = 0; i < 128; i++) {
if (chan->note[i] & SNDRV_MIDI_NOTE_ON)
chan->note[i] |= SNDRV_MIDI_NOTE_SOSTENUTO;
}
} else {
/* release all notes that were held */
for (i = 0; i < 128; i++) {
if (chan->note[i] & SNDRV_MIDI_NOTE_SOSTENUTO) {
chan->note[i] &= ~SNDRV_MIDI_NOTE_SOSTENUTO;
if (chan->note[i] & SNDRV_MIDI_NOTE_RELEASED) {
chan->note[i] = SNDRV_MIDI_NOTE_OFF;
if (ops->note_off)
ops->note_off(drv, i, 0, chan);
}
}
}
}
break;
case MIDI_CTL_MSB_DATA_ENTRY:
chan->control[MIDI_CTL_LSB_DATA_ENTRY] = 0;
/* fall through */
case MIDI_CTL_LSB_DATA_ENTRY:
if (chan->param_type == SNDRV_MIDI_PARAM_TYPE_REGISTERED)
rpn(ops, drv, chan, chset);
else
nrpn(ops, drv, chan, chset);
break;
case MIDI_CTL_REGIST_PARM_NUM_LSB:
case MIDI_CTL_REGIST_PARM_NUM_MSB:
chan->param_type = SNDRV_MIDI_PARAM_TYPE_REGISTERED;
break;
case MIDI_CTL_NONREG_PARM_NUM_LSB:
case MIDI_CTL_NONREG_PARM_NUM_MSB:
chan->param_type = SNDRV_MIDI_PARAM_TYPE_NONREGISTERED;
break;
case MIDI_CTL_ALL_SOUNDS_OFF:
all_sounds_off(ops, drv, chan);
break;
case MIDI_CTL_ALL_NOTES_OFF:
all_notes_off(ops, drv, chan);
break;
case MIDI_CTL_MSB_BANK:
if (chset->midi_mode == SNDRV_MIDI_MODE_XG) {
if (value == 127)
chan->drum_channel = 1;
else
chan->drum_channel = 0;
}
break;
case MIDI_CTL_LSB_BANK:
break;
case MIDI_CTL_RESET_CONTROLLERS:
snd_midi_reset_controllers(chan);
break;
case MIDI_CTL_SOFT_PEDAL:
case MIDI_CTL_LEGATO_FOOTSWITCH:
case MIDI_CTL_HOLD2:
case MIDI_CTL_SC1_SOUND_VARIATION:
case MIDI_CTL_SC2_TIMBRE:
case MIDI_CTL_SC3_RELEASE_TIME:
case MIDI_CTL_SC4_ATTACK_TIME:
case MIDI_CTL_SC5_BRIGHTNESS:
case MIDI_CTL_E1_REVERB_DEPTH:
case MIDI_CTL_E2_TREMOLO_DEPTH:
case MIDI_CTL_E3_CHORUS_DEPTH:
case MIDI_CTL_E4_DETUNE_DEPTH:
case MIDI_CTL_E5_PHASER_DEPTH:
goto notyet;
notyet:
default:
if (ops->control)
ops->control(drv, control, chan);
break;
}
}
/*
* initialize the MIDI status
*/
void
snd_midi_channel_set_clear(struct snd_midi_channel_set *chset)
{
int i;
chset->midi_mode = SNDRV_MIDI_MODE_GM;
chset->gs_master_volume = 127;
for (i = 0; i < chset->max_channels; i++) {
struct snd_midi_channel *chan = chset->channels + i;
memset(chan->note, 0, sizeof(chan->note));
chan->midi_aftertouch = 0;
chan->midi_pressure = 0;
chan->midi_program = 0;
chan->midi_pitchbend = 0;
snd_midi_reset_controllers(chan);
chan->gm_rpn_pitch_bend_range = 256; /* 2 semitones */
chan->gm_rpn_fine_tuning = 0;
chan->gm_rpn_coarse_tuning = 0;
if (i == 9)
chan->drum_channel = 1;
else
chan->drum_channel = 0;
}
}
/*
* Process a rpn message.
*/
static void
rpn(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan,
struct snd_midi_channel_set *chset)
{
int type;
int val;
if (chset->midi_mode != SNDRV_MIDI_MODE_NONE) {
type = (chan->control[MIDI_CTL_REGIST_PARM_NUM_MSB] << 8) |
chan->control[MIDI_CTL_REGIST_PARM_NUM_LSB];
val = (chan->control[MIDI_CTL_MSB_DATA_ENTRY] << 7) |
chan->control[MIDI_CTL_LSB_DATA_ENTRY];
switch (type) {
case 0x0000: /* Pitch bend sensitivity */
/* MSB only / 1 semitone per 128 */
chan->gm_rpn_pitch_bend_range = val;
break;
case 0x0001: /* fine tuning: */
/* MSB/LSB, 8192=center, 100/8192 cent step */
chan->gm_rpn_fine_tuning = val - 8192;
break;
case 0x0002: /* coarse tuning */
/* MSB only / 8192=center, 1 semitone per 128 */
chan->gm_rpn_coarse_tuning = val - 8192;
break;
case 0x7F7F: /* "lock-in" RPN */
/* ignored */
break;
}
}
/* should call nrpn or rpn callback here.. */
}
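/*
* Worked example (informational): registered parameter 0x0000 (pitch
* bend sensitivity) with data entry MSB = 2 and LSB = 0 gives
* val = (2 << 7) | 0 = 256, so gm_rpn_pitch_bend_range becomes 256,
* matching the "256 = 2 semitones" default used when a channel set
* is cleared.
*/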
/*
* Process an nrpn message.
*/
static void
nrpn(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan,
struct snd_midi_channel_set *chset)
{
/* parse XG NRPNs here if possible */
if (ops->nrpn)
ops->nrpn(drv, chan, chset);
}
/*
* convert channel parameter in GS sysex
*/
static int
get_channel(unsigned char cmd)
{
int p = cmd & 0x0f;
if (p == 0)
p = 9;
else if (p < 10)
p--;
return p;
}
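/*
* Mapping example (informational): the low nibble of the GS address
* byte numbers the part, with 0 standing for part 10 (conventionally
* the drum part). So get_channel(0x10) returns 9 (MIDI channel 10),
* get_channel(0x11) returns 0 (MIDI channel 1) and get_channel(0x1a)
* returns 10 (MIDI channel 11).
*/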
/*
* Process a sysex message.
*/
static void
sysex(struct snd_midi_op *ops, void *private, unsigned char *buf, int len,
struct snd_midi_channel_set *chset)
{
/* GM on */
static unsigned char gm_on_macro[] = {
0x7e,0x7f,0x09,0x01,
};
/* XG on */
static unsigned char xg_on_macro[] = {
0x43,0x10,0x4c,0x00,0x00,0x7e,0x00,
};
/* GS prefix
* drum channel: XX=0x1?(channel), YY=0x15, ZZ=on/off
* reverb mode: XX=0x01, YY=0x30, ZZ=0-7
* chorus mode: XX=0x01, YY=0x38, ZZ=0-7
* master vol: XX=0x00, YY=0x04, ZZ=0-127
*/
static unsigned char gs_pfx_macro[] = {
0x41,0x10,0x42,0x12,0x40,/*XX,YY,ZZ*/
};
int parsed = SNDRV_MIDI_SYSEX_NOT_PARSED;
if (len <= 0 || buf[0] != 0xf0)
return;
/* skip first byte */
buf++;
len--;
/* GM on */
if (len >= (int)sizeof(gm_on_macro) &&
memcmp(buf, gm_on_macro, sizeof(gm_on_macro)) == 0) {
if (chset->midi_mode != SNDRV_MIDI_MODE_GS &&
chset->midi_mode != SNDRV_MIDI_MODE_XG) {
chset->midi_mode = SNDRV_MIDI_MODE_GM;
reset_all_channels(chset);
parsed = SNDRV_MIDI_SYSEX_GM_ON;
}
}
/* GS macros */
else if (len >= 8 &&
memcmp(buf, gs_pfx_macro, sizeof(gs_pfx_macro)) == 0) {
if (chset->midi_mode != SNDRV_MIDI_MODE_GS &&
chset->midi_mode != SNDRV_MIDI_MODE_XG)
chset->midi_mode = SNDRV_MIDI_MODE_GS;
if (buf[5] == 0x00 && buf[6] == 0x7f && buf[7] == 0x00) {
/* GS reset */
parsed = SNDRV_MIDI_SYSEX_GS_RESET;
reset_all_channels(chset);
}
else if ((buf[5] & 0xf0) == 0x10 && buf[6] == 0x15) {
/* drum pattern */
int p = get_channel(buf[5]);
if (p < chset->max_channels) {
parsed = SNDRV_MIDI_SYSEX_GS_DRUM_CHANNEL;
if (buf[7])
chset->channels[p].drum_channel = 1;
else
chset->channels[p].drum_channel = 0;
}
} else if ((buf[5] & 0xf0) == 0x10 && buf[6] == 0x21) {
/* program */
int p = get_channel(buf[5]);
if (p < chset->max_channels &&
! chset->channels[p].drum_channel) {
parsed = SNDRV_MIDI_SYSEX_GS_DRUM_CHANNEL;
chset->channels[p].midi_program = buf[7];
}
} else if (buf[5] == 0x01 && buf[6] == 0x30) {
/* reverb mode */
parsed = SNDRV_MIDI_SYSEX_GS_REVERB_MODE;
chset->gs_reverb_mode = buf[7];
} else if (buf[5] == 0x01 && buf[6] == 0x38) {
/* chorus mode */
parsed = SNDRV_MIDI_SYSEX_GS_CHORUS_MODE;
chset->gs_chorus_mode = buf[7];
} else if (buf[5] == 0x00 && buf[6] == 0x04) {
/* master volume */
parsed = SNDRV_MIDI_SYSEX_GS_MASTER_VOLUME;
chset->gs_master_volume = buf[7];
}
}
/* XG on */
else if (len >= (int)sizeof(xg_on_macro) &&
memcmp(buf, xg_on_macro, sizeof(xg_on_macro)) == 0) {
int i;
chset->midi_mode = SNDRV_MIDI_MODE_XG;
parsed = SNDRV_MIDI_SYSEX_XG_ON;
/* reset CC#0 for drums */
for (i = 0; i < chset->max_channels; i++) {
if (chset->channels[i].drum_channel)
chset->channels[i].control[MIDI_CTL_MSB_BANK] = 127;
else
chset->channels[i].control[MIDI_CTL_MSB_BANK] = 0;
}
}
if (ops->sysex)
ops->sysex(private, buf - 1, len + 1, parsed, chset);
}
/*
* all sound off
*/
static void
all_sounds_off(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan)
{
int n;
if (! ops->note_terminate)
return;
for (n = 0; n < 128; n++) {
if (chan->note[n]) {
ops->note_terminate(drv, n, chan);
chan->note[n] = 0;
}
}
}
/*
* all notes off
*/
static void
all_notes_off(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan)
{
int n;
if (! ops->note_off)
return;
for (n = 0; n < 128; n++) {
if (chan->note[n] == SNDRV_MIDI_NOTE_ON)
note_off(ops, drv, chan, n, 0);
}
}
/*
* Initialise a single midi channel control block.
*/
static void snd_midi_channel_init(struct snd_midi_channel *p, int n)
{
if (p == NULL)
return;
memset(p, 0, sizeof(struct snd_midi_channel));
p->private = NULL;
p->number = n;
snd_midi_reset_controllers(p);
p->gm_rpn_pitch_bend_range = 256; /* 2 semitones */
p->gm_rpn_fine_tuning = 0;
p->gm_rpn_coarse_tuning = 0;
if (n == 9)
p->drum_channel = 1; /* Default ch 10 as drums */
}
/*
* Allocate and initialise a set of midi channel control blocks.
*/
static struct snd_midi_channel *snd_midi_channel_init_set(int n)
{
struct snd_midi_channel *chan;
int i;
chan = kmalloc(n * sizeof(struct snd_midi_channel), GFP_KERNEL);
if (chan) {
for (i = 0; i < n; i++)
snd_midi_channel_init(chan+i, i);
}
return chan;
}
/*
* reset all midi channels
*/
static void
reset_all_channels(struct snd_midi_channel_set *chset)
{
int ch;
for (ch = 0; ch < chset->max_channels; ch++) {
struct snd_midi_channel *chan = chset->channels + ch;
snd_midi_reset_controllers(chan);
chan->gm_rpn_pitch_bend_range = 256; /* 2 semitones */
chan->gm_rpn_fine_tuning = 0;
chan->gm_rpn_coarse_tuning = 0;
if (ch == 9)
chan->drum_channel = 1;
else
chan->drum_channel = 0;
}
}
/*
* Allocate and initialise a midi channel set.
*/
struct snd_midi_channel_set *snd_midi_channel_alloc_set(int n)
{
struct snd_midi_channel_set *chset;
chset = kmalloc(sizeof(*chset), GFP_KERNEL);
if (chset) {
chset->channels = snd_midi_channel_init_set(n);
chset->private_data = NULL;
chset->max_channels = n;
}
return chset;
}
/*
* Reset the midi controllers on a particular channel to default values.
*/
static void snd_midi_reset_controllers(struct snd_midi_channel *chan)
{
memset(chan->control, 0, sizeof(chan->control));
chan->gm_volume = 127;
chan->gm_expression = 127;
chan->gm_pan = 64;
}
/*
* Free a midi channel set.
*/
void snd_midi_channel_free_set(struct snd_midi_channel_set *chset)
{
if (chset == NULL)
return;
kfree(chset->channels);
kfree(chset);
}
static int __init alsa_seq_midi_emul_init(void)
{
return 0;
}
static void __exit alsa_seq_midi_emul_exit(void)
{
}
module_init(alsa_seq_midi_emul_init)
module_exit(alsa_seq_midi_emul_exit)
EXPORT_SYMBOL(snd_midi_process_event);
EXPORT_SYMBOL(snd_midi_channel_set_clear);
EXPORT_SYMBOL(snd_midi_channel_alloc_set);
EXPORT_SYMBOL(snd_midi_channel_free_set);
| gpl-2.0 |
civato/CivZ-P900-KitKat-SOURCE | fs/xfs/xfs_rw.c | 5075 | 4201 | /*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_rw.h"
/*
* Force a shutdown of the filesystem instantly while keeping
* the filesystem consistent. We don't do an unmount here; just shutdown
* the shop, make sure that absolutely nothing persistent happens to
* this filesystem after this point.
*/
void
xfs_do_force_shutdown(
xfs_mount_t *mp,
int flags,
char *fname,
int lnnum)
{
int logerror;
logerror = flags & SHUTDOWN_LOG_IO_ERROR;
if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
xfs_notice(mp,
"%s(0x%x) called from line %d of file %s. Return address = 0x%p",
__func__, flags, lnnum, fname, __return_address);
}
/*
* No need to duplicate efforts.
*/
if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
return;
/*
* This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
* queue up anybody new on the log reservations, and wakes up
* everybody who's sleeping on log reservations to tell them
* the bad news.
*/
if (xfs_log_force_umount(mp, logerror))
return;
if (flags & SHUTDOWN_CORRUPT_INCORE) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
"Corruption of in-memory data detected. Shutting down filesystem");
if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
xfs_stack_trace();
} else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
if (logerror) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
"Log I/O Error Detected. Shutting down filesystem");
} else if (flags & SHUTDOWN_DEVICE_REQ) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
"All device paths lost. Shutting down filesystem");
} else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
"I/O Error Detected. Shutting down filesystem");
}
}
if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
xfs_alert(mp,
"Please umount the filesystem and rectify the problem(s)");
}
}
/*
* This isn't an absolute requirement, but it is
* just a good idea to call xfs_read_buf instead of
* directly doing a read_buf call. For one, we shouldn't
* be doing this disk read if we are in SHUTDOWN state anyway,
* so this stops that from happening. Secondly, this does all
* the error checking stuff and the brelse if appropriate for
* the caller, so the code can be a little leaner.
*/
int
xfs_read_buf(
struct xfs_mount *mp,
xfs_buftarg_t *target,
xfs_daddr_t blkno,
int len,
uint flags,
xfs_buf_t **bpp)
{
xfs_buf_t *bp;
int error;
if (!flags)
flags = XBF_LOCK | XBF_MAPPED;
bp = xfs_buf_read(target, blkno, len, flags);
if (!bp)
return XFS_ERROR(EIO);
error = bp->b_error;
if (!error && !XFS_FORCED_SHUTDOWN(mp)) {
*bpp = bp;
} else {
*bpp = NULL;
if (error) {
xfs_buf_ioerror_alert(bp, __func__);
} else {
error = XFS_ERROR(EIO);
}
if (bp) {
XFS_BUF_UNDONE(bp);
xfs_buf_stale(bp);
/*
* brelse clears B_ERROR and b_error
*/
xfs_buf_relse(bp);
}
}
return (error);
}
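/*
* Calling convention note (informational): on success *bpp holds the
* buffer and releasing it is the caller's job; on any error the
* buffer has already been marked stale and released here, so callers
* only need to look at the return value.
*/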
/*
* helper function to extract extent size hint from inode
*/
xfs_extlen_t
xfs_get_extsz_hint(
struct xfs_inode *ip)
{
if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
return ip->i_d.di_extsize;
if (XFS_IS_REALTIME_INODE(ip))
return ip->i_mount->m_sb.sb_rextsize;
return 0;
}
| gpl-2.0 |
NamanArora/dx_kernel | drivers/tty/serial/serial_txx9.c | 5075 | 34199 | /*
* Derived from many drivers using generic_serial interface,
* especially serial_tx3912.c by Steven J. Hill and r39xx_serial.c
* (was in Linux/VR tree) by Jim Pick.
*
* Copyright (C) 1999 Harald Koerfgen
* Copyright (C) 2000 Jim Pick <jim@jimpick.com>
* Copyright (C) 2001 Steven J. Hill (sjhill@realitydiluted.com)
* Copyright (C) 2000-2002 Toshiba Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Serial driver for TX3927/TX4927/TX4925/TX4938 internal SIO controller
*/
#if defined(CONFIG_SERIAL_TXX9_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/io.h>
static char *serial_version = "1.11";
static char *serial_name = "TX39/49 Serial driver";
#define PASS_LIMIT 256
#if !defined(CONFIG_SERIAL_TXX9_STDSERIAL)
/* "ttyS" is used for standard serial driver */
#define TXX9_TTY_NAME "ttyTX"
#define TXX9_TTY_MINOR_START 196
#define TXX9_TTY_MAJOR 204
#else
/* acts like standard serial driver */
#define TXX9_TTY_NAME "ttyS"
#define TXX9_TTY_MINOR_START 64
#define TXX9_TTY_MAJOR TTY_MAJOR
#endif
/* flag aliases */
#define UPF_TXX9_HAVE_CTS_LINE UPF_BUGGY_UART
#define UPF_TXX9_USE_SCLK UPF_MAGIC_MULTIPLIER
#ifdef CONFIG_PCI
/* support for Toshiba TC86C001 SIO */
#define ENABLE_SERIAL_TXX9_PCI
#endif
/*
* Number of serial ports
*/
#define UART_NR CONFIG_SERIAL_TXX9_NR_UARTS
struct uart_txx9_port {
struct uart_port port;
/* No additional info for now */
};
#define TXX9_REGION_SIZE 0x24
/* TXX9 Serial Registers */
#define TXX9_SILCR 0x00
#define TXX9_SIDICR 0x04
#define TXX9_SIDISR 0x08
#define TXX9_SICISR 0x0c
#define TXX9_SIFCR 0x10
#define TXX9_SIFLCR 0x14
#define TXX9_SIBGR 0x18
#define TXX9_SITFIFO 0x1c
#define TXX9_SIRFIFO 0x20
/* SILCR : Line Control */
#define TXX9_SILCR_SCS_MASK 0x00000060
#define TXX9_SILCR_SCS_IMCLK 0x00000000
#define TXX9_SILCR_SCS_IMCLK_BG 0x00000020
#define TXX9_SILCR_SCS_SCLK 0x00000040
#define TXX9_SILCR_SCS_SCLK_BG 0x00000060
#define TXX9_SILCR_UEPS 0x00000010
#define TXX9_SILCR_UPEN 0x00000008
#define TXX9_SILCR_USBL_MASK 0x00000004
#define TXX9_SILCR_USBL_1BIT 0x00000000
#define TXX9_SILCR_USBL_2BIT 0x00000004
#define TXX9_SILCR_UMODE_MASK 0x00000003
#define TXX9_SILCR_UMODE_8BIT 0x00000000
#define TXX9_SILCR_UMODE_7BIT 0x00000001
/* SIDICR : DMA/Int. Control */
#define TXX9_SIDICR_TDE 0x00008000
#define TXX9_SIDICR_RDE 0x00004000
#define TXX9_SIDICR_TIE 0x00002000
#define TXX9_SIDICR_RIE 0x00001000
#define TXX9_SIDICR_SPIE 0x00000800
#define TXX9_SIDICR_CTSAC 0x00000600
#define TXX9_SIDICR_STIE_MASK 0x0000003f
#define TXX9_SIDICR_STIE_OERS 0x00000020
#define TXX9_SIDICR_STIE_CTSS 0x00000010
#define TXX9_SIDICR_STIE_RBRKD 0x00000008
#define TXX9_SIDICR_STIE_TRDY 0x00000004
#define TXX9_SIDICR_STIE_TXALS 0x00000002
#define TXX9_SIDICR_STIE_UBRKD 0x00000001
/* SIDISR : DMA/Int. Status */
#define TXX9_SIDISR_UBRK 0x00008000
#define TXX9_SIDISR_UVALID 0x00004000
#define TXX9_SIDISR_UFER 0x00002000
#define TXX9_SIDISR_UPER 0x00001000
#define TXX9_SIDISR_UOER 0x00000800
#define TXX9_SIDISR_ERI 0x00000400
#define TXX9_SIDISR_TOUT 0x00000200
#define TXX9_SIDISR_TDIS 0x00000100
#define TXX9_SIDISR_RDIS 0x00000080
#define TXX9_SIDISR_STIS 0x00000040
#define TXX9_SIDISR_RFDN_MASK 0x0000001f
/* SICISR : Change Int. Status */
#define TXX9_SICISR_OERS 0x00000020
#define TXX9_SICISR_CTSS 0x00000010
#define TXX9_SICISR_RBRKD 0x00000008
#define TXX9_SICISR_TRDY 0x00000004
#define TXX9_SICISR_TXALS 0x00000002
#define TXX9_SICISR_UBRKD 0x00000001
/* SIFCR : FIFO Control */
#define TXX9_SIFCR_SWRST 0x00008000
#define TXX9_SIFCR_RDIL_MASK 0x00000180
#define TXX9_SIFCR_RDIL_1 0x00000000
#define TXX9_SIFCR_RDIL_4 0x00000080
#define TXX9_SIFCR_RDIL_8 0x00000100
#define TXX9_SIFCR_RDIL_12 0x00000180
#define TXX9_SIFCR_RDIL_MAX 0x00000180
#define TXX9_SIFCR_TDIL_MASK 0x00000018
#define TXX9_SIFCR_TDIL_1 0x00000000
#define TXX9_SIFCR_TDIL_4 0x00000001
#define TXX9_SIFCR_TDIL_8 0x00000010
#define TXX9_SIFCR_TDIL_MAX 0x00000010
#define TXX9_SIFCR_TFRST 0x00000004
#define TXX9_SIFCR_RFRST 0x00000002
#define TXX9_SIFCR_FRSTE 0x00000001
#define TXX9_SIO_TX_FIFO 8
#define TXX9_SIO_RX_FIFO 16
/* SIFLCR : Flow Control */
#define TXX9_SIFLCR_RCS 0x00001000
#define TXX9_SIFLCR_TES 0x00000800
#define TXX9_SIFLCR_RTSSC 0x00000200
#define TXX9_SIFLCR_RSDE 0x00000100
#define TXX9_SIFLCR_TSDE 0x00000080
#define TXX9_SIFLCR_RTSTL_MASK 0x0000001e
#define TXX9_SIFLCR_RTSTL_MAX 0x0000001e
#define TXX9_SIFLCR_TBRK 0x00000001
/* SIBGR : Baudrate Control */
#define TXX9_SIBGR_BCLK_MASK 0x00000300
#define TXX9_SIBGR_BCLK_T0 0x00000000
#define TXX9_SIBGR_BCLK_T2 0x00000100
#define TXX9_SIBGR_BCLK_T4 0x00000200
#define TXX9_SIBGR_BCLK_T6 0x00000300
#define TXX9_SIBGR_BRD_MASK 0x000000ff
static inline unsigned int sio_in(struct uart_txx9_port *up, int offset)
{
switch (up->port.iotype) {
default:
return __raw_readl(up->port.membase + offset);
case UPIO_PORT:
return inl(up->port.iobase + offset);
}
}
static inline void
sio_out(struct uart_txx9_port *up, int offset, int value)
{
switch (up->port.iotype) {
default:
__raw_writel(value, up->port.membase + offset);
break;
case UPIO_PORT:
outl(value, up->port.iobase + offset);
break;
}
}
static inline void
sio_mask(struct uart_txx9_port *up, int offset, unsigned int value)
{
sio_out(up, offset, sio_in(up, offset) & ~value);
}
static inline void
sio_set(struct uart_txx9_port *up, int offset, unsigned int value)
{
sio_out(up, offset, sio_in(up, offset) | value);
}
static inline void
sio_quot_set(struct uart_txx9_port *up, int quot)
{
quot >>= 1;
if (quot < 256)
sio_out(up, TXX9_SIBGR, quot | TXX9_SIBGR_BCLK_T0);
else if (quot < (256 << 2))
sio_out(up, TXX9_SIBGR, (quot >> 2) | TXX9_SIBGR_BCLK_T2);
else if (quot < (256 << 4))
sio_out(up, TXX9_SIBGR, (quot >> 4) | TXX9_SIBGR_BCLK_T4);
else if (quot < (256 << 6))
sio_out(up, TXX9_SIBGR, (quot >> 6) | TXX9_SIBGR_BCLK_T6);
else
sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6);
}
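/*
* Worked example (informational): the divisor is first halved, then
* the smallest prescaler tap (T0/T2/T4/T6, an extra shift of 0/2/4/6
* bits) that fits the result into the 8-bit BRD field is chosen.
* E.g. quot = 2048 becomes 1024 after the halving, which needs the T4
* tap: SIBGR = (1024 >> 4) | TXX9_SIBGR_BCLK_T4 = 0x40 | 0x200.
*/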
static struct uart_txx9_port *to_uart_txx9_port(struct uart_port *port)
{
return container_of(port, struct uart_txx9_port, port);
}
static void serial_txx9_stop_tx(struct uart_port *port)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
}
static void serial_txx9_start_tx(struct uart_port *port)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
}
static void serial_txx9_stop_rx(struct uart_port *port)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
up->port.read_status_mask &= ~TXX9_SIDISR_RDIS;
}
static void serial_txx9_enable_ms(struct uart_port *port)
{
/* TXX9-SIO can not control DTR... */
}
static void serial_txx9_initialize(struct uart_port *port)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
unsigned int tmout = 10000;
sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
/* TX4925 BUG WORKAROUND. Accessing SIOC register
* immediately after soft reset causes bus error. */
mmiowb();
udelay(1);
while ((sio_in(up, TXX9_SIFCR) & TXX9_SIFCR_SWRST) && --tmout)
udelay(1);
/* TX Int by FIFO Empty, RX Int by Receiving 1 char. */
sio_set(up, TXX9_SIFCR,
TXX9_SIFCR_TDIL_MAX | TXX9_SIFCR_RDIL_1);
/* initial settings */
sio_out(up, TXX9_SILCR,
TXX9_SILCR_UMODE_8BIT | TXX9_SILCR_USBL_1BIT |
((up->port.flags & UPF_TXX9_USE_SCLK) ?
TXX9_SILCR_SCS_SCLK_BG : TXX9_SILCR_SCS_IMCLK_BG));
sio_quot_set(up, uart_get_divisor(port, 9600));
sio_out(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSTL_MAX /* 15 */);
sio_out(up, TXX9_SIDICR, 0);
}
static inline void
receive_chars(struct uart_txx9_port *up, unsigned int *status)
{
struct tty_struct *tty = up->port.state->port.tty;
unsigned char ch;
unsigned int disr = *status;
int max_count = 256;
char flag;
unsigned int next_ignore_status_mask;
do {
ch = sio_in(up, TXX9_SIRFIFO);
flag = TTY_NORMAL;
up->port.icount.rx++;
/* mask out RFDN_MASK bit added by previous overrun */
next_ignore_status_mask =
up->port.ignore_status_mask & ~TXX9_SIDISR_RFDN_MASK;
if (unlikely(disr & (TXX9_SIDISR_UBRK | TXX9_SIDISR_UPER |
TXX9_SIDISR_UFER | TXX9_SIDISR_UOER))) {
/*
* For statistics only
*/
if (disr & TXX9_SIDISR_UBRK) {
disr &= ~(TXX9_SIDISR_UFER | TXX9_SIDISR_UPER);
up->port.icount.brk++;
/*
* We do the SysRQ and SAK checking
* here because otherwise the break
* may get masked by ignore_status_mask
* or read_status_mask.
*/
if (uart_handle_break(&up->port))
goto ignore_char;
} else if (disr & TXX9_SIDISR_UPER)
up->port.icount.parity++;
else if (disr & TXX9_SIDISR_UFER)
up->port.icount.frame++;
if (disr & TXX9_SIDISR_UOER) {
up->port.icount.overrun++;
/*
* The receiver read buffer still holds
* a char which caused the overrun.
* Ignore next char by adding RFDN_MASK
* to ignore_status_mask temporarily.
*/
next_ignore_status_mask |=
TXX9_SIDISR_RFDN_MASK;
}
/*
* Mask off conditions which should be ignored.
*/
disr &= up->port.read_status_mask;
if (disr & TXX9_SIDISR_UBRK) {
flag = TTY_BREAK;
} else if (disr & TXX9_SIDISR_UPER)
flag = TTY_PARITY;
else if (disr & TXX9_SIDISR_UFER)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch))
goto ignore_char;
uart_insert_char(&up->port, disr, TXX9_SIDISR_UOER, ch, flag);
ignore_char:
up->port.ignore_status_mask = next_ignore_status_mask;
disr = sio_in(up, TXX9_SIDISR);
} while (!(disr & TXX9_SIDISR_UVALID) && (max_count-- > 0));
spin_unlock(&up->port.lock);
tty_flip_buffer_push(tty);
spin_lock(&up->port.lock);
*status = disr;
}
static inline void transmit_chars(struct uart_txx9_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
int count;
if (up->port.x_char) {
sio_out(up, TXX9_SITFIFO, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
serial_txx9_stop_tx(&up->port);
return;
}
count = TXX9_SIO_TX_FIFO;
do {
sio_out(up, TXX9_SITFIFO, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
up->port.icount.tx++;
if (uart_circ_empty(xmit))
break;
} while (--count > 0);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
if (uart_circ_empty(xmit))
serial_txx9_stop_tx(&up->port);
}
static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
{
int pass_counter = 0;
struct uart_txx9_port *up = dev_id;
unsigned int status;
while (1) {
spin_lock(&up->port.lock);
status = sio_in(up, TXX9_SIDISR);
if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE))
status &= ~TXX9_SIDISR_TDIS;
if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
TXX9_SIDISR_TOUT))) {
spin_unlock(&up->port.lock);
break;
}
if (status & TXX9_SIDISR_RDIS)
receive_chars(up, &status);
if (status & TXX9_SIDISR_TDIS)
transmit_chars(up);
/* Clear TX/RX Int. Status */
sio_mask(up, TXX9_SIDISR,
TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
TXX9_SIDISR_TOUT);
spin_unlock(&up->port.lock);
if (pass_counter++ > PASS_LIMIT)
break;
}
return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}
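/*
* Note (informational): the handler keeps draining RX and refilling TX
* as long as the status register reports work, but bails out after
* PASS_LIMIT (256) rounds so a misbehaving port cannot monopolise the
* CPU. Returning IRQ_NONE when nothing was serviced lets the core
* treat the shared interrupt (requested with IRQF_SHARED in startup)
* as belonging to another device.
*/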
static unsigned int serial_txx9_tx_empty(struct uart_port *port)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
unsigned long flags;
unsigned int ret;
spin_lock_irqsave(&up->port.lock, flags);
ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
return ret;
}
static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
unsigned int ret;
/* no modem control lines */
ret = TIOCM_CAR | TIOCM_DSR;
ret |= (sio_in(up, TXX9_SIFLCR) & TXX9_SIFLCR_RTSSC) ? 0 : TIOCM_RTS;
ret |= (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS) ? 0 : TIOCM_CTS;
return ret;
}
static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
if (mctrl & TIOCM_RTS)
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
else
sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
}
static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
else
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
spin_unlock_irqrestore(&up->port.lock, flags);
}
#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/*
* Wait for transmitter & holding register to empty
*/
static void wait_for_xmitr(struct uart_txx9_port *up)
{
unsigned int tmout = 10000;
/* Wait up to 10ms for the character(s) to be sent. */
while (--tmout &&
!(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
udelay(1);
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
tmout = 1000000;
while (--tmout &&
(sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
udelay(1);
}
}
#endif
#ifdef CONFIG_CONSOLE_POLL
/*
* Console polling routines for writing and reading from the uart while
* in an interrupt or debug context.
*/
static int serial_txx9_get_poll_char(struct uart_port *port)
{
unsigned int ier;
unsigned char c;
struct uart_txx9_port *up = to_uart_txx9_port(port);
/*
* First save the IER then disable the interrupts
*/
ier = sio_in(up, TXX9_SIDICR);
sio_out(up, TXX9_SIDICR, 0);
while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID)
;
c = sio_in(up, TXX9_SIRFIFO);
/*
* Finally, clear RX interrupt status
* and restore the IER
*/
sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS);
sio_out(up, TXX9_SIDICR, ier);
return c;
}
static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
{
unsigned int ier;
struct uart_txx9_port *up = to_uart_txx9_port(port);
/*
* First save the IER then disable the interrupts
*/
ier = sio_in(up, TXX9_SIDICR);
sio_out(up, TXX9_SIDICR, 0);
wait_for_xmitr(up);
/*
* Send the character out.
* If a LF, also do CR...
*/
sio_out(up, TXX9_SITFIFO, c);
if (c == 10) {
wait_for_xmitr(up);
sio_out(up, TXX9_SITFIFO, 13);
}
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
sio_out(up, TXX9_SIDICR, ier);
}
#endif /* CONFIG_CONSOLE_POLL */
static int serial_txx9_startup(struct uart_port *port)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
unsigned long flags;
int retval;
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
*/
sio_set(up, TXX9_SIFCR,
TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
/* clear reset */
sio_mask(up, TXX9_SIFCR,
TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
sio_out(up, TXX9_SIDICR, 0);
/*
* Clear the interrupt registers.
*/
sio_out(up, TXX9_SIDISR, 0);
retval = request_irq(up->port.irq, serial_txx9_interrupt,
IRQF_SHARED, "serial_txx9", up);
if (retval)
return retval;
/*
* Now, initialize the UART
*/
spin_lock_irqsave(&up->port.lock, flags);
serial_txx9_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
/* Enable RX/TX */
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
/*
* Finally, enable interrupts.
*/
sio_set(up, TXX9_SIDICR, TXX9_SIDICR_RIE);
return 0;
}
static void serial_txx9_shutdown(struct uart_port *port)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
unsigned long flags;
/*
* Disable interrupts from this port
*/
sio_out(up, TXX9_SIDICR, 0); /* disable all intrs */
spin_lock_irqsave(&up->port.lock, flags);
serial_txx9_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
/*
* Disable break condition
*/
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
if (up->port.cons && up->port.line == up->port.cons->index) {
free_irq(up->port.irq, up);
return;
}
#endif
/* reset FIFOs */
sio_set(up, TXX9_SIFCR,
TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
/* clear reset */
sio_mask(up, TXX9_SIFCR,
TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
/* Disable RX/TX */
sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
free_irq(up->port.irq, up);
}
static void
serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
unsigned int cval, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
/*
* We don't support modem control lines.
*/
termios->c_cflag &= ~(HUPCL | CMSPAR);
termios->c_cflag |= CLOCAL;
cval = sio_in(up, TXX9_SILCR);
/* byte size and parity */
cval &= ~TXX9_SILCR_UMODE_MASK;
switch (termios->c_cflag & CSIZE) {
case CS7:
cval |= TXX9_SILCR_UMODE_7BIT;
break;
default:
case CS5: /* not supported */
case CS6: /* not supported */
case CS8:
cval |= TXX9_SILCR_UMODE_8BIT;
break;
}
cval &= ~TXX9_SILCR_USBL_MASK;
if (termios->c_cflag & CSTOPB)
cval |= TXX9_SILCR_USBL_2BIT;
else
cval |= TXX9_SILCR_USBL_1BIT;
cval &= ~(TXX9_SILCR_UPEN | TXX9_SILCR_UEPS);
if (termios->c_cflag & PARENB)
cval |= TXX9_SILCR_UPEN;
if (!(termios->c_cflag & PARODD))
cval |= TXX9_SILCR_UEPS;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16/2);
quot = uart_get_divisor(port, baud);
/* Set up FIFOs */
/* TX Int by FIFO Empty, RX Int by Receiving 1 char. */
fcr = TXX9_SIFCR_TDIL_MAX | TXX9_SIFCR_RDIL_1;
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
spin_lock_irqsave(&up->port.lock, flags);
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
up->port.read_status_mask = TXX9_SIDISR_UOER |
TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
if (termios->c_iflag & (BRKINT | PARMRK))
up->port.read_status_mask |= TXX9_SIDISR_UBRK;
/*
* Characters to ignore
*/
up->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= TXX9_SIDISR_UPER | TXX9_SIDISR_UFER;
if (termios->c_iflag & IGNBRK) {
up->port.ignore_status_mask |= TXX9_SIDISR_UBRK;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= TXX9_SIDISR_UOER;
}
/*
* ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
up->port.ignore_status_mask |= TXX9_SIDISR_RDIS;
/* CTS flow control flag */
if ((termios->c_cflag & CRTSCTS) &&
(up->port.flags & UPF_TXX9_HAVE_CTS_LINE)) {
sio_set(up, TXX9_SIFLCR,
TXX9_SIFLCR_RCS | TXX9_SIFLCR_TES);
} else {
sio_mask(up, TXX9_SIFLCR,
TXX9_SIFLCR_RCS | TXX9_SIFLCR_TES);
}
sio_out(up, TXX9_SILCR, cval);
sio_quot_set(up, quot);
sio_out(up, TXX9_SIFCR, fcr);
serial_txx9_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static void
serial_txx9_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
/*
* If oldstate was -1 this is called from
* uart_configure_port(). In this case do not initialize the
* port now, because the port was already initialized (for
* non-console port) or should not be initialized here (for
* console port). If we initialized the port here we lose
* serial console settings.
*/
if (state == 0 && oldstate != -1)
serial_txx9_initialize(port);
}
static int serial_txx9_request_resource(struct uart_txx9_port *up)
{
unsigned int size = TXX9_REGION_SIZE;
int ret = 0;
switch (up->port.iotype) {
default:
if (!up->port.mapbase)
break;
if (!request_mem_region(up->port.mapbase, size, "serial_txx9")) {
ret = -EBUSY;
break;
}
if (up->port.flags & UPF_IOREMAP) {
up->port.membase = ioremap(up->port.mapbase, size);
if (!up->port.membase) {
release_mem_region(up->port.mapbase, size);
ret = -ENOMEM;
}
}
break;
case UPIO_PORT:
if (!request_region(up->port.iobase, size, "serial_txx9"))
ret = -EBUSY;
break;
}
return ret;
}
static void serial_txx9_release_resource(struct uart_txx9_port *up)
{
unsigned int size = TXX9_REGION_SIZE;
switch (up->port.iotype) {
default:
if (!up->port.mapbase)
break;
if (up->port.flags & UPF_IOREMAP) {
iounmap(up->port.membase);
up->port.membase = NULL;
}
release_mem_region(up->port.mapbase, size);
break;
case UPIO_PORT:
release_region(up->port.iobase, size);
break;
}
}
static void serial_txx9_release_port(struct uart_port *port)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
serial_txx9_release_resource(up);
}
static int serial_txx9_request_port(struct uart_port *port)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
return serial_txx9_request_resource(up);
}
static void serial_txx9_config_port(struct uart_port *port, int uflags)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
int ret;
/*
* Find the region that we can probe for. This in turn
* tells us whether we can probe for the type of port.
*/
ret = serial_txx9_request_resource(up);
if (ret < 0)
return;
port->type = PORT_TXX9;
up->port.fifosize = TXX9_SIO_TX_FIFO;
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
if (up->port.line == up->port.cons->index)
return;
#endif
serial_txx9_initialize(port);
}
static const char *
serial_txx9_type(struct uart_port *port)
{
return "txx9";
}
static struct uart_ops serial_txx9_pops = {
.tx_empty = serial_txx9_tx_empty,
.set_mctrl = serial_txx9_set_mctrl,
.get_mctrl = serial_txx9_get_mctrl,
.stop_tx = serial_txx9_stop_tx,
.start_tx = serial_txx9_start_tx,
.stop_rx = serial_txx9_stop_rx,
.enable_ms = serial_txx9_enable_ms,
.break_ctl = serial_txx9_break_ctl,
.startup = serial_txx9_startup,
.shutdown = serial_txx9_shutdown,
.set_termios = serial_txx9_set_termios,
.pm = serial_txx9_pm,
.type = serial_txx9_type,
.release_port = serial_txx9_release_port,
.request_port = serial_txx9_request_port,
.config_port = serial_txx9_config_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = serial_txx9_get_poll_char,
.poll_put_char = serial_txx9_put_poll_char,
#endif
};
static struct uart_txx9_port serial_txx9_ports[UART_NR];
static void __init serial_txx9_register_ports(struct uart_driver *drv,
struct device *dev)
{
int i;
for (i = 0; i < UART_NR; i++) {
struct uart_txx9_port *up = &serial_txx9_ports[i];
up->port.line = i;
up->port.ops = &serial_txx9_pops;
up->port.dev = dev;
if (up->port.iobase || up->port.mapbase)
uart_add_one_port(drv, &up->port);
}
}
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
static void serial_txx9_console_putchar(struct uart_port *port, int ch)
{
struct uart_txx9_port *up = to_uart_txx9_port(port);
wait_for_xmitr(up);
sio_out(up, TXX9_SITFIFO, ch);
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
* The console_lock must be held when we get here.
*/
static void
serial_txx9_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_txx9_port *up = &serial_txx9_ports[co->index];
unsigned int ier, flcr;
/*
* First save the IER then disable the interrupts
*/
ier = sio_in(up, TXX9_SIDICR);
sio_out(up, TXX9_SIDICR, 0);
/*
* Disable flow-control if enabled (and unnecessary)
*/
flcr = sio_in(up, TXX9_SIFLCR);
if (!(up->port.flags & UPF_CONS_FLOW) && (flcr & TXX9_SIFLCR_TES))
sio_out(up, TXX9_SIFLCR, flcr & ~TXX9_SIFLCR_TES);
uart_console_write(&up->port, s, count, serial_txx9_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
sio_out(up, TXX9_SIFLCR, flcr);
sio_out(up, TXX9_SIDICR, ier);
}
static int __init serial_txx9_console_setup(struct console *co, char *options)
{
struct uart_port *port;
struct uart_txx9_port *up;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
if (co->index >= UART_NR)
co->index = 0;
up = &serial_txx9_ports[co->index];
port = &up->port;
if (!port->ops)
return -ENODEV;
serial_txx9_initialize(&up->port);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver serial_txx9_reg;
static struct console serial_txx9_console = {
.name = TXX9_TTY_NAME,
.write = serial_txx9_console_write,
.device = uart_console_device,
.setup = serial_txx9_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &serial_txx9_reg,
};
static int __init serial_txx9_console_init(void)
{
register_console(&serial_txx9_console);
return 0;
}
console_initcall(serial_txx9_console_init);
#define SERIAL_TXX9_CONSOLE &serial_txx9_console
#else
#define SERIAL_TXX9_CONSOLE NULL
#endif
static struct uart_driver serial_txx9_reg = {
.owner = THIS_MODULE,
.driver_name = "serial_txx9",
.dev_name = TXX9_TTY_NAME,
.major = TXX9_TTY_MAJOR,
.minor = TXX9_TTY_MINOR_START,
.nr = UART_NR,
.cons = SERIAL_TXX9_CONSOLE,
};
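/*
* Pre-register a port from early board setup code, before the driver
* proper binds; ports filled in here are picked up later by
* serial_txx9_register_ports().
*/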
int __init early_serial_txx9_setup(struct uart_port *port)
{
if (port->line >= ARRAY_SIZE(serial_txx9_ports))
return -ENODEV;
serial_txx9_ports[port->line].port = *port;
serial_txx9_ports[port->line].port.ops = &serial_txx9_pops;
serial_txx9_ports[port->line].port.flags |=
UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
return 0;
}
static DEFINE_MUTEX(serial_txx9_mutex);
/**
* serial_txx9_register_port - register a serial port
* @port: serial port template
*
* Configure the serial port specified by the request.
*
* The port is then probed and if necessary the IRQ is autodetected.
* If this fails an error is returned.
*
* On success the port is ready to use and the line number is returned.
*/
static int __devinit serial_txx9_register_port(struct uart_port *port)
{
int i;
struct uart_txx9_port *uart;
int ret = -ENOSPC;
mutex_lock(&serial_txx9_mutex);
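/*
* If a port matching this request is already registered, remove it
* so it can be re-added below with the new settings.
*/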
for (i = 0; i < UART_NR; i++) {
uart = &serial_txx9_ports[i];
if (uart_match_port(&uart->port, port)) {
uart_remove_one_port(&serial_txx9_reg, &uart->port);
break;
}
}
if (i == UART_NR) {
/* Find unused port */
for (i = 0; i < UART_NR; i++) {
uart = &serial_txx9_ports[i];
if (!(uart->port.iobase || uart->port.mapbase))
break;
}
}
if (i < UART_NR) {
uart->port.iobase = port->iobase;
uart->port.membase = port->membase;
uart->port.irq = port->irq;
uart->port.uartclk = port->uartclk;
uart->port.iotype = port->iotype;
uart->port.flags = port->flags
| UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
uart->port.mapbase = port->mapbase;
if (port->dev)
uart->port.dev = port->dev;
ret = uart_add_one_port(&serial_txx9_reg, &uart->port);
if (ret == 0)
ret = uart->port.line;
}
mutex_unlock(&serial_txx9_mutex);
return ret;
}
/**
* serial_txx9_unregister_port - remove a txx9 serial port at runtime
* @line: serial line number
*
* Remove one serial port. This may not be called from interrupt
* context. We hand the port back to our control.
*/
static void __devexit serial_txx9_unregister_port(int line)
{
struct uart_txx9_port *uart = &serial_txx9_ports[line];
mutex_lock(&serial_txx9_mutex);
uart_remove_one_port(&serial_txx9_reg, &uart->port);
uart->port.flags = 0;
uart->port.type = PORT_UNKNOWN;
uart->port.iobase = 0;
uart->port.mapbase = 0;
uart->port.membase = NULL;
uart->port.dev = NULL;
mutex_unlock(&serial_txx9_mutex);
}
/*
* Register a set of serial devices attached to a platform device.
*/
static int __devinit serial_txx9_probe(struct platform_device *dev)
{
struct uart_port *p = dev->dev.platform_data;
struct uart_port port;
int ret, i;
memset(&port, 0, sizeof(struct uart_port));
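/* The platform data is an array of port templates terminated by an
* entry whose uartclk is zero. */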
for (i = 0; p && p->uartclk != 0; p++, i++) {
port.iobase = p->iobase;
port.membase = p->membase;
port.irq = p->irq;
port.uartclk = p->uartclk;
port.iotype = p->iotype;
port.flags = p->flags;
port.mapbase = p->mapbase;
port.dev = &dev->dev;
ret = serial_txx9_register_port(&port);
if (ret < 0) {
dev_err(&dev->dev, "unable to register port at index %d "
"(IO%lx MEM%llx IRQ%d): %d\n", i,
p->iobase, (unsigned long long)p->mapbase,
p->irq, ret);
}
}
return 0;
}
/*
* Remove serial ports registered against a platform device.
*/
static int __devexit serial_txx9_remove(struct platform_device *dev)
{
int i;
for (i = 0; i < UART_NR; i++) {
struct uart_txx9_port *up = &serial_txx9_ports[i];
if (up->port.dev == &dev->dev)
serial_txx9_unregister_port(i);
}
return 0;
}
#ifdef CONFIG_PM
static int serial_txx9_suspend(struct platform_device *dev, pm_message_t state)
{
int i;
for (i = 0; i < UART_NR; i++) {
struct uart_txx9_port *up = &serial_txx9_ports[i];
if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
uart_suspend_port(&serial_txx9_reg, &up->port);
}
return 0;
}
static int serial_txx9_resume(struct platform_device *dev)
{
int i;
for (i = 0; i < UART_NR; i++) {
struct uart_txx9_port *up = &serial_txx9_ports[i];
if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
uart_resume_port(&serial_txx9_reg, &up->port);
}
return 0;
}
#endif
static struct platform_driver serial_txx9_plat_driver = {
.probe = serial_txx9_probe,
.remove = __devexit_p(serial_txx9_remove),
#ifdef CONFIG_PM
.suspend = serial_txx9_suspend,
.resume = serial_txx9_resume,
#endif
.driver = {
.name = "serial_txx9",
.owner = THIS_MODULE,
},
};
#ifdef ENABLE_SERIAL_TXX9_PCI
/*
* Probe one serial board. Unfortunately, there is no rhyme nor reason
* to the arrangement of serial ports on a PCI card.
*/
static int __devinit
pciserial_txx9_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct uart_port port;
int line;
int rc;
rc = pci_enable_device(dev);
if (rc)
return rc;
memset(&port, 0, sizeof(port));
port.ops = &serial_txx9_pops;
port.flags |= UPF_TXX9_HAVE_CTS_LINE;
port.uartclk = 66670000;
port.irq = dev->irq;
port.iotype = UPIO_PORT;
port.iobase = pci_resource_start(dev, 1);
port.dev = &dev->dev;
line = serial_txx9_register_port(&port);
if (line < 0) {
printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), line);
pci_disable_device(dev);
return line;
}
pci_set_drvdata(dev, &serial_txx9_ports[line]);
return 0;
}
static void __devexit pciserial_txx9_remove_one(struct pci_dev *dev)
{
struct uart_txx9_port *up = pci_get_drvdata(dev);
pci_set_drvdata(dev, NULL);
if (up) {
serial_txx9_unregister_port(up->port.line);
pci_disable_device(dev);
}
}
#ifdef CONFIG_PM
static int pciserial_txx9_suspend_one(struct pci_dev *dev, pm_message_t state)
{
struct uart_txx9_port *up = pci_get_drvdata(dev);
if (up)
uart_suspend_port(&serial_txx9_reg, &up->port);
pci_save_state(dev);
pci_set_power_state(dev, pci_choose_state(dev, state));
return 0;
}
static int pciserial_txx9_resume_one(struct pci_dev *dev)
{
struct uart_txx9_port *up = pci_get_drvdata(dev);
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
if (up)
uart_resume_port(&serial_txx9_reg, &up->port);
return 0;
}
#endif
static const struct pci_device_id serial_txx9_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC) },
{ 0, }
};
static struct pci_driver serial_txx9_pci_driver = {
.name = "serial_txx9",
.probe = pciserial_txx9_init_one,
.remove = __devexit_p(pciserial_txx9_remove_one),
#ifdef CONFIG_PM
.suspend = pciserial_txx9_suspend_one,
.resume = pciserial_txx9_resume_one,
#endif
.id_table = serial_txx9_pci_tbl,
};
MODULE_DEVICE_TABLE(pci, serial_txx9_pci_tbl);
#endif /* ENABLE_SERIAL_TXX9_PCI */
static struct platform_device *serial_txx9_plat_devs;
static int __init serial_txx9_init(void)
{
int ret;
printk(KERN_INFO "%s version %s\n", serial_name, serial_version);
ret = uart_register_driver(&serial_txx9_reg);
if (ret)
goto out;
serial_txx9_plat_devs = platform_device_alloc("serial_txx9", -1);
if (!serial_txx9_plat_devs) {
ret = -ENOMEM;
goto unreg_uart_drv;
}
ret = platform_device_add(serial_txx9_plat_devs);
if (ret)
goto put_dev;
serial_txx9_register_ports(&serial_txx9_reg,
&serial_txx9_plat_devs->dev);
ret = platform_driver_register(&serial_txx9_plat_driver);
if (ret)
goto del_dev;
#ifdef ENABLE_SERIAL_TXX9_PCI
ret = pci_register_driver(&serial_txx9_pci_driver);
#endif
if (ret == 0)
goto out;
del_dev:
platform_device_del(serial_txx9_plat_devs);
put_dev:
platform_device_put(serial_txx9_plat_devs);
unreg_uart_drv:
uart_unregister_driver(&serial_txx9_reg);
out:
return ret;
}
static void __exit serial_txx9_exit(void)
{
int i;
#ifdef ENABLE_SERIAL_TXX9_PCI
pci_unregister_driver(&serial_txx9_pci_driver);
#endif
platform_driver_unregister(&serial_txx9_plat_driver);
platform_device_unregister(serial_txx9_plat_devs);
for (i = 0; i < UART_NR; i++) {
struct uart_txx9_port *up = &serial_txx9_ports[i];
if (up->port.iobase || up->port.mapbase)
uart_remove_one_port(&serial_txx9_reg, &up->port);
}
uart_unregister_driver(&serial_txx9_reg);
}
module_init(serial_txx9_init);
module_exit(serial_txx9_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TX39/49 serial driver");
MODULE_ALIAS_CHARDEV_MAJOR(TXX9_TTY_MAJOR);
| gpl-2.0 |
NormandyEGY/android_kernel_nokia_normandy | drivers/gpio/gpio-cs5535.c | 5075 | 9788 | /*
* AMD CS5535/CS5536 GPIO driver
* Copyright (C) 2006 Advanced Micro Devices, Inc.
* Copyright (C) 2007-2009 Andres Salomon <dilinger@collabora.co.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/cs5535.h>
#include <asm/msr.h>
#define DRV_NAME "cs5535-gpio"
/*
* Some GPIO pins
* 31-29,23 : reserved (always mask out)
* 28 : Power Button
* 26 : PME#
* 22-16 : LPC
* 14,15 : SMBus
* 9,8 : UART1
* 7 : PCI INTB
* 3,4 : UART2/DDC
* 2 : IDE_IRQ0
* 1 : AC_BEEP
* 0 : PCI INTA
*
* If a mask was not specified, allow all except
* reserved and Power Button
*/
#define GPIO_DEFAULT_MASK 0x0F7FFFFF
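/* 0x0F7FFFFF clears bits 31-29 and 23 (reserved) plus bit 28 (Power Button) */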
static ulong mask = GPIO_DEFAULT_MASK;
module_param_named(mask, mask, ulong, 0444);
MODULE_PARM_DESC(mask, "GPIO channel mask.");
static struct cs5535_gpio_chip {
struct gpio_chip chip;
resource_size_t base;
struct platform_device *pdev;
spinlock_t lock;
} cs5535_gpio_chip;
/*
* The CS5535/CS5536 GPIOs support a number of extra features not defined
* by the gpio_chip API, so these are exported. For a full list of the
* registers, see include/linux/cs5535.h.
*/
static void errata_outl(struct cs5535_gpio_chip *chip, u32 val,
unsigned int reg)
{
unsigned long addr = chip->base + 0x80 + reg;
/*
* According to the CS5536 errata (#36), after suspend
* a write to the high bank GPIO register will clear all
* non-selected bits; the recommended workaround is a
* read-modify-write operation.
*
* Don't apply this errata to the edge status GPIOs, as writing
* to their lower bits will clear them.
*/
if (reg != GPIO_POSITIVE_EDGE_STS && reg != GPIO_NEGATIVE_EDGE_STS) {
if (val & 0xffff)
val |= (inl(addr) & 0xffff); /* ignore the high bits */
else
val |= (inl(addr) ^ (val >> 16));
}
outl(val, addr);
}
static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
unsigned int reg)
{
if (offset < 16)
/* low bank register */
outl(1 << offset, chip->base + reg);
else
/* high bank register */
errata_outl(chip, 1 << (offset - 16), reg);
}
void cs5535_gpio_set(unsigned offset, unsigned int reg)
{
struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
__cs5535_gpio_set(chip, offset, reg);
spin_unlock_irqrestore(&chip->lock, flags);
}
EXPORT_SYMBOL_GPL(cs5535_gpio_set);
static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset,
unsigned int reg)
{
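/*
* The GPIO registers use a set/clear split: the low 16 bits of a
* write set features, the high 16 bits clear them, hence the +16
* shift for the low bank here.
*/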
if (offset < 16)
/* low bank register */
outl(1 << (offset + 16), chip->base + reg);
else
/* high bank register */
errata_outl(chip, 1 << offset, reg);
}
void cs5535_gpio_clear(unsigned offset, unsigned int reg)
{
struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
__cs5535_gpio_clear(chip, offset, reg);
spin_unlock_irqrestore(&chip->lock, flags);
}
EXPORT_SYMBOL_GPL(cs5535_gpio_clear);
int cs5535_gpio_isset(unsigned offset, unsigned int reg)
{
struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
unsigned long flags;
long val;
spin_lock_irqsave(&chip->lock, flags);
if (offset < 16)
/* low bank register */
val = inl(chip->base + reg);
else {
/* high bank register */
val = inl(chip->base + 0x80 + reg);
offset -= 16;
}
spin_unlock_irqrestore(&chip->lock, flags);
return (val & (1 << offset)) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(cs5535_gpio_isset);
int cs5535_gpio_set_irq(unsigned group, unsigned irq)
{
uint32_t lo, hi;
if (group > 7 || irq > 15)
return -EINVAL;
rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
lo &= ~(0xF << (group * 4));
lo |= (irq & 0xF) << (group * 4);
wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
return 0;
}
EXPORT_SYMBOL_GPL(cs5535_gpio_set_irq);
void cs5535_gpio_setup_event(unsigned offset, int pair, int pme)
{
struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
uint32_t shift = (offset % 8) * 4;
unsigned long flags;
uint32_t val;
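/*
* Each GPIO_MAP_[XYZW] register holds eight 4-bit event nibbles:
* the mapper register is selected by offset / 8 and the nibble
* within it by offset % 8.
*/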
if (offset >= 24)
offset = GPIO_MAP_W;
else if (offset >= 16)
offset = GPIO_MAP_Z;
else if (offset >= 8)
offset = GPIO_MAP_Y;
else
offset = GPIO_MAP_X;
spin_lock_irqsave(&chip->lock, flags);
val = inl(chip->base + offset);
/* Clear whatever was there before */
val &= ~(0xF << shift);
/* Set the new value */
val |= ((pair & 7) << shift);
/* Set the PME bit if this is a PME event */
if (pme)
val |= (1 << (shift + 3));
outl(val, chip->base + offset);
spin_unlock_irqrestore(&chip->lock, flags);
}
EXPORT_SYMBOL_GPL(cs5535_gpio_setup_event);
/*
* Generic gpio_chip API support.
*/
static int chip_gpio_request(struct gpio_chip *c, unsigned offset)
{
struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
/* check if this pin is available */
if ((mask & (1 << offset)) == 0) {
dev_info(&chip->pdev->dev,
"pin %u is not available (check mask)\n", offset);
spin_unlock_irqrestore(&chip->lock, flags);
return -EINVAL;
}
/* disable output aux 1 & 2 on this pin */
__cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX1);
__cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX2);
/* disable input aux 1 on this pin */
__cs5535_gpio_clear(chip, offset, GPIO_INPUT_AUX1);
spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
static int chip_gpio_get(struct gpio_chip *chip, unsigned offset)
{
return cs5535_gpio_isset(offset, GPIO_READ_BACK);
}
static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
if (val)
cs5535_gpio_set(offset, GPIO_OUTPUT_VAL);
else
cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL);
}
static int chip_direction_input(struct gpio_chip *c, unsigned offset)
{
struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
__cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE);
__cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_ENABLE);
spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
{
struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
__cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE);
__cs5535_gpio_set(chip, offset, GPIO_OUTPUT_ENABLE);
if (val)
__cs5535_gpio_set(chip, offset, GPIO_OUTPUT_VAL);
else
__cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_VAL);
spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
static const char * const cs5535_gpio_names[] = {
"GPIO0", "GPIO1", "GPIO2", "GPIO3",
"GPIO4", "GPIO5", "GPIO6", "GPIO7",
"GPIO8", "GPIO9", "GPIO10", "GPIO11",
"GPIO12", "GPIO13", "GPIO14", "GPIO15",
"GPIO16", "GPIO17", "GPIO18", "GPIO19",
"GPIO20", "GPIO21", "GPIO22", NULL,
"GPIO24", "GPIO25", "GPIO26", "GPIO27",
"GPIO28", NULL, NULL, NULL,
};
static struct cs5535_gpio_chip cs5535_gpio_chip = {
.chip = {
.owner = THIS_MODULE,
.label = DRV_NAME,
.base = 0,
.ngpio = 32,
.names = cs5535_gpio_names,
.request = chip_gpio_request,
.get = chip_gpio_get,
.set = chip_gpio_set,
.direction_input = chip_direction_input,
.direction_output = chip_direction_output,
},
};
static int __devinit cs5535_gpio_probe(struct platform_device *pdev)
{
struct resource *res;
int err = -EIO;
ulong mask_orig = mask;
/* There are two ways to get the GPIO base address; one is by
* fetching it from MSR_LBAR_GPIO, the other is by reading the
* PCI BAR info. The latter method is easier (especially across
* different architectures), so we'll stick with that for now. If
* it turns out to be unreliable in the face of crappy BIOSes, we
* can always go back to using MSRs.. */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
dev_err(&pdev->dev, "can't fetch device resource info\n");
goto done;
}
if (!request_region(res->start, resource_size(res), pdev->name)) {
dev_err(&pdev->dev, "can't request region\n");
goto done;
}
/* set up the driver-specific struct */
cs5535_gpio_chip.base = res->start;
cs5535_gpio_chip.pdev = pdev;
spin_lock_init(&cs5535_gpio_chip.lock);
dev_info(&pdev->dev, "reserved resource region %pR\n", res);
/* mask out reserved pins */
mask &= 0x1F7FFFFF;
/* do not allow pin 28, Power Button, as it needs special handling
* in the PMC (note 12, p. 48) */
mask &= ~(1 << 28);
if (mask_orig != mask)
dev_info(&pdev->dev, "mask changed from 0x%08lX to 0x%08lX\n",
mask_orig, mask);
/* finally, register with the generic GPIO API */
err = gpiochip_add(&cs5535_gpio_chip.chip);
if (err)
goto release_region;
return 0;
release_region:
release_region(res->start, resource_size(res));
done:
return err;
}
static int __devexit cs5535_gpio_remove(struct platform_device *pdev)
{
struct resource *r;
int err;
err = gpiochip_remove(&cs5535_gpio_chip.chip);
if (err) {
/* uhh? */
dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
return err;
}
r = platform_get_resource(pdev, IORESOURCE_IO, 0);
release_region(r->start, resource_size(r));
return 0;
}
static struct platform_driver cs5535_gpio_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = cs5535_gpio_probe,
.remove = __devexit_p(cs5535_gpio_remove),
};
module_platform_driver(cs5535_gpio_driver);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
GameTheory-/android_kernel_d505 | drivers/cpufreq/maple-cpufreq.c | 5587 | 8495 | /*
* Copyright (C) 2011 Dmitry Eremin-Solenikov
* Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This driver adds basic cpufreq support for 970FX based Maple and
* Apache boards; it is derived from the PowerMac G5 cpufreq driver.
*/
#undef DEBUG
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/of.h>
#define DBG(fmt...) pr_debug(fmt)
/* see 970FX user manual */
#define SCOM_PCR 0x0aa001 /* PCR scom addr */
#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
#define PCR_SPEED_SHIFT 17
#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
#define SCOM_PSR 0x408001 /* PSR scom addr */
/* warning: PSR is a 64 bits register */
#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
#define PSR_CUR_SPEED_SHIFT (56)
/*
* The G5 only supports two frequencies (Quarter speed is not supported)
*/
#define CPUFREQ_HIGH 0
#define CPUFREQ_LOW 1
static struct cpufreq_frequency_table maple_cpu_freqs[] = {
{CPUFREQ_HIGH, 0},
{CPUFREQ_LOW, 0},
{0, CPUFREQ_TABLE_END},
};
static struct freq_attr *maple_cpu_freqs_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
/* Power mode data is an array of the 32-bit PCR values to use for
* the various frequencies, retrieved from the device-tree
*/
static int maple_pmode_cur;
static DEFINE_MUTEX(maple_switch_mutex);
static const u32 *maple_pmode_data;
static int maple_pmode_max;
/*
* SCOM based frequency switching for 970FX rev3
*/
static int maple_scom_switch_freq(int speed_mode)
{
unsigned long flags;
int to;
local_irq_save(flags);
/* Clear PCR high */
scom970_write(SCOM_PCR, 0);
/* Clear PCR low */
scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
/* Set PCR low */
scom970_write(SCOM_PCR, PCR_HILO_SELECT |
maple_pmode_data[speed_mode]);
/* Wait for completion */
for (to = 0; to < 10; to++) {
unsigned long psr = scom970_read(SCOM_PSR);
if ((psr & PSR_CMD_RECEIVED) == 0 &&
(((psr >> PSR_CUR_SPEED_SHIFT) ^
(maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
== 0)
break;
if (psr & PSR_CMD_COMPLETED)
break;
udelay(100);
}
local_irq_restore(flags);
maple_pmode_cur = speed_mode;
ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul;
return 0;
}
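/*
* Read the current speed field back from the PSR and map it to an
* index into the power-mode table.
*/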
static int maple_scom_query_freq(void)
{
unsigned long psr = scom970_read(SCOM_PSR);
int i;
for (i = 0; i <= maple_pmode_max; i++)
if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
(maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
break;
return i;
}
/*
* Common interface to the cpufreq core
*/
static int maple_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, maple_cpu_freqs);
}
static int maple_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
unsigned int newstate = 0;
struct cpufreq_freqs freqs;
int rc;
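/*
* Standard cpufreq transition: look up the table entry for the
* requested frequency, notify PRECHANGE, switch via SCOM, then
* notify POSTCHANGE.
*/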
if (cpufreq_frequency_table_target(policy, maple_cpu_freqs,
target_freq, relation, &newstate))
return -EINVAL;
if (maple_pmode_cur == newstate)
return 0;
mutex_lock(&maple_switch_mutex);
freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
freqs.new = maple_cpu_freqs[newstate].frequency;
freqs.cpu = 0;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
rc = maple_scom_switch_freq(newstate);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&maple_switch_mutex);
return rc;
}
static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
{
return maple_cpu_freqs[maple_pmode_cur].frequency;
}
static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
policy->cpuinfo.transition_latency = 12000;
policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
/* secondary CPUs are tied to the primary one by the cpufreq core:
* in each secondary policy we tell it that all CPUs must actually
* share one policy together. */
cpumask_copy(policy->cpus, cpu_online_mask);
cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
maple_cpu_freqs);
}
static struct cpufreq_driver maple_cpufreq_driver = {
.name = "maple",
.owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS,
.init = maple_cpufreq_cpu_init,
.verify = maple_cpufreq_verify,
.target = maple_cpufreq_target,
.get = maple_cpufreq_get_speed,
.attr = maple_cpu_freqs_attr,
};
static int __init maple_cpufreq_init(void)
{
struct device_node *cpus;
struct device_node *cpunode;
unsigned int psize;
unsigned long max_freq;
const u32 *valp;
u32 pvr_hi;
int rc = -ENODEV;
/*
* Behave here like powermac driver which checks machine compatibility
* to ease merging of two drivers in future.
*/
if (!of_machine_is_compatible("Momentum,Maple") &&
!of_machine_is_compatible("Momentum,Apache"))
return 0;
cpus = of_find_node_by_path("/cpus");
if (cpus == NULL) {
DBG("No /cpus node !\n");
return -ENODEV;
}
/* Get first CPU node */
for (cpunode = NULL;
(cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
const u32 *reg = of_get_property(cpunode, "reg", NULL);
if (reg == NULL || (*reg) != 0)
continue;
if (!strcmp(cpunode->type, "cpu"))
break;
}
if (cpunode == NULL) {
printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
goto bail_cpus;
}
/* Check 970FX for now */
/* we actually don't care on which CPU to access PVR */
pvr_hi = PVR_VER(mfspr(SPRN_PVR));
if (pvr_hi != 0x3c && pvr_hi != 0x44) {
printk(KERN_ERR "cpufreq: Unsupported CPU version (%x)\n",
pvr_hi);
goto bail_noprops;
}
/* Look for the powertune data in the device-tree */
/*
* On Maple this property is provided by PIBS in dual-processor config,
* not provided by PIBS in CPU0 config and also not provided by SLOF,
* so YMMV
*/
maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
if (!maple_pmode_data) {
DBG("No power-mode-data !\n");
goto bail_noprops;
}
maple_pmode_max = psize / sizeof(u32) - 1;
/*
* From what I see, clock-frequency is always the maximal frequency.
* The current driver can not slew sysclk yet, so we really only deal
* with powertune steps for now. We also only implement full freq and
* half freq in this version. So far, I haven't yet seen a machine
* supporting anything else.
*/
valp = of_get_property(cpunode, "clock-frequency", NULL);
if (!valp)
return -ENODEV;
max_freq = (*valp)/1000;
maple_cpu_freqs[0].frequency = max_freq;
maple_cpu_freqs[1].frequency = max_freq/2;
/* Force apply current frequency to make sure everything is in
* sync (voltage is right for example). Firmware may leave us with
* a strange setting ...
*/
msleep(10);
maple_pmode_cur = -1;
maple_scom_switch_freq(maple_scom_query_freq());
printk(KERN_INFO "Registering Maple CPU frequency driver\n");
printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
maple_cpu_freqs[1].frequency/1000,
maple_cpu_freqs[0].frequency/1000,
maple_cpu_freqs[maple_pmode_cur].frequency/1000);
rc = cpufreq_register_driver(&maple_cpufreq_driver);
of_node_put(cpunode);
of_node_put(cpus);
return rc;
bail_noprops:
of_node_put(cpunode);
bail_cpus:
of_node_put(cpus);
return rc;
}
module_init(maple_cpufreq_init);
MODULE_LICENSE("GPL");
| gpl-2.0 |
andytimes/kernel-msm8660 | arch/sparc/mm/io-unit.c | 7635 | 7401 | /*
* io-unit.c: IO-UNIT specific routines for memory management.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>
/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif
#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
static void __init iounit_iommu_init(struct platform_device *op)
{
struct iounit_struct *iounit;
iopte_t *xpt, *xptend;
iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
if (!iounit) {
prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
prom_halt();
}
iounit->limit[0] = IOUNIT_BMAP1_START;
iounit->limit[1] = IOUNIT_BMAP2_START;
iounit->limit[2] = IOUNIT_BMAPM_START;
iounit->limit[3] = IOUNIT_BMAPM_END;
iounit->rotor[1] = IOUNIT_BMAP2_START;
iounit->rotor[2] = IOUNIT_BMAPM_START;
xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
if (!xpt) {
prom_printf("SUN4D: Cannot map External Page Table.");
prom_halt();
}
op->dev.archdata.iommu = iounit;
iounit->page_table = xpt;
spin_lock_init(&iounit->lock);
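/* Invalidate every XPT entry so no stale DMA translations remain */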
for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
xpt < xptend;)
iopte_val(*xpt++) = 0;
}
static int __init iounit_init(void)
{
extern void sun4d_init_sbi_irq(void);
struct device_node *dp;
for_each_node_by_name(dp, "sbi") {
struct platform_device *op = of_find_device_by_node(dp);
iounit_iommu_init(op);
of_propagate_archdata(op);
}
sun4d_init_sbi_irq();
return 0;
}
subsys_initcall(iounit_init);
/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
int i, j, k, npages;
unsigned long rotor, scan, limit;
iopte_t iopte;
npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
/* A tiny bit of magic ingredients :) */
switch (npages) {
case 1: i = 0x0231; break;
case 2: i = 0x0132; break;
default: i = 0x0213; break;
}
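/*
* Each nibble of i (least significant first) names the bitmap
* region to try next; i is shifted right by four bits below when a
* region is exhausted.
*/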
IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
next: j = (i & 15);
rotor = iounit->rotor[j - 1];
limit = iounit->limit[j];
scan = rotor;
nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
if (scan + npages > limit) {
if (limit != rotor) {
limit = rotor;
scan = iounit->limit[j - 1];
goto nexti;
}
i >>= 4;
if (!(i & 15))
panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
goto next;
}
for (k = 1, scan++; k < npages; k++)
if (test_bit(scan++, iounit->bmap))
goto nexti;
iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
scan -= npages;
iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
set_bit(scan, iounit->bmap);
iounit->page_table[scan] = iopte;
}
IOD(("%08lx\n", vaddr));
return vaddr;
}
static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long ret, flags;
spin_lock_irqsave(&iounit->lock, flags);
ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
spin_unlock_irqrestore(&iounit->lock, flags);
return ret;
}
static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long flags;
/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
spin_lock_irqsave(&iounit->lock, flags);
while (sz != 0) {
--sz;
sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
sg->dma_length = sg->length;
sg = sg_next(sg);
}
spin_unlock_irqrestore(&iounit->lock, flags);
}
static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long flags;
spin_lock_irqsave(&iounit->lock, flags);
len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
for (len += vaddr; vaddr < len; vaddr++)
clear_bit(vaddr, iounit->bmap);
spin_unlock_irqrestore(&iounit->lock, flags);
}
static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long flags;
unsigned long vaddr, len;
spin_lock_irqsave(&iounit->lock, flags);
while (sz != 0) {
--sz;
len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
for (len += vaddr; vaddr < len; vaddr++)
clear_bit(vaddr, iounit->bmap);
sg = sg_next(sg);
}
spin_unlock_irqrestore(&iounit->lock, flags);
}
#ifdef CONFIG_SBUS
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long page, end;
pgprot_t dvma_prot;
iopte_t *iopte;
*pba = addr;
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
end = PAGE_ALIGN((addr + len));
while(addr < end) {
page = va;
{
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
long i;
pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_offset(pgdp, addr);
ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
iopte = (iopte_t *)(iounit->page_table + i);
*iopte = MKIOPTE(__pa(page));
}
addr += PAGE_SIZE;
va += PAGE_SIZE;
}
flush_cache_all();
flush_tlb_all();
return 0;
}
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
/* XXX Somebody please fill this in */
}
#endif
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
return vaddr;
}
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}
void __init ld_mmu_iounit(void)
{
BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);
#ifdef CONFIG_SBUS
BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
}
| gpl-2.0 |
Pulshen/XKernel | arch/sparc/mm/io-unit.c | 7635 | 7401 | /*
* io-unit.c: IO-UNIT specific routines for memory management.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>
/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif
#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
static void __init iounit_iommu_init(struct platform_device *op)
{
struct iounit_struct *iounit;
iopte_t *xpt, *xptend;
iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
if (!iounit) {
prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
prom_halt();
}
iounit->limit[0] = IOUNIT_BMAP1_START;
iounit->limit[1] = IOUNIT_BMAP2_START;
iounit->limit[2] = IOUNIT_BMAPM_START;
iounit->limit[3] = IOUNIT_BMAPM_END;
iounit->rotor[1] = IOUNIT_BMAP2_START;
iounit->rotor[2] = IOUNIT_BMAPM_START;
xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
if (!xpt) {
prom_printf("SUN4D: Cannot map External Page Table.");
prom_halt();
}
op->dev.archdata.iommu = iounit;
iounit->page_table = xpt;
spin_lock_init(&iounit->lock);
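/* Invalidate every XPT entry so no stale DMA translations remain */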
for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
xpt < xptend;)
iopte_val(*xpt++) = 0;
}
static int __init iounit_init(void)
{
extern void sun4d_init_sbi_irq(void);
struct device_node *dp;
for_each_node_by_name(dp, "sbi") {
struct platform_device *op = of_find_device_by_node(dp);
iounit_iommu_init(op);
of_propagate_archdata(op);
}
sun4d_init_sbi_irq();
return 0;
}
subsys_initcall(iounit_init);
/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
int i, j, k, npages;
unsigned long rotor, scan, limit;
iopte_t iopte;
npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
/* A tiny bit of magic ingredients :) */
switch (npages) {
case 1: i = 0x0231; break;
case 2: i = 0x0132; break;
default: i = 0x0213; break;
}
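/*
* Each nibble of i (least significant first) names the bitmap
* region to try next; i is shifted right by four bits below when a
* region is exhausted.
*/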
IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
next: j = (i & 15);
rotor = iounit->rotor[j - 1];
limit = iounit->limit[j];
scan = rotor;
nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
if (scan + npages > limit) {
if (limit != rotor) {
limit = rotor;
scan = iounit->limit[j - 1];
goto nexti;
}
i >>= 4;
if (!(i & 15))
panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
goto next;
}
for (k = 1, scan++; k < npages; k++)
if (test_bit(scan++, iounit->bmap))
goto nexti;
iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
scan -= npages;
iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
set_bit(scan, iounit->bmap);
iounit->page_table[scan] = iopte;
}
IOD(("%08lx\n", vaddr));
return vaddr;
}
static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long ret, flags;
spin_lock_irqsave(&iounit->lock, flags);
ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
spin_unlock_irqrestore(&iounit->lock, flags);
return ret;
}
static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long flags;
/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
spin_lock_irqsave(&iounit->lock, flags);
while (sz != 0) {
--sz;
sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
sg->dma_length = sg->length;
sg = sg_next(sg);
}
spin_unlock_irqrestore(&iounit->lock, flags);
}
static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long flags;
spin_lock_irqsave(&iounit->lock, flags);
len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
for (len += vaddr; vaddr < len; vaddr++)
clear_bit(vaddr, iounit->bmap);
spin_unlock_irqrestore(&iounit->lock, flags);
}
static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long flags;
unsigned long vaddr, len;
spin_lock_irqsave(&iounit->lock, flags);
while (sz != 0) {
--sz;
len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
for (len += vaddr; vaddr < len; vaddr++)
clear_bit(vaddr, iounit->bmap);
sg = sg_next(sg);
}
spin_unlock_irqrestore(&iounit->lock, flags);
}
#ifdef CONFIG_SBUS
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long page, end;
pgprot_t dvma_prot;
iopte_t *iopte;
*pba = addr;
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
end = PAGE_ALIGN((addr + len));
while(addr < end) {
page = va;
{
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
long i;
pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_offset(pgdp, addr);
ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
iopte = (iopte_t *)(iounit->page_table + i);
*iopte = MKIOPTE(__pa(page));
}
addr += PAGE_SIZE;
va += PAGE_SIZE;
}
flush_cache_all();
flush_tlb_all();
return 0;
}
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
/* XXX Somebody please fill this in */
}
#endif
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
return vaddr;
}
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}
void __init ld_mmu_iounit(void)
{
BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);
#ifdef CONFIG_SBUS
BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
}
| gpl-2.0 |
godmachine81/Acer-A100-JellyBean-Custom-Kernel | drivers/staging/comedi/drivers/dt2817.c | 8147 | 4940 | /*
comedi/drivers/dt2817.c
Hardware driver for Data Translation DT2817
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1998 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: dt2817
Description: Data Translation DT2817
Author: ds
Status: complete
Devices: [Data Translation] DT2817 (dt2817)
A very simple digital I/O card. Four banks of 8 lines, each bank
is configurable for input or output. One wonders why it takes a
50 page manual to describe this thing.
The driver (which, btw, is much less than 50 pages) has 1 subdevice
with 32 channels, configurable in groups of 8.
Configuration options:
[0] - I/O port base base address
*/
#include "../comedidev.h"
#include <linux/ioport.h>
#define DT2817_SIZE 5
#define DT2817_CR 0
#define DT2817_DATA 1
static int dt2817_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int dt2817_detach(struct comedi_device *dev);
static struct comedi_driver driver_dt2817 = {
.driver_name = "dt2817",
.module = THIS_MODULE,
.attach = dt2817_attach,
.detach = dt2817_detach,
};
static int __init driver_dt2817_init_module(void)
{
return comedi_driver_register(&driver_dt2817);
}
static void __exit driver_dt2817_cleanup_module(void)
{
comedi_driver_unregister(&driver_dt2817);
}
module_init(driver_dt2817_init_module);
module_exit(driver_dt2817_cleanup_module);
static int dt2817_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int mask;
int chan;
int oe = 0;
if (insn->n != 1)
return -EINVAL;
chan = CR_CHAN(insn->chanspec);
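/*
* The 32 channels form four banks of 8; each bank has a single
* direction bit in the CR, so configuring any channel switches the
* whole bank it belongs to.
*/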
if (chan < 8)
mask = 0xff;
else if (chan < 16)
mask = 0xff00;
else if (chan < 24)
mask = 0xff0000;
else
mask = 0xff000000;
if (data[0])
s->io_bits |= mask;
else
s->io_bits &= ~mask;
if (s->io_bits & 0x000000ff)
oe |= 0x1;
if (s->io_bits & 0x0000ff00)
oe |= 0x2;
if (s->io_bits & 0x00ff0000)
oe |= 0x4;
if (s->io_bits & 0xff000000)
oe |= 0x8;
outb(oe, dev->iobase + DT2817_CR);
return 1;
}
static int dt2817_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int changed;
/* It's questionable whether it is more important in
* a driver like this to be deterministic or fast.
* We choose fast. */
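/*
* data[0] is the mask of channels to update and data[1] holds the
* new values; only banks whose bits actually changed are written,
* and the current pin state is read back into data[1].
*/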
if (data[0]) {
changed = s->state;
s->state &= ~data[0];
s->state |= (data[0] & data[1]);
changed ^= s->state;
changed &= s->io_bits;
if (changed & 0x000000ff)
outb(s->state & 0xff, dev->iobase + DT2817_DATA + 0);
if (changed & 0x0000ff00)
outb((s->state >> 8) & 0xff,
dev->iobase + DT2817_DATA + 1);
if (changed & 0x00ff0000)
outb((s->state >> 16) & 0xff,
dev->iobase + DT2817_DATA + 2);
if (changed & 0xff000000)
outb((s->state >> 24) & 0xff,
dev->iobase + DT2817_DATA + 3);
}
data[1] = inb(dev->iobase + DT2817_DATA + 0);
data[1] |= (inb(dev->iobase + DT2817_DATA + 1) << 8);
data[1] |= (inb(dev->iobase + DT2817_DATA + 2) << 16);
data[1] |= (inb(dev->iobase + DT2817_DATA + 3) << 24);
return 2;
}
static int dt2817_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
int ret;
struct comedi_subdevice *s;
unsigned long iobase;
iobase = it->options[0];
printk(KERN_INFO "comedi%d: dt2817: 0x%04lx ", dev->minor, iobase);
if (!request_region(iobase, DT2817_SIZE, "dt2817")) {
printk("I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
dev->board_name = "dt2817";
ret = alloc_subdevices(dev, 1);
if (ret < 0)
return ret;
s = dev->subdevices + 0;
s->n_chan = 32;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->range_table = &range_digital;
s->maxdata = 1;
s->insn_bits = dt2817_dio_insn_bits;
s->insn_config = dt2817_dio_insn_config;
s->state = 0;
outb(0, dev->iobase + DT2817_CR);
printk(KERN_INFO "\n");
return 0;
}
static int dt2817_detach(struct comedi_device *dev)
{
printk(KERN_INFO "comedi%d: dt2817: remove\n", dev->minor);
if (dev->iobase)
release_region(dev->iobase, DT2817_SIZE);
return 0;
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Fevax/android_kernel_samsung_universal8890-N | net/rose/rose_route.c | 8659 | 32072 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/netfilter.h>
#include <linux/init.h>
#include <net/rose.h>
#include <linux/seq_file.h>
#include <linux/export.h>
static unsigned int rose_neigh_no = 1;
static struct rose_node *rose_node_list;
static DEFINE_SPINLOCK(rose_node_list_lock);
static struct rose_neigh *rose_neigh_list;
static DEFINE_SPINLOCK(rose_neigh_list_lock);
static struct rose_route *rose_route_list;
static DEFINE_SPINLOCK(rose_route_list_lock);
struct rose_neigh *rose_loopback_neigh;
/*
* Add a new route to a node, and in the process add the node and the
* neighbour if it is new.
*/
static int __must_check rose_add_node(struct rose_route_struct *rose_route,
struct net_device *dev)
{
struct rose_node *rose_node, *rose_tmpn, *rose_tmpp;
struct rose_neigh *rose_neigh;
int i, res = 0;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == rose_route->mask) &&
(rosecmpm(&rose_route->address, &rose_node->address,
rose_route->mask) == 0))
break;
rose_node = rose_node->next;
}
if (rose_node != NULL && rose_node->loopback) {
res = -EINVAL;
goto out;
}
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&rose_route->neighbour,
&rose_neigh->callsign) == 0 &&
rose_neigh->dev == dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
rose_neigh = kmalloc(sizeof(*rose_neigh), GFP_ATOMIC);
if (rose_neigh == NULL) {
res = -ENOMEM;
goto out;
}
rose_neigh->callsign = rose_route->neighbour;
rose_neigh->digipeat = NULL;
rose_neigh->ax25 = NULL;
rose_neigh->dev = dev;
rose_neigh->count = 0;
rose_neigh->use = 0;
rose_neigh->dce_mode = 0;
rose_neigh->loopback = 0;
rose_neigh->number = rose_neigh_no++;
rose_neigh->restarted = 0;
skb_queue_head_init(&rose_neigh->queue);
init_timer(&rose_neigh->ftimer);
init_timer(&rose_neigh->t0timer);
if (rose_route->ndigis != 0) {
rose_neigh->digipeat =
kmalloc(sizeof(ax25_digi), GFP_ATOMIC);
if (rose_neigh->digipeat == NULL) {
kfree(rose_neigh);
res = -ENOMEM;
goto out;
}
rose_neigh->digipeat->ndigi = rose_route->ndigis;
rose_neigh->digipeat->lastrepeat = -1;
for (i = 0; i < rose_route->ndigis; i++) {
rose_neigh->digipeat->calls[i] =
rose_route->digipeaters[i];
rose_neigh->digipeat->repeated[i] = 0;
}
}
rose_neigh->next = rose_neigh_list;
rose_neigh_list = rose_neigh;
}
/*
* This is a new node to be inserted into the list. Find where it needs
* to be inserted into the list, and insert it. We want to be sure
* to order the list in descending order of mask size to ensure that
* later when we are searching this list the first match will be the
* best match.
*/
if (rose_node == NULL) {
rose_tmpn = rose_node_list;
rose_tmpp = NULL;
while (rose_tmpn != NULL) {
if (rose_tmpn->mask > rose_route->mask) {
rose_tmpp = rose_tmpn;
rose_tmpn = rose_tmpn->next;
} else {
break;
}
}
/* create new node */
rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC);
if (rose_node == NULL) {
res = -ENOMEM;
goto out;
}
rose_node->address = rose_route->address;
rose_node->mask = rose_route->mask;
rose_node->count = 1;
rose_node->loopback = 0;
rose_node->neighbour[0] = rose_neigh;
if (rose_tmpn == NULL) {
if (rose_tmpp == NULL) { /* Empty list */
rose_node_list = rose_node;
rose_node->next = NULL;
} else {
rose_tmpp->next = rose_node;
rose_node->next = NULL;
}
} else {
if (rose_tmpp == NULL) { /* 1st node */
rose_node->next = rose_node_list;
rose_node_list = rose_node;
} else {
rose_tmpp->next = rose_node;
rose_node->next = rose_tmpn;
}
}
rose_neigh->count++;
goto out;
}
/* We have space, slot it in. A node keeps at most three neighbour
* entries; if all three slots are taken the new route is silently
* ignored. */
if (rose_node->count < 3) {
rose_node->neighbour[rose_node->count] = rose_neigh;
rose_node->count++;
rose_neigh->count++;
}
out:
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return res;
}
/*
* Caller is holding rose_node_list_lock.
*/
static void rose_remove_node(struct rose_node *rose_node)
{
struct rose_node *s;
if ((s = rose_node_list) == rose_node) {
rose_node_list = rose_node->next;
kfree(rose_node);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_node) {
s->next = rose_node->next;
kfree(rose_node);
return;
}
s = s->next;
}
}
/*
* Caller is holding rose_neigh_list_lock.
*/
static void rose_remove_neigh(struct rose_neigh *rose_neigh)
{
struct rose_neigh *s;
rose_stop_ftimer(rose_neigh);
rose_stop_t0timer(rose_neigh);
skb_queue_purge(&rose_neigh->queue);
if ((s = rose_neigh_list) == rose_neigh) {
rose_neigh_list = rose_neigh->next;
if (rose_neigh->ax25)
ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_neigh) {
s->next = rose_neigh->next;
if (rose_neigh->ax25)
ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
}
s = s->next;
}
}
/*
* Caller is holding rose_route_list_lock.
*/
static void rose_remove_route(struct rose_route *rose_route)
{
struct rose_route *s;
if (rose_route->neigh1 != NULL)
rose_route->neigh1->use--;
if (rose_route->neigh2 != NULL)
rose_route->neigh2->use--;
if ((s = rose_route_list) == rose_route) {
rose_route_list = rose_route->next;
kfree(rose_route);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_route) {
s->next = rose_route->next;
kfree(rose_route);
return;
}
s = s->next;
}
}
/*
* "Delete" a node. Strictly speaking remove a route to a node. The node
* is only deleted if no routes are left to it.
*/
static int rose_del_node(struct rose_route_struct *rose_route,
struct net_device *dev)
{
struct rose_node *rose_node;
struct rose_neigh *rose_neigh;
int i, err = 0;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == rose_route->mask) &&
(rosecmpm(&rose_route->address, &rose_node->address,
rose_route->mask) == 0))
break;
rose_node = rose_node->next;
}
if (rose_node == NULL || rose_node->loopback) {
err = -EINVAL;
goto out;
}
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&rose_route->neighbour,
&rose_neigh->callsign) == 0 &&
rose_neigh->dev == dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
err = -EINVAL;
goto out;
}
for (i = 0; i < rose_node->count; i++) {
if (rose_node->neighbour[i] == rose_neigh) {
rose_neigh->count--;
if (rose_neigh->count == 0 && rose_neigh->use == 0)
rose_remove_neigh(rose_neigh);
rose_node->count--;
if (rose_node->count == 0) {
rose_remove_node(rose_node);
} else {
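/*
* Close the gap in the neighbour array; the switch cases below
* deliberately fall through.
*/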
switch (i) {
case 0:
rose_node->neighbour[0] =
rose_node->neighbour[1];
case 1:
rose_node->neighbour[1] =
rose_node->neighbour[2];
case 2:
break;
}
}
goto out;
}
}
err = -EINVAL;
out:
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return err;
}
/*
* Add the loopback neighbour.
*/
void rose_add_loopback_neigh(void)
{
struct rose_neigh *sn;
rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL);
if (!rose_loopback_neigh)
return;
sn = rose_loopback_neigh;
sn->callsign = null_ax25_address;
sn->digipeat = NULL;
sn->ax25 = NULL;
sn->dev = NULL;
sn->count = 0;
sn->use = 0;
sn->dce_mode = 1;
sn->loopback = 1;
sn->number = rose_neigh_no++;
sn->restarted = 1;
skb_queue_head_init(&sn->queue);
init_timer(&sn->ftimer);
init_timer(&sn->t0timer);
spin_lock_bh(&rose_neigh_list_lock);
sn->next = rose_neigh_list;
rose_neigh_list = sn;
spin_unlock_bh(&rose_neigh_list_lock);
}
/*
* Add a loopback node.
*/
int rose_add_loopback_node(rose_address *address)
{
struct rose_node *rose_node;
int err = 0;
spin_lock_bh(&rose_node_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == 10) &&
(rosecmpm(address, &rose_node->address, 10) == 0) &&
rose_node->loopback)
break;
rose_node = rose_node->next;
}
if (rose_node != NULL)
goto out;
if ((rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC)) == NULL) {
err = -ENOMEM;
goto out;
}
rose_node->address = *address;
rose_node->mask = 10;
rose_node->count = 1;
rose_node->loopback = 1;
rose_node->neighbour[0] = rose_loopback_neigh;
/* Insert at the head of list. Address is always mask=10 */
rose_node->next = rose_node_list;
rose_node_list = rose_node;
rose_loopback_neigh->count++;
out:
spin_unlock_bh(&rose_node_list_lock);
return err;
}
/*
* Delete a loopback node.
*/
void rose_del_loopback_node(rose_address *address)
{
struct rose_node *rose_node;
spin_lock_bh(&rose_node_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == 10) &&
(rosecmpm(address, &rose_node->address, 10) == 0) &&
rose_node->loopback)
break;
rose_node = rose_node->next;
}
if (rose_node == NULL)
goto out;
rose_remove_node(rose_node);
rose_loopback_neigh->count--;
out:
spin_unlock_bh(&rose_node_list_lock);
}
/*
* A device has been removed. Remove its routes and neighbours.
*/
void rose_rt_device_down(struct net_device *dev)
{
struct rose_neigh *s, *rose_neigh;
struct rose_node *t, *rose_node;
int i;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
if (s->dev != dev)
continue;
rose_node = rose_node_list;
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
for (i = 0; i < t->count; i++) {
if (t->neighbour[i] != s)
continue;
t->count--;
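/* Shift the remaining neighbour entries down; the cases deliberately fall through. */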
switch (i) {
case 0:
t->neighbour[0] = t->neighbour[1];
case 1:
t->neighbour[1] = t->neighbour[2];
case 2:
break;
}
}
if (t->count <= 0)
rose_remove_node(t);
}
rose_remove_neigh(s);
}
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
}
#if 0 /* Currently unused */
/*
* A device has been removed. Remove its links.
*/
void rose_route_device_down(struct net_device *dev)
{
struct rose_route *s, *rose_route;
spin_lock_bh(&rose_route_list_lock);
rose_route = rose_route_list;
while (rose_route != NULL) {
s = rose_route;
rose_route = rose_route->next;
if (s->neigh1->dev == dev || s->neigh2->dev == dev)
rose_remove_route(s);
}
spin_unlock_bh(&rose_route_list_lock);
}
#endif
/*
* Clear all nodes and neighbours out, except for neighbours with
* active connections going through them.
* Do not clear loopback neighbour and nodes.
*/
static int rose_clear_routes(void)
{
struct rose_neigh *s, *rose_neigh;
struct rose_node *t, *rose_node;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
rose_node = rose_node_list;
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
if (!t->loopback)
rose_remove_node(t);
}
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
if (s->use == 0 && !s->loopback) {
s->count = 0;
rose_remove_neigh(s);
}
}
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return 0;
}
/*
* Check that the device given is a valid AX.25 interface that is "up".
* Called with RTNL.
*/
static struct net_device *rose_ax25_dev_find(char *devname)
{
struct net_device *dev;
if ((dev = __dev_get_by_name(&init_net, devname)) == NULL)
return NULL;
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
return dev;
return NULL;
}
/*
* Find the first active ROSE device, usually "rose0".
*/
struct net_device *rose_dev_first(void)
{
struct net_device *dev, *first = NULL;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
}
rcu_read_unlock();
return first;
}
/*
* Find the ROSE device for the given address.
*/
struct net_device *rose_dev_get(rose_address *addr)
{
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) {
dev_hold(dev);
goto out;
}
}
dev = NULL;
out:
rcu_read_unlock();
return dev;
}
static int rose_dev_exists(rose_address *addr)
{
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0)
goto out;
}
dev = NULL;
out:
rcu_read_unlock();
return dev != NULL;
}
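/*
* Return the through route already using this LCI on this neighbour,
* or NULL if the LCI is free.
*/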
struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neigh)
{
struct rose_route *rose_route;
for (rose_route = rose_route_list; rose_route != NULL; rose_route = rose_route->next)
if ((rose_route->neigh1 == neigh && rose_route->lci1 == lci) ||
(rose_route->neigh2 == neigh && rose_route->lci2 == lci))
return rose_route;
return NULL;
}
/*
* Find a neighbour to reach the given ROSE address.
*/
struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
unsigned char *diagnostic, int route_frame)
{
struct rose_neigh *res = NULL;
struct rose_node *node;
int failed = 0;
int i;
if (!route_frame) spin_lock_bh(&rose_node_list_lock);
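/* First pass: take the first matching neighbour whose link has already restarted. */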
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
if (node->neighbour[i]->restarted) {
res = node->neighbour[i];
goto out;
}
}
}
}
if (!route_frame) { /* connect request */
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
if (!rose_ftimer_running(node->neighbour[i])) {
res = node->neighbour[i];
failed = 0;
goto out;
}
failed = 1;
}
}
}
}
if (failed) {
*cause = ROSE_OUT_OF_ORDER;
*diagnostic = 0;
} else {
*cause = ROSE_NOT_OBTAINABLE;
*diagnostic = 0;
}
out:
if (!route_frame) spin_unlock_bh(&rose_node_list_lock);
return res;
}
/*
* Handle the ioctls that control the routing functions.
*/
int rose_rt_ioctl(unsigned int cmd, void __user *arg)
{
struct rose_route_struct rose_route;
struct net_device *dev;
int err;
switch (cmd) {
case SIOCADDRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
if (rose_dev_exists(&rose_route.address)) /* Can't add routes to ourself */
return -EINVAL;
if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
return -EINVAL;
if (rose_route.ndigis > AX25_MAX_DIGIS)
return -EINVAL;
err = rose_add_node(&rose_route, dev);
return err;
case SIOCDELRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
err = rose_del_node(&rose_route, dev);
return err;
case SIOCRSCLRRT:
return rose_clear_routes();
default:
return -EINVAL;
}
return 0;
}
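/*
* A neighbour has gone away: purge its queue and tear down every through
* route that uses it, clearing the call on the other leg where one exists.
*/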
static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh)
{
struct rose_route *rose_route, *s;
rose_neigh->restarted = 0;
rose_stop_t0timer(rose_neigh);
rose_start_ftimer(rose_neigh);
skb_queue_purge(&rose_neigh->queue);
spin_lock_bh(&rose_route_list_lock);
rose_route = rose_route_list;
while (rose_route != NULL) {
if ((rose_route->neigh1 == rose_neigh && rose_route->neigh2 == rose_neigh) ||
(rose_route->neigh1 == rose_neigh && rose_route->neigh2 == NULL) ||
(rose_route->neigh2 == rose_neigh && rose_route->neigh1 == NULL)) {
s = rose_route->next;
rose_remove_route(rose_route);
rose_route = s;
continue;
}
if (rose_route->neigh1 == rose_neigh) {
rose_route->neigh1->use--;
rose_route->neigh1 = NULL;
rose_transmit_clear_request(rose_route->neigh2, rose_route->lci2, ROSE_OUT_OF_ORDER, 0);
}
if (rose_route->neigh2 == rose_neigh) {
rose_route->neigh2->use--;
rose_route->neigh2 = NULL;
rose_transmit_clear_request(rose_route->neigh1, rose_route->lci1, ROSE_OUT_OF_ORDER, 0);
}
rose_route = rose_route->next;
}
spin_unlock_bh(&rose_route_list_lock);
}
/*
* A level 2 link has timed out, so it appears to be a poor link; don't use
* that neighbour until it is reset. Blow away all through routes and
* connections using this route.
*/
void rose_link_failed(ax25_cb *ax25, int reason)
{
struct rose_neigh *rose_neigh;
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (rose_neigh->ax25 == ax25)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh != NULL) {
rose_neigh->ax25 = NULL;
ax25_cb_put(ax25);
rose_del_route_by_neigh(rose_neigh);
rose_kill_by_neigh(rose_neigh);
}
spin_unlock_bh(&rose_neigh_list_lock);
}
/*
* A device has been "downed" remove its link status. Blow away all
* through routes and connections that use this device.
*/
void rose_link_device_down(struct net_device *dev)
{
struct rose_neigh *rose_neigh;
for (rose_neigh = rose_neigh_list; rose_neigh != NULL; rose_neigh = rose_neigh->next) {
if (rose_neigh->dev == dev) {
rose_del_route_by_neigh(rose_neigh);
rose_kill_by_neigh(rose_neigh);
}
}
}
/*
* Route a frame to an appropriate AX.25 connection.
*/
int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
struct rose_neigh *rose_neigh, *new_neigh;
struct rose_route *rose_route;
struct rose_facilities_struct facilities;
rose_address *src_addr, *dest_addr;
struct sock *sk;
unsigned short frametype;
unsigned int lci, new_lci;
unsigned char cause, diagnostic;
struct net_device *dev;
int res = 0;
char buf[11];
if (skb->len < ROSE_MIN_LEN)
return res;
frametype = skb->data[2];
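/* The 12-bit LCI is the low nibble of byte 0 plus all of byte 1. */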
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
if (frametype == ROSE_CALL_REQUEST &&
(skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
ROSE_CALL_REQ_ADDR_LEN_VAL))
return res;
src_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_SRC_ADDR_OFF);
dest_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
spin_lock_bh(&rose_neigh_list_lock);
spin_lock_bh(&rose_route_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&ax25->dest_addr, &rose_neigh->callsign) == 0 &&
ax25->ax25_dev->dev == rose_neigh->dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
printk("rose_route : unknown neighbour or device %s\n",
ax2asc(buf, &ax25->dest_addr));
goto out;
}
/*
* Obviously the link is working, halt the ftimer.
*/
rose_stop_ftimer(rose_neigh);
/*
* An LCI of zero is always for us, and it's always a restart
* frame.
*/
if (lci == 0) {
rose_link_rx_restart(skb, rose_neigh, frametype);
goto out;
}
/*
* Find an existing socket.
*/
if ((sk = rose_find_socket(lci, rose_neigh)) != NULL) {
if (frametype == ROSE_CALL_REQUEST) {
struct rose_sock *rose = rose_sk(sk);
/* Remove an existing unused socket */
rose_clear_queues(sk);
rose->cause = ROSE_NETWORK_CONGESTION;
rose->diagnostic = 0;
rose->neighbour->use--;
rose->neighbour = NULL;
rose->lci = 0;
rose->state = ROSE_STATE_0;
sk->sk_state = TCP_CLOSE;
sk->sk_err = 0;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
else {
skb_reset_transport_header(skb);
res = rose_process_rx_frame(sk, skb);
goto out;
}
}
/*
* Is this a Call Request and is it for us?
*/
if (frametype == ROSE_CALL_REQUEST)
if ((dev = rose_dev_get(dest_addr)) != NULL) {
res = rose_rx_call_request(skb, dev, rose_neigh, lci);
dev_put(dev);
goto out;
}
if (!sysctl_rose_routing_control) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NOT_OBTAINABLE, 0);
goto out;
}
/*
* Route it to the next in line if we have an entry for it.
*/
rose_route = rose_route_list;
while (rose_route != NULL) {
if (rose_route->lci1 == lci &&
rose_route->neigh1 == rose_neigh) {
if (frametype == ROSE_CALL_REQUEST) {
/* F6FBB - Remove an existing unused route */
rose_remove_route(rose_route);
break;
} else if (rose_route->neigh2 != NULL) {
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci2 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh2);
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
res = 1;
goto out;
} else {
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
goto out;
}
}
if (rose_route->lci2 == lci &&
rose_route->neigh2 == rose_neigh) {
if (frametype == ROSE_CALL_REQUEST) {
/* F6FBB - Remove an existing unused route */
rose_remove_route(rose_route);
break;
} else if (rose_route->neigh1 != NULL) {
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci1 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci1 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh1);
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
res = 1;
goto out;
} else {
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
goto out;
}
}
rose_route = rose_route->next;
}
/*
* We know that:
* 1. The frame isn't for us,
* 2. It isn't "owned" by any existing route.
*/
if (frametype != ROSE_CALL_REQUEST) { /* XXX */
res = 0;
goto out;
}
memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
&facilities)) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_INVALID_FACILITY, 76);
goto out;
}
/*
* Check for routing loops.
*/
rose_route = rose_route_list;
while (rose_route != NULL) {
if (rose_route->rand == facilities.rand &&
rosecmp(src_addr, &rose_route->src_addr) == 0 &&
ax25cmp(&facilities.dest_call, &rose_route->src_call) == 0 &&
ax25cmp(&facilities.source_call, &rose_route->dest_call) == 0) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NOT_OBTAINABLE, 120);
goto out;
}
rose_route = rose_route->next;
}
if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic, 1)) == NULL) {
rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic);
goto out;
}
if ((new_lci = rose_new_lci(new_neigh)) == 0) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 71);
goto out;
}
if ((rose_route = kmalloc(sizeof(*rose_route), GFP_ATOMIC)) == NULL) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 120);
goto out;
}
rose_route->lci1 = lci;
rose_route->src_addr = *src_addr;
rose_route->dest_addr = *dest_addr;
rose_route->src_call = facilities.dest_call;
rose_route->dest_call = facilities.source_call;
rose_route->rand = facilities.rand;
rose_route->neigh1 = rose_neigh;
rose_route->lci2 = new_lci;
rose_route->neigh2 = new_neigh;
rose_route->neigh1->use++;
rose_route->neigh2->use++;
rose_route->next = rose_route_list;
rose_route_list = rose_route;
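/* Rewrite the packet's LCI to the outgoing leg's LCI and forward it. */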
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci2 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh2);
res = 1;
out:
spin_unlock_bh(&rose_route_list_lock);
spin_unlock_bh(&rose_neigh_list_lock);
return res;
}
#ifdef CONFIG_PROC_FS
static void *rose_node_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_node_list_lock)
{
struct rose_node *rose_node;
int i = 1;
spin_lock_bh(&rose_node_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_node = rose_node_list; rose_node && i < *pos;
rose_node = rose_node->next, ++i);
return (i == *pos) ? rose_node : NULL;
}
static void *rose_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_node_list
: ((struct rose_node *)v)->next;
}
static void rose_node_stop(struct seq_file *seq, void *v)
__releases(rose_node_list_lock)
{
spin_unlock_bh(&rose_node_list_lock);
}
static int rose_node_show(struct seq_file *seq, void *v)
{
char rsbuf[11];
int i;
if (v == SEQ_START_TOKEN)
seq_puts(seq, "address mask n neigh neigh neigh\n");
else {
const struct rose_node *rose_node = v;
/* if (rose_node->loopback) {
seq_printf(seq, "%-10s %04d 1 loopback\n",
rose2asc(rsbuf, &rose_node->address),
rose_node->mask);
} else { */
seq_printf(seq, "%-10s %04d %d",
rose2asc(rsbuf, &rose_node->address),
rose_node->mask,
rose_node->count);
for (i = 0; i < rose_node->count; i++)
seq_printf(seq, " %05d",
rose_node->neighbour[i]->number);
seq_puts(seq, "\n");
/* } */
}
return 0;
}
static const struct seq_operations rose_node_seqops = {
.start = rose_node_start,
.next = rose_node_next,
.stop = rose_node_stop,
.show = rose_node_show,
};
static int rose_nodes_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_node_seqops);
}
const struct file_operations rose_nodes_fops = {
.owner = THIS_MODULE,
.open = rose_nodes_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *rose_neigh_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_neigh_list_lock)
{
struct rose_neigh *rose_neigh;
int i = 1;
spin_lock_bh(&rose_neigh_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_neigh = rose_neigh_list; rose_neigh && i < *pos;
rose_neigh = rose_neigh->next, ++i);
return (i == *pos) ? rose_neigh : NULL;
}
static void *rose_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_neigh_list
: ((struct rose_neigh *)v)->next;
}
static void rose_neigh_stop(struct seq_file *seq, void *v)
__releases(rose_neigh_list_lock)
{
spin_unlock_bh(&rose_neigh_list_lock);
}
static int rose_neigh_show(struct seq_file *seq, void *v)
{
char buf[11];
int i;
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"addr callsign dev count use mode restart t0 tf digipeaters\n");
else {
struct rose_neigh *rose_neigh = v;
/* if (!rose_neigh->loopback) { */
seq_printf(seq, "%05d %-9s %-4s %3d %3d %3s %3s %3lu %3lu",
rose_neigh->number,
(rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign),
rose_neigh->dev ? rose_neigh->dev->name : "???",
rose_neigh->count,
rose_neigh->use,
(rose_neigh->dce_mode) ? "DCE" : "DTE",
(rose_neigh->restarted) ? "yes" : "no",
ax25_display_timer(&rose_neigh->t0timer) / HZ,
ax25_display_timer(&rose_neigh->ftimer) / HZ);
if (rose_neigh->digipeat != NULL) {
for (i = 0; i < rose_neigh->digipeat->ndigi; i++)
seq_printf(seq, " %s", ax2asc(buf, &rose_neigh->digipeat->calls[i]));
}
seq_puts(seq, "\n");
}
return 0;
}
static const struct seq_operations rose_neigh_seqops = {
.start = rose_neigh_start,
.next = rose_neigh_next,
.stop = rose_neigh_stop,
.show = rose_neigh_show,
};
static int rose_neigh_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_neigh_seqops);
}
const struct file_operations rose_neigh_fops = {
.owner = THIS_MODULE,
.open = rose_neigh_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *rose_route_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_route_list_lock)
{
struct rose_route *rose_route;
int i = 1;
spin_lock_bh(&rose_route_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_route = rose_route_list; rose_route && i < *pos;
rose_route = rose_route->next, ++i);
return (i == *pos) ? rose_route : NULL;
}
static void *rose_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_route_list
: ((struct rose_route *)v)->next;
}
static void rose_route_stop(struct seq_file *seq, void *v)
__releases(rose_route_list_lock)
{
spin_unlock_bh(&rose_route_list_lock);
}
static int rose_route_show(struct seq_file *seq, void *v)
{
char buf[11], rsbuf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"lci address callsign neigh <-> lci address callsign neigh\n");
else {
struct rose_route *rose_route = v;
if (rose_route->neigh1)
seq_printf(seq,
"%3.3X %-10s %-9s %05d ",
rose_route->lci1,
rose2asc(rsbuf, &rose_route->src_addr),
ax2asc(buf, &rose_route->src_call),
rose_route->neigh1->number);
else
seq_puts(seq,
"000 * * 00000 ");
if (rose_route->neigh2)
seq_printf(seq,
"%3.3X %-10s %-9s %05d\n",
rose_route->lci2,
rose2asc(rsbuf, &rose_route->dest_addr),
ax2asc(buf, &rose_route->dest_call),
rose_route->neigh2->number);
else
seq_puts(seq,
"000 * * 00000\n");
}
return 0;
}
static const struct seq_operations rose_route_seqops = {
.start = rose_route_start,
.next = rose_route_next,
.stop = rose_route_stop,
.show = rose_route_show,
};
static int rose_route_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_route_seqops);
}
const struct file_operations rose_routes_fops = {
.owner = THIS_MODULE,
.open = rose_route_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
/*
* Release all memory associated with ROSE routing structures.
*/
void __exit rose_rt_free(void)
{
struct rose_neigh *s, *rose_neigh = rose_neigh_list;
struct rose_node *t, *rose_node = rose_node_list;
struct rose_route *u, *rose_route = rose_route_list;
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
rose_remove_neigh(s);
}
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
rose_remove_node(t);
}
while (rose_route != NULL) {
u = rose_route;
rose_route = rose_route->next;
rose_remove_route(u);
}
}
| gpl-2.0 |
mastero9017/Crystal | drivers/block/paride/ppc6lnx.c | 15571 | 14693 | /*
ppc6lnx.c (c) 2001 Micro Solutions Inc.
Released under the terms of the GNU General Public license
ppc6lnx.c is part of the protocol driver for the Micro Solutions
"BACKPACK" parallel port IDE adapter
(Works on Series 6 drives)
*/
//***************************************************************************
// PPC 6 Code in C sanitized for LINUX
// Original x86 ASM by Ron, Converted to C by Clive
//***************************************************************************
#define port_stb 1
#define port_afd 2
#define cmd_stb port_afd
#define port_init 4
#define data_stb port_init
#define port_sel 8
#define port_int 16
#define port_dir 0x20
#define ECR_EPP 0x80
#define ECR_BI 0x20
//***************************************************************************
// 60772 Commands
#define ACCESS_REG 0x00
#define ACCESS_PORT 0x40
#define ACCESS_READ 0x00
#define ACCESS_WRITE 0x20
// 60772 Command Prefix
#define CMD_PREFIX_SET 0xe0 // Special command that modifies the next command's operation
#define CMD_PREFIX_RESET 0xc0 // Resets current cmd modifier reg bits
#define PREFIX_IO16 0x01 // perform 16-bit wide I/O
#define PREFIX_FASTWR 0x04 // enable PPC mode fast-write
#define PREFIX_BLK 0x08 // enable block transfer mode
// 60772 Registers
#define REG_STATUS 0x00 // status register
#define STATUS_IRQA 0x01 // Peripheral IRQA line
#define STATUS_EEPROM_DO 0x40 // Serial EEPROM data bit
#define REG_VERSION 0x01 // PPC version register (read)
#define REG_HWCFG 0x02 // Hardware Config register
#define REG_RAMSIZE 0x03 // Size of RAM Buffer
#define RAMSIZE_128K 0x02
#define REG_EEPROM 0x06 // EEPROM control register
#define EEPROM_SK 0x01 // eeprom SK bit
#define EEPROM_DI 0x02 // eeprom DI bit
#define EEPROM_CS 0x04 // eeprom CS bit
#define EEPROM_EN 0x08 // eeprom output enable
#define REG_BLKSIZE 0x08 // Block transfer len (24 bit)
//***************************************************************************
typedef struct ppc_storage {
u16 lpt_addr; // LPT base address
u8 ppc_id;
u8 mode; // operating mode
// 0 = PPC Uni SW
// 1 = PPC Uni FW
// 2 = PPC Bi SW
// 3 = PPC Bi FW
// 4 = EPP Byte
// 5 = EPP Word
// 6 = EPP Dword
u8 ppc_flags;
u8 org_data; // original LPT data port contents
u8 org_ctrl; // original LPT control port contents
u8 cur_ctrl; // current control port contents
} Interface;
//***************************************************************************
// ppc_flags
#define fifo_wait 0x10
//***************************************************************************
// DONT CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES
#define PPCMODE_UNI_SW 0
#define PPCMODE_UNI_FW 1
#define PPCMODE_BI_SW 2
#define PPCMODE_BI_FW 3
#define PPCMODE_EPP_BYTE 4
#define PPCMODE_EPP_WORD 5
#define PPCMODE_EPP_DWORD 6
//***************************************************************************
static int ppc6_select(Interface *ppc);
static void ppc6_deselect(Interface *ppc);
static void ppc6_send_cmd(Interface *ppc, u8 cmd);
static void ppc6_wr_data_byte(Interface *ppc, u8 data);
static u8 ppc6_rd_data_byte(Interface *ppc);
static u8 ppc6_rd_port(Interface *ppc, u8 port);
static void ppc6_wr_port(Interface *ppc, u8 port, u8 data);
static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count);
static void ppc6_wait_for_fifo(Interface *ppc);
static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count);
static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
static void ppc6_wr_extout(Interface *ppc, u8 regdata);
static int ppc6_open(Interface *ppc);
static void ppc6_close(Interface *ppc);
//***************************************************************************
static int ppc6_select(Interface *ppc)
{
u8 i, j, k;
i = inb(ppc->lpt_addr + 1);
if (i & 1)
outb(i, ppc->lpt_addr + 1);
ppc->org_data = inb(ppc->lpt_addr);
ppc->org_ctrl = inb(ppc->lpt_addr + 2) & 0x5F; // readback ctrl
ppc->cur_ctrl = ppc->org_ctrl;
ppc->cur_ctrl |= port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
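// Selection signature: if the data port already holds 'b', write a dummy 'x' first, then send 'b', 'p', the adapter ID and its complement.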
if (ppc->org_data == 'b')
outb('x', ppc->lpt_addr);
outb('b', ppc->lpt_addr);
outb('p', ppc->lpt_addr);
outb(ppc->ppc_id, ppc->lpt_addr);
outb(~ppc->ppc_id,ppc->lpt_addr);
ppc->cur_ctrl &= ~port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl = (ppc->cur_ctrl & port_int) | port_init;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
i = ppc->mode & 0x0C;
if (i == 0)
i = (ppc->mode & 2) | 1;
outb(i, ppc->lpt_addr);
ppc->cur_ctrl |= port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
ppc->cur_ctrl |= port_afd;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
j = ((i & 0x08) << 4) | ((i & 0x07) << 3);
k = inb(ppc->lpt_addr + 1) & 0xB8;
if (j == k)
{
ppc->cur_ctrl &= ~port_afd;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
k = (inb(ppc->lpt_addr + 1) & 0xB8) ^ 0xB8;
if (j == k)
{
if (i & 4) // EPP
ppc->cur_ctrl &= ~(port_sel | port_init);
else // PPC/ECP
ppc->cur_ctrl &= ~port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
return(1);
}
}
outb(ppc->org_ctrl, ppc->lpt_addr + 2);
outb(ppc->org_data, ppc->lpt_addr);
return(0); // FAIL
}
//***************************************************************************
static void ppc6_deselect(Interface *ppc)
{
if (ppc->mode & 4) // EPP
ppc->cur_ctrl |= port_init;
else // PPC/ECP
ppc->cur_ctrl |= port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
outb(ppc->org_data, ppc->lpt_addr);
outb((ppc->org_ctrl | port_sel), ppc->lpt_addr + 2);
outb(ppc->org_ctrl, ppc->lpt_addr + 2);
}
//***************************************************************************
static void ppc6_send_cmd(Interface *ppc, u8 cmd)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
outb(cmd, ppc->lpt_addr);
ppc->cur_ctrl ^= cmd_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
case PPCMODE_EPP_WORD :
case PPCMODE_EPP_DWORD :
{
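// In the EPP modes the command byte goes out through the EPP address port (base + 3).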
outb(cmd, ppc->lpt_addr + 3);
break;
}
}
}
//***************************************************************************
static void ppc6_wr_data_byte(Interface *ppc, u8 data)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
outb(data, ppc->lpt_addr);
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
case PPCMODE_EPP_WORD :
case PPCMODE_EPP_DWORD :
{
outb(data, ppc->lpt_addr + 4);
break;
}
}
}
//***************************************************************************
static u8 ppc6_rd_data_byte(Interface *ppc)
{
u8 data = 0;
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
{
ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
data = inb(ppc->lpt_addr + 1);
data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3);
ppc->cur_ctrl |= port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
data |= inb(ppc->lpt_addr + 1) & 0xB8;
break;
}
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
ppc->cur_ctrl |= port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl = (ppc->cur_ctrl | port_stb) ^ data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
data = inb(ppc->lpt_addr);
ppc->cur_ctrl &= ~port_stb;
outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
ppc->cur_ctrl &= ~port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
case PPCMODE_EPP_WORD :
case PPCMODE_EPP_DWORD :
{
outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
data = inb(ppc->lpt_addr + 4);
outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
break;
}
}
return(data);
}
//***************************************************************************
static u8 ppc6_rd_port(Interface *ppc, u8 port)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ));
return(ppc6_rd_data_byte(ppc));
}
//***************************************************************************
static void ppc6_wr_port(Interface *ppc, u8 port, u8 data)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_WRITE));
ppc6_wr_data_byte(ppc, data);
}
//***************************************************************************
static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
{
while(count)
{
u8 d;
ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
d = inb(ppc->lpt_addr + 1);
d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3);
ppc->cur_ctrl |= port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
d |= inb(ppc->lpt_addr + 1) & 0xB8;
*data++ = d;
count--;
}
break;
}
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
ppc->cur_ctrl |= port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl |= port_stb;
while(count)
{
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
*data++ = inb(ppc->lpt_addr);
count--;
}
ppc->cur_ctrl &= ~port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl &= ~port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
{
outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
// DELAY
while(count)
{
*data++ = inb(ppc->lpt_addr + 4);
count--;
}
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_WORD :
{
outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
// DELAY
while(count > 1)
{
*((u16 *)data) = inw(ppc->lpt_addr + 4);
data += 2;
count -= 2;
}
while(count)
{
*data++ = inb(ppc->lpt_addr + 4);
count--;
}
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_DWORD :
{
outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
// DELAY
while(count > 3)
{
*((u32 *)data) = inl(ppc->lpt_addr + 4);
data += 4;
count -= 4;
}
while(count)
{
*data++ = inb(ppc->lpt_addr + 4);
count--;
}
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
}
}
//***************************************************************************
static void ppc6_wait_for_fifo(Interface *ppc)
{
int i;
if (ppc->ppc_flags & fifo_wait)
{
for(i=0; i<20; i++)
inb(ppc->lpt_addr + 1);
}
}
//***************************************************************************
static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_BI_SW :
{
while(count--)
{
outb(*data++, ppc->lpt_addr);
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
}
break;
}
case PPCMODE_UNI_FW :
case PPCMODE_BI_FW :
{
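// Fast write: if the next byte equals the one already on the data lines, just toggle the data strobe instead of re-driving the byte.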
u8 this, last;
ppc6_send_cmd(ppc,(CMD_PREFIX_SET | PREFIX_FASTWR));
ppc->cur_ctrl |= port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
last = *data;
outb(last, ppc->lpt_addr);
while(count)
{
this = *data++;
count--;
if (this == last)
{
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
}
else
{
outb(this, ppc->lpt_addr);
last = this;
}
}
ppc->cur_ctrl &= ~port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc6_send_cmd(ppc,(CMD_PREFIX_RESET | PREFIX_FASTWR));
break;
}
case PPCMODE_EPP_BYTE :
{
while(count)
{
outb(*data++,ppc->lpt_addr + 4);
count--;
}
ppc6_wait_for_fifo(ppc);
break;
}
case PPCMODE_EPP_WORD :
{
while(count > 1)
{
outw(*((u16 *)data),ppc->lpt_addr + 4);
data += 2;
count -= 2;
}
while(count)
{
outb(*data++,ppc->lpt_addr + 4);
count--;
}
ppc6_wait_for_fifo(ppc);
break;
}
case PPCMODE_EPP_DWORD :
{
while(count > 3)
{
outl(*((u32 *)data),ppc->lpt_addr + 4);
data += 4;
count -= 4;
}
while(count)
{
outb(*data++,ppc->lpt_addr + 4);
count--;
}
ppc6_wait_for_fifo(ppc);
break;
}
}
}
//***************************************************************************
static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
{
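// length is in 16-bit words; convert to bytes and program the 24-bit block-size register before the block transfer.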
length = length << 1;
ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
ppc6_wr_data_byte(ppc,(u8)length);
ppc6_wr_data_byte(ppc,(u8)(length >> 8));
ppc6_wr_data_byte(ppc,0);
ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_READ));
ppc6_rd_data_blk(ppc, data, length);
ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
}
//***************************************************************************
static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
ppc6_wr_data_byte(ppc,(u8)length);
ppc6_wr_data_byte(ppc,(u8)(length >> 8));
ppc6_wr_data_byte(ppc,0);
ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_WRITE));
ppc6_wr_data_blk(ppc, data, length);
ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
}
//***************************************************************************
static void ppc6_wr_extout(Interface *ppc, u8 regdata)
{
ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE));
ppc6_wr_data_byte(ppc, (u8)((regdata & 0x03) << 6));
}
//***************************************************************************
static int ppc6_open(Interface *ppc)
{
int ret;
ret = ppc6_select(ppc);
if (ret == 0)
return(ret);
ppc->ppc_flags &= ~fifo_wait;
ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE));
ppc6_wr_data_byte(ppc, RAMSIZE_128K);
ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_READ | REG_VERSION));
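// Parts whose version register (low 6 bits) reads 0x0C get the fifo_wait flag; ppc6_wait_for_fifo then reads the status port 20 times after block writes.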
if ((ppc6_rd_data_byte(ppc) & 0x3F) == 0x0C)
ppc->ppc_flags |= fifo_wait;
return(ret);
}
//***************************************************************************
static void ppc6_close(Interface *ppc)
{
ppc6_deselect(ppc);
}
//***************************************************************************
| gpl-2.0 |
NETFORCE2/linux | drivers/block/paride/ppc6lnx.c | 15571 | 14693 | /*
ppc6lnx.c (c) 2001 Micro Solutions Inc.
Released under the terms of the GNU General Public license
ppc6lnx.c is part of the protocol driver for the Micro Solutions
"BACKPACK" parallel port IDE adapter
(Works on Series 6 drives)
*/
//***************************************************************************
// PPC 6 Code in C sanitized for LINUX
// Original x86 ASM by Ron, Converted to C by Clive
//***************************************************************************
#define port_stb 1
#define port_afd 2
#define cmd_stb port_afd
#define port_init 4
#define data_stb port_init
#define port_sel 8
#define port_int 16
#define port_dir 0x20
#define ECR_EPP 0x80
#define ECR_BI 0x20
//***************************************************************************
// 60772 Commands
#define ACCESS_REG 0x00
#define ACCESS_PORT 0x40
#define ACCESS_READ 0x00
#define ACCESS_WRITE 0x20
// 60772 Command Prefix
#define CMD_PREFIX_SET 0xe0 // Special command that modifies the next command's operation
#define CMD_PREFIX_RESET 0xc0 // Resets current cmd modifier reg bits
#define PREFIX_IO16 0x01 // perform 16-bit wide I/O
#define PREFIX_FASTWR 0x04 // enable PPC mode fast-write
#define PREFIX_BLK 0x08 // enable block transfer mode
// 60772 Registers
#define REG_STATUS 0x00 // status register
#define STATUS_IRQA 0x01 // Peripheral IRQA line
#define STATUS_EEPROM_DO 0x40 // Serial EEPROM data bit
#define REG_VERSION 0x01 // PPC version register (read)
#define REG_HWCFG 0x02 // Hardware Config register
#define REG_RAMSIZE 0x03 // Size of RAM Buffer
#define RAMSIZE_128K 0x02
#define REG_EEPROM 0x06 // EEPROM control register
#define EEPROM_SK 0x01 // eeprom SK bit
#define EEPROM_DI 0x02 // eeprom DI bit
#define EEPROM_CS 0x04 // eeprom CS bit
#define EEPROM_EN 0x08 // eeprom output enable
#define REG_BLKSIZE 0x08 // Block transfer len (24 bit)
//***************************************************************************
typedef struct ppc_storage {
u16 lpt_addr; // LPT base address
u8 ppc_id;
u8 mode; // operating mode
// 0 = PPC Uni SW
// 1 = PPC Uni FW
// 2 = PPC Bi SW
// 3 = PPC Bi FW
// 4 = EPP Byte
// 5 = EPP Word
// 6 = EPP Dword
u8 ppc_flags;
u8 org_data; // original LPT data port contents
u8 org_ctrl; // original LPT control port contents
u8 cur_ctrl; // current control port contents
} Interface;
//***************************************************************************
// ppc_flags
#define fifo_wait 0x10
//***************************************************************************
// DONT CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES
#define PPCMODE_UNI_SW 0
#define PPCMODE_UNI_FW 1
#define PPCMODE_BI_SW 2
#define PPCMODE_BI_FW 3
#define PPCMODE_EPP_BYTE 4
#define PPCMODE_EPP_WORD 5
#define PPCMODE_EPP_DWORD 6
//***************************************************************************
static int ppc6_select(Interface *ppc);
static void ppc6_deselect(Interface *ppc);
static void ppc6_send_cmd(Interface *ppc, u8 cmd);
static void ppc6_wr_data_byte(Interface *ppc, u8 data);
static u8 ppc6_rd_data_byte(Interface *ppc);
static u8 ppc6_rd_port(Interface *ppc, u8 port);
static void ppc6_wr_port(Interface *ppc, u8 port, u8 data);
static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count);
static void ppc6_wait_for_fifo(Interface *ppc);
static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count);
static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
static void ppc6_wr_extout(Interface *ppc, u8 regdata);
static int ppc6_open(Interface *ppc);
static void ppc6_close(Interface *ppc);
//***************************************************************************
static int ppc6_select(Interface *ppc)
{
u8 i, j, k;
i = inb(ppc->lpt_addr + 1);
if (i & 1)
outb(i, ppc->lpt_addr + 1);
ppc->org_data = inb(ppc->lpt_addr);
ppc->org_ctrl = inb(ppc->lpt_addr + 2) & 0x5F; // readback ctrl
ppc->cur_ctrl = ppc->org_ctrl;
ppc->cur_ctrl |= port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
if (ppc->org_data == 'b')
outb('x', ppc->lpt_addr);
outb('b', ppc->lpt_addr);
outb('p', ppc->lpt_addr);
outb(ppc->ppc_id, ppc->lpt_addr);
outb(~ppc->ppc_id,ppc->lpt_addr);
ppc->cur_ctrl &= ~port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl = (ppc->cur_ctrl & port_int) | port_init;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
i = ppc->mode & 0x0C;
if (i == 0)
i = (ppc->mode & 2) | 1;
outb(i, ppc->lpt_addr);
ppc->cur_ctrl |= port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
ppc->cur_ctrl |= port_afd;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
j = ((i & 0x08) << 4) | ((i & 0x07) << 3);
k = inb(ppc->lpt_addr + 1) & 0xB8;
if (j == k)
{
ppc->cur_ctrl &= ~port_afd;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
k = (inb(ppc->lpt_addr + 1) & 0xB8) ^ 0xB8;
if (j == k)
{
if (i & 4) // EPP
ppc->cur_ctrl &= ~(port_sel | port_init);
else // PPC/ECP
ppc->cur_ctrl &= ~port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
return(1);
}
}
outb(ppc->org_ctrl, ppc->lpt_addr + 2);
outb(ppc->org_data, ppc->lpt_addr);
return(0); // FAIL
}
//***************************************************************************
static void ppc6_deselect(Interface *ppc)
{
if (ppc->mode & 4) // EPP
ppc->cur_ctrl |= port_init;
else // PPC/ECP
ppc->cur_ctrl |= port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
outb(ppc->org_data, ppc->lpt_addr);
outb((ppc->org_ctrl | port_sel), ppc->lpt_addr + 2);
outb(ppc->org_ctrl, ppc->lpt_addr + 2);
}
//***************************************************************************
static void ppc6_send_cmd(Interface *ppc, u8 cmd)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
outb(cmd, ppc->lpt_addr);
ppc->cur_ctrl ^= cmd_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
case PPCMODE_EPP_WORD :
case PPCMODE_EPP_DWORD :
{
outb(cmd, ppc->lpt_addr + 3);
break;
}
}
}
//***************************************************************************
static void ppc6_wr_data_byte(Interface *ppc, u8 data)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
outb(data, ppc->lpt_addr);
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
case PPCMODE_EPP_WORD :
case PPCMODE_EPP_DWORD :
{
outb(data, ppc->lpt_addr + 4);
break;
}
}
}
//***************************************************************************
static u8 ppc6_rd_data_byte(Interface *ppc)
{
u8 data = 0;
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
{
ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
data = inb(ppc->lpt_addr + 1);
data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3);
ppc->cur_ctrl |= port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
data |= inb(ppc->lpt_addr + 1) & 0xB8;
break;
}
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
ppc->cur_ctrl |= port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl = (ppc->cur_ctrl | port_stb) ^ data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
data = inb(ppc->lpt_addr);
ppc->cur_ctrl &= ~port_stb;
outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
ppc->cur_ctrl &= ~port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
case PPCMODE_EPP_WORD :
case PPCMODE_EPP_DWORD :
{
outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
data = inb(ppc->lpt_addr + 4);
outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
break;
}
}
return(data);
}
//***************************************************************************
static u8 ppc6_rd_port(Interface *ppc, u8 port)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ));
return(ppc6_rd_data_byte(ppc));
}
//***************************************************************************
static void ppc6_wr_port(Interface *ppc, u8 port, u8 data)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_WRITE));
ppc6_wr_data_byte(ppc, data);
}
//***************************************************************************
static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
{
while(count)
{
u8 d;
ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
d = inb(ppc->lpt_addr + 1);
d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3);
ppc->cur_ctrl |= port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
d |= inb(ppc->lpt_addr + 1) & 0xB8;
*data++ = d;
count--;
}
break;
}
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
ppc->cur_ctrl |= port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl |= port_stb;
while(count)
{
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
*data++ = inb(ppc->lpt_addr);
count--;
}
ppc->cur_ctrl &= ~port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl &= ~port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
{
outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
// DELAY
while(count)
{
*data++ = inb(ppc->lpt_addr + 4);
count--;
}
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_WORD :
{
outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
// DELAY
while(count > 1)
{
*((u16 *)data) = inw(ppc->lpt_addr + 4);
data += 2;
count -= 2;
}
while(count)
{
*data++ = inb(ppc->lpt_addr + 4);
count--;
}
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_DWORD :
{
outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
// DELAY
while(count > 3)
{
*((u32 *)data) = inl(ppc->lpt_addr + 4);
data += 4;
count -= 4;
}
while(count)
{
*data++ = inb(ppc->lpt_addr + 4);
count--;
}
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
}
}
//***************************************************************************
static void ppc6_wait_for_fifo(Interface *ppc)
{
int i;
if (ppc->ppc_flags & fifo_wait)
{
for(i=0; i<20; i++)
inb(ppc->lpt_addr + 1);
}
}
//***************************************************************************
static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_BI_SW :
{
while(count--)
{
outb(*data++, ppc->lpt_addr);
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
}
break;
}
case PPCMODE_UNI_FW :
case PPCMODE_BI_FW :
{
u8 this, last;
ppc6_send_cmd(ppc,(CMD_PREFIX_SET | PREFIX_FASTWR));
ppc->cur_ctrl |= port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
last = *data;
outb(last, ppc->lpt_addr);
while(count)
{
this = *data++;
count--;
if (this == last)
{
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
}
else
{
outb(this, ppc->lpt_addr);
last = this;
}
}
ppc->cur_ctrl &= ~port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc6_send_cmd(ppc,(CMD_PREFIX_RESET | PREFIX_FASTWR));
break;
}
case PPCMODE_EPP_BYTE :
{
while(count)
{
outb(*data++,ppc->lpt_addr + 4);
count--;
}
ppc6_wait_for_fifo(ppc);
break;
}
case PPCMODE_EPP_WORD :
{
while(count > 1)
{
outw(*((u16 *)data),ppc->lpt_addr + 4);
data += 2;
count -= 2;
}
while(count)
{
outb(*data++,ppc->lpt_addr + 4);
count--;
}
ppc6_wait_for_fifo(ppc);
break;
}
case PPCMODE_EPP_DWORD :
{
while(count > 3)
{
outl(*((u32 *)data),ppc->lpt_addr + 4);
data += 4;
count -= 4;
}
while(count)
{
outb(*data++,ppc->lpt_addr + 4);
count--;
}
ppc6_wait_for_fifo(ppc);
break;
}
}
}
//***************************************************************************
static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
ppc6_wr_data_byte(ppc,(u8)length);
ppc6_wr_data_byte(ppc,(u8)(length >> 8));
ppc6_wr_data_byte(ppc,0);
ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_READ));
ppc6_rd_data_blk(ppc, data, length);
ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
}
//***************************************************************************
static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
ppc6_wr_data_byte(ppc,(u8)length);
ppc6_wr_data_byte(ppc,(u8)(length >> 8));
ppc6_wr_data_byte(ppc,0);
ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_WRITE));
ppc6_wr_data_blk(ppc, data, length);
ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
}
//***************************************************************************
static void ppc6_wr_extout(Interface *ppc, u8 regdata)
{
ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE));
ppc6_wr_data_byte(ppc, (u8)((regdata & 0x03) << 6));
}
//***************************************************************************
static int ppc6_open(Interface *ppc)
{
int ret;
ret = ppc6_select(ppc);
if (ret == 0)
return(ret);
ppc->ppc_flags &= ~fifo_wait;
ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE));
ppc6_wr_data_byte(ppc, RAMSIZE_128K);
ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_READ | REG_VERSION));
if ((ppc6_rd_data_byte(ppc) & 0x3F) == 0x0C)
ppc->ppc_flags |= fifo_wait;
return(ret);
}
//***************************************************************************
static void ppc6_close(Interface *ppc)
{
ppc6_deselect(ppc);
}
//***************************************************************************
| gpl-2.0 |
jpabferreira/linux-pcsws | drivers/block/paride/ppc6lnx.c | 15571 | 14693 | /*
ppc6lnx.c (c) 2001 Micro Solutions Inc.
Released under the terms of the GNU General Public license
ppc6lnx.c is part of the protocol driver for the Micro Solutions
"BACKPACK" parallel port IDE adapter
(Works on Series 6 drives)
*/
//***************************************************************************
// PPC 6 Code in C sanitized for LINUX
// Original x86 ASM by Ron, Converted to C by Clive
//***************************************************************************
#define port_stb 1
#define port_afd 2
#define cmd_stb port_afd
#define port_init 4
#define data_stb port_init
#define port_sel 8
#define port_int 16
#define port_dir 0x20
#define ECR_EPP 0x80
#define ECR_BI 0x20
//***************************************************************************
// 60772 Commands
#define ACCESS_REG 0x00
#define ACCESS_PORT 0x40
#define ACCESS_READ 0x00
#define ACCESS_WRITE 0x20
// 60772 Command Prefix
#define CMD_PREFIX_SET 0xe0 // Special command that modifies the next command's operation
#define CMD_PREFIX_RESET 0xc0 // Resets current cmd modifier reg bits
#define PREFIX_IO16 0x01 // perform 16-bit wide I/O
#define PREFIX_FASTWR 0x04 // enable PPC mode fast-write
#define PREFIX_BLK 0x08 // enable block transfer mode
// 60772 Registers
#define REG_STATUS 0x00 // status register
#define STATUS_IRQA 0x01 // Peripheral IRQA line
#define STATUS_EEPROM_DO 0x40 // Serial EEPROM data bit
#define REG_VERSION 0x01 // PPC version register (read)
#define REG_HWCFG 0x02 // Hardware Config register
#define REG_RAMSIZE 0x03 // Size of RAM Buffer
#define RAMSIZE_128K 0x02
#define REG_EEPROM 0x06 // EEPROM control register
#define EEPROM_SK 0x01 // eeprom SK bit
#define EEPROM_DI 0x02 // eeprom DI bit
#define EEPROM_CS 0x04 // eeprom CS bit
#define EEPROM_EN 0x08 // eeprom output enable
#define REG_BLKSIZE 0x08 // Block transfer len (24 bit)
//***************************************************************************
typedef struct ppc_storage {
u16 lpt_addr; // LPT base address
u8 ppc_id;
u8 mode; // operating mode
// 0 = PPC Uni SW
// 1 = PPC Uni FW
// 2 = PPC Bi SW
// 3 = PPC Bi FW
// 4 = EPP Byte
// 5 = EPP Word
// 6 = EPP Dword
u8 ppc_flags;
u8 org_data; // original LPT data port contents
u8 org_ctrl; // original LPT control port contents
u8 cur_ctrl; // current control port contents
} Interface;
//***************************************************************************
// ppc_flags
#define fifo_wait 0x10
//***************************************************************************
// DONT CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES
#define PPCMODE_UNI_SW 0
#define PPCMODE_UNI_FW 1
#define PPCMODE_BI_SW 2
#define PPCMODE_BI_FW 3
#define PPCMODE_EPP_BYTE 4
#define PPCMODE_EPP_WORD 5
#define PPCMODE_EPP_DWORD 6
//***************************************************************************
static int ppc6_select(Interface *ppc);
static void ppc6_deselect(Interface *ppc);
static void ppc6_send_cmd(Interface *ppc, u8 cmd);
static void ppc6_wr_data_byte(Interface *ppc, u8 data);
static u8 ppc6_rd_data_byte(Interface *ppc);
static u8 ppc6_rd_port(Interface *ppc, u8 port);
static void ppc6_wr_port(Interface *ppc, u8 port, u8 data);
static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count);
static void ppc6_wait_for_fifo(Interface *ppc);
static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count);
static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
static void ppc6_wr_extout(Interface *ppc, u8 regdata);
static int ppc6_open(Interface *ppc);
static void ppc6_close(Interface *ppc);
//***************************************************************************
static int ppc6_select(Interface *ppc)
{
u8 i, j, k;
i = inb(ppc->lpt_addr + 1);
if (i & 1)
outb(i, ppc->lpt_addr + 1);
ppc->org_data = inb(ppc->lpt_addr);
ppc->org_ctrl = inb(ppc->lpt_addr + 2) & 0x5F; // readback ctrl
ppc->cur_ctrl = ppc->org_ctrl;
ppc->cur_ctrl |= port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
if (ppc->org_data == 'b')
outb('x', ppc->lpt_addr);
outb('b', ppc->lpt_addr);
outb('p', ppc->lpt_addr);
outb(ppc->ppc_id, ppc->lpt_addr);
outb(~ppc->ppc_id,ppc->lpt_addr);
ppc->cur_ctrl &= ~port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl = (ppc->cur_ctrl & port_int) | port_init;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
i = ppc->mode & 0x0C;
if (i == 0)
i = (ppc->mode & 2) | 1;
outb(i, ppc->lpt_addr);
ppc->cur_ctrl |= port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
ppc->cur_ctrl |= port_afd;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
j = ((i & 0x08) << 4) | ((i & 0x07) << 3);
k = inb(ppc->lpt_addr + 1) & 0xB8;
if (j == k)
{
ppc->cur_ctrl &= ~port_afd;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
k = (inb(ppc->lpt_addr + 1) & 0xB8) ^ 0xB8;
if (j == k)
{
if (i & 4) // EPP
ppc->cur_ctrl &= ~(port_sel | port_init);
else // PPC/ECP
ppc->cur_ctrl &= ~port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
return(1);
}
}
outb(ppc->org_ctrl, ppc->lpt_addr + 2);
outb(ppc->org_data, ppc->lpt_addr);
return(0); // FAIL
}
//***************************************************************************
static void ppc6_deselect(Interface *ppc)
{
if (ppc->mode & 4) // EPP
ppc->cur_ctrl |= port_init;
else // PPC/ECP
ppc->cur_ctrl |= port_sel;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
outb(ppc->org_data, ppc->lpt_addr);
outb((ppc->org_ctrl | port_sel), ppc->lpt_addr + 2);
outb(ppc->org_ctrl, ppc->lpt_addr + 2);
}
//***************************************************************************
static void ppc6_send_cmd(Interface *ppc, u8 cmd)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
outb(cmd, ppc->lpt_addr);
ppc->cur_ctrl ^= cmd_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
case PPCMODE_EPP_WORD :
case PPCMODE_EPP_DWORD :
{
outb(cmd, ppc->lpt_addr + 3);
break;
}
}
}
//***************************************************************************
static void ppc6_wr_data_byte(Interface *ppc, u8 data)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
outb(data, ppc->lpt_addr);
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
case PPCMODE_EPP_WORD :
case PPCMODE_EPP_DWORD :
{
outb(data, ppc->lpt_addr + 4);
break;
}
}
}
//***************************************************************************
static u8 ppc6_rd_data_byte(Interface *ppc)
{
u8 data = 0;
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
{
ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
data = inb(ppc->lpt_addr + 1);
data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3);
ppc->cur_ctrl |= port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
data |= inb(ppc->lpt_addr + 1) & 0xB8;
break;
}
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
ppc->cur_ctrl |= port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl = (ppc->cur_ctrl | port_stb) ^ data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
data = inb(ppc->lpt_addr);
ppc->cur_ctrl &= ~port_stb;
outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
ppc->cur_ctrl &= ~port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
case PPCMODE_EPP_WORD :
case PPCMODE_EPP_DWORD :
{
outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
data = inb(ppc->lpt_addr + 4);
outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
break;
}
}
return(data);
}
//***************************************************************************
static u8 ppc6_rd_port(Interface *ppc, u8 port)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ));
return(ppc6_rd_data_byte(ppc));
}
//***************************************************************************
static void ppc6_wr_port(Interface *ppc, u8 port, u8 data)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_WRITE));
ppc6_wr_data_byte(ppc, data);
}
//***************************************************************************
static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_UNI_FW :
{
while(count)
{
u8 d;
ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
d = inb(ppc->lpt_addr + 1);
d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3);
ppc->cur_ctrl |= port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
// DELAY
d |= inb(ppc->lpt_addr + 1) & 0xB8;
*data++ = d;
count--;
}
break;
}
case PPCMODE_BI_SW :
case PPCMODE_BI_FW :
{
ppc->cur_ctrl |= port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl |= port_stb;
while(count)
{
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
*data++ = inb(ppc->lpt_addr);
count--;
}
ppc->cur_ctrl &= ~port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc->cur_ctrl &= ~port_dir;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_BYTE :
{
outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
// DELAY
while(count)
{
*data++ = inb(ppc->lpt_addr + 4);
count--;
}
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_WORD :
{
outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
// DELAY
while(count > 1)
{
*((u16 *)data) = inw(ppc->lpt_addr + 4);
data += 2;
count -= 2;
}
while(count)
{
*data++ = inb(ppc->lpt_addr + 4);
count--;
}
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
case PPCMODE_EPP_DWORD :
{
outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
// DELAY
while(count > 3)
{
*((u32 *)data) = inl(ppc->lpt_addr + 4);
data += 4;
count -= 4;
}
while(count)
{
*data++ = inb(ppc->lpt_addr + 4);
count--;
}
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
break;
}
}
}
//***************************************************************************
static void ppc6_wait_for_fifo(Interface *ppc)
{
int i;
if (ppc->ppc_flags & fifo_wait)
{
for(i=0; i<20; i++)
inb(ppc->lpt_addr + 1);
}
}
//***************************************************************************
static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
case PPCMODE_UNI_SW :
case PPCMODE_BI_SW :
{
while(count--)
{
outb(*data++, ppc->lpt_addr);
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
}
break;
}
case PPCMODE_UNI_FW :
case PPCMODE_BI_FW :
{
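// Fast-write protocol: after the PREFIX_FASTWR command the data strobe
// alone clocks in a repeat of the previous byte, so runs of identical
// bytes only toggle data_stb; a byte that differs from the last one is
// written to the data latch instead.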
u8 this, last;
ppc6_send_cmd(ppc,(CMD_PREFIX_SET | PREFIX_FASTWR));
ppc->cur_ctrl |= port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
last = *data;
outb(last, ppc->lpt_addr);
while(count)
{
this = *data++;
count--;
if (this == last)
{
ppc->cur_ctrl ^= data_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
}
else
{
outb(this, ppc->lpt_addr);
last = this;
}
}
ppc->cur_ctrl &= ~port_stb;
outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
ppc6_send_cmd(ppc,(CMD_PREFIX_RESET | PREFIX_FASTWR));
break;
}
case PPCMODE_EPP_BYTE :
{
while(count)
{
outb(*data++,ppc->lpt_addr + 4);
count--;
}
ppc6_wait_for_fifo(ppc);
break;
}
case PPCMODE_EPP_WORD :
{
while(count > 1)
{
outw(*((u16 *)data),ppc->lpt_addr + 4);
data += 2;
count -= 2;
}
while(count)
{
outb(*data++,ppc->lpt_addr + 4);
count--;
}
ppc6_wait_for_fifo(ppc);
break;
}
case PPCMODE_EPP_DWORD :
{
while(count > 3)
{
outl(*((u32 *)data),ppc->lpt_addr + 4);
data += 4;
count -= 4;
}
while(count)
{
outb(*data++,ppc->lpt_addr + 4);
count--;
}
ppc6_wait_for_fifo(ppc);
break;
}
}
}
//***************************************************************************
static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
ppc6_wr_data_byte(ppc,(u8)length);
ppc6_wr_data_byte(ppc,(u8)(length >> 8));
ppc6_wr_data_byte(ppc,0);
ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_READ));
ppc6_rd_data_blk(ppc, data, length);
ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
}
//***************************************************************************
static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
ppc6_wr_data_byte(ppc,(u8)length);
ppc6_wr_data_byte(ppc,(u8)(length >> 8));
ppc6_wr_data_byte(ppc,0);
ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_WRITE));
ppc6_wr_data_blk(ppc, data, length);
ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
}
//***************************************************************************
static void ppc6_wr_extout(Interface *ppc, u8 regdata)
{
ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE));
ppc6_wr_data_byte(ppc, (u8)((regdata & 0x03) << 6));
}
//***************************************************************************
static int ppc6_open(Interface *ppc)
{
int ret;
ret = ppc6_select(ppc);
if (ret == 0)
return(ret);
ppc->ppc_flags &= ~fifo_wait;
ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE));
ppc6_wr_data_byte(ppc, RAMSIZE_128K);
ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_READ | REG_VERSION));
if ((ppc6_rd_data_byte(ppc) & 0x3F) == 0x0C)
ppc->ppc_flags |= fifo_wait;
return(ret);
}
//***************************************************************************
static void ppc6_close(Interface *ppc)
{
ppc6_deselect(ppc);
}
//***************************************************************************
| gpl-2.0 |
profblock/adaptive-litmus | sound/soc/codecs/pcm512x.c | 212 | 42986 | /*
* Driver for the PCM512x CODECs
*
* Author: Mark Brown <broonie@linaro.org>
* Copyright 2014 Linaro Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/gcd.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include "pcm512x.h"
#define DIV_ROUND_DOWN_ULL(ll, d) \
({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
#define PCM512x_NUM_SUPPLIES 3
static const char * const pcm512x_supply_names[PCM512x_NUM_SUPPLIES] = {
"AVDD",
"DVDD",
"CPVDD",
};
struct pcm512x_priv {
struct regmap *regmap;
struct clk *sclk;
struct regulator_bulk_data supplies[PCM512x_NUM_SUPPLIES];
struct notifier_block supply_nb[PCM512x_NUM_SUPPLIES];
int fmt;
int pll_in;
int pll_out;
int pll_r;
int pll_j;
int pll_d;
int pll_p;
unsigned long real_pll;
unsigned long overclock_pll;
unsigned long overclock_dac;
unsigned long overclock_dsp;
};
/*
* We can't use the same notifier block for more than one supply and
* there's no way I can see to get from a callback to the caller
* except container_of().
*/
#define PCM512x_REGULATOR_EVENT(n) \
static int pcm512x_regulator_event_##n(struct notifier_block *nb, \
unsigned long event, void *data) \
{ \
struct pcm512x_priv *pcm512x = container_of(nb, struct pcm512x_priv, \
supply_nb[n]); \
if (event & REGULATOR_EVENT_DISABLE) { \
regcache_mark_dirty(pcm512x->regmap); \
regcache_cache_only(pcm512x->regmap, true); \
} \
return 0; \
}
PCM512x_REGULATOR_EVENT(0)
PCM512x_REGULATOR_EVENT(1)
PCM512x_REGULATOR_EVENT(2)
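/*
 * Once a supply drops the register contents can no longer be trusted,
 * so the notifier marks the regmap cache dirty and switches it to
 * cache-only mode; resume later syncs the cache back to the hardware.
 */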
static const struct reg_default pcm512x_reg_defaults[] = {
{ PCM512x_RESET, 0x00 },
{ PCM512x_POWER, 0x00 },
{ PCM512x_MUTE, 0x00 },
{ PCM512x_DSP, 0x00 },
{ PCM512x_PLL_REF, 0x00 },
{ PCM512x_DAC_REF, 0x00 },
{ PCM512x_DAC_ROUTING, 0x11 },
{ PCM512x_DSP_PROGRAM, 0x01 },
{ PCM512x_CLKDET, 0x00 },
{ PCM512x_AUTO_MUTE, 0x00 },
{ PCM512x_ERROR_DETECT, 0x00 },
{ PCM512x_DIGITAL_VOLUME_1, 0x00 },
{ PCM512x_DIGITAL_VOLUME_2, 0x30 },
{ PCM512x_DIGITAL_VOLUME_3, 0x30 },
{ PCM512x_DIGITAL_MUTE_1, 0x22 },
{ PCM512x_DIGITAL_MUTE_2, 0x00 },
{ PCM512x_DIGITAL_MUTE_3, 0x07 },
{ PCM512x_OUTPUT_AMPLITUDE, 0x00 },
{ PCM512x_ANALOG_GAIN_CTRL, 0x00 },
{ PCM512x_UNDERVOLTAGE_PROT, 0x00 },
{ PCM512x_ANALOG_MUTE_CTRL, 0x00 },
{ PCM512x_ANALOG_GAIN_BOOST, 0x00 },
{ PCM512x_VCOM_CTRL_1, 0x00 },
{ PCM512x_VCOM_CTRL_2, 0x01 },
{ PCM512x_BCLK_LRCLK_CFG, 0x00 },
{ PCM512x_MASTER_MODE, 0x7c },
{ PCM512x_GPIO_DACIN, 0x00 },
{ PCM512x_GPIO_PLLIN, 0x00 },
{ PCM512x_SYNCHRONIZE, 0x10 },
{ PCM512x_PLL_COEFF_0, 0x00 },
{ PCM512x_PLL_COEFF_1, 0x00 },
{ PCM512x_PLL_COEFF_2, 0x00 },
{ PCM512x_PLL_COEFF_3, 0x00 },
{ PCM512x_PLL_COEFF_4, 0x00 },
{ PCM512x_DSP_CLKDIV, 0x00 },
{ PCM512x_DAC_CLKDIV, 0x00 },
{ PCM512x_NCP_CLKDIV, 0x00 },
{ PCM512x_OSR_CLKDIV, 0x00 },
{ PCM512x_MASTER_CLKDIV_1, 0x00 },
{ PCM512x_MASTER_CLKDIV_2, 0x00 },
{ PCM512x_FS_SPEED_MODE, 0x00 },
{ PCM512x_IDAC_1, 0x01 },
{ PCM512x_IDAC_2, 0x00 },
};
static bool pcm512x_readable(struct device *dev, unsigned int reg)
{
switch (reg) {
case PCM512x_RESET:
case PCM512x_POWER:
case PCM512x_MUTE:
case PCM512x_PLL_EN:
case PCM512x_SPI_MISO_FUNCTION:
case PCM512x_DSP:
case PCM512x_GPIO_EN:
case PCM512x_BCLK_LRCLK_CFG:
case PCM512x_DSP_GPIO_INPUT:
case PCM512x_MASTER_MODE:
case PCM512x_PLL_REF:
case PCM512x_DAC_REF:
case PCM512x_GPIO_DACIN:
case PCM512x_GPIO_PLLIN:
case PCM512x_SYNCHRONIZE:
case PCM512x_PLL_COEFF_0:
case PCM512x_PLL_COEFF_1:
case PCM512x_PLL_COEFF_2:
case PCM512x_PLL_COEFF_3:
case PCM512x_PLL_COEFF_4:
case PCM512x_DSP_CLKDIV:
case PCM512x_DAC_CLKDIV:
case PCM512x_NCP_CLKDIV:
case PCM512x_OSR_CLKDIV:
case PCM512x_MASTER_CLKDIV_1:
case PCM512x_MASTER_CLKDIV_2:
case PCM512x_FS_SPEED_MODE:
case PCM512x_IDAC_1:
case PCM512x_IDAC_2:
case PCM512x_ERROR_DETECT:
case PCM512x_I2S_1:
case PCM512x_I2S_2:
case PCM512x_DAC_ROUTING:
case PCM512x_DSP_PROGRAM:
case PCM512x_CLKDET:
case PCM512x_AUTO_MUTE:
case PCM512x_DIGITAL_VOLUME_1:
case PCM512x_DIGITAL_VOLUME_2:
case PCM512x_DIGITAL_VOLUME_3:
case PCM512x_DIGITAL_MUTE_1:
case PCM512x_DIGITAL_MUTE_2:
case PCM512x_DIGITAL_MUTE_3:
case PCM512x_GPIO_OUTPUT_1:
case PCM512x_GPIO_OUTPUT_2:
case PCM512x_GPIO_OUTPUT_3:
case PCM512x_GPIO_OUTPUT_4:
case PCM512x_GPIO_OUTPUT_5:
case PCM512x_GPIO_OUTPUT_6:
case PCM512x_GPIO_CONTROL_1:
case PCM512x_GPIO_CONTROL_2:
case PCM512x_OVERFLOW:
case PCM512x_RATE_DET_1:
case PCM512x_RATE_DET_2:
case PCM512x_RATE_DET_3:
case PCM512x_RATE_DET_4:
case PCM512x_CLOCK_STATUS:
case PCM512x_ANALOG_MUTE_DET:
case PCM512x_GPIN:
case PCM512x_DIGITAL_MUTE_DET:
case PCM512x_OUTPUT_AMPLITUDE:
case PCM512x_ANALOG_GAIN_CTRL:
case PCM512x_UNDERVOLTAGE_PROT:
case PCM512x_ANALOG_MUTE_CTRL:
case PCM512x_ANALOG_GAIN_BOOST:
case PCM512x_VCOM_CTRL_1:
case PCM512x_VCOM_CTRL_2:
case PCM512x_CRAM_CTRL:
case PCM512x_FLEX_A:
case PCM512x_FLEX_B:
return true;
default:
/* There are 256 raw register addresses */
return reg < 0xff;
}
}
static bool pcm512x_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
case PCM512x_PLL_EN:
case PCM512x_OVERFLOW:
case PCM512x_RATE_DET_1:
case PCM512x_RATE_DET_2:
case PCM512x_RATE_DET_3:
case PCM512x_RATE_DET_4:
case PCM512x_CLOCK_STATUS:
case PCM512x_ANALOG_MUTE_DET:
case PCM512x_GPIN:
case PCM512x_DIGITAL_MUTE_DET:
case PCM512x_CRAM_CTRL:
return true;
default:
/* There are 256 raw register addresses */
return reg < 0xff;
}
}
static int pcm512x_overclock_pll_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
ucontrol->value.integer.value[0] = pcm512x->overclock_pll;
return 0;
}
static int pcm512x_overclock_pll_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
switch (codec->dapm.bias_level) {
case SND_SOC_BIAS_OFF:
case SND_SOC_BIAS_STANDBY:
break;
default:
return -EBUSY;
}
pcm512x->overclock_pll = ucontrol->value.integer.value[0];
return 0;
}
static int pcm512x_overclock_dsp_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
ucontrol->value.integer.value[0] = pcm512x->overclock_dsp;
return 0;
}
static int pcm512x_overclock_dsp_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
switch (codec->dapm.bias_level) {
case SND_SOC_BIAS_OFF:
case SND_SOC_BIAS_STANDBY:
break;
default:
return -EBUSY;
}
pcm512x->overclock_dsp = ucontrol->value.integer.value[0];
return 0;
}
static int pcm512x_overclock_dac_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
ucontrol->value.integer.value[0] = pcm512x->overclock_dac;
return 0;
}
static int pcm512x_overclock_dac_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
switch (codec->dapm.bias_level) {
case SND_SOC_BIAS_OFF:
case SND_SOC_BIAS_STANDBY:
break;
default:
return -EBUSY;
}
pcm512x->overclock_dac = ucontrol->value.integer.value[0];
return 0;
}
static const DECLARE_TLV_DB_SCALE(digital_tlv, -10350, 50, 1);
static const DECLARE_TLV_DB_SCALE(analog_tlv, -600, 600, 0);
static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
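/*
 * Decoded: the digital volume starts at -103.5 dB in 0.5 dB steps with
 * mute at the minimum, the analogue gain selects -6 dB or 0 dB, and the
 * analogue boost adds 0 dB or +0.8 dB.
 */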
static const char * const pcm512x_dsp_program_texts[] = {
"FIR interpolation with de-emphasis",
"Low latency IIR with de-emphasis",
"High attenuation with de-emphasis",
"Fixed process flow",
"Ringing-less low latency FIR",
};
static const unsigned int pcm512x_dsp_program_values[] = {
1,
2,
3,
5,
7,
};
static SOC_VALUE_ENUM_SINGLE_DECL(pcm512x_dsp_program,
PCM512x_DSP_PROGRAM, 0, 0x1f,
pcm512x_dsp_program_texts,
pcm512x_dsp_program_values);
static const char * const pcm512x_clk_missing_text[] = {
"1s", "2s", "3s", "4s", "5s", "6s", "7s", "8s"
};
static const struct soc_enum pcm512x_clk_missing =
SOC_ENUM_SINGLE(PCM512x_CLKDET, 0, 8, pcm512x_clk_missing_text);
static const char * const pcm512x_autom_text[] = {
"21ms", "106ms", "213ms", "533ms", "1.07s", "2.13s", "5.33s", "10.66s"
};
static const struct soc_enum pcm512x_autom_l =
SOC_ENUM_SINGLE(PCM512x_AUTO_MUTE, PCM512x_ATML_SHIFT, 8,
pcm512x_autom_text);
static const struct soc_enum pcm512x_autom_r =
SOC_ENUM_SINGLE(PCM512x_AUTO_MUTE, PCM512x_ATMR_SHIFT, 8,
pcm512x_autom_text);
static const char * const pcm512x_ramp_rate_text[] = {
"1 sample/update", "2 samples/update", "4 samples/update",
"Immediate"
};
static const struct soc_enum pcm512x_vndf =
SOC_ENUM_SINGLE(PCM512x_DIGITAL_MUTE_1, PCM512x_VNDF_SHIFT, 4,
pcm512x_ramp_rate_text);
static const struct soc_enum pcm512x_vnuf =
SOC_ENUM_SINGLE(PCM512x_DIGITAL_MUTE_1, PCM512x_VNUF_SHIFT, 4,
pcm512x_ramp_rate_text);
static const struct soc_enum pcm512x_vedf =
SOC_ENUM_SINGLE(PCM512x_DIGITAL_MUTE_2, PCM512x_VEDF_SHIFT, 4,
pcm512x_ramp_rate_text);
static const char * const pcm512x_ramp_step_text[] = {
"4dB/step", "2dB/step", "1dB/step", "0.5dB/step"
};
static const struct soc_enum pcm512x_vnds =
SOC_ENUM_SINGLE(PCM512x_DIGITAL_MUTE_1, PCM512x_VNDS_SHIFT, 4,
pcm512x_ramp_step_text);
static const struct soc_enum pcm512x_vnus =
SOC_ENUM_SINGLE(PCM512x_DIGITAL_MUTE_1, PCM512x_VNUS_SHIFT, 4,
pcm512x_ramp_step_text);
static const struct soc_enum pcm512x_veds =
SOC_ENUM_SINGLE(PCM512x_DIGITAL_MUTE_2, PCM512x_VEDS_SHIFT, 4,
pcm512x_ramp_step_text);
static const struct snd_kcontrol_new pcm512x_controls[] = {
SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
PCM512x_RQMR_SHIFT, 1, 1),
SOC_SINGLE("Deemphasis Switch", PCM512x_DSP, PCM512x_DEMP_SHIFT, 1, 1),
SOC_ENUM("DSP Program", pcm512x_dsp_program),
SOC_ENUM("Clock Missing Period", pcm512x_clk_missing),
SOC_ENUM("Auto Mute Time Left", pcm512x_autom_l),
SOC_ENUM("Auto Mute Time Right", pcm512x_autom_r),
SOC_SINGLE("Auto Mute Mono Switch", PCM512x_DIGITAL_MUTE_3,
PCM512x_ACTL_SHIFT, 1, 0),
SOC_DOUBLE("Auto Mute Switch", PCM512x_DIGITAL_MUTE_3, PCM512x_AMLE_SHIFT,
PCM512x_AMRE_SHIFT, 1, 0),
SOC_ENUM("Volume Ramp Down Rate", pcm512x_vndf),
SOC_ENUM("Volume Ramp Down Step", pcm512x_vnds),
SOC_ENUM("Volume Ramp Up Rate", pcm512x_vnuf),
SOC_ENUM("Volume Ramp Up Step", pcm512x_vnus),
SOC_ENUM("Volume Ramp Down Emergency Rate", pcm512x_vedf),
SOC_ENUM("Volume Ramp Down Emergency Step", pcm512x_veds),
SOC_SINGLE_EXT("Max Overclock PLL", SND_SOC_NOPM, 0, 20, 0,
pcm512x_overclock_pll_get, pcm512x_overclock_pll_put),
SOC_SINGLE_EXT("Max Overclock DSP", SND_SOC_NOPM, 0, 40, 0,
pcm512x_overclock_dsp_get, pcm512x_overclock_dsp_put),
SOC_SINGLE_EXT("Max Overclock DAC", SND_SOC_NOPM, 0, 40, 0,
pcm512x_overclock_dac_get, pcm512x_overclock_dac_put),
};
static const struct snd_soc_dapm_widget pcm512x_dapm_widgets[] = {
SND_SOC_DAPM_DAC("DACL", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DACR", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_OUTPUT("OUTL"),
SND_SOC_DAPM_OUTPUT("OUTR"),
};
static const struct snd_soc_dapm_route pcm512x_dapm_routes[] = {
{ "DACL", NULL, "Playback" },
{ "DACR", NULL, "Playback" },
{ "OUTL", NULL, "DACL" },
{ "OUTR", NULL, "DACR" },
};
static unsigned long pcm512x_pll_max(struct pcm512x_priv *pcm512x)
{
return 25000000 + 25000000 * pcm512x->overclock_pll / 100;
}
static unsigned long pcm512x_dsp_max(struct pcm512x_priv *pcm512x)
{
return 50000000 + 50000000 * pcm512x->overclock_dsp / 100;
}
static unsigned long pcm512x_dac_max(struct pcm512x_priv *pcm512x,
unsigned long rate)
{
return rate + rate * pcm512x->overclock_dac / 100;
}
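/*
 * The three helpers above scale a nominal limit by the user-set
 * overclock percentage, e.g. "Max Overclock PLL" = 20 raises the PLL
 * ceiling from 25 MHz to 30 MHz.
 */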
static unsigned long pcm512x_sck_max(struct pcm512x_priv *pcm512x)
{
if (!pcm512x->pll_out)
return 25000000;
return pcm512x_pll_max(pcm512x);
}
static unsigned long pcm512x_ncp_target(struct pcm512x_priv *pcm512x,
unsigned long dac_rate)
{
/*
* If the DAC is not actually overclocked, use the good old
* NCP target rate...
*/
if (dac_rate <= 6144000)
return 1536000;
/*
* ...but if the DAC is in fact overclocked, bump the NCP target
* rate to get the recommended dividers even when overclocking.
*/
return pcm512x_dac_max(pcm512x, 1536000);
}
static const u32 pcm512x_dai_rates[] = {
8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
88200, 96000, 176400, 192000, 384000,
};
static const struct snd_pcm_hw_constraint_list constraints_slave = {
.count = ARRAY_SIZE(pcm512x_dai_rates),
.list = pcm512x_dai_rates,
};
static int pcm512x_hw_rule_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct pcm512x_priv *pcm512x = rule->private;
struct snd_interval ranges[2];
int frame_size;
frame_size = snd_soc_params_to_frame_size(params);
if (frame_size < 0)
return frame_size;
switch (frame_size) {
case 32:
/* No hole when the frame size is 32. */
return 0;
case 48:
case 64:
/* There is only one hole in the range of supported
* rates, but it moves with the frame size.
*/
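/* For example, with a 64-bit frame and the stock 25 MHz SCK
 * ceiling the hole spans roughly 195 kHz (25 MHz / 64 / 2) up to
 * 250 kHz (16 MHz / 64).
 */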
memset(ranges, 0, sizeof(ranges));
ranges[0].min = 8000;
ranges[0].max = pcm512x_sck_max(pcm512x) / frame_size / 2;
ranges[1].min = DIV_ROUND_UP(16000000, frame_size);
ranges[1].max = 384000;
break;
default:
return -EINVAL;
}
return snd_interval_ranges(hw_param_interval(params, rule->var),
ARRAY_SIZE(ranges), ranges, 0);
}
static int pcm512x_dai_startup_master(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
struct device *dev = dai->dev;
struct snd_pcm_hw_constraint_ratnums *constraints_no_pll;
struct snd_ratnum *rats_no_pll;
if (IS_ERR(pcm512x->sclk)) {
dev_err(dev, "Need SCLK for master mode: %ld\n",
PTR_ERR(pcm512x->sclk));
return PTR_ERR(pcm512x->sclk);
}
if (pcm512x->pll_out)
return snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
pcm512x_hw_rule_rate,
pcm512x,
SNDRV_PCM_HW_PARAM_FRAME_BITS,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
constraints_no_pll = devm_kzalloc(dev, sizeof(*constraints_no_pll),
GFP_KERNEL);
if (!constraints_no_pll)
return -ENOMEM;
constraints_no_pll->nrats = 1;
rats_no_pll = devm_kzalloc(dev, sizeof(*rats_no_pll), GFP_KERNEL);
if (!rats_no_pll)
return -ENOMEM;
constraints_no_pll->rats = rats_no_pll;
rats_no_pll->num = clk_get_rate(pcm512x->sclk) / 64;
rats_no_pll->den_min = 1;
rats_no_pll->den_max = 128;
rats_no_pll->den_step = 1;
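/*
 * Without the PLL the rate must be SCLK / (64 * den) for some den in
 * 1..128; e.g. a 24.576 MHz SCLK allows 384000/den Hz (384 kHz,
 * 192 kHz, ..., 48 kHz, ..., 3 kHz) but no 44.1 kHz family rates.
 */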
return snd_pcm_hw_constraint_ratnums(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
constraints_no_pll);
}
static int pcm512x_dai_startup_slave(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
struct device *dev = dai->dev;
struct regmap *regmap = pcm512x->regmap;
if (IS_ERR(pcm512x->sclk)) {
dev_info(dev, "No SCLK, using BCLK: %ld\n",
PTR_ERR(pcm512x->sclk));
/* Disable reporting of missing SCLK as an error */
regmap_update_bits(regmap, PCM512x_ERROR_DETECT,
PCM512x_IDCH, PCM512x_IDCH);
/* Switch PLL input to BCLK */
regmap_update_bits(regmap, PCM512x_PLL_REF,
PCM512x_SREF, PCM512x_SREF_BCK);
}
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&constraints_slave);
}
static int pcm512x_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
switch (pcm512x->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
case SND_SOC_DAIFMT_CBM_CFS:
return pcm512x_dai_startup_master(substream, dai);
case SND_SOC_DAIFMT_CBS_CFS:
return pcm512x_dai_startup_slave(substream, dai);
default:
return -EINVAL;
}
}
static int pcm512x_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
struct pcm512x_priv *pcm512x = dev_get_drvdata(codec->dev);
int ret;
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
ret = regmap_update_bits(pcm512x->regmap, PCM512x_POWER,
PCM512x_RQST, 0);
if (ret != 0) {
dev_err(codec->dev, "Failed to remove standby: %d\n",
ret);
return ret;
}
break;
case SND_SOC_BIAS_OFF:
ret = regmap_update_bits(pcm512x->regmap, PCM512x_POWER,
PCM512x_RQST, PCM512x_RQST);
if (ret != 0) {
dev_err(codec->dev, "Failed to request standby: %d\n",
ret);
return ret;
}
break;
}
codec->dapm.bias_level = level;
return 0;
}
static unsigned long pcm512x_find_sck(struct snd_soc_dai *dai,
unsigned long bclk_rate)
{
struct device *dev = dai->dev;
struct snd_soc_codec *codec = dai->codec;
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
unsigned long sck_rate;
int pow2;
/* 64 MHz <= pll_rate <= 100 MHz, VREF mode */
/* 16 MHz <= sck_rate <= 25 MHz, VREF mode */
/* select sck_rate as a multiple of bclk_rate but still with
* as many factors of 2 as possible, as that makes it easier
* to find a fast DAC rate
*/
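/* e.g. bclk_rate = 3072000 with the stock 25 MHz ceiling:
 * fls((25000000 - 16000000) / 3072000) = fls(2) = 2, so pow2 starts
 * at 4 and rounddown(25000000, 4 * 3072000) = 24576000, giving
 * SCK = 8 * BCLK = 24.576 MHz.
 */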
pow2 = 1 << fls((pcm512x_pll_max(pcm512x) - 16000000) / bclk_rate);
for (; pow2; pow2 >>= 1) {
sck_rate = rounddown(pcm512x_pll_max(pcm512x),
bclk_rate * pow2);
if (sck_rate >= 16000000)
break;
}
if (!pow2) {
dev_err(dev, "Impossible to generate a suitable SCK\n");
return 0;
}
dev_dbg(dev, "sck_rate %lu\n", sck_rate);
return sck_rate;
}
/* pll_rate = pllin_rate * R * J.D / P
* 1 <= R <= 16
* 1 <= J <= 63
* 0 <= D <= 9999
* 1 <= P <= 15
* 64 MHz <= pll_rate <= 100 MHz
* if D == 0
* 1 MHz <= pllin_rate / P <= 20 MHz
* else if D > 0
* 6.667 MHz <= pllin_rate / P <= 20 MHz
* 4 <= J <= 11
* R = 1
*/
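/* Worked example (illustrative): pllin_rate = 3072000 (a 64fs BCLK at
 * 48 kHz) and pll_rate = 98304000 reduce to num/den = 32/1, so the
 * D == 0 branch below finds P = 1, R = 16, J = 2:
 * 3.072 MHz * 16 * 2.0000 / 1 = 98.304 MHz exactly.
 */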
static int pcm512x_find_pll_coeff(struct snd_soc_dai *dai,
unsigned long pllin_rate,
unsigned long pll_rate)
{
struct device *dev = dai->dev;
struct snd_soc_codec *codec = dai->codec;
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
unsigned long common;
int R, J, D, P;
unsigned long K; /* 10000 * J.D */
unsigned long num;
unsigned long den;
common = gcd(pll_rate, pllin_rate);
dev_dbg(dev, "pll %lu pllin %lu common %lu\n",
pll_rate, pllin_rate, common);
num = pll_rate / common;
den = pllin_rate / common;
/* pllin_rate / P (or here, den) cannot be greater than 20 MHz */
if (pllin_rate / den > 20000000 && num < 8) {
num *= DIV_ROUND_UP(pllin_rate / den, 20000000);
den *= DIV_ROUND_UP(pllin_rate / den, 20000000);
}
dev_dbg(dev, "num / den = %lu / %lu\n", num, den);
P = den;
if (den <= 15 && num <= 16 * 63
&& 1000000 <= pllin_rate / P && pllin_rate / P <= 20000000) {
/* Try the case with D = 0 */
D = 0;
/* factor 'num' into J and R, such that R <= 16 and J <= 63 */
for (R = 16; R; R--) {
if (num % R)
continue;
J = num / R;
if (J == 0 || J > 63)
continue;
dev_dbg(dev, "R * J / P = %d * %d / %d\n", R, J, P);
pcm512x->real_pll = pll_rate;
goto done;
}
/* no luck */
}
R = 1;
if (num > 0xffffffffUL / 10000)
goto fallback;
/* Try to find an exact pll_rate using the D > 0 case */
common = gcd(10000 * num, den);
num = 10000 * num / common;
den /= common;
dev_dbg(dev, "num %lu den %lu common %lu\n", num, den, common);
for (P = den; P <= 15; P++) {
if (pllin_rate / P < 6667000 || 20000000 < pllin_rate / P)
continue;
if (num * P % den)
continue;
K = num * P / den;
/* J == 12 is ok if D == 0 */
if (K < 40000 || K > 120000)
continue;
J = K / 10000;
D = K % 10000;
dev_dbg(dev, "J.D / P = %d.%04d / %d\n", J, D, P);
pcm512x->real_pll = pll_rate;
goto done;
}
/* Fall back to an approximate pll_rate */
fallback:
/* find smallest possible P */
P = DIV_ROUND_UP(pllin_rate, 20000000);
if (!P)
P = 1;
else if (P > 15) {
dev_err(dev, "Need a slower clock as pll-input\n");
return -EINVAL;
}
if (pllin_rate / P < 6667000) {
dev_err(dev, "Need a faster clock as pll-input\n");
return -EINVAL;
}
K = DIV_ROUND_CLOSEST_ULL(10000ULL * pll_rate * P, pllin_rate);
if (K < 40000)
K = 40000;
/* J == 12 is ok if D == 0 */
if (K > 120000)
K = 120000;
J = K / 10000;
D = K % 10000;
dev_dbg(dev, "J.D / P ~ %d.%04d / %d\n", J, D, P);
pcm512x->real_pll = DIV_ROUND_DOWN_ULL((u64)K * pllin_rate, 10000 * P);
done:
pcm512x->pll_r = R;
pcm512x->pll_j = J;
pcm512x->pll_d = D;
pcm512x->pll_p = P;
return 0;
}
static unsigned long pcm512x_pllin_dac_rate(struct snd_soc_dai *dai,
unsigned long osr_rate,
unsigned long pllin_rate)
{
struct snd_soc_codec *codec = dai->codec;
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
unsigned long dac_rate;
if (!pcm512x->pll_out)
return 0; /* no PLL to bypass, force SCK as DAC input */
if (pllin_rate % osr_rate)
return 0; /* futile, quit early */
/* run DAC no faster than 6144000 Hz */
for (dac_rate = rounddown(pcm512x_dac_max(pcm512x, 6144000), osr_rate);
dac_rate;
dac_rate -= osr_rate) {
if (pllin_rate / dac_rate > 128)
return 0; /* DAC divider would be too big */
if (!(pllin_rate % dac_rate))
return dac_rate;
dac_rate -= osr_rate;
}
return 0;
}
static int pcm512x_set_dividers(struct snd_soc_dai *dai,
struct snd_pcm_hw_params *params)
{
struct device *dev = dai->dev;
struct snd_soc_codec *codec = dai->codec;
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
unsigned long pllin_rate = 0;
unsigned long pll_rate;
unsigned long sck_rate;
unsigned long mck_rate;
unsigned long bclk_rate;
unsigned long sample_rate;
unsigned long osr_rate;
unsigned long dacsrc_rate;
int bclk_div;
int lrclk_div;
int dsp_div;
int dac_div;
unsigned long dac_rate;
int ncp_div;
int osr_div;
int ret;
int idac;
int fssp;
int gpio;
lrclk_div = snd_soc_params_to_frame_size(params);
if (lrclk_div == 0) {
dev_err(dev, "No LRCLK?\n");
return -EINVAL;
}
if (!pcm512x->pll_out) {
sck_rate = clk_get_rate(pcm512x->sclk);
bclk_div = params->rate_den * 64 / lrclk_div;
bclk_rate = DIV_ROUND_CLOSEST(sck_rate, bclk_div);
mck_rate = sck_rate;
} else {
ret = snd_soc_params_to_bclk(params);
if (ret < 0) {
dev_err(dev, "Failed to find suitable BCLK: %d\n", ret);
return ret;
}
if (ret == 0) {
dev_err(dev, "No BCLK?\n");
return -EINVAL;
}
bclk_rate = ret;
pllin_rate = clk_get_rate(pcm512x->sclk);
sck_rate = pcm512x_find_sck(dai, bclk_rate);
if (!sck_rate)
return -EINVAL;
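/*
 * Run the PLL at 4x SCK: with SCK constrained to 16-25 MHz this
 * keeps the PLL output in its 64-100 MHz window.
 */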
pll_rate = 4 * sck_rate;
ret = pcm512x_find_pll_coeff(dai, pllin_rate, pll_rate);
if (ret != 0)
return ret;
ret = regmap_write(pcm512x->regmap,
PCM512x_PLL_COEFF_0, pcm512x->pll_p - 1);
if (ret != 0) {
dev_err(dev, "Failed to write PLL P: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap,
PCM512x_PLL_COEFF_1, pcm512x->pll_j);
if (ret != 0) {
dev_err(dev, "Failed to write PLL J: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap,
PCM512x_PLL_COEFF_2, pcm512x->pll_d >> 8);
if (ret != 0) {
dev_err(dev, "Failed to write PLL D msb: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap,
PCM512x_PLL_COEFF_3, pcm512x->pll_d & 0xff);
if (ret != 0) {
dev_err(dev, "Failed to write PLL D lsb: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap,
PCM512x_PLL_COEFF_4, pcm512x->pll_r - 1);
if (ret != 0) {
dev_err(dev, "Failed to write PLL R: %d\n", ret);
return ret;
}
mck_rate = pcm512x->real_pll;
bclk_div = DIV_ROUND_CLOSEST(sck_rate, bclk_rate);
}
if (bclk_div > 128) {
dev_err(dev, "Failed to find BCLK divider\n");
return -EINVAL;
}
/* the actual rate */
sample_rate = sck_rate / bclk_div / lrclk_div;
osr_rate = 16 * sample_rate;
/* run DSP no faster than 50 MHz */
dsp_div = mck_rate > pcm512x_dsp_max(pcm512x) ? 2 : 1;
dac_rate = pcm512x_pllin_dac_rate(dai, osr_rate, pllin_rate);
if (dac_rate) {
/* the desired clock rate is "compatible" with the pll input
* clock, so use that clock as dac input instead of the pll
* output clock since the pll will introduce jitter and thus
* noise.
*/
dev_dbg(dev, "using pll input as dac input\n");
ret = regmap_update_bits(pcm512x->regmap, PCM512x_DAC_REF,
PCM512x_SDAC, PCM512x_SDAC_GPIO);
if (ret != 0) {
dev_err(codec->dev,
"Failed to set gpio as dacref: %d\n", ret);
return ret;
}
gpio = PCM512x_GREF_GPIO1 + pcm512x->pll_in - 1;
ret = regmap_update_bits(pcm512x->regmap, PCM512x_GPIO_DACIN,
PCM512x_GREF, gpio);
if (ret != 0) {
dev_err(codec->dev,
"Failed to set gpio %d as dacin: %d\n",
pcm512x->pll_in, ret);
return ret;
}
dacsrc_rate = pllin_rate;
} else {
/* run DAC no faster than 6144000 Hz */
unsigned long dac_mul = pcm512x_dac_max(pcm512x, 6144000)
/ osr_rate;
unsigned long sck_mul = sck_rate / osr_rate;
for (; dac_mul; dac_mul--) {
if (!(sck_mul % dac_mul))
break;
}
if (!dac_mul) {
dev_err(dev, "Failed to find DAC rate\n");
return -EINVAL;
}
dac_rate = dac_mul * osr_rate;
dev_dbg(dev, "dac_rate %lu sample_rate %lu\n",
dac_rate, sample_rate);
ret = regmap_update_bits(pcm512x->regmap, PCM512x_DAC_REF,
PCM512x_SDAC, PCM512x_SDAC_SCK);
if (ret != 0) {
dev_err(codec->dev,
"Failed to set sck as dacref: %d\n", ret);
return ret;
}
dacsrc_rate = sck_rate;
}
osr_div = DIV_ROUND_CLOSEST(dac_rate, osr_rate);
if (osr_div > 128) {
dev_err(dev, "Failed to find OSR divider\n");
return -EINVAL;
}
dac_div = DIV_ROUND_CLOSEST(dacsrc_rate, dac_rate);
if (dac_div > 128) {
dev_err(dev, "Failed to find DAC divider\n");
return -EINVAL;
}
dac_rate = dacsrc_rate / dac_div;
ncp_div = DIV_ROUND_CLOSEST(dac_rate,
pcm512x_ncp_target(pcm512x, dac_rate));
if (ncp_div > 128 || dac_rate / ncp_div > 2048000) {
/* run NCP no faster than 2048000 Hz, but why? */
ncp_div = DIV_ROUND_UP(dac_rate, 2048000);
if (ncp_div > 128) {
dev_err(dev, "Failed to find NCP divider\n");
return -EINVAL;
}
}
idac = mck_rate / (dsp_div * sample_rate);
ret = regmap_write(pcm512x->regmap, PCM512x_DSP_CLKDIV, dsp_div - 1);
if (ret != 0) {
dev_err(dev, "Failed to write DSP divider: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap, PCM512x_DAC_CLKDIV, dac_div - 1);
if (ret != 0) {
dev_err(dev, "Failed to write DAC divider: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap, PCM512x_NCP_CLKDIV, ncp_div - 1);
if (ret != 0) {
dev_err(dev, "Failed to write NCP divider: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap, PCM512x_OSR_CLKDIV, osr_div - 1);
if (ret != 0) {
dev_err(dev, "Failed to write OSR divider: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap,
PCM512x_MASTER_CLKDIV_1, bclk_div - 1);
if (ret != 0) {
dev_err(dev, "Failed to write BCLK divider: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap,
PCM512x_MASTER_CLKDIV_2, lrclk_div - 1);
if (ret != 0) {
dev_err(dev, "Failed to write LRCLK divider: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap, PCM512x_IDAC_1, idac >> 8);
if (ret != 0) {
dev_err(dev, "Failed to write IDAC msb divider: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap, PCM512x_IDAC_2, idac & 0xff);
if (ret != 0) {
dev_err(dev, "Failed to write IDAC lsb divider: %d\n", ret);
return ret;
}
if (sample_rate <= pcm512x_dac_max(pcm512x, 48000))
fssp = PCM512x_FSSP_48KHZ;
else if (sample_rate <= pcm512x_dac_max(pcm512x, 96000))
fssp = PCM512x_FSSP_96KHZ;
else if (sample_rate <= pcm512x_dac_max(pcm512x, 192000))
fssp = PCM512x_FSSP_192KHZ;
else
fssp = PCM512x_FSSP_384KHZ;
ret = regmap_update_bits(pcm512x->regmap, PCM512x_FS_SPEED_MODE,
PCM512x_FSSP, fssp);
if (ret != 0) {
dev_err(codec->dev, "Failed to set fs speed: %d\n", ret);
return ret;
}
dev_dbg(codec->dev, "DSP divider %d\n", dsp_div);
dev_dbg(codec->dev, "DAC divider %d\n", dac_div);
dev_dbg(codec->dev, "NCP divider %d\n", ncp_div);
dev_dbg(codec->dev, "OSR divider %d\n", osr_div);
dev_dbg(codec->dev, "BCK divider %d\n", bclk_div);
dev_dbg(codec->dev, "LRCK divider %d\n", lrclk_div);
dev_dbg(codec->dev, "IDAC %d\n", idac);
dev_dbg(codec->dev, "1<<FSSP %d\n", 1 << fssp);
return 0;
}
static int pcm512x_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
int alen;
int gpio;
int clock_output;
int master_mode;
int ret;
dev_dbg(codec->dev, "hw_params %u Hz, %u channels\n",
params_rate(params),
params_channels(params));
switch (snd_pcm_format_width(params_format(params))) {
case 16:
alen = PCM512x_ALEN_16;
break;
case 20:
alen = PCM512x_ALEN_20;
break;
case 24:
alen = PCM512x_ALEN_24;
break;
case 32:
alen = PCM512x_ALEN_32;
break;
default:
dev_err(codec->dev, "Bad frame size: %d\n",
snd_pcm_format_width(params_format(params)));
return -EINVAL;
}
switch (pcm512x->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
ret = regmap_update_bits(pcm512x->regmap,
PCM512x_BCLK_LRCLK_CFG,
PCM512x_BCKP
| PCM512x_BCKO | PCM512x_LRKO,
0);
if (ret != 0) {
dev_err(codec->dev,
"Failed to enable slave mode: %d\n", ret);
return ret;
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_ERROR_DETECT,
PCM512x_DCAS, 0);
if (ret != 0) {
dev_err(codec->dev,
"Failed to enable clock divider autoset: %d\n",
ret);
return ret;
}
return 0;
case SND_SOC_DAIFMT_CBM_CFM:
clock_output = PCM512x_BCKO | PCM512x_LRKO;
master_mode = PCM512x_RLRK | PCM512x_RBCK;
break;
case SND_SOC_DAIFMT_CBM_CFS:
clock_output = PCM512x_BCKO;
master_mode = PCM512x_RBCK;
break;
default:
return -EINVAL;
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_I2S_1,
PCM512x_ALEN, alen);
if (ret != 0) {
dev_err(codec->dev, "Failed to set frame size: %d\n", ret);
return ret;
}
if (pcm512x->pll_out) {
ret = regmap_write(pcm512x->regmap, PCM512x_FLEX_A, 0x11);
if (ret != 0) {
dev_err(codec->dev, "Failed to set FLEX_A: %d\n", ret);
return ret;
}
ret = regmap_write(pcm512x->regmap, PCM512x_FLEX_B, 0xff);
if (ret != 0) {
dev_err(codec->dev, "Failed to set FLEX_B: %d\n", ret);
return ret;
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_ERROR_DETECT,
PCM512x_IDFS | PCM512x_IDBK
| PCM512x_IDSK | PCM512x_IDCH
| PCM512x_IDCM | PCM512x_DCAS
| PCM512x_IPLK,
PCM512x_IDFS | PCM512x_IDBK
| PCM512x_IDSK | PCM512x_IDCH
| PCM512x_DCAS);
if (ret != 0) {
dev_err(codec->dev,
"Failed to ignore auto-clock failures: %d\n",
ret);
return ret;
}
} else {
ret = regmap_update_bits(pcm512x->regmap, PCM512x_ERROR_DETECT,
PCM512x_IDFS | PCM512x_IDBK
| PCM512x_IDSK | PCM512x_IDCH
| PCM512x_IDCM | PCM512x_DCAS
| PCM512x_IPLK,
PCM512x_IDFS | PCM512x_IDBK
| PCM512x_IDSK | PCM512x_IDCH
| PCM512x_DCAS | PCM512x_IPLK);
if (ret != 0) {
dev_err(codec->dev,
"Failed to ignore auto-clock failures: %d\n",
ret);
return ret;
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_PLL_EN,
PCM512x_PLLE, 0);
if (ret != 0) {
dev_err(codec->dev, "Failed to disable pll: %d\n", ret);
return ret;
}
}
ret = pcm512x_set_dividers(dai, params);
if (ret != 0)
return ret;
if (pcm512x->pll_out) {
ret = regmap_update_bits(pcm512x->regmap, PCM512x_PLL_REF,
PCM512x_SREF, PCM512x_SREF_GPIO);
if (ret != 0) {
dev_err(codec->dev,
"Failed to set gpio as pllref: %d\n", ret);
return ret;
}
gpio = PCM512x_GREF_GPIO1 + pcm512x->pll_in - 1;
ret = regmap_update_bits(pcm512x->regmap, PCM512x_GPIO_PLLIN,
PCM512x_GREF, gpio);
if (ret != 0) {
dev_err(codec->dev,
"Failed to set gpio %d as pllin: %d\n",
pcm512x->pll_in, ret);
return ret;
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_PLL_EN,
PCM512x_PLLE, PCM512x_PLLE);
if (ret != 0) {
dev_err(codec->dev, "Failed to enable pll: %d\n", ret);
return ret;
}
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_BCLK_LRCLK_CFG,
PCM512x_BCKP | PCM512x_BCKO | PCM512x_LRKO,
clock_output);
if (ret != 0) {
dev_err(codec->dev, "Failed to enable clock output: %d\n", ret);
return ret;
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_MASTER_MODE,
PCM512x_RLRK | PCM512x_RBCK,
master_mode);
if (ret != 0) {
dev_err(codec->dev, "Failed to enable master mode: %d\n", ret);
return ret;
}
if (pcm512x->pll_out) {
gpio = PCM512x_G1OE << (pcm512x->pll_out - 1);
ret = regmap_update_bits(pcm512x->regmap, PCM512x_GPIO_EN,
gpio, gpio);
if (ret != 0) {
dev_err(codec->dev, "Failed to enable gpio %d: %d\n",
pcm512x->pll_out, ret);
return ret;
}
gpio = PCM512x_GPIO_OUTPUT_1 + pcm512x->pll_out - 1;
ret = regmap_update_bits(pcm512x->regmap, gpio,
PCM512x_GxSL, PCM512x_GxSL_PLLCK);
if (ret != 0) {
dev_err(codec->dev, "Failed to output pll on %d: %d\n",
ret, pcm512x->pll_out);
return ret;
}
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_SYNCHRONIZE,
PCM512x_RQSY, PCM512x_RQSY_HALT);
if (ret != 0) {
dev_err(codec->dev, "Failed to halt clocks: %d\n", ret);
return ret;
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_SYNCHRONIZE,
PCM512x_RQSY, PCM512x_RQSY_RESUME);
if (ret != 0) {
dev_err(codec->dev, "Failed to resume clocks: %d\n", ret);
return ret;
}
return 0;
}
static int pcm512x_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_codec *codec = dai->codec;
struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
pcm512x->fmt = fmt;
return 0;
}
static const struct snd_soc_dai_ops pcm512x_dai_ops = {
.startup = pcm512x_dai_startup,
.hw_params = pcm512x_hw_params,
.set_fmt = pcm512x_set_fmt,
};
static struct snd_soc_dai_driver pcm512x_dai = {
.name = "pcm512x-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_CONTINUOUS,
.rate_min = 8000,
.rate_max = 384000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE
},
.ops = &pcm512x_dai_ops,
};
static struct snd_soc_codec_driver pcm512x_codec_driver = {
.set_bias_level = pcm512x_set_bias_level,
.idle_bias_off = true,
.controls = pcm512x_controls,
.num_controls = ARRAY_SIZE(pcm512x_controls),
.dapm_widgets = pcm512x_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(pcm512x_dapm_widgets),
.dapm_routes = pcm512x_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(pcm512x_dapm_routes),
};
static const struct regmap_range_cfg pcm512x_range = {
.name = "Pages", .range_min = PCM512x_VIRT_BASE,
.range_max = PCM512x_MAX_REGISTER,
.selector_reg = PCM512x_PAGE,
.selector_mask = 0xff,
.window_start = 0, .window_len = 0x100,
};
const struct regmap_config pcm512x_regmap = {
.reg_bits = 8,
.val_bits = 8,
.readable_reg = pcm512x_readable,
.volatile_reg = pcm512x_volatile,
.ranges = &pcm512x_range,
.num_ranges = 1,
.max_register = PCM512x_MAX_REGISTER,
.reg_defaults = pcm512x_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(pcm512x_reg_defaults),
.cache_type = REGCACHE_RBTREE,
};
EXPORT_SYMBOL_GPL(pcm512x_regmap);
int pcm512x_probe(struct device *dev, struct regmap *regmap)
{
struct pcm512x_priv *pcm512x;
int i, ret;
pcm512x = devm_kzalloc(dev, sizeof(struct pcm512x_priv), GFP_KERNEL);
if (!pcm512x)
return -ENOMEM;
dev_set_drvdata(dev, pcm512x);
pcm512x->regmap = regmap;
for (i = 0; i < ARRAY_SIZE(pcm512x->supplies); i++)
pcm512x->supplies[i].supply = pcm512x_supply_names[i];
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pcm512x->supplies),
pcm512x->supplies);
if (ret != 0) {
dev_err(dev, "Failed to get supplies: %d\n", ret);
return ret;
}
pcm512x->supply_nb[0].notifier_call = pcm512x_regulator_event_0;
pcm512x->supply_nb[1].notifier_call = pcm512x_regulator_event_1;
pcm512x->supply_nb[2].notifier_call = pcm512x_regulator_event_2;
for (i = 0; i < ARRAY_SIZE(pcm512x->supplies); i++) {
ret = regulator_register_notifier(pcm512x->supplies[i].consumer,
&pcm512x->supply_nb[i]);
if (ret != 0) {
dev_err(dev,
"Failed to register regulator notifier: %d\n",
ret);
}
}
ret = regulator_bulk_enable(ARRAY_SIZE(pcm512x->supplies),
pcm512x->supplies);
if (ret != 0) {
dev_err(dev, "Failed to enable supplies: %d\n", ret);
return ret;
}
/* Reset the device, verifying I/O in the process for I2C */
ret = regmap_write(regmap, PCM512x_RESET,
PCM512x_RSTM | PCM512x_RSTR);
if (ret != 0) {
dev_err(dev, "Failed to reset device: %d\n", ret);
goto err;
}
ret = regmap_write(regmap, PCM512x_RESET, 0);
if (ret != 0) {
dev_err(dev, "Failed to reset device: %d\n", ret);
goto err;
}
pcm512x->sclk = devm_clk_get(dev, NULL);
if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR(pcm512x->sclk)) {
ret = clk_prepare_enable(pcm512x->sclk);
if (ret != 0) {
dev_err(dev, "Failed to enable SCLK: %d\n", ret);
return ret;
}
}
/* Default to standby mode */
ret = regmap_update_bits(pcm512x->regmap, PCM512x_POWER,
PCM512x_RQST, PCM512x_RQST);
if (ret != 0) {
dev_err(dev, "Failed to request standby: %d\n",
ret);
goto err_clk;
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_runtime_idle(dev);
#ifdef CONFIG_OF
if (dev->of_node) {
const struct device_node *np = dev->of_node;
u32 val;
if (of_property_read_u32(np, "pll-in", &val) >= 0) {
if (val > 6) {
dev_err(dev, "Invalid pll-in\n");
ret = -EINVAL;
goto err_clk;
}
pcm512x->pll_in = val;
}
if (of_property_read_u32(np, "pll-out", &val) >= 0) {
if (val > 6) {
dev_err(dev, "Invalid pll-out\n");
ret = -EINVAL;
goto err_clk;
}
pcm512x->pll_out = val;
}
if (!pcm512x->pll_in != !pcm512x->pll_out) {
dev_err(dev,
"Error: both pll-in and pll-out, or none\n");
ret = -EINVAL;
goto err_clk;
}
if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) {
dev_err(dev, "Error: pll-in == pll-out\n");
ret = -EINVAL;
goto err_clk;
}
}
#endif
ret = snd_soc_register_codec(dev, &pcm512x_codec_driver,
&pcm512x_dai, 1);
if (ret != 0) {
dev_err(dev, "Failed to register CODEC: %d\n", ret);
goto err_pm;
}
return 0;
err_pm:
pm_runtime_disable(dev);
err_clk:
if (!IS_ERR(pcm512x->sclk))
clk_disable_unprepare(pcm512x->sclk);
err:
regulator_bulk_disable(ARRAY_SIZE(pcm512x->supplies),
pcm512x->supplies);
return ret;
}
EXPORT_SYMBOL_GPL(pcm512x_probe);
void pcm512x_remove(struct device *dev)
{
struct pcm512x_priv *pcm512x = dev_get_drvdata(dev);
snd_soc_unregister_codec(dev);
pm_runtime_disable(dev);
if (!IS_ERR(pcm512x->sclk))
clk_disable_unprepare(pcm512x->sclk);
regulator_bulk_disable(ARRAY_SIZE(pcm512x->supplies),
pcm512x->supplies);
}
EXPORT_SYMBOL_GPL(pcm512x_remove);
#ifdef CONFIG_PM
static int pcm512x_suspend(struct device *dev)
{
struct pcm512x_priv *pcm512x = dev_get_drvdata(dev);
int ret;
ret = regmap_update_bits(pcm512x->regmap, PCM512x_POWER,
PCM512x_RQPD, PCM512x_RQPD);
if (ret != 0) {
dev_err(dev, "Failed to request power down: %d\n", ret);
return ret;
}
ret = regulator_bulk_disable(ARRAY_SIZE(pcm512x->supplies),
pcm512x->supplies);
if (ret != 0) {
dev_err(dev, "Failed to disable supplies: %d\n", ret);
return ret;
}
if (!IS_ERR(pcm512x->sclk))
clk_disable_unprepare(pcm512x->sclk);
return 0;
}
static int pcm512x_resume(struct device *dev)
{
struct pcm512x_priv *pcm512x = dev_get_drvdata(dev);
int ret;
if (!IS_ERR(pcm512x->sclk)) {
ret = clk_prepare_enable(pcm512x->sclk);
if (ret != 0) {
dev_err(dev, "Failed to enable SCLK: %d\n", ret);
return ret;
}
}
ret = regulator_bulk_enable(ARRAY_SIZE(pcm512x->supplies),
pcm512x->supplies);
if (ret != 0) {
dev_err(dev, "Failed to enable supplies: %d\n", ret);
return ret;
}
regcache_cache_only(pcm512x->regmap, false);
ret = regcache_sync(pcm512x->regmap);
if (ret != 0) {
dev_err(dev, "Failed to sync cache: %d\n", ret);
return ret;
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_POWER,
PCM512x_RQPD, 0);
if (ret != 0) {
dev_err(dev, "Failed to remove power down: %d\n", ret);
return ret;
}
return 0;
}
#endif
const struct dev_pm_ops pcm512x_pm_ops = {
SET_RUNTIME_PM_OPS(pcm512x_suspend, pcm512x_resume, NULL)
};
EXPORT_SYMBOL_GPL(pcm512x_pm_ops);
MODULE_DESCRIPTION("ASoC PCM512x codec driver");
MODULE_AUTHOR("Mark Brown <broonie@linaro.org>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
XCage15/linux-1 | net/sunrpc/xprtrdma/frwr_ops.c | 212 | 9550 | /*
* Copyright (c) 2015 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*/
/* Lightweight memory registration using Fast Registration Work
* Requests (FRWR). Sometimes also referred to as FRMR mode.
*
* FRWR features ordered asynchronous registration and deregistration
* of arbitrarily sized memory regions. This is the fastest and safest
* but most complex memory registration mode.
*/
#include "xprt_rdma.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif
static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
unsigned int depth)
{
struct rpcrdma_frmr *f = &r->r.frmr;
int rc;
f->fr_mr = ib_alloc_fast_reg_mr(pd, depth);
if (IS_ERR(f->fr_mr))
goto out_mr_err;
f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth);
if (IS_ERR(f->fr_pgl))
goto out_list_err;
return 0;
out_mr_err:
rc = PTR_ERR(f->fr_mr);
dprintk("RPC: %s: ib_alloc_fast_reg_mr status %i\n",
__func__, rc);
return rc;
out_list_err:
rc = PTR_ERR(f->fr_pgl);
dprintk("RPC: %s: ib_alloc_fast_reg_page_list status %i\n",
__func__, rc);
ib_dereg_mr(f->fr_mr);
return rc;
}
static void
__frwr_release(struct rpcrdma_mw *r)
{
int rc;
rc = ib_dereg_mr(r->r.frmr.fr_mr);
if (rc)
dprintk("RPC: %s: ib_dereg_mr status %i\n",
__func__, rc);
ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
}
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
struct rpcrdma_create_data_internal *cdata)
{
struct ib_device_attr *devattr = &ia->ri_devattr;
int depth, delta;
ia->ri_max_frmr_depth =
min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
devattr->max_fast_reg_page_list_len);
dprintk("RPC: %s: device's max FR page list len = %u\n",
__func__, ia->ri_max_frmr_depth);
/* Add room for frmr register and invalidate WRs.
* 1. FRMR reg WR for head
* 2. FRMR invalidate WR for head
* 3. N FRMR reg WRs for pagelist
* 4. N FRMR invalidate WRs for pagelist
* 5. FRMR reg WR for tail
* 6. FRMR invalidate WR for tail
* 7. The RDMA_SEND WR
*/
depth = 7;
/* Calculate N if the device max FRMR depth is smaller than
* RPCRDMA_MAX_DATA_SEGS.
*/
if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
do {
depth += 2; /* FRMR reg + invalidate */
delta -= ia->ri_max_frmr_depth;
} while (delta > 0);
}
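/* Illustration only (the real constants are defined elsewhere): if
 * RPCRDMA_MAX_DATA_SEGS were 64 and the device depth 30, delta would
 * start at 34 and the loop would run twice, giving depth = 11.
 */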
ep->rep_attr.cap.max_send_wr *= depth;
if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
cdata->max_requests = devattr->max_qp_wr / depth;
if (!cdata->max_requests)
return -EINVAL;
ep->rep_attr.cap.max_send_wr = cdata->max_requests *
depth;
}
return 0;
}
/* FRWR mode conveys a list of pages per chunk segment. The
* maximum length of that list is the FRWR page list depth.
*/
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
}
/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs to be reset. */
static void
frwr_sendcompletion(struct ib_wc *wc)
{
struct rpcrdma_mw *r;
if (likely(wc->status == IB_WC_SUCCESS))
return;
/* WARNING: Only wr_id and status are reliable at this point */
r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
dprintk("RPC: %s: frmr %p (stale), status %d\n",
__func__, r, wc->status);
r->r.frmr.fr_state = FRMR_IS_STALE;
}
static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct ib_device *device = r_xprt->rx_ia.ri_id->device;
unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
int i;
INIT_LIST_HEAD(&buf->rb_mws);
INIT_LIST_HEAD(&buf->rb_all);
i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);
while (i--) {
struct rpcrdma_mw *r;
int rc;
r = kzalloc(sizeof(*r), GFP_KERNEL);
if (!r)
return -ENOMEM;
rc = __frwr_init(r, pd, device, depth);
if (rc) {
kfree(r);
return rc;
}
list_add(&r->mw_list, &buf->rb_mws);
list_add(&r->mw_all, &buf->rb_all);
r->mw_sendcompletion = frwr_sendcompletion;
}
return 0;
}
/* Post a FAST_REG Work Request to register a memory region
* for remote access via RDMA READ or RDMA WRITE.
*/
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
int nsegs, bool writing)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
struct ib_device *device = ia->ri_id->device;
enum dma_data_direction direction = rpcrdma_data_dir(writing);
struct rpcrdma_mr_seg *seg1 = seg;
struct rpcrdma_mw *mw = seg1->rl_mw;
struct rpcrdma_frmr *frmr = &mw->r.frmr;
struct ib_mr *mr = frmr->fr_mr;
struct ib_send_wr fastreg_wr, *bad_wr;
u8 key;
int len, pageoff;
int i, rc;
int seg_len;
u64 pa;
int page_no;
pageoff = offset_in_page(seg1->mr_offset);
seg1->mr_offset -= pageoff; /* start of page */
seg1->mr_len += pageoff;
len = -pageoff;
if (nsegs > ia->ri_max_frmr_depth)
nsegs = ia->ri_max_frmr_depth;
for (page_no = i = 0; i < nsegs;) {
rpcrdma_map_one(device, seg, direction);
pa = seg->mr_dma;
for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
frmr->fr_pgl->page_list[page_no++] = pa;
pa += PAGE_SIZE;
}
len += seg->mr_len;
++seg;
++i;
/* Check for holes */
if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
dprintk("RPC: %s: Using frmr %p to map %d segments (%d bytes)\n",
__func__, mw, i, len);
frmr->fr_state = FRMR_IS_VALID;
memset(&fastreg_wr, 0, sizeof(fastreg_wr));
fastreg_wr.wr_id = (unsigned long)(void *)mw;
fastreg_wr.opcode = IB_WR_FAST_REG_MR;
fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff;
fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
fastreg_wr.wr.fast_reg.page_list_len = page_no;
fastreg_wr.wr.fast_reg.length = len;
fastreg_wr.wr.fast_reg.access_flags = writing ?
IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
IB_ACCESS_REMOTE_READ;
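/* Bump the 8-bit key portion of the rkey before each registration so
 * that a stale rkey left over from an earlier mapping of this MR is
 * refused by the HCA.
 */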
key = (u8)(mr->rkey & 0x000000FF);
ib_update_fast_reg_key(mr, ++key);
fastreg_wr.wr.fast_reg.rkey = mr->rkey;
DECR_CQCOUNT(&r_xprt->rx_ep);
rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
if (rc)
goto out_senderr;
seg1->mr_rkey = mr->rkey;
seg1->mr_base = seg1->mr_dma + pageoff;
seg1->mr_nsegs = i;
seg1->mr_len = len;
return i;
out_senderr:
dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
ib_update_fast_reg_key(mr, --key);
frmr->fr_state = FRMR_IS_INVALID;
while (i--)
rpcrdma_unmap_one(device, --seg);
return rc;
}
/* Post a LOCAL_INV Work Request to prevent further remote access
* via RDMA READ or RDMA WRITE.
*/
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
struct rpcrdma_mr_seg *seg1 = seg;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
struct ib_send_wr invalidate_wr, *bad_wr;
int rc, nsegs = seg->mr_nsegs;
struct ib_device *device;
seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
memset(&invalidate_wr, 0, sizeof(invalidate_wr));
invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
invalidate_wr.opcode = IB_WR_LOCAL_INV;
invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
DECR_CQCOUNT(&r_xprt->rx_ep);
read_lock(&ia->ri_qplock);
device = ia->ri_id->device;
while (seg1->mr_nsegs--)
rpcrdma_unmap_one(device, seg++);
rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
read_unlock(&ia->ri_qplock);
if (rc)
goto out_err;
return nsegs;
out_err:
/* Force rpcrdma_buffer_get() to retry */
seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
return nsegs;
}
/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
* an unusable state. Find FRMRs in this state and dereg / reg
* each. FRMRs that are VALID and attached to an rpcrdma_req are
* also torn down.
*
* This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
*
* This is invoked only in the transport connect worker in order
* to serialize with rpcrdma_register_frmr_external().
*/
static void
frwr_op_reset(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct ib_device *device = r_xprt->rx_ia.ri_id->device;
unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
struct rpcrdma_mw *r;
int rc;
list_for_each_entry(r, &buf->rb_all, mw_all) {
if (r->r.frmr.fr_state == FRMR_IS_INVALID)
continue;
__frwr_release(r);
rc = __frwr_init(r, pd, device, depth);
if (rc) {
dprintk("RPC: %s: mw %p left %s\n",
__func__, r,
(r->r.frmr.fr_state == FRMR_IS_STALE ?
"stale" : "valid"));
continue;
}
r->r.frmr.fr_state = FRMR_IS_INVALID;
}
}
static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
struct rpcrdma_mw *r;
while (!list_empty(&buf->rb_all)) {
r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
list_del(&r->mw_all);
__frwr_release(r);
kfree(r);
}
}
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
.ro_map = frwr_op_map,
.ro_unmap = frwr_op_unmap,
.ro_open = frwr_op_open,
.ro_maxpages = frwr_op_maxpages,
.ro_init = frwr_op_init,
.ro_reset = frwr_op_reset,
.ro_destroy = frwr_op_destroy,
.ro_displayname = "frwr",
};
| gpl-2.0 |
blakearnold/MPR | drivers/scsi/mvme16x_scsi.c | 212 | 3678 | /*
* Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux.
*
* Based on work by Alan Hourihane
*
* Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
*/
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/mvme16xhw.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>
#include "53c700.h"
MODULE_AUTHOR("Kars de Jong <jongk@linux-m68k.org>");
MODULE_DESCRIPTION("MVME16x NCR53C710 driver");
MODULE_LICENSE("GPL");
static struct scsi_host_template mvme16x_scsi_driver_template = {
.name = "MVME16x NCR53c710 SCSI",
.proc_name = "MVME16x",
.this_id = 7,
.module = THIS_MODULE,
};
static struct platform_device *mvme16x_scsi_device;
static __devinit int
mvme16x_probe(struct device *dev)
{
struct Scsi_Host * host = NULL;
struct NCR_700_Host_Parameters *hostdata;
if (!MACH_IS_MVME16x)
goto out;
if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) {
printk(KERN_INFO "mvme16x-scsi: detection disabled, "
"SCSI chip not present\n");
goto out;
}
hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
if (hostdata == NULL) {
printk(KERN_ERR "mvme16x-scsi: "
"Failed to allocate host data\n");
goto out;
}
/* Fill in the required pieces of hostdata */
hostdata->base = (void __iomem *)0xfff47000UL;
hostdata->clock = 50; /* XXX - depends on the CPU clock! */
hostdata->chip710 = 1;
hostdata->dmode_extra = DMODE_FC2;
hostdata->dcntl_extra = EA_710;
hostdata->ctest7_extra = CTEST7_TT1;
/* and register the chip */
host = NCR_700_detect(&mvme16x_scsi_driver_template, hostdata, dev);
if (!host) {
printk(KERN_ERR "mvme16x-scsi: No host detected; "
"board configuration problem?\n");
goto out_free;
}
host->this_id = 7;
host->base = 0xfff47000UL;
host->irq = MVME16x_IRQ_SCSI;
if (request_irq(host->irq, NCR_700_intr, 0, "mvme16x-scsi", host)) {
printk(KERN_ERR "mvme16x-scsi: request_irq failed\n");
goto out_put_host;
}
/* Enable scsi chip ints */
{
volatile unsigned long v;
/* Enable scsi interrupts at level 4 in PCCchip2 */
v = in_be32(0xfff4202c);
v = (v & ~0xff) | 0x10 | 4;
out_be32(0xfff4202c, v);
}
dev_set_drvdata(dev, host);
scsi_scan_host(host);
return 0;
out_put_host:
scsi_host_put(host);
out_free:
kfree(hostdata);
out:
return -ENODEV;
}
static __devexit int
mvme16x_device_remove(struct device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
/* Disable scsi chip ints */
{
volatile unsigned long v;
v = in_be32(0xfff4202c);
v &= ~0x10;
out_be32(0xfff4202c, v);
}
scsi_remove_host(host);
NCR_700_release(host);
kfree(hostdata);
free_irq(host->irq, host);
return 0;
}
static struct device_driver mvme16x_scsi_driver = {
.name = "mvme16x-scsi",
.bus = &platform_bus_type,
.probe = mvme16x_probe,
.remove = __devexit_p(mvme16x_device_remove),
};
static int __init mvme16x_scsi_init(void)
{
int err;
err = driver_register(&mvme16x_scsi_driver);
if (err)
return err;
mvme16x_scsi_device = platform_device_register_simple("mvme16x-scsi",
-1, NULL, 0);
if (IS_ERR(mvme16x_scsi_device)) {
driver_unregister(&mvme16x_scsi_driver);
return PTR_ERR(mvme16x_scsi_device);
}
return 0;
}
static void __exit mvme16x_scsi_exit(void)
{
platform_device_unregister(mvme16x_scsi_device);
driver_unregister(&mvme16x_scsi_driver);
}
module_init(mvme16x_scsi_init);
module_exit(mvme16x_scsi_exit);
| gpl-2.0 |
raedwulf/linux | drivers/pwm/pwm-bcm-kona.c | 212 | 9037 | /*
* Copyright (C) 2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include <linux/types.h>
/*
* The Kona PWM has some unusual characteristics. Here are the main points.
*
* 1) There is no disable bit and the hardware docs advise programming a zero
* duty to achieve output equivalent to that of a normal disable operation.
*
* 2) Changes to prescale, duty, period, and polarity do not take effect until
* a subsequent rising edge of the trigger bit.
*
* 3) If the smooth bit and trigger bit are both low, the output is a constant
* high signal. Otherwise, the earlier waveform continues to be output.
*
* 4) If the smooth bit is set on the rising edge of the trigger bit, output
* will transition to the new settings on a period boundary (which could be
* seconds away). If the smooth bit is clear, new settings will be applied
* as soon as possible (the hardware always has a 400ns delay).
*
* 5) When the external clock that feeds the PWM is disabled, output is pegged
* high or low depending on its state at that exact instant.
*/
#define PWM_CONTROL_OFFSET (0x00000000)
#define PWM_CONTROL_SMOOTH_SHIFT(chan) (24 + (chan))
#define PWM_CONTROL_TYPE_SHIFT(chan) (16 + (chan))
#define PWM_CONTROL_POLARITY_SHIFT(chan) (8 + (chan))
#define PWM_CONTROL_TRIGGER_SHIFT(chan) (chan)
#define PRESCALE_OFFSET (0x00000004)
#define PRESCALE_SHIFT(chan) ((chan) << 2)
#define PRESCALE_MASK(chan) (0x7 << PRESCALE_SHIFT(chan))
#define PRESCALE_MIN (0x00000000)
#define PRESCALE_MAX (0x00000007)
#define PERIOD_COUNT_OFFSET(chan) (0x00000008 + ((chan) << 3))
#define PERIOD_COUNT_MIN (0x00000002)
#define PERIOD_COUNT_MAX (0x00ffffff)
#define DUTY_CYCLE_HIGH_OFFSET(chan) (0x0000000c + ((chan) << 3))
#define DUTY_CYCLE_HIGH_MIN (0x00000000)
#define DUTY_CYCLE_HIGH_MAX (0x00ffffff)
struct kona_pwmc {
struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *_chip)
{
return container_of(_chip, struct kona_pwmc, chip);
}
static void kona_pwmc_apply_settings(struct kona_pwmc *kp, unsigned int chan)
{
unsigned int value = readl(kp->base + PWM_CONTROL_OFFSET);
/* Clear trigger bit but set smooth bit to maintain old output */
value |= 1 << PWM_CONTROL_SMOOTH_SHIFT(chan);
value &= ~(1 << PWM_CONTROL_TRIGGER_SHIFT(chan));
writel(value, kp->base + PWM_CONTROL_OFFSET);
/* Set trigger bit and clear smooth bit to apply new settings */
value &= ~(1 << PWM_CONTROL_SMOOTH_SHIFT(chan));
value |= 1 << PWM_CONTROL_TRIGGER_SHIFT(chan);
writel(value, kp->base + PWM_CONTROL_OFFSET);
}
static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct kona_pwmc *kp = to_kona_pwmc(chip);
u64 val, div, rate;
unsigned long prescale = PRESCALE_MIN, pc, dc;
unsigned int value, chan = pwm->hwpwm;
/*
* Find period count, duty count and prescale to suit duty_ns and
* period_ns. This is done according to formulas described below:
*
* period_ns = 10^9 * (PRESCALE + 1) * PC / PWM_CLK_RATE
* duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
*
* PC = (PWM_CLK_RATE * period_ns) / (10^9 * (PRESCALE + 1))
* DC = (PWM_CLK_RATE * duty_ns) / (10^9 * (PRESCALE + 1))
*/
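/*
 * Illustrative worked example (not from the hardware docs): assuming a
 * hypothetical PWM_CLK_RATE of 26 MHz and a requested 1 kHz / 50% duty
 * signal (period_ns = 1000000, duty_ns = 500000), the first pass with
 * PRESCALE = 0 gives:
 *
 *   PC = (26000000 * 1000000) / (10^9 * 1) = 26000
 *   DC = (26000000 *  500000) / (10^9 * 1) = 13000
 *
 * Both values already fall within [PERIOD_COUNT_MIN, PERIOD_COUNT_MAX]
 * and [DUTY_CYCLE_HIGH_MIN, DUTY_CYCLE_HIGH_MAX], so the loop below
 * terminates without raising the prescaler.
 */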
rate = clk_get_rate(kp->clk);
while (1) {
div = 1000000000;
div *= 1 + prescale;
val = rate * period_ns;
pc = div64_u64(val, div);
val = rate * duty_ns;
dc = div64_u64(val, div);
/* If duty_ns or period_ns are not achievable then return */
if (pc < PERIOD_COUNT_MIN || dc < DUTY_CYCLE_HIGH_MIN)
return -EINVAL;
/* If pc and dc are in bounds, the calculation is done */
if (pc <= PERIOD_COUNT_MAX && dc <= DUTY_CYCLE_HIGH_MAX)
break;
/* Otherwise, increase prescale and recalculate pc and dc */
if (++prescale > PRESCALE_MAX)
return -EINVAL;
}
/* If the PWM channel is enabled, write the settings to the HW */
if (test_bit(PWMF_ENABLED, &pwm->flags)) {
value = readl(kp->base + PRESCALE_OFFSET);
value &= ~PRESCALE_MASK(chan);
value |= prescale << PRESCALE_SHIFT(chan);
writel(value, kp->base + PRESCALE_OFFSET);
writel(pc, kp->base + PERIOD_COUNT_OFFSET(chan));
writel(dc, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
kona_pwmc_apply_settings(kp, chan);
}
return 0;
}
static int kona_pwmc_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity)
{
struct kona_pwmc *kp = to_kona_pwmc(chip);
unsigned int chan = pwm->hwpwm;
unsigned int value;
int ret;
ret = clk_prepare_enable(kp->clk);
if (ret < 0) {
dev_err(chip->dev, "failed to enable clock: %d\n", ret);
return ret;
}
value = readl(kp->base + PWM_CONTROL_OFFSET);
if (polarity == PWM_POLARITY_NORMAL)
value |= 1 << PWM_CONTROL_POLARITY_SHIFT(chan);
else
value &= ~(1 << PWM_CONTROL_POLARITY_SHIFT(chan));
writel(value, kp->base + PWM_CONTROL_OFFSET);
kona_pwmc_apply_settings(kp, chan);
/* Wait for waveform to settle before gating off the clock */
ndelay(400);
clk_disable_unprepare(kp->clk);
return 0;
}
static int kona_pwmc_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct kona_pwmc *kp = to_kona_pwmc(chip);
int ret;
ret = clk_prepare_enable(kp->clk);
if (ret < 0) {
dev_err(chip->dev, "failed to enable clock: %d\n", ret);
return ret;
}
ret = kona_pwmc_config(chip, pwm, pwm->duty_cycle, pwm->period);
if (ret < 0) {
clk_disable_unprepare(kp->clk);
return ret;
}
return 0;
}
static void kona_pwmc_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct kona_pwmc *kp = to_kona_pwmc(chip);
unsigned int chan = pwm->hwpwm;
/* Simulate a disable by configuring for zero duty */
writel(0, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
kona_pwmc_apply_settings(kp, chan);
/* Wait for waveform to settle before gating off the clock */
ndelay(400);
clk_disable_unprepare(kp->clk);
}
static const struct pwm_ops kona_pwm_ops = {
.config = kona_pwmc_config,
.set_polarity = kona_pwmc_set_polarity,
.enable = kona_pwmc_enable,
.disable = kona_pwmc_disable,
.owner = THIS_MODULE,
};
static int kona_pwmc_probe(struct platform_device *pdev)
{
struct kona_pwmc *kp;
struct resource *res;
unsigned int chan;
unsigned int value = 0;
int ret = 0;
kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
if (kp == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, kp);
kp->chip.dev = &pdev->dev;
kp->chip.ops = &kona_pwm_ops;
kp->chip.base = -1;
kp->chip.npwm = 6;
kp->chip.of_xlate = of_pwm_xlate_with_flags;
kp->chip.of_pwm_n_cells = 3;
kp->chip.can_sleep = true;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
kp->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(kp->base))
return PTR_ERR(kp->base);
kp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(kp->clk)) {
dev_err(&pdev->dev, "failed to get clock: %ld\n",
PTR_ERR(kp->clk));
return PTR_ERR(kp->clk);
}
ret = clk_prepare_enable(kp->clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
return ret;
}
/* Set push/pull for all channels */
for (chan = 0; chan < kp->chip.npwm; chan++)
value |= (1 << PWM_CONTROL_TYPE_SHIFT(chan));
writel(value, kp->base + PWM_CONTROL_OFFSET);
clk_disable_unprepare(kp->clk);
ret = pwmchip_add_with_polarity(&kp->chip, PWM_POLARITY_INVERSED);
if (ret < 0)
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
return ret;
}
static int kona_pwmc_remove(struct platform_device *pdev)
{
struct kona_pwmc *kp = platform_get_drvdata(pdev);
unsigned int chan;
for (chan = 0; chan < kp->chip.npwm; chan++)
if (test_bit(PWMF_ENABLED, &kp->chip.pwms[chan].flags))
clk_disable_unprepare(kp->clk);
return pwmchip_remove(&kp->chip);
}
static const struct of_device_id bcm_kona_pwmc_dt[] = {
{ .compatible = "brcm,kona-pwm" },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_kona_pwmc_dt);
static struct platform_driver kona_pwmc_driver = {
.driver = {
.name = "bcm-kona-pwm",
.of_match_table = bcm_kona_pwmc_dt,
},
.probe = kona_pwmc_probe,
.remove = kona_pwmc_remove,
};
module_platform_driver(kona_pwmc_driver);
MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
MODULE_AUTHOR("Tim Kryger <tkryger@broadcom.com>");
MODULE_DESCRIPTION("Broadcom Kona PWM driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
AntonioPT/u8150-kernel-pulse-port | drivers/net/wireless/iwmc3200wifi/fw.c | 468 | 11301 | /*
* Intel Wireless Multicomm 3200 WiFi driver
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <ilw@linux.intel.com>
* Samuel Ortiz <samuel.ortiz@intel.com>
* Zhu Yi <yi.zhu@intel.com>
*
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
#include "iwm.h"
#include "bus.h"
#include "hal.h"
#include "umac.h"
#include "debug.h"
#include "fw.h"
#include "commands.h"
static const char fw_barker[] = "*WESTOPFORNOONE*";
/*
* @op_code: Op code we're looking for.
* @index: There can be several instances of the same opcode within
* the firmware. Index specifies which one we're looking for.
*/
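/*
 * Sketch of the image layout this parser assumes, inferred from the
 * loop below rather than from any firmware specification: the file
 * starts with the barker string (IWM_HDR_BARKER_LEN bytes), followed
 * by a sequence of struct iwm_fw_hdr_rec entries, each immediately
 * followed by rec->len bytes of payload. The sequence ends either at
 * an IWM_HDR_REC_OP_INVALID record or at the end of the file.
 */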
static int iwm_fw_op_offset(struct iwm_priv *iwm, const struct firmware *fw,
u16 op_code, u32 index)
{
int offset = -EINVAL, fw_offset;
u32 op_index = 0;
const u8 *fw_ptr;
struct iwm_fw_hdr_rec *rec;
fw_offset = 0;
fw_ptr = fw->data;
/* We first need to look for the firmware barker */
if (memcmp(fw_ptr, fw_barker, IWM_HDR_BARKER_LEN)) {
IWM_ERR(iwm, "No barker string in this FW\n");
return -EINVAL;
}
if (fw->size < IWM_HDR_LEN) {
IWM_ERR(iwm, "FW is too small (%zu)\n", fw->size);
return -EINVAL;
}
fw_offset += IWM_HDR_BARKER_LEN;
while (fw_offset < fw->size) {
rec = (struct iwm_fw_hdr_rec *)(fw_ptr + fw_offset);
IWM_DBG_FW(iwm, DBG, "FW: op_code: 0x%x, len: %d @ 0x%x\n",
rec->op_code, rec->len, fw_offset);
if (rec->op_code == IWM_HDR_REC_OP_INVALID) {
IWM_DBG_FW(iwm, DBG, "Reached INVALID op code\n");
break;
}
if (rec->op_code == op_code) {
if (op_index == index) {
fw_offset += sizeof(struct iwm_fw_hdr_rec);
offset = fw_offset;
goto out;
}
op_index++;
}
fw_offset += sizeof(struct iwm_fw_hdr_rec) + rec->len;
}
out:
return offset;
}
static int iwm_load_firmware_chunk(struct iwm_priv *iwm,
const struct firmware *fw,
struct iwm_fw_img_desc *img_desc)
{
struct iwm_udma_nonwifi_cmd target_cmd;
u32 chunk_size;
const u8 *chunk_ptr;
int ret = 0;
IWM_DBG_FW(iwm, INFO, "Loading FW chunk: %d bytes @ 0x%x\n",
img_desc->length, img_desc->address);
target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
target_cmd.handle_by_hw = 1;
target_cmd.op2 = 0;
target_cmd.resp = 0;
target_cmd.eop = 1;
chunk_size = img_desc->length;
chunk_ptr = fw->data + img_desc->offset;
while (chunk_size > 0) {
u32 tmp_chunk_size;
tmp_chunk_size = min_t(u32, chunk_size,
IWM_MAX_NONWIFI_CMD_BUFF_SIZE);
target_cmd.addr = cpu_to_le32(img_desc->address +
(chunk_ptr - fw->data - img_desc->offset));
target_cmd.op1_sz = cpu_to_le32(tmp_chunk_size);
IWM_DBG_FW(iwm, DBG, "\t%d bytes @ 0x%x\n",
tmp_chunk_size, target_cmd.addr);
ret = iwm_hal_send_target_cmd(iwm, &target_cmd, chunk_ptr);
if (ret < 0) {
IWM_ERR(iwm, "Couldn't load FW chunk\n");
break;
}
chunk_size -= tmp_chunk_size;
chunk_ptr += tmp_chunk_size;
}
return ret;
}
/*
* To load a fw image to the target, we basically go through the
* fw, looking for OP_MEM_DESC records. Once we find one, we
* pass it to iwm_load_firmware_chunk().
* The OP_MEM_DESC records contain the actual memory chunk to be
* sent, but also the destination address.
*/
static int iwm_load_img(struct iwm_priv *iwm, const char *img_name)
{
const struct firmware *fw;
struct iwm_fw_img_desc *img_desc;
struct iwm_fw_img_ver *ver;
int ret = 0, fw_offset;
u32 opcode_idx = 0, build_date;
char *build_tag;
ret = request_firmware(&fw, img_name, iwm_to_dev(iwm));
if (ret) {
IWM_ERR(iwm, "Request firmware failed");
return ret;
}
IWM_DBG_FW(iwm, INFO, "Start to load FW %s\n", img_name);
while (1) {
fw_offset = iwm_fw_op_offset(iwm, fw,
IWM_HDR_REC_OP_MEM_DESC,
opcode_idx);
if (fw_offset < 0)
break;
img_desc = (struct iwm_fw_img_desc *)(fw->data + fw_offset);
ret = iwm_load_firmware_chunk(iwm, fw, img_desc);
if (ret < 0)
goto err_release_fw;
opcode_idx++;
}
/* Read firmware version */
fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_SW_VER, 0);
if (fw_offset < 0)
goto err_release_fw;
ver = (struct iwm_fw_img_ver *)(fw->data + fw_offset);
/* Read build tag */
fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_TAG, 0);
if (fw_offset < 0)
goto err_release_fw;
build_tag = (char *)(fw->data + fw_offset);
/* Read build date */
fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_DATE, 0);
if (fw_offset < 0)
goto err_release_fw;
build_date = *(u32 *)(fw->data + fw_offset);
IWM_INFO(iwm, "%s:\n", img_name);
IWM_INFO(iwm, "\tVersion: %02X.%02X\n", ver->major, ver->minor);
IWM_INFO(iwm, "\tBuild tag: %s\n", build_tag);
IWM_INFO(iwm, "\tBuild date: %x-%x-%x\n",
IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date),
IWM_BUILD_DAY(build_date));
err_release_fw:
release_firmware(fw);
return ret;
}
static int iwm_load_umac(struct iwm_priv *iwm)
{
struct iwm_udma_nonwifi_cmd target_cmd;
int ret;
ret = iwm_load_img(iwm, iwm->bus_ops->umac_name);
if (ret < 0)
return ret;
/* We've loaded the UMAC, we can tell the target to jump there */
target_cmd.opcode = UMAC_HDI_OUT_OPCODE_JUMP;
target_cmd.addr = cpu_to_le32(UMAC_MU_FW_INST_DATA_12_ADDR);
target_cmd.op1_sz = 0;
target_cmd.op2 = 0;
target_cmd.handle_by_hw = 0;
target_cmd.resp = 1;
target_cmd.eop = 1;
ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
if (ret < 0)
IWM_ERR(iwm, "Couldn't send JMP command\n");
return ret;
}
static int iwm_load_lmac(struct iwm_priv *iwm, const char *img_name)
{
int ret;
ret = iwm_load_img(iwm, img_name);
if (ret < 0)
return ret;
return iwm_send_umac_reset(iwm,
cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_CLK_EN), 0);
}
static int iwm_init_calib(struct iwm_priv *iwm, unsigned long cfg_bitmap,
unsigned long expected_bitmap, u8 rx_iq_cmd)
{
/* Read RX IQ calibration result from EEPROM */
if (test_bit(rx_iq_cmd, &cfg_bitmap)) {
iwm_store_rxiq_calib_result(iwm);
set_bit(PHY_CALIBRATE_RX_IQ_CMD, &iwm->calib_done_map);
}
iwm_send_prio_table(iwm);
iwm_send_init_calib_cfg(iwm, cfg_bitmap);
while (iwm->calib_done_map != expected_bitmap) {
if (iwm_notif_handle(iwm, CALIBRATION_RES_NOTIFICATION,
IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT)) {
IWM_DBG_FW(iwm, DBG, "Initial calibration timeout\n");
return -ETIMEDOUT;
}
IWM_DBG_FW(iwm, DBG, "Got calibration result. calib_done_map: "
"0x%lx, expected calibrations: 0x%lx\n",
iwm->calib_done_map, expected_bitmap);
}
return 0;
}
/*
* We currently have to load 3 FWs:
* 1) The UMAC (Upper MAC).
* 2) The calibration LMAC (Lower MAC).
* We then send the calibration init command, so that the device can
* run a first calibration round.
* 3) The operational LMAC, which replaces the calibration one when it's
* done with the first calibration round.
*
* Once those 3 FWs have been loaded, we send the periodic calibration
* command, and then the device is available for regular 802.11 operations.
*/
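/*
 * Rough sequence of iwm_load_fw() below (a summary of the code, kept
 * here as a reading aid):
 *
 *   1. Load the UMAC and wait for UMAC_NOTIFY_OPCODE_ALIVE.
 *   2. Load the calibration LMAC, wait for INIT_COMPLETE, read the
 *      EEPROM and run the initial calibration round, waiting for
 *      CALIBRATION_COMPLETE.
 *   3. Reset the LARC via iwm_send_umac_reset(), load the operational
 *      LMAC, wait for INIT_COMPLETE again, then push the priority
 *      table, calibration results and periodic calibration config.
 */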
int iwm_load_fw(struct iwm_priv *iwm)
{
unsigned long init_calib_map, periodic_calib_map;
unsigned long expected_calib_map;
int ret;
/* We first start downloading the UMAC */
ret = iwm_load_umac(iwm);
if (ret < 0) {
IWM_ERR(iwm, "UMAC loading failed\n");
return ret;
}
/* Handle UMAC_ALIVE notification */
ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_ALIVE, IWM_SRC_UMAC,
WAIT_NOTIF_TIMEOUT);
if (ret) {
IWM_ERR(iwm, "Handle UMAC_ALIVE failed: %d\n", ret);
return ret;
}
/* UMAC is alive, we can download the calibration LMAC */
ret = iwm_load_lmac(iwm, iwm->bus_ops->calib_lmac_name);
if (ret) {
IWM_ERR(iwm, "Calibration LMAC loading failed\n");
return ret;
}
/* Handle UMAC_INIT_COMPLETE notification */
ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE,
IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
if (ret) {
IWM_ERR(iwm, "Handle INIT_COMPLETE failed for calibration "
"LMAC: %d\n", ret);
return ret;
}
/* Read EEPROM data */
ret = iwm_eeprom_init(iwm);
if (ret < 0) {
IWM_ERR(iwm, "Couldn't init eeprom array\n");
return ret;
}
init_calib_map = iwm->conf.calib_map & IWM_CALIB_MAP_INIT_MSK;
expected_calib_map = iwm->conf.expected_calib_map &
IWM_CALIB_MAP_INIT_MSK;
periodic_calib_map = IWM_CALIB_MAP_PER_LMAC(iwm->conf.calib_map);
ret = iwm_init_calib(iwm, init_calib_map, expected_calib_map,
CALIB_CFG_RX_IQ_IDX);
if (ret < 0) {
/* Let's try the old way */
ret = iwm_init_calib(iwm, expected_calib_map,
expected_calib_map,
PHY_CALIBRATE_RX_IQ_CMD);
if (ret < 0) {
IWM_ERR(iwm, "Calibration result timeout\n");
goto out;
}
}
/* Handle LMAC CALIBRATION_COMPLETE notification */
ret = iwm_notif_handle(iwm, CALIBRATION_COMPLETE_NOTIFICATION,
IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT);
if (ret) {
IWM_ERR(iwm, "Wait for CALIBRATION_COMPLETE timeout\n");
goto out;
}
IWM_INFO(iwm, "LMAC calibration done: 0x%lx\n", iwm->calib_done_map);
iwm_send_umac_reset(iwm, cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_RESET), 1);
ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
WAIT_NOTIF_TIMEOUT);
if (ret) {
IWM_ERR(iwm, "Wait for UMAC RESET timeout\n");
goto out;
}
/* Download the operational LMAC */
ret = iwm_load_lmac(iwm, iwm->bus_ops->lmac_name);
if (ret) {
IWM_ERR(iwm, "LMAC loading failed\n");
goto out;
}
ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE,
IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
if (ret) {
IWM_ERR(iwm, "Handle INIT_COMPLETE failed for LMAC: %d\n", ret);
goto out;
}
iwm_send_prio_table(iwm);
iwm_send_calib_results(iwm);
iwm_send_periodic_calib_cfg(iwm, periodic_calib_map);
return 0;
out:
iwm_eeprom_exit(iwm);
return ret;
}
| gpl-2.0 |
vadonka/lge-kernel-kang | arch/x86/kernel/probe_roms_32.c | 1748 | 4182 | #include <linux/sched.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <linux/ioport.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <asm/e820.h>
#include <asm/mmzone.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/setup_arch.h>
static struct resource system_rom_resource = {
.name = "System ROM",
.start = 0xf0000,
.end = 0xfffff,
.flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
};
static struct resource extension_rom_resource = {
.name = "Extension ROM",
.start = 0xe0000,
.end = 0xeffff,
.flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
};
static struct resource adapter_rom_resources[] = { {
.name = "Adapter ROM",
.start = 0xc8000,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
}, {
.name = "Adapter ROM",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
}, {
.name = "Adapter ROM",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
}, {
.name = "Adapter ROM",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
}, {
.name = "Adapter ROM",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
}, {
.name = "Adapter ROM",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
} };
static struct resource video_rom_resource = {
.name = "Video ROM",
.start = 0xc0000,
.end = 0xc7fff,
.flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
};
#define ROMSIGNATURE 0xaa55
static int __init romsignature(const unsigned char *rom)
{
const unsigned short * const ptr = (const unsigned short *)rom;
unsigned short sig;
return probe_kernel_address(ptr, sig) == 0 && sig == ROMSIGNATURE;
}
static int __init romchecksum(const unsigned char *rom, unsigned long length)
{
unsigned char sum, c;
for (sum = 0; length && probe_kernel_address(rom++, c) == 0; length--)
sum += c;
return !length && !sum;
}
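/*
 * Example of a header that passes both checks above (illustrative
 * values only): an option ROM beginning with the bytes 0x55 0xaa
 * (read as the little-endian u16 ROMSIGNATURE), whose third byte is
 * 0x04, i.e. a declared length of 4 * 512 = 2048 bytes, and whose
 * 2048 bytes sum to 0 modulo 256. probe_roms() below reads that
 * length byte and only trusts it when romchecksum() agrees.
 */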
void __init probe_roms(void)
{
const unsigned char *rom;
unsigned long start, length, upper;
unsigned char c;
int i;
/* video rom */
upper = adapter_rom_resources[0].start;
for (start = video_rom_resource.start; start < upper; start += 2048) {
rom = isa_bus_to_virt(start);
if (!romsignature(rom))
continue;
video_rom_resource.start = start;
if (probe_kernel_address(rom + 2, c) != 0)
continue;
/* 0 < length <= 0x7f * 512, historically */
length = c * 512;
/* if checksum okay, trust length byte */
if (length && romchecksum(rom, length))
video_rom_resource.end = start + length - 1;
request_resource(&iomem_resource, &video_rom_resource);
break;
}
start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
if (start < upper)
start = upper;
/* system rom */
request_resource(&iomem_resource, &system_rom_resource);
upper = system_rom_resource.start;
/* check for extension rom (ignore length byte!) */
rom = isa_bus_to_virt(extension_rom_resource.start);
if (romsignature(rom)) {
length = extension_rom_resource.end - extension_rom_resource.start + 1;
if (romchecksum(rom, length)) {
request_resource(&iomem_resource, &extension_rom_resource);
upper = extension_rom_resource.start;
}
}
/* check for adapter roms on 2k boundaries */
for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
rom = isa_bus_to_virt(start);
if (!romsignature(rom))
continue;
if (probe_kernel_address(rom + 2, c) != 0)
continue;
/* 0 < length <= 0x7f * 512, historically */
length = c * 512;
/* but accept any length that fits if checksum okay */
if (!length || start + length > upper || !romchecksum(rom, length))
continue;
adapter_rom_resources[i].start = start;
adapter_rom_resources[i].end = start + length - 1;
request_resource(&iomem_resource, &adapter_rom_resources[i]);
start = adapter_rom_resources[i++].end & ~2047UL;
}
}
| gpl-2.0 |
spica234/LiteGX | drivers/net/wireless/mwl8k.c | 2260 | 141598 | /*
* drivers/net/wireless/mwl8k.c
* Driver for Marvell TOPDOG 802.11 Wireless cards
*
* Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/workqueue.h>
#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
#define MWL8K_VERSION "0.12"
/* Module parameters */
static unsigned ap_mode_default;
module_param(ap_mode_default, bool, 0);
MODULE_PARM_DESC(ap_mode_default,
"Set to 1 to make ap mode the default instead of sta mode");
/* Register definitions */
#define MWL8K_HIU_GEN_PTR 0x00000c10
#define MWL8K_MODE_STA 0x0000005a
#define MWL8K_MODE_AP 0x000000a5
#define MWL8K_HIU_INT_CODE 0x00000c14
#define MWL8K_FWSTA_READY 0xf0f1f2f4
#define MWL8K_FWAP_READY 0xf1f2f4a5
#define MWL8K_INT_CODE_CMD_FINISHED 0x00000005
#define MWL8K_HIU_SCRATCH 0x00000c40
/* Host->device communications */
#define MWL8K_HIU_H2A_INTERRUPT_EVENTS 0x00000c18
#define MWL8K_HIU_H2A_INTERRUPT_STATUS 0x00000c1c
#define MWL8K_HIU_H2A_INTERRUPT_MASK 0x00000c20
#define MWL8K_HIU_H2A_INTERRUPT_CLEAR_SEL 0x00000c24
#define MWL8K_HIU_H2A_INTERRUPT_STATUS_MASK 0x00000c28
#define MWL8K_H2A_INT_DUMMY (1 << 20)
#define MWL8K_H2A_INT_RESET (1 << 15)
#define MWL8K_H2A_INT_DOORBELL (1 << 1)
#define MWL8K_H2A_INT_PPA_READY (1 << 0)
/* Device->host communications */
#define MWL8K_HIU_A2H_INTERRUPT_EVENTS 0x00000c2c
#define MWL8K_HIU_A2H_INTERRUPT_STATUS 0x00000c30
#define MWL8K_HIU_A2H_INTERRUPT_MASK 0x00000c34
#define MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL 0x00000c38
#define MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK 0x00000c3c
#define MWL8K_A2H_INT_DUMMY (1 << 20)
#define MWL8K_A2H_INT_BA_WATCHDOG (1 << 14)
#define MWL8K_A2H_INT_CHNL_SWITCHED (1 << 11)
#define MWL8K_A2H_INT_QUEUE_EMPTY (1 << 10)
#define MWL8K_A2H_INT_RADAR_DETECT (1 << 7)
#define MWL8K_A2H_INT_RADIO_ON (1 << 6)
#define MWL8K_A2H_INT_RADIO_OFF (1 << 5)
#define MWL8K_A2H_INT_MAC_EVENT (1 << 3)
#define MWL8K_A2H_INT_OPC_DONE (1 << 2)
#define MWL8K_A2H_INT_RX_READY (1 << 1)
#define MWL8K_A2H_INT_TX_DONE (1 << 0)
/* HW microsecond timer register
* located at offset 0xA600. This
* will be used to timestamp tx
* packets.
*/
#define MWL8K_HW_TIMER_REGISTER 0x0000a600
#define MWL8K_A2H_EVENTS (MWL8K_A2H_INT_DUMMY | \
MWL8K_A2H_INT_CHNL_SWITCHED | \
MWL8K_A2H_INT_QUEUE_EMPTY | \
MWL8K_A2H_INT_RADAR_DETECT | \
MWL8K_A2H_INT_RADIO_ON | \
MWL8K_A2H_INT_RADIO_OFF | \
MWL8K_A2H_INT_MAC_EVENT | \
MWL8K_A2H_INT_OPC_DONE | \
MWL8K_A2H_INT_RX_READY | \
MWL8K_A2H_INT_TX_DONE | \
MWL8K_A2H_INT_BA_WATCHDOG)
#define MWL8K_RX_QUEUES 1
#define MWL8K_TX_WMM_QUEUES 4
#define MWL8K_MAX_AMPDU_QUEUES 8
#define MWL8K_MAX_TX_QUEUES (MWL8K_TX_WMM_QUEUES + MWL8K_MAX_AMPDU_QUEUES)
#define mwl8k_tx_queues(priv) (MWL8K_TX_WMM_QUEUES + (priv)->num_ampdu_queues)
struct rxd_ops {
int rxd_size;
void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
void (*rxd_refill)(void *rxd, dma_addr_t addr, int len);
int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status,
__le16 *qos, s8 *noise);
};
struct mwl8k_device_info {
char *part_name;
char *helper_image;
char *fw_image_sta;
char *fw_image_ap;
struct rxd_ops *ap_rxd_ops;
u32 fw_api_ap;
};
struct mwl8k_rx_queue {
int rxd_count;
/* hw receives here */
int head;
/* refill descs here */
int tail;
void *rxd;
dma_addr_t rxd_dma;
struct {
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(dma);
} *buf;
};
struct mwl8k_tx_queue {
/* hw transmits here */
int head;
/* sw appends here */
int tail;
unsigned int len;
struct mwl8k_tx_desc *txd;
dma_addr_t txd_dma;
struct sk_buff **skb;
};
enum {
AMPDU_NO_STREAM,
AMPDU_STREAM_NEW,
AMPDU_STREAM_IN_PROGRESS,
AMPDU_STREAM_ACTIVE,
};
struct mwl8k_ampdu_stream {
struct ieee80211_sta *sta;
u8 tid;
u8 state;
u8 idx;
u8 txq_idx; /* index of this stream in priv->txq */
};
struct mwl8k_priv {
struct ieee80211_hw *hw;
struct pci_dev *pdev;
int irq;
struct mwl8k_device_info *device_info;
void __iomem *sram;
void __iomem *regs;
/* firmware */
const struct firmware *fw_helper;
const struct firmware *fw_ucode;
/* hardware/firmware parameters */
bool ap_fw;
struct rxd_ops *rxd_ops;
struct ieee80211_supported_band band_24;
struct ieee80211_channel channels_24[14];
struct ieee80211_rate rates_24[14];
struct ieee80211_supported_band band_50;
struct ieee80211_channel channels_50[4];
struct ieee80211_rate rates_50[9];
u32 ap_macids_supported;
u32 sta_macids_supported;
/* Ampdu stream information */
u8 num_ampdu_queues;
spinlock_t stream_lock;
struct mwl8k_ampdu_stream ampdu[MWL8K_MAX_AMPDU_QUEUES];
struct work_struct watchdog_ba_handle;
/* firmware access */
struct mutex fw_mutex;
struct task_struct *fw_mutex_owner;
int fw_mutex_depth;
struct completion *hostcmd_wait;
/* lock held over TX and TX reap */
spinlock_t tx_lock;
/* TX quiesce completion, protected by fw_mutex and tx_lock */
struct completion *tx_wait;
/* List of interfaces. */
u32 macids_used;
struct list_head vif_list;
/* power management status cookie from firmware */
u32 *cookie;
dma_addr_t cookie_dma;
u16 num_mcaddrs;
u8 hw_rev;
u32 fw_rev;
/*
* Running count of TX packets in flight, to avoid
* iterating over the transmit rings each time.
*/
int pending_tx_pkts;
struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
struct mwl8k_tx_queue txq[MWL8K_MAX_TX_QUEUES];
u32 txq_offset[MWL8K_MAX_TX_QUEUES];
bool radio_on;
bool radio_short_preamble;
bool sniffer_enabled;
bool wmm_enabled;
/* XXX need to convert this to handle multiple interfaces */
bool capture_beacon;
u8 capture_bssid[ETH_ALEN];
struct sk_buff *beacon_skb;
/*
* This finalize-join (FJ) worker has to be global as it is scheduled from the
* RX handler. At this point we don't know which interface it
* belongs to until the list of bssids waiting to complete join
* is checked.
*/
struct work_struct finalize_join_worker;
/* Tasklet to perform TX reclaim. */
struct tasklet_struct poll_tx_task;
/* Tasklet to perform RX. */
struct tasklet_struct poll_rx_task;
/* Most recently reported noise in dBm */
s8 noise;
/*
* preserve the queue configurations so they can be restored if/when
* the firmware image is swapped.
*/
struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_WMM_QUEUES];
/* async firmware loading state */
unsigned fw_state;
char *fw_pref;
char *fw_alt;
struct completion firmware_loading_complete;
};
#define MAX_WEP_KEY_LEN 13
#define NUM_WEP_KEYS 4
/* Per interface specific private data */
struct mwl8k_vif {
struct list_head list;
struct ieee80211_vif *vif;
/* Firmware macid for this vif. */
int macid;
/* Non AMPDU sequence number assigned by driver. */
u16 seqno;
/* Saved WEP keys */
struct {
u8 enabled;
u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN];
} wep_key_conf[NUM_WEP_KEYS];
/* BSSID */
u8 bssid[ETH_ALEN];
/* A flag to indicate whether HW crypto is enabled for this bssid */
bool is_hw_crypto_enabled;
};
#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
#define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8))
struct tx_traffic_info {
u32 start_time;
u32 pkts;
};
#define MWL8K_MAX_TID 8
struct mwl8k_sta {
/* Index into station database. Returned by UPDATE_STADB. */
u8 peer_id;
u8 is_ampdu_allowed;
struct tx_traffic_info tx_stats[MWL8K_MAX_TID];
};
#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
static const struct ieee80211_channel mwl8k_channels_24[] = {
{ .center_freq = 2412, .hw_value = 1, },
{ .center_freq = 2417, .hw_value = 2, },
{ .center_freq = 2422, .hw_value = 3, },
{ .center_freq = 2427, .hw_value = 4, },
{ .center_freq = 2432, .hw_value = 5, },
{ .center_freq = 2437, .hw_value = 6, },
{ .center_freq = 2442, .hw_value = 7, },
{ .center_freq = 2447, .hw_value = 8, },
{ .center_freq = 2452, .hw_value = 9, },
{ .center_freq = 2457, .hw_value = 10, },
{ .center_freq = 2462, .hw_value = 11, },
{ .center_freq = 2467, .hw_value = 12, },
{ .center_freq = 2472, .hw_value = 13, },
{ .center_freq = 2484, .hw_value = 14, },
};
static const struct ieee80211_rate mwl8k_rates_24[] = {
{ .bitrate = 10, .hw_value = 2, },
{ .bitrate = 20, .hw_value = 4, },
{ .bitrate = 55, .hw_value = 11, },
{ .bitrate = 110, .hw_value = 22, },
{ .bitrate = 220, .hw_value = 44, },
{ .bitrate = 60, .hw_value = 12, },
{ .bitrate = 90, .hw_value = 18, },
{ .bitrate = 120, .hw_value = 24, },
{ .bitrate = 180, .hw_value = 36, },
{ .bitrate = 240, .hw_value = 48, },
{ .bitrate = 360, .hw_value = 72, },
{ .bitrate = 480, .hw_value = 96, },
{ .bitrate = 540, .hw_value = 108, },
{ .bitrate = 720, .hw_value = 144, },
};
static const struct ieee80211_channel mwl8k_channels_50[] = {
{ .center_freq = 5180, .hw_value = 36, },
{ .center_freq = 5200, .hw_value = 40, },
{ .center_freq = 5220, .hw_value = 44, },
{ .center_freq = 5240, .hw_value = 48, },
};
static const struct ieee80211_rate mwl8k_rates_50[] = {
{ .bitrate = 60, .hw_value = 12, },
{ .bitrate = 90, .hw_value = 18, },
{ .bitrate = 120, .hw_value = 24, },
{ .bitrate = 180, .hw_value = 36, },
{ .bitrate = 240, .hw_value = 48, },
{ .bitrate = 360, .hw_value = 72, },
{ .bitrate = 480, .hw_value = 96, },
{ .bitrate = 540, .hw_value = 108, },
{ .bitrate = 720, .hw_value = 144, },
};
/* Set or get info from Firmware */
#define MWL8K_CMD_GET 0x0000
#define MWL8K_CMD_SET 0x0001
#define MWL8K_CMD_SET_LIST 0x0002
/* Firmware command codes */
#define MWL8K_CMD_CODE_DNLD 0x0001
#define MWL8K_CMD_GET_HW_SPEC 0x0003
#define MWL8K_CMD_SET_HW_SPEC 0x0004
#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010
#define MWL8K_CMD_GET_STAT 0x0014
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
#define MWL8K_CMD_TX_POWER 0x001f
#define MWL8K_CMD_RF_ANTENNA 0x0020
#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
#define MWL8K_CMD_SET_PRE_SCAN 0x0107
#define MWL8K_CMD_SET_POST_SCAN 0x0108
#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
#define MWL8K_CMD_SET_AID 0x010d
#define MWL8K_CMD_SET_RATE 0x0110
#define MWL8K_CMD_SET_FINALIZE_JOIN 0x0111
#define MWL8K_CMD_RTS_THRESHOLD 0x0113
#define MWL8K_CMD_SET_SLOT 0x0114
#define MWL8K_CMD_SET_EDCA_PARAMS 0x0115
#define MWL8K_CMD_SET_WMM_MODE 0x0123
#define MWL8K_CMD_MIMO_CONFIG 0x0125
#define MWL8K_CMD_USE_FIXED_RATE 0x0126
#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
#define MWL8K_CMD_GET_WATCHDOG_BITMAP 0x0205
#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */
#define MWL8K_CMD_UPDATE_STADB 0x1123
#define MWL8K_CMD_BASTREAM 0x1125
static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
{
u16 command = le16_to_cpu(cmd);
#define MWL8K_CMDNAME(x) case MWL8K_CMD_##x: do {\
snprintf(buf, bufsize, "%s", #x);\
return buf;\
} while (0)
switch (command & ~0x8000) {
MWL8K_CMDNAME(CODE_DNLD);
MWL8K_CMDNAME(GET_HW_SPEC);
MWL8K_CMDNAME(SET_HW_SPEC);
MWL8K_CMDNAME(MAC_MULTICAST_ADR);
MWL8K_CMDNAME(GET_STAT);
MWL8K_CMDNAME(RADIO_CONTROL);
MWL8K_CMDNAME(RF_TX_POWER);
MWL8K_CMDNAME(TX_POWER);
MWL8K_CMDNAME(RF_ANTENNA);
MWL8K_CMDNAME(SET_BEACON);
MWL8K_CMDNAME(SET_PRE_SCAN);
MWL8K_CMDNAME(SET_POST_SCAN);
MWL8K_CMDNAME(SET_RF_CHANNEL);
MWL8K_CMDNAME(SET_AID);
MWL8K_CMDNAME(SET_RATE);
MWL8K_CMDNAME(SET_FINALIZE_JOIN);
MWL8K_CMDNAME(RTS_THRESHOLD);
MWL8K_CMDNAME(SET_SLOT);
MWL8K_CMDNAME(SET_EDCA_PARAMS);
MWL8K_CMDNAME(SET_WMM_MODE);
MWL8K_CMDNAME(MIMO_CONFIG);
MWL8K_CMDNAME(USE_FIXED_RATE);
MWL8K_CMDNAME(ENABLE_SNIFFER);
MWL8K_CMDNAME(SET_MAC_ADDR);
MWL8K_CMDNAME(SET_RATEADAPT_MODE);
MWL8K_CMDNAME(BSS_START);
MWL8K_CMDNAME(SET_NEW_STN);
MWL8K_CMDNAME(UPDATE_ENCRYPTION);
MWL8K_CMDNAME(UPDATE_STADB);
MWL8K_CMDNAME(BASTREAM);
MWL8K_CMDNAME(GET_WATCHDOG_BITMAP);
default:
snprintf(buf, bufsize, "0x%x", cmd);
}
#undef MWL8K_CMDNAME
return buf;
}
/* Hardware and firmware reset */
static void mwl8k_hw_reset(struct mwl8k_priv *priv)
{
iowrite32(MWL8K_H2A_INT_RESET,
priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
iowrite32(MWL8K_H2A_INT_RESET,
priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
msleep(20);
}
/* Release fw image */
static void mwl8k_release_fw(const struct firmware **fw)
{
if (*fw == NULL)
return;
release_firmware(*fw);
*fw = NULL;
}
static void mwl8k_release_firmware(struct mwl8k_priv *priv)
{
mwl8k_release_fw(&priv->fw_ucode);
mwl8k_release_fw(&priv->fw_helper);
}
/* states for asynchronous f/w loading */
static void mwl8k_fw_state_machine(const struct firmware *fw, void *context);
enum {
FW_STATE_INIT = 0,
FW_STATE_LOADING_PREF,
FW_STATE_LOADING_ALT,
FW_STATE_ERROR,
};
/* Request fw image */
static int mwl8k_request_fw(struct mwl8k_priv *priv,
const char *fname, const struct firmware **fw,
bool nowait)
{
/* release current image */
if (*fw != NULL)
mwl8k_release_fw(fw);
if (nowait)
return request_firmware_nowait(THIS_MODULE, 1, fname,
&priv->pdev->dev, GFP_KERNEL,
priv, mwl8k_fw_state_machine);
else
return request_firmware(fw, fname, &priv->pdev->dev);
}
static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image,
bool nowait)
{
struct mwl8k_device_info *di = priv->device_info;
int rc;
if (di->helper_image != NULL) {
if (nowait)
rc = mwl8k_request_fw(priv, di->helper_image,
&priv->fw_helper, true);
else
rc = mwl8k_request_fw(priv, di->helper_image,
&priv->fw_helper, false);
if (rc)
printk(KERN_ERR "%s: Error requesting helper fw %s\n",
pci_name(priv->pdev), di->helper_image);
if (rc || nowait)
return rc;
}
if (nowait) {
/*
* if we get here, no helper image is needed. Skip the
* FW_STATE_INIT state.
*/
priv->fw_state = FW_STATE_LOADING_PREF;
rc = mwl8k_request_fw(priv, fw_image,
&priv->fw_ucode,
true);
} else
rc = mwl8k_request_fw(priv, fw_image,
&priv->fw_ucode, false);
if (rc) {
printk(KERN_ERR "%s: Error requesting firmware file %s\n",
pci_name(priv->pdev), fw_image);
mwl8k_release_fw(&priv->fw_helper);
return rc;
}
return 0;
}
struct mwl8k_cmd_pkt {
__le16 code;
__le16 length;
__u8 seq_num;
__u8 macid;
__le16 result;
char payload[0];
} __packed;
/*
* Firmware loading.
*/
static int
mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length)
{
void __iomem *regs = priv->regs;
dma_addr_t dma_addr;
int loops;
dma_addr = pci_map_single(priv->pdev, data, length, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pdev, dma_addr))
return -ENOMEM;
iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
iowrite32(0, regs + MWL8K_HIU_INT_CODE);
iowrite32(MWL8K_H2A_INT_DOORBELL,
regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
iowrite32(MWL8K_H2A_INT_DUMMY,
regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
loops = 1000;
do {
u32 int_code;
int_code = ioread32(regs + MWL8K_HIU_INT_CODE);
if (int_code == MWL8K_INT_CODE_CMD_FINISHED) {
iowrite32(0, regs + MWL8K_HIU_INT_CODE);
break;
}
cond_resched();
udelay(1);
} while (--loops);
pci_unmap_single(priv->pdev, dma_addr, length, PCI_DMA_TODEVICE);
return loops ? 0 : -ETIMEDOUT;
}
static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
const u8 *data, size_t length)
{
struct mwl8k_cmd_pkt *cmd;
int done;
int rc = 0;
cmd = kmalloc(sizeof(*cmd) + 256, GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
cmd->seq_num = 0;
cmd->macid = 0;
cmd->result = 0;
done = 0;
while (length) {
int block_size = length > 256 ? 256 : length;
memcpy(cmd->payload, data + done, block_size);
cmd->length = cpu_to_le16(block_size);
rc = mwl8k_send_fw_load_cmd(priv, cmd,
sizeof(*cmd) + block_size);
if (rc)
break;
done += block_size;
length -= block_size;
}
if (!rc) {
cmd->length = 0;
rc = mwl8k_send_fw_load_cmd(priv, cmd, sizeof(*cmd));
}
kfree(cmd);
return rc;
}
static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
const u8 *data, size_t length)
{
unsigned char *buffer;
int may_continue, rc = 0;
u32 done, prev_block_size;
buffer = kmalloc(1024, GFP_KERNEL);
if (buffer == NULL)
return -ENOMEM;
done = 0;
prev_block_size = 0;
may_continue = 1000;
while (may_continue > 0) {
u32 block_size;
block_size = ioread32(priv->regs + MWL8K_HIU_SCRATCH);
if (block_size & 1) {
block_size &= ~1;
may_continue--;
} else {
done += prev_block_size;
length -= prev_block_size;
}
if (block_size > 1024 || block_size > length) {
rc = -EOVERFLOW;
break;
}
if (length == 0) {
rc = 0;
break;
}
if (block_size == 0) {
rc = -EPROTO;
may_continue--;
udelay(1);
continue;
}
prev_block_size = block_size;
memcpy(buffer, data + done, block_size);
rc = mwl8k_send_fw_load_cmd(priv, buffer, block_size);
if (rc)
break;
}
if (!rc && length != 0)
rc = -EREMOTEIO;
kfree(buffer);
return rc;
}
static int mwl8k_load_firmware(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
const struct firmware *fw = priv->fw_ucode;
int rc;
int loops;
if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
const struct firmware *helper = priv->fw_helper;
if (helper == NULL) {
printk(KERN_ERR "%s: helper image needed but none "
"given\n", pci_name(priv->pdev));
return -EINVAL;
}
rc = mwl8k_load_fw_image(priv, helper->data, helper->size);
if (rc) {
printk(KERN_ERR "%s: unable to load firmware "
"helper image\n", pci_name(priv->pdev));
return rc;
}
msleep(20);
rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
} else {
rc = mwl8k_load_fw_image(priv, fw->data, fw->size);
}
if (rc) {
printk(KERN_ERR "%s: unable to load firmware image\n",
pci_name(priv->pdev));
return rc;
}
iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
loops = 500000;
do {
u32 ready_code;
ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
if (ready_code == MWL8K_FWAP_READY) {
priv->ap_fw = 1;
break;
} else if (ready_code == MWL8K_FWSTA_READY) {
priv->ap_fw = 0;
break;
}
cond_resched();
udelay(1);
} while (--loops);
return loops ? 0 : -ETIMEDOUT;
}
/* DMA header used by firmware and hardware. */
struct mwl8k_dma_data {
__le16 fwlen;
struct ieee80211_hdr wh;
char data[0];
} __packed;
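/*
 * Resulting buffer layout, as a reading aid: 2 bytes of fwlen followed
 * by a full 4-address 802.11 header without the QoS field (30 bytes,
 * i.e. sizeof(struct ieee80211_hdr)), 32 bytes in total, then the
 * frame payload. fwlen itself covers everything after the 802.11
 * header, including any crypto headers and trailers (see
 * mwl8k_add_dma_header() below).
 */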
/* Routines to add/remove DMA header from skb. */
static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
{
struct mwl8k_dma_data *tr;
int hdrlen;
tr = (struct mwl8k_dma_data *)skb->data;
hdrlen = ieee80211_hdrlen(tr->wh.frame_control);
if (hdrlen != sizeof(tr->wh)) {
if (ieee80211_is_data_qos(tr->wh.frame_control)) {
memmove(tr->data - hdrlen, &tr->wh, hdrlen - 2);
*((__le16 *)(tr->data - 2)) = qos;
} else {
memmove(tr->data - hdrlen, &tr->wh, hdrlen);
}
}
if (hdrlen != sizeof(*tr))
skb_pull(skb, sizeof(*tr) - hdrlen);
}
#define REDUCED_TX_HEADROOM 8
static void
mwl8k_add_dma_header(struct mwl8k_priv *priv, struct sk_buff *skb,
int head_pad, int tail_pad)
{
struct ieee80211_hdr *wh;
int hdrlen;
int reqd_hdrlen;
struct mwl8k_dma_data *tr;
/*
* Add a firmware DMA header; the firmware requires that we
* present a 2-byte payload length followed by a 4-address
* header (without QoS field), followed (optionally) by any
* WEP/ExtIV header (but only filled in for CCMP).
*/
wh = (struct ieee80211_hdr *)skb->data;
hdrlen = ieee80211_hdrlen(wh->frame_control);
/*
* Check if skb_resize is required because of
* tx_headroom adjustment.
*/
if (priv->ap_fw && (hdrlen < (sizeof(struct ieee80211_cts)
+ REDUCED_TX_HEADROOM))) {
if (pskb_expand_head(skb, REDUCED_TX_HEADROOM, 0, GFP_ATOMIC)) {
wiphy_err(priv->hw->wiphy,
"Failed to reallocate TX buffer\n");
return;
}
skb->truesize += REDUCED_TX_HEADROOM;
}
reqd_hdrlen = sizeof(*tr) + head_pad;
if (hdrlen != reqd_hdrlen)
skb_push(skb, reqd_hdrlen - hdrlen);
if (ieee80211_is_data_qos(wh->frame_control))
hdrlen -= IEEE80211_QOS_CTL_LEN;
tr = (struct mwl8k_dma_data *)skb->data;
if (wh != &tr->wh)
memmove(&tr->wh, wh, hdrlen);
if (hdrlen != sizeof(tr->wh))
memset(((void *)&tr->wh) + hdrlen, 0, sizeof(tr->wh) - hdrlen);
/*
* Firmware length is the length of the fully formed "802.11
* payload". That is, everything except for the 802.11 header.
* This includes all crypto material including the MIC.
*/
tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad);
}
static void mwl8k_encapsulate_tx_frame(struct mwl8k_priv *priv,
struct sk_buff *skb)
{
struct ieee80211_hdr *wh;
struct ieee80211_tx_info *tx_info;
struct ieee80211_key_conf *key_conf;
int data_pad;
int head_pad = 0;
wh = (struct ieee80211_hdr *)skb->data;
tx_info = IEEE80211_SKB_CB(skb);
key_conf = NULL;
if (ieee80211_is_data(wh->frame_control))
key_conf = tx_info->control.hw_key;
/*
* Make sure the packet header is in the DMA header format (4-address
* without QoS), and add head & tail padding when HW crypto is enabled.
*
* We have the following trailer padding requirements:
* - WEP: 4 trailer bytes (ICV)
* - TKIP: 12 trailer bytes (8 MIC + 4 ICV)
* - CCMP: 8 trailer bytes (MIC)
*/
data_pad = 0;
if (key_conf != NULL) {
head_pad = key_conf->iv_len;
switch (key_conf->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
data_pad = 4;
break;
case WLAN_CIPHER_SUITE_TKIP:
data_pad = 12;
break;
case WLAN_CIPHER_SUITE_CCMP:
data_pad = 8;
break;
}
}
mwl8k_add_dma_header(priv, skb, head_pad, data_pad);
}
/*
* Packet reception for 88w8366 AP firmware.
*/
struct mwl8k_rxd_8366_ap {
__le16 pkt_len;
__u8 sq2;
__u8 rate;
__le32 pkt_phys_addr;
__le32 next_rxd_phys_addr;
__le16 qos_control;
__le16 htsig2;
__le32 hw_rssi_info;
__le32 hw_noise_floor_info;
__u8 noise_floor;
__u8 pad0[3];
__u8 rssi;
__u8 rx_status;
__u8 channel;
__u8 rx_ctrl;
} __packed;
#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
/* 8366 AP rx_status bits */
#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80
#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF
#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02
#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04
#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08
static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
{
struct mwl8k_rxd_8366_ap *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST;
}
static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
{
struct mwl8k_rxd_8366_ap *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
wmb();
rxd->rx_ctrl = 0;
}
static int
mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
__le16 *qos, s8 *noise)
{
struct mwl8k_rxd_8366_ap *rxd = _rxd;
if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
memset(status, 0, sizeof(*status));
status->signal = -rxd->rssi;
*noise = -rxd->noise_floor;
if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
status->flag |= RX_FLAG_HT;
if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate);
} else {
int i;
for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) {
if (mwl8k_rates_24[i].hw_value == rxd->rate) {
status->rate_idx = i;
break;
}
}
}
if (rxd->channel > 14) {
status->band = IEEE80211_BAND_5GHZ;
if (!(status->flag & RX_FLAG_HT))
status->rate_idx -= 5;
} else {
status->band = IEEE80211_BAND_2GHZ;
}
status->freq = ieee80211_channel_to_frequency(rxd->channel,
status->band);
*qos = rxd->qos_control;
if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
(rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) &&
(rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
status->flag |= RX_FLAG_MMIC_ERROR;
return le16_to_cpu(rxd->pkt_len);
}
static struct rxd_ops rxd_8366_ap_ops = {
.rxd_size = sizeof(struct mwl8k_rxd_8366_ap),
.rxd_init = mwl8k_rxd_8366_ap_init,
.rxd_refill = mwl8k_rxd_8366_ap_refill,
.rxd_process = mwl8k_rxd_8366_ap_process,
};
/*
* Packet reception for STA firmware.
*/
struct mwl8k_rxd_sta {
__le16 pkt_len;
__u8 link_quality;
__u8 noise_level;
__le32 pkt_phys_addr;
__le32 next_rxd_phys_addr;
__le16 qos_control;
__le16 rate_info;
__le32 pad0[4];
__u8 rssi;
__u8 channel;
__le16 pad1;
__u8 rx_ctrl;
__u8 rx_status;
__u8 pad2[2];
} __packed;
#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
#define MWL8K_STA_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
#define MWL8K_STA_RATE_INFO_40MHZ 0x0004
#define MWL8K_STA_RATE_INFO_SHORTGI 0x0002
#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
#define MWL8K_STA_RX_CTRL_DECRYPT_ERROR 0x04
/* ICV=0 or MIC=1 */
#define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE 0x08
/* Key is uploaded only in failure case */
#define MWL8K_STA_RX_CTRL_KEY_INDEX 0x30
static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
{
struct mwl8k_rxd_sta *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
rxd->rx_ctrl = MWL8K_STA_RX_CTRL_OWNED_BY_HOST;
}
static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len)
{
struct mwl8k_rxd_sta *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
wmb();
rxd->rx_ctrl = 0;
}
static int
mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
__le16 *qos, s8 *noise)
{
struct mwl8k_rxd_sta *rxd = _rxd;
u16 rate_info;
if (!(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
rate_info = le16_to_cpu(rxd->rate_info);
memset(status, 0, sizeof(*status));
status->signal = -rxd->rssi;
*noise = -rxd->noise_level;
status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE)
status->flag |= RX_FLAG_SHORTPRE;
if (rate_info & MWL8K_STA_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI)
status->flag |= RX_FLAG_SHORT_GI;
if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT)
status->flag |= RX_FLAG_HT;
if (rxd->channel > 14) {
status->band = IEEE80211_BAND_5GHZ;
if (!(status->flag & RX_FLAG_HT))
status->rate_idx -= 5;
} else {
status->band = IEEE80211_BAND_2GHZ;
}
status->freq = ieee80211_channel_to_frequency(rxd->channel,
status->band);
*qos = rxd->qos_control;
if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) &&
(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE))
status->flag |= RX_FLAG_MMIC_ERROR;
return le16_to_cpu(rxd->pkt_len);
}
static struct rxd_ops rxd_sta_ops = {
.rxd_size = sizeof(struct mwl8k_rxd_sta),
.rxd_init = mwl8k_rxd_sta_init,
.rxd_refill = mwl8k_rxd_sta_refill,
.rxd_process = mwl8k_rxd_sta_process,
};
#define MWL8K_RX_DESCS 256
#define MWL8K_RX_MAXSZ 3800
static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_rx_queue *rxq = priv->rxq + index;
int size;
int i;
rxq->rxd_count = 0;
rxq->head = 0;
rxq->tail = 0;
size = MWL8K_RX_DESCS * priv->rxd_ops->rxd_size;
rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
if (rxq->rxd == NULL) {
wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n");
return -ENOMEM;
}
memset(rxq->rxd, 0, size);
rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
if (rxq->buf == NULL) {
wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
return -ENOMEM;
}
for (i = 0; i < MWL8K_RX_DESCS; i++) {
int desc_size;
void *rxd;
int nexti;
dma_addr_t next_dma_addr;
desc_size = priv->rxd_ops->rxd_size;
rxd = rxq->rxd + (i * priv->rxd_ops->rxd_size);
nexti = i + 1;
if (nexti == MWL8K_RX_DESCS)
nexti = 0;
next_dma_addr = rxq->rxd_dma + (nexti * desc_size);
priv->rxd_ops->rxd_init(rxd, next_dma_addr);
}
return 0;
}
static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_rx_queue *rxq = priv->rxq + index;
int refilled;
refilled = 0;
while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) {
struct sk_buff *skb;
dma_addr_t addr;
int rx;
void *rxd;
skb = dev_alloc_skb(MWL8K_RX_MAXSZ);
if (skb == NULL)
break;
addr = pci_map_single(priv->pdev, skb->data,
MWL8K_RX_MAXSZ, DMA_FROM_DEVICE);
rxq->rxd_count++;
rx = rxq->tail++;
if (rxq->tail == MWL8K_RX_DESCS)
rxq->tail = 0;
rxq->buf[rx].skb = skb;
dma_unmap_addr_set(&rxq->buf[rx], dma, addr);
rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size);
priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ);
refilled++;
}
return refilled;
}
/* Must be called only when the card's reception is completely halted */
static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_rx_queue *rxq = priv->rxq + index;
int i;
if (rxq->rxd == NULL)
return;
for (i = 0; i < MWL8K_RX_DESCS; i++) {
if (rxq->buf[i].skb != NULL) {
pci_unmap_single(priv->pdev,
dma_unmap_addr(&rxq->buf[i], dma),
MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
dma_unmap_addr_set(&rxq->buf[i], dma, 0);
kfree_skb(rxq->buf[i].skb);
rxq->buf[i].skb = NULL;
}
}
kfree(rxq->buf);
rxq->buf = NULL;
pci_free_consistent(priv->pdev,
MWL8K_RX_DESCS * priv->rxd_ops->rxd_size,
rxq->rxd, rxq->rxd_dma);
rxq->rxd = NULL;
}
/*
* Scan a list of BSSIDs to process for finalize join.
* Allows for extension to process multiple BSSIDs.
*/
static inline int
mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh)
{
return priv->capture_beacon &&
ieee80211_is_beacon(wh->frame_control) &&
!compare_ether_addr(wh->addr3, priv->capture_bssid);
}
static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct mwl8k_priv *priv = hw->priv;
priv->capture_beacon = false;
memset(priv->capture_bssid, 0, ETH_ALEN);
/*
* Use GFP_ATOMIC: rxq_process is called from the primary
* interrupt handler, so this memory allocation must not sleep.
*/
priv->beacon_skb = skb_copy(skb, GFP_ATOMIC);
if (priv->beacon_skb != NULL)
ieee80211_queue_work(hw, &priv->finalize_join_worker);
}
static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list,
u8 *bssid)
{
struct mwl8k_vif *mwl8k_vif;
list_for_each_entry(mwl8k_vif,
vif_list, list) {
if (memcmp(bssid, mwl8k_vif->bssid,
ETH_ALEN) == 0)
return mwl8k_vif;
}
return NULL;
}
static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif = NULL;
struct mwl8k_rx_queue *rxq = priv->rxq + index;
int processed;
processed = 0;
while (rxq->rxd_count && limit--) {
struct sk_buff *skb;
void *rxd;
int pkt_len;
struct ieee80211_rx_status status;
struct ieee80211_hdr *wh;
__le16 qos;
skb = rxq->buf[rxq->head].skb;
if (skb == NULL)
break;
rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size);
pkt_len = priv->rxd_ops->rxd_process(rxd, &status, &qos,
&priv->noise);
if (pkt_len < 0)
break;
rxq->buf[rxq->head].skb = NULL;
pci_unmap_single(priv->pdev,
dma_unmap_addr(&rxq->buf[rxq->head], dma),
MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
rxq->head++;
if (rxq->head == MWL8K_RX_DESCS)
rxq->head = 0;
rxq->rxd_count--;
wh = &((struct mwl8k_dma_data *)skb->data)->wh;
/*
* Check for a pending join operation. Save a
* copy of the beacon and schedule a work item to
* send a FINALIZE_JOIN command to the firmware.
*/
if (mwl8k_capture_bssid(priv, (void *)skb->data))
mwl8k_save_beacon(hw, skb);
if (ieee80211_has_protected(wh->frame_control)) {
/* Check if hw crypto has been enabled for
* this bss. If yes, set the status flags
* accordingly
*/
mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list,
wh->addr1);
if (mwl8k_vif != NULL &&
mwl8k_vif->is_hw_crypto_enabled == true) {
/*
* When the firmware encounters an MMIC
* error, the payload is dropped and only
* the 32-byte mwl8k firmware header is
* sent to the host.
*
* We need to add four bytes of key
* information; mac80211 expects keyidx
* to be set to 0 in it to trigger the
* TKIP countermeasures for the MMIC
* failure.
*/
if (status.flag & RX_FLAG_MMIC_ERROR) {
struct mwl8k_dma_data *tr;
tr = (struct mwl8k_dma_data *)skb->data;
memset((void *)&(tr->data), 0, 4);
pkt_len += 4;
}
if (!ieee80211_is_auth(wh->frame_control))
status.flag |= RX_FLAG_IV_STRIPPED |
RX_FLAG_DECRYPTED |
RX_FLAG_MMIC_STRIPPED;
}
}
skb_put(skb, pkt_len);
mwl8k_remove_dma_header(skb, qos);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
ieee80211_rx_irqsafe(hw, skb);
processed++;
}
return processed;
}
/*
* Packet transmission.
*/
#define MWL8K_TXD_STATUS_OK 0x00000001
#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008
#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000
#define MWL8K_QOS_QLEN_UNSPEC 0xff00
#define MWL8K_QOS_ACK_POLICY_MASK 0x0060
#define MWL8K_QOS_ACK_POLICY_NORMAL 0x0000
#define MWL8K_QOS_ACK_POLICY_BLOCKACK 0x0060
#define MWL8K_QOS_EOSP 0x0010
struct mwl8k_tx_desc {
__le32 status;
__u8 data_rate;
__u8 tx_priority;
__le16 qos_control;
__le32 pkt_phys_addr;
__le16 pkt_len;
__u8 dest_MAC_addr[ETH_ALEN];
__le32 next_txd_phys_addr;
__le32 timestamp;
__le16 rate_info;
__u8 peer_id;
__u8 tx_frag_cnt;
} __packed;
#define MWL8K_TX_DESCS 128
static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
int size;
int i;
txq->len = 0;
txq->head = 0;
txq->tail = 0;
size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc);
txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
if (txq->txd == NULL) {
wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n");
return -ENOMEM;
}
memset(txq->txd, 0, size);
txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
if (txq->skb == NULL) {
wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
return -ENOMEM;
}
for (i = 0; i < MWL8K_TX_DESCS; i++) {
struct mwl8k_tx_desc *tx_desc;
int nexti;
tx_desc = txq->txd + i;
nexti = (i + 1) % MWL8K_TX_DESCS;
tx_desc->status = 0;
tx_desc->next_txd_phys_addr =
cpu_to_le32(txq->txd_dma + nexti * sizeof(*tx_desc));
}
return 0;
}
static inline void mwl8k_tx_start(struct mwl8k_priv *priv)
{
iowrite32(MWL8K_H2A_INT_PPA_READY,
priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
iowrite32(MWL8K_H2A_INT_DUMMY,
priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
ioread32(priv->regs + MWL8K_HIU_INT_CODE);
}
static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
int i;
for (i = 0; i < mwl8k_tx_queues(priv); i++) {
struct mwl8k_tx_queue *txq = priv->txq + i;
int fw_owned = 0;
int drv_owned = 0;
int unused = 0;
int desc;
for (desc = 0; desc < MWL8K_TX_DESCS; desc++) {
struct mwl8k_tx_desc *tx_desc = txq->txd + desc;
u32 status;
status = le32_to_cpu(tx_desc->status);
if (status & MWL8K_TXD_STATUS_FW_OWNED)
fw_owned++;
else
drv_owned++;
if (tx_desc->pkt_len == 0)
unused++;
}
wiphy_err(hw->wiphy,
"txq[%d] len=%d head=%d tail=%d "
"fw_owned=%d drv_owned=%d unused=%d\n",
i,
txq->len, txq->head, txq->tail,
fw_owned, drv_owned, unused);
}
}
/*
* Must be called with priv->fw_mutex held and tx queues stopped.
*/
#define MWL8K_TX_WAIT_TIMEOUT_MS 5000
static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
DECLARE_COMPLETION_ONSTACK(tx_wait);
int retry;
int rc;
might_sleep();
/*
* The TX queues are stopped at this point, so this test
* doesn't need to take ->tx_lock.
*/
if (!priv->pending_tx_pkts)
return 0;
retry = 0;
rc = 0;
spin_lock_bh(&priv->tx_lock);
priv->tx_wait = &tx_wait;
while (!rc) {
int oldcount;
unsigned long timeout;
oldcount = priv->pending_tx_pkts;
spin_unlock_bh(&priv->tx_lock);
timeout = wait_for_completion_timeout(&tx_wait,
msecs_to_jiffies(MWL8K_TX_WAIT_TIMEOUT_MS));
spin_lock_bh(&priv->tx_lock);
if (timeout) {
WARN_ON(priv->pending_tx_pkts);
if (retry)
wiphy_notice(hw->wiphy, "tx rings drained\n");
break;
}
if (priv->pending_tx_pkts < oldcount) {
wiphy_notice(hw->wiphy,
"waiting for tx rings to drain (%d -> %d pkts)\n",
oldcount, priv->pending_tx_pkts);
retry = 1;
continue;
}
priv->tx_wait = NULL;
wiphy_err(hw->wiphy, "tx rings stuck for %d ms\n",
MWL8K_TX_WAIT_TIMEOUT_MS);
mwl8k_dump_tx_rings(hw);
rc = -ETIMEDOUT;
}
/* Make sure the on-stack completion is no longer reachable. */
priv->tx_wait = NULL;
spin_unlock_bh(&priv->tx_lock);
return rc;
}
#define MWL8K_TXD_SUCCESS(status) \
((status) & (MWL8K_TXD_STATUS_OK | \
MWL8K_TXD_STATUS_OK_RETRY | \
MWL8K_TXD_STATUS_OK_MORE_RETRY))
static int mwl8k_tid_queue_mapping(u8 tid)
{
BUG_ON(tid > 7);
switch (tid) {
case 0:
case 3:
return IEEE80211_AC_BE;
case 1:
case 2:
return IEEE80211_AC_BK;
case 4:
case 5:
return IEEE80211_AC_VI;
case 6:
case 7:
return IEEE80211_AC_VO;
default:
return -1;
}
}
/* The firmware will fill in the rate information
* for each packet that gets queued in the hardware
* and these macros will interpret that info.
*/
#define RI_FORMAT(a) (a & 0x0001)
#define RI_RATE_ID_MCS(a) ((a & 0x01f8) >> 3)
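/*
* Worked example (illustrative only): a rate_info value of 0x0009 has
* bit 0 set, so RI_FORMAT() reports an HT frame, and bits 3-8 hold the
* value 1, so RI_RATE_ID_MCS() reports MCS 1. MCS 0 or a legacy
* (format = 0) rate is what the reclaim path below treats as too slow
* for AMPDU.
*/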
static int
mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
int processed;
processed = 0;
while (txq->len > 0 && limit--) {
int tx;
struct mwl8k_tx_desc *tx_desc;
unsigned long addr;
int size;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
u32 status;
struct ieee80211_sta *sta;
struct mwl8k_sta *sta_info = NULL;
u16 rate_info;
struct ieee80211_hdr *wh;
tx = txq->head;
tx_desc = txq->txd + tx;
status = le32_to_cpu(tx_desc->status);
if (status & MWL8K_TXD_STATUS_FW_OWNED) {
if (!force)
break;
tx_desc->status &=
~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED);
}
txq->head = (tx + 1) % MWL8K_TX_DESCS;
BUG_ON(txq->len == 0);
txq->len--;
priv->pending_tx_pkts--;
addr = le32_to_cpu(tx_desc->pkt_phys_addr);
size = le16_to_cpu(tx_desc->pkt_len);
skb = txq->skb[tx];
txq->skb[tx] = NULL;
BUG_ON(skb == NULL);
pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE);
mwl8k_remove_dma_header(skb, tx_desc->qos_control);
wh = (struct ieee80211_hdr *) skb->data;
/* Mark descriptor as unused */
tx_desc->pkt_phys_addr = 0;
tx_desc->pkt_len = 0;
info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data(wh->frame_control)) {
sta = info->control.sta;
if (sta) {
sta_info = MWL8K_STA(sta);
BUG_ON(sta_info == NULL);
rate_info = le16_to_cpu(tx_desc->rate_info);
/* If the rate is below MCS 1 (i.e. at most
* 6.5 Mb/s) for an HT station, do not form an
* ampdu. If the station is a legacy station
* (format = 0), do not form an ampdu either.
*/
if (RI_RATE_ID_MCS(rate_info) < 1 ||
RI_FORMAT(rate_info) == 0) {
sta_info->is_ampdu_allowed = false;
} else {
sta_info->is_ampdu_allowed = true;
}
}
}
ieee80211_tx_info_clear_status(info);
/* Rate control is happening in the firmware.
* Ensure no tx rate is being reported.
*/
info->status.rates[0].idx = -1;
info->status.rates[0].count = 1;
if (MWL8K_TXD_SUCCESS(status))
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(hw, skb);
processed++;
}
return processed;
}
/* must be called only when the card's transmit is completely halted */
static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
if (txq->txd == NULL)
return;
mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
kfree(txq->skb);
txq->skb = NULL;
pci_free_consistent(priv->pdev,
MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc),
txq->txd, txq->txd_dma);
txq->txd = NULL;
}
/* caller must hold priv->stream_lock when calling the stream functions */
static struct mwl8k_ampdu_stream *
mwl8k_add_stream(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u8 tid)
{
struct mwl8k_ampdu_stream *stream;
struct mwl8k_priv *priv = hw->priv;
int i;
for (i = 0; i < priv->num_ampdu_queues; i++) {
stream = &priv->ampdu[i];
if (stream->state == AMPDU_NO_STREAM) {
stream->sta = sta;
stream->state = AMPDU_STREAM_NEW;
stream->tid = tid;
stream->idx = i;
stream->txq_idx = MWL8K_TX_WMM_QUEUES + i;
wiphy_debug(hw->wiphy, "Added a new stream for %pM %d",
sta->addr, tid);
return stream;
}
}
return NULL;
}
static int
mwl8k_start_stream(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
{
int ret;
/* if the stream has already been started, don't start it again */
if (stream->state != AMPDU_STREAM_NEW)
return 0;
ret = ieee80211_start_tx_ba_session(stream->sta, stream->tid, 0);
if (ret)
wiphy_debug(hw->wiphy, "Failed to start stream for %pM %d: "
"%d\n", stream->sta->addr, stream->tid, ret);
else
wiphy_debug(hw->wiphy, "Started stream for %pM %d\n",
stream->sta->addr, stream->tid);
return ret;
}
static void
mwl8k_remove_stream(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
{
wiphy_debug(hw->wiphy, "Remove stream for %pM %d\n", stream->sta->addr,
stream->tid);
memset(stream, 0, sizeof(*stream));
}
static struct mwl8k_ampdu_stream *
mwl8k_lookup_stream(struct ieee80211_hw *hw, u8 *addr, u8 tid)
{
struct mwl8k_priv *priv = hw->priv;
int i;
for (i = 0 ; i < priv->num_ampdu_queues; i++) {
struct mwl8k_ampdu_stream *stream;
stream = &priv->ampdu[i];
if (stream->state == AMPDU_NO_STREAM)
continue;
if (!memcmp(stream->sta->addr, addr, ETH_ALEN) &&
stream->tid == tid)
return stream;
}
return NULL;
}
#define MWL8K_AMPDU_PACKET_THRESHOLD 64
static inline bool mwl8k_ampdu_allowed(struct ieee80211_sta *sta, u8 tid)
{
struct mwl8k_sta *sta_info = MWL8K_STA(sta);
struct tx_traffic_info *tx_stats;
BUG_ON(tid >= MWL8K_MAX_TID);
tx_stats = &sta_info->tx_stats[tid];
return sta_info->is_ampdu_allowed &&
tx_stats->pkts > MWL8K_AMPDU_PACKET_THRESHOLD;
}
static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
{
struct mwl8k_sta *sta_info = MWL8K_STA(sta);
struct tx_traffic_info *tx_stats;
BUG_ON(tid >= MWL8K_MAX_TID);
tx_stats = &sta_info->tx_stats[tid];
if (tx_stats->start_time == 0)
tx_stats->start_time = jiffies;
/* reset the packet count after each second elapses. If the number of
* packets ever exceeds the ampdu_min_traffic threshold, we will allow
* an ampdu stream to be started.
*/
if (jiffies - tx_stats->start_time > HZ) {
tx_stats->pkts = 0;
tx_stats->start_time = 0;
} else
tx_stats->pkts++;
}
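/*
* Illustrative note: taken together with mwl8k_ampdu_allowed() above,
* a given TID must accumulate more than MWL8K_AMPDU_PACKET_THRESHOLD
* (64) packets within roughly one second, and the station's last
* reported tx rate must permit aggregation (is_ampdu_allowed), before
* the transmit path below will try to open a BA stream for that TID.
*/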
static void
mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
{
struct mwl8k_priv *priv = hw->priv;
struct ieee80211_tx_info *tx_info;
struct mwl8k_vif *mwl8k_vif;
struct ieee80211_sta *sta;
struct ieee80211_hdr *wh;
struct mwl8k_tx_queue *txq;
struct mwl8k_tx_desc *tx;
dma_addr_t dma;
u32 txstatus;
u8 txdatarate;
u16 qos;
int txpriority;
u8 tid = 0;
struct mwl8k_ampdu_stream *stream = NULL;
bool start_ba_session = false;
bool mgmtframe = false;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
wh = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_data_qos(wh->frame_control))
qos = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(wh)));
else
qos = 0;
if (ieee80211_is_mgmt(wh->frame_control))
mgmtframe = true;
if (priv->ap_fw)
mwl8k_encapsulate_tx_frame(priv, skb);
else
mwl8k_add_dma_header(priv, skb, 0, 0);
wh = &((struct mwl8k_dma_data *)skb->data)->wh;
tx_info = IEEE80211_SKB_CB(skb);
sta = tx_info->control.sta;
mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno);
mwl8k_vif->seqno += 0x10;
}
/* Setup firmware control bit fields for each frame type. */
txstatus = 0;
txdatarate = 0;
if (ieee80211_is_mgmt(wh->frame_control) ||
ieee80211_is_ctl(wh->frame_control)) {
txdatarate = 0;
qos |= MWL8K_QOS_QLEN_UNSPEC | MWL8K_QOS_EOSP;
} else if (ieee80211_is_data(wh->frame_control)) {
txdatarate = 1;
if (is_multicast_ether_addr(wh->addr1))
txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX;
qos &= ~MWL8K_QOS_ACK_POLICY_MASK;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK;
else
qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
}
/* Queue the ADDBA request in the respective data queue. While the
* ampdu stream is being set up, mac80211 queues further packets for
* that particular ra/tid pair. However, packets already piled up in
* the hardware for that ra/tid pair will still go out. If the ADDBA
* request and the related data packets go out from different queues
* asynchronously, the receiver's window shifts, which may cause ampdu
* packets to be dropped at the receiver after the stream has been set
* up.
*/
if (unlikely(ieee80211_is_action(wh->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK &&
mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ &&
priv->ap_fw)) {
u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
index = mwl8k_tid_queue_mapping(tid);
}
txpriority = index;
if (ieee80211_is_data_qos(wh->frame_control) &&
skb->protocol != cpu_to_be16(ETH_P_PAE) &&
sta->ht_cap.ht_supported && priv->ap_fw) {
tid = qos & 0xf;
mwl8k_tx_count_packet(sta, tid);
spin_lock(&priv->stream_lock);
stream = mwl8k_lookup_stream(hw, sta->addr, tid);
if (stream != NULL) {
if (stream->state == AMPDU_STREAM_ACTIVE) {
txpriority = stream->txq_idx;
index = stream->txq_idx;
} else if (stream->state == AMPDU_STREAM_NEW) {
/* We get here if the driver sends us packets
* after we've initiated a stream, but before
* our ampdu_action routine has been called
* with IEEE80211_AMPDU_TX_START to get the SSN
* for the ADDBA request. So this packet can
* go out with no risk of sequence number
* mismatch. No special handling is required.
*/
} else {
/* Drop packets that would go out after the
* ADDBA request was sent but before the ADDBA
* response is received. If we don't do this,
* the recipient would probably receive it
* after the ADDBA request with SSN 0. This
* will cause the recipient's BA receive window
* to shift, which would cause the subsequent
* packets in the BA stream to be discarded.
* mac80211 queues our packets for us in this
* case, so this is really just a safety check.
*/
wiphy_warn(hw->wiphy,
"Cannot send packet while ADDBA "
"dialog is underway.\n");
spin_unlock(&priv->stream_lock);
dev_kfree_skb(skb);
return;
}
} else {
/* Defer calling mwl8k_start_stream so that the current
* skb can go out before the ADDBA request. This
* prevents a sequence number mismatch at the recipient
* as described above.
*/
if (mwl8k_ampdu_allowed(sta, tid)) {
stream = mwl8k_add_stream(hw, sta, tid);
if (stream != NULL)
start_ba_session = true;
}
}
spin_unlock(&priv->stream_lock);
}
dma = pci_map_single(priv->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pdev, dma)) {
wiphy_debug(hw->wiphy,
"failed to dma map skb, dropping TX frame.\n");
if (start_ba_session) {
spin_lock(&priv->stream_lock);
mwl8k_remove_stream(hw, stream);
spin_unlock(&priv->stream_lock);
}
dev_kfree_skb(skb);
return;
}
spin_lock_bh(&priv->tx_lock);
txq = priv->txq + index;
/* Mgmt frames that go out frequently are probe
* responses. Other mgmt frames go out relatively
* infrequently. Hence reserve 2 buffers so that
* other mgmt frames do not get dropped due to an
* already queued probe response in one of the
* reserved buffers.
*/
if (txq->len >= MWL8K_TX_DESCS - 2) {
if (mgmtframe == false ||
txq->len == MWL8K_TX_DESCS) {
if (start_ba_session) {
spin_lock(&priv->stream_lock);
mwl8k_remove_stream(hw, stream);
spin_unlock(&priv->stream_lock);
}
spin_unlock_bh(&priv->tx_lock);
dev_kfree_skb(skb);
return;
}
}
BUG_ON(txq->skb[txq->tail] != NULL);
txq->skb[txq->tail] = skb;
tx = txq->txd + txq->tail;
tx->data_rate = txdatarate;
tx->tx_priority = txpriority;
tx->qos_control = cpu_to_le16(qos);
tx->pkt_phys_addr = cpu_to_le32(dma);
tx->pkt_len = cpu_to_le16(skb->len);
tx->rate_info = 0;
if (!priv->ap_fw && tx_info->control.sta != NULL)
tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
else
tx->peer_id = 0;
if (priv->ap_fw)
tx->timestamp = cpu_to_le32(ioread32(priv->regs +
MWL8K_HW_TIMER_REGISTER));
wmb();
tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
txq->len++;
priv->pending_tx_pkts++;
txq->tail++;
if (txq->tail == MWL8K_TX_DESCS)
txq->tail = 0;
mwl8k_tx_start(priv);
spin_unlock_bh(&priv->tx_lock);
/* Initiate the ampdu session here */
if (start_ba_session) {
spin_lock(&priv->stream_lock);
if (mwl8k_start_stream(hw, stream))
mwl8k_remove_stream(hw, stream);
spin_unlock(&priv->stream_lock);
}
}
/*
* Firmware access.
*
* We have the following requirements for issuing firmware commands:
* - Some commands require that the packet transmit path is idle when
* the command is issued. (For simplicity, we'll just quiesce the
* transmit path for every command.)
* - There are certain sequences of commands that need to be issued to
* the hardware sequentially, with no other intervening commands.
*
* This leads to an implementation of a "firmware lock" as a mutex that
* can be taken recursively, and which is taken by both the low-level
* command submission function (mwl8k_post_cmd) as well as any users of
* that function that require issuing of an atomic sequence of commands,
* and quiesces the transmit path whenever it's taken.
*/
static int mwl8k_fw_lock(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
if (priv->fw_mutex_owner != current) {
int rc;
mutex_lock(&priv->fw_mutex);
ieee80211_stop_queues(hw);
rc = mwl8k_tx_wait_empty(hw);
if (rc) {
ieee80211_wake_queues(hw);
mutex_unlock(&priv->fw_mutex);
return rc;
}
priv->fw_mutex_owner = current;
}
priv->fw_mutex_depth++;
return 0;
}
static void mwl8k_fw_unlock(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
if (!--priv->fw_mutex_depth) {
ieee80211_wake_queues(hw);
priv->fw_mutex_owner = NULL;
mutex_unlock(&priv->fw_mutex);
}
}
/*
* Command processing.
*/
/* Timeout firmware commands after 10s */
#define MWL8K_CMD_TIMEOUT_MS 10000
static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
{
DECLARE_COMPLETION_ONSTACK(cmd_wait);
struct mwl8k_priv *priv = hw->priv;
void __iomem *regs = priv->regs;
dma_addr_t dma_addr;
unsigned int dma_size;
int rc;
unsigned long timeout = 0;
u8 buf[32];
cmd->result = (__force __le16) 0xffff;
dma_size = le16_to_cpu(cmd->length);
dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(priv->pdev, dma_addr))
return -ENOMEM;
rc = mwl8k_fw_lock(hw);
if (rc) {
pci_unmap_single(priv->pdev, dma_addr, dma_size,
PCI_DMA_BIDIRECTIONAL);
return rc;
}
priv->hostcmd_wait = &cmd_wait;
iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
iowrite32(MWL8K_H2A_INT_DOORBELL,
regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
iowrite32(MWL8K_H2A_INT_DUMMY,
regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
timeout = wait_for_completion_timeout(&cmd_wait,
msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
priv->hostcmd_wait = NULL;
mwl8k_fw_unlock(hw);
pci_unmap_single(priv->pdev, dma_addr, dma_size,
PCI_DMA_BIDIRECTIONAL);
if (!timeout) {
wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n",
mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
MWL8K_CMD_TIMEOUT_MS);
rc = -ETIMEDOUT;
} else {
int ms;
ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(timeout);
rc = cmd->result ? -EINVAL : 0;
if (rc)
wiphy_err(hw->wiphy, "Command %s error 0x%x\n",
mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
le16_to_cpu(cmd->result));
else if (ms > 2000)
wiphy_notice(hw->wiphy, "Command %s took %d ms\n",
mwl8k_cmd_name(cmd->code,
buf, sizeof(buf)),
ms);
}
return rc;
}
static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct mwl8k_cmd_pkt *cmd)
{
if (vif != NULL)
cmd->macid = MWL8K_VIF(vif)->macid;
return mwl8k_post_cmd(hw, cmd);
}
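/*
* Illustrative sketch only (not called by the driver): a caller that
* needs two firmware commands issued back to back, with no intervening
* commands, takes the recursive firmware lock itself so that the
* mwl8k_post_cmd() calls inside reuse the already-held lock and the tx
* path stays quiesced across both commands. The helper name and its
* two command arguments are placeholders, not part of the driver API.
*/
static int __maybe_unused mwl8k_post_cmd_pair(struct ieee80211_hw *hw,
struct mwl8k_cmd_pkt *first,
struct mwl8k_cmd_pkt *second)
{
int rc;
rc = mwl8k_fw_lock(hw);
if (rc)
return rc;
rc = mwl8k_post_cmd(hw, first);
if (!rc)
rc = mwl8k_post_cmd(hw, second);
mwl8k_fw_unlock(hw);
return rc;
}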
/*
* Setup code shared between STA and AP firmware images.
*/
static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24));
memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24));
BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
priv->band_24.band = IEEE80211_BAND_2GHZ;
priv->band_24.channels = priv->channels_24;
priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
priv->band_24.bitrates = priv->rates_24;
priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
}
static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50));
memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50));
BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
priv->band_50.band = IEEE80211_BAND_5GHZ;
priv->band_50.channels = priv->channels_50;
priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
priv->band_50.bitrates = priv->rates_50;
priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
}
/*
* CMD_GET_HW_SPEC (STA version).
*/
struct mwl8k_cmd_get_hw_spec_sta {
struct mwl8k_cmd_pkt header;
__u8 hw_rev;
__u8 host_interface;
__le16 num_mcaddrs;
__u8 perm_addr[ETH_ALEN];
__le16 region_code;
__le32 fw_rev;
__le32 ps_cookie;
__le32 caps;
__u8 mcs_bitmap[16];
__le32 rx_queue_ptr;
__le32 num_tx_queues;
__le32 tx_queue_ptrs[MWL8K_TX_WMM_QUEUES];
__le32 caps2;
__le32 num_tx_desc_per_queue;
__le32 total_rxd;
} __packed;
#define MWL8K_CAP_MAX_AMSDU 0x20000000
#define MWL8K_CAP_GREENFIELD 0x08000000
#define MWL8K_CAP_AMPDU 0x04000000
#define MWL8K_CAP_RX_STBC 0x01000000
#define MWL8K_CAP_TX_STBC 0x00800000
#define MWL8K_CAP_SHORTGI_40MHZ 0x00400000
#define MWL8K_CAP_SHORTGI_20MHZ 0x00200000
#define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000
#define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000
#define MWL8K_CAP_DELAY_BA 0x00003000
#define MWL8K_CAP_MIMO 0x00000200
#define MWL8K_CAP_40MHZ 0x00000100
#define MWL8K_CAP_BAND_MASK 0x00000007
#define MWL8K_CAP_5GHZ 0x00000004
#define MWL8K_CAP_2GHZ4 0x00000001
static void
mwl8k_set_ht_caps(struct ieee80211_hw *hw,
struct ieee80211_supported_band *band, u32 cap)
{
int rx_streams;
int tx_streams;
band->ht_cap.ht_supported = 1;
if (cap & MWL8K_CAP_MAX_AMSDU)
band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
if (cap & MWL8K_CAP_GREENFIELD)
band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
if (cap & MWL8K_CAP_AMPDU) {
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
}
if (cap & MWL8K_CAP_RX_STBC)
band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC;
if (cap & MWL8K_CAP_TX_STBC)
band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
if (cap & MWL8K_CAP_SHORTGI_40MHZ)
band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
if (cap & MWL8K_CAP_SHORTGI_20MHZ)
band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
if (cap & MWL8K_CAP_DELAY_BA)
band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA;
if (cap & MWL8K_CAP_40MHZ)
band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
rx_streams = hweight32(cap & MWL8K_CAP_RX_ANTENNA_MASK);
tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK);
band->ht_cap.mcs.rx_mask[0] = 0xff;
if (rx_streams >= 2)
band->ht_cap.mcs.rx_mask[1] = 0xff;
if (rx_streams >= 3)
band->ht_cap.mcs.rx_mask[2] = 0xff;
band->ht_cap.mcs.rx_mask[4] = 0x01;
band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
if (rx_streams != tx_streams) {
band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
band->ht_cap.mcs.tx_params |= (tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
}
}
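/*
* Worked example (illustrative only): if the firmware reports two bits
* set within MWL8K_CAP_RX_ANTENNA_MASK and one bit within
* MWL8K_CAP_TX_ANTENNA_MASK, hweight32() yields rx_streams = 2 and
* tx_streams = 1, so MCS 0-15 are advertised for receive while
* tx_params flags a single-stream transmitter via
* IEEE80211_HT_MCS_TX_RX_DIFF.
*/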
static void
mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
{
struct mwl8k_priv *priv = hw->priv;
if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
mwl8k_setup_2ghz_band(hw);
if (caps & MWL8K_CAP_MIMO)
mwl8k_set_ht_caps(hw, &priv->band_24, caps);
}
if (caps & MWL8K_CAP_5GHZ) {
mwl8k_setup_5ghz_band(hw);
if (caps & MWL8K_CAP_MIMO)
mwl8k_set_ht_caps(hw, &priv->band_50, caps);
}
}
static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_get_hw_spec_sta *cmd;
int rc;
int i;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
cmd->num_tx_queues = cpu_to_le32(mwl8k_tx_queues(priv));
for (i = 0; i < mwl8k_tx_queues(priv); i++)
cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
rc = mwl8k_post_cmd(hw, &cmd->header);
if (!rc) {
SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
priv->ap_macids_supported = 0x00000000;
priv->sta_macids_supported = 0x00000001;
}
kfree(cmd);
return rc;
}
/*
* CMD_GET_HW_SPEC (AP version).
*/
struct mwl8k_cmd_get_hw_spec_ap {
struct mwl8k_cmd_pkt header;
__u8 hw_rev;
__u8 host_interface;
__le16 num_wcb;
__le16 num_mcaddrs;
__u8 perm_addr[ETH_ALEN];
__le16 region_code;
__le16 num_antenna;
__le32 fw_rev;
__le32 wcbbase0;
__le32 rxwrptr;
__le32 rxrdptr;
__le32 ps_cookie;
__le32 wcbbase1;
__le32 wcbbase2;
__le32 wcbbase3;
__le32 fw_api_version;
__le32 caps;
__le32 num_of_ampdu_queues;
__le32 wcbbase_ampdu[MWL8K_MAX_AMPDU_QUEUES];
} __packed;
static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_get_hw_spec_ap *cmd;
int rc, i;
u32 api_version;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
rc = mwl8k_post_cmd(hw, &cmd->header);
if (!rc) {
int off;
api_version = le32_to_cpu(cmd->fw_api_version);
if (priv->device_info->fw_api_ap != api_version) {
printk(KERN_ERR "%s: Unsupported fw API version for %s."
" Expected %d got %d.\n", MWL8K_NAME,
priv->device_info->part_name,
priv->device_info->fw_api_ap,
api_version);
rc = -EINVAL;
goto done;
}
SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
priv->ap_macids_supported = 0x000000ff;
priv->sta_macids_supported = 0x00000000;
priv->num_ampdu_queues = le32_to_cpu(cmd->num_of_ampdu_queues);
if (priv->num_ampdu_queues > MWL8K_MAX_AMPDU_QUEUES) {
wiphy_warn(hw->wiphy, "fw reported %d ampdu queues"
" but we only support %d.\n",
priv->num_ampdu_queues,
MWL8K_MAX_AMPDU_QUEUES);
priv->num_ampdu_queues = MWL8K_MAX_AMPDU_QUEUES;
}
off = le32_to_cpu(cmd->rxwrptr) & 0xffff;
iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
off = le32_to_cpu(cmd->rxrdptr) & 0xffff;
iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
priv->txq_offset[0] = le32_to_cpu(cmd->wcbbase0) & 0xffff;
priv->txq_offset[1] = le32_to_cpu(cmd->wcbbase1) & 0xffff;
priv->txq_offset[2] = le32_to_cpu(cmd->wcbbase2) & 0xffff;
priv->txq_offset[3] = le32_to_cpu(cmd->wcbbase3) & 0xffff;
for (i = 0; i < priv->num_ampdu_queues; i++)
priv->txq_offset[i + MWL8K_TX_WMM_QUEUES] =
le32_to_cpu(cmd->wcbbase_ampdu[i]) & 0xffff;
}
done:
kfree(cmd);
return rc;
}
/*
* CMD_SET_HW_SPEC.
*/
struct mwl8k_cmd_set_hw_spec {
struct mwl8k_cmd_pkt header;
__u8 hw_rev;
__u8 host_interface;
__le16 num_mcaddrs;
__u8 perm_addr[ETH_ALEN];
__le16 region_code;
__le32 fw_rev;
__le32 ps_cookie;
__le32 caps;
__le32 rx_queue_ptr;
__le32 num_tx_queues;
__le32 tx_queue_ptrs[MWL8K_MAX_TX_QUEUES];
__le32 flags;
__le32 num_tx_desc_per_queue;
__le32 total_rxd;
} __packed;
/* If enabled, MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY will cause
* packets to expire 500 ms after the timestamp in the tx descriptor. That is,
* packets that have been queued for more than 500 ms will be dropped in the
* hardware. This helps minimize the issues caused by head-of-line blocking,
* where a slow client can hog the bandwidth and affect traffic to a faster
* client.
*/
#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400
#define MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR 0x00000200
#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_set_hw_spec *cmd;
int rc;
int i;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_HW_SPEC);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
cmd->num_tx_queues = cpu_to_le32(mwl8k_tx_queues(priv));
/*
* Mac80211 stack has Q0 as highest priority and Q3 as lowest in
* that order. Firmware has Q3 as highest priority and Q0 as lowest
* in that order. Map Q3 of mac80211 to Q0 of firmware so that the
* priority is interpreted the right way in firmware.
*/
for (i = 0; i < mwl8k_tx_queues(priv); i++) {
int j = mwl8k_tx_queues(priv) - 1 - i;
cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma);
}
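/*
* Worked example (illustrative only): with the four WMM queues, the
* loop above hands mac80211 queue 3 (lowest priority) to firmware
* slot 0 and mac80211 queue 0 (highest priority) to firmware slot 3,
* matching the priority ordering described in the comment above.
*/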
cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON |
MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY |
MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR);
cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_MAC_MULTICAST_ADR.
*/
struct mwl8k_cmd_mac_multicast_adr {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 numaddr;
__u8 addr[0][ETH_ALEN];
};
#define MWL8K_ENABLE_RX_DIRECTED 0x0001
#define MWL8K_ENABLE_RX_MULTICAST 0x0002
#define MWL8K_ENABLE_RX_ALL_MULTICAST 0x0004
#define MWL8K_ENABLE_RX_BROADCAST 0x0008
static struct mwl8k_cmd_pkt *
__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
struct netdev_hw_addr_list *mc_list)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_mac_multicast_adr *cmd;
int size;
int mc_count = 0;
if (mc_list)
mc_count = netdev_hw_addr_list_count(mc_list);
if (allmulti || mc_count > priv->num_mcaddrs) {
allmulti = 1;
mc_count = 0;
}
size = sizeof(*cmd) + mc_count * ETH_ALEN;
cmd = kzalloc(size, GFP_ATOMIC);
if (cmd == NULL)
return NULL;
cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR);
cmd->header.length = cpu_to_le16(size);
cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_DIRECTED |
MWL8K_ENABLE_RX_BROADCAST);
if (allmulti) {
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST);
} else if (mc_count) {
struct netdev_hw_addr *ha;
int i = 0;
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
cmd->numaddr = cpu_to_le16(mc_count);
netdev_hw_addr_list_for_each(ha, mc_list) {
memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
i++;
}
}
return &cmd->header;
}
/*
* CMD_GET_STAT.
*/
struct mwl8k_cmd_get_stat {
struct mwl8k_cmd_pkt header;
__le32 stats[64];
} __packed;
#define MWL8K_STAT_ACK_FAILURE 9
#define MWL8K_STAT_RTS_FAILURE 12
#define MWL8K_STAT_FCS_ERROR 24
#define MWL8K_STAT_RTS_SUCCESS 11
static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct mwl8k_cmd_get_stat *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
rc = mwl8k_post_cmd(hw, &cmd->header);
if (!rc) {
stats->dot11ACKFailureCount =
le32_to_cpu(cmd->stats[MWL8K_STAT_ACK_FAILURE]);
stats->dot11RTSFailureCount =
le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_FAILURE]);
stats->dot11FCSErrorCount =
le32_to_cpu(cmd->stats[MWL8K_STAT_FCS_ERROR]);
stats->dot11RTSSuccessCount =
le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_SUCCESS]);
}
kfree(cmd);
return rc;
}
/*
* CMD_RADIO_CONTROL.
*/
struct mwl8k_cmd_radio_control {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 control;
__le16 radio_on;
} __packed;
static int
mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_radio_control *cmd;
int rc;
if (enable == priv->radio_on && !force)
return 0;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_RADIO_CONTROL);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->control = cpu_to_le16(priv->radio_short_preamble ? 3 : 1);
cmd->radio_on = cpu_to_le16(enable ? 0x0001 : 0x0000);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
if (!rc)
priv->radio_on = enable;
return rc;
}
static int mwl8k_cmd_radio_disable(struct ieee80211_hw *hw)
{
return mwl8k_cmd_radio_control(hw, 0, 0);
}
static int mwl8k_cmd_radio_enable(struct ieee80211_hw *hw)
{
return mwl8k_cmd_radio_control(hw, 1, 0);
}
static int
mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
{
struct mwl8k_priv *priv = hw->priv;
priv->radio_short_preamble = short_preamble;
return mwl8k_cmd_radio_control(hw, 1, 1);
}
/*
* CMD_RF_TX_POWER.
*/
#define MWL8K_RF_TX_POWER_LEVEL_TOTAL 8
struct mwl8k_cmd_rf_tx_power {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 support_level;
__le16 current_level;
__le16 reserved;
__le16 power_level_list[MWL8K_RF_TX_POWER_LEVEL_TOTAL];
} __packed;
static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
{
struct mwl8k_cmd_rf_tx_power *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_TX_POWER);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->support_level = cpu_to_le16(dBm);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_TX_POWER.
*/
#define MWL8K_TX_POWER_LEVEL_TOTAL 12
struct mwl8k_cmd_tx_power {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 band;
__le16 channel;
__le16 bw;
__le16 sub_ch;
__le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
} __packed;
static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
struct ieee80211_conf *conf,
unsigned short pwr)
{
struct ieee80211_channel *channel = conf->channel;
struct mwl8k_cmd_tx_power *cmd;
int rc;
int i;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_TX_POWER);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST);
if (channel->band == IEEE80211_BAND_2GHZ)
cmd->band = cpu_to_le16(0x1);
else if (channel->band == IEEE80211_BAND_5GHZ)
cmd->band = cpu_to_le16(0x4);
cmd->channel = channel->hw_value;
if (conf->channel_type == NL80211_CHAN_NO_HT ||
conf->channel_type == NL80211_CHAN_HT20) {
cmd->bw = cpu_to_le16(0x2);
} else {
cmd->bw = cpu_to_le16(0x4);
if (conf->channel_type == NL80211_CHAN_HT40MINUS)
cmd->sub_ch = cpu_to_le16(0x3);
else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
cmd->sub_ch = cpu_to_le16(0x1);
}
for (i = 0; i < MWL8K_TX_POWER_LEVEL_TOTAL; i++)
cmd->power_level_list[i] = cpu_to_le16(pwr);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_RF_ANTENNA.
*/
struct mwl8k_cmd_rf_antenna {
struct mwl8k_cmd_pkt header;
__le16 antenna;
__le16 mode;
} __packed;
#define MWL8K_RF_ANTENNA_RX 1
#define MWL8K_RF_ANTENNA_TX 2
static int
mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
{
struct mwl8k_cmd_rf_antenna *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_ANTENNA);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->antenna = cpu_to_le16(antenna);
cmd->mode = cpu_to_le16(mask);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_BEACON.
*/
struct mwl8k_cmd_set_beacon {
struct mwl8k_cmd_pkt header;
__le16 beacon_len;
__u8 beacon[0];
};
static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u8 *beacon, int len)
{
struct mwl8k_cmd_set_beacon *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
cmd->beacon_len = cpu_to_le16(len);
memcpy(cmd->beacon, beacon, len);
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_PRE_SCAN.
*/
struct mwl8k_cmd_set_pre_scan {
struct mwl8k_cmd_pkt header;
} __packed;
static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
{
struct mwl8k_cmd_set_pre_scan *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_PRE_SCAN);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_POST_SCAN.
*/
struct mwl8k_cmd_set_post_scan {
struct mwl8k_cmd_pkt header;
__le32 isibss;
__u8 bssid[ETH_ALEN];
} __packed;
static int
mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
{
struct mwl8k_cmd_set_post_scan *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_POST_SCAN);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->isibss = 0;
memcpy(cmd->bssid, mac, ETH_ALEN);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_RF_CHANNEL.
*/
struct mwl8k_cmd_set_rf_channel {
struct mwl8k_cmd_pkt header;
__le16 action;
__u8 current_channel;
__le32 channel_flags;
} __packed;
static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
struct ieee80211_conf *conf)
{
struct ieee80211_channel *channel = conf->channel;
struct mwl8k_cmd_set_rf_channel *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RF_CHANNEL);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->current_channel = channel->hw_value;
if (channel->band == IEEE80211_BAND_2GHZ)
cmd->channel_flags |= cpu_to_le32(0x00000001);
else if (channel->band == IEEE80211_BAND_5GHZ)
cmd->channel_flags |= cpu_to_le32(0x00000004);
if (conf->channel_type == NL80211_CHAN_NO_HT ||
conf->channel_type == NL80211_CHAN_HT20)
cmd->channel_flags |= cpu_to_le32(0x00000080);
else if (conf->channel_type == NL80211_CHAN_HT40MINUS)
cmd->channel_flags |= cpu_to_le32(0x000001900);
else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
cmd->channel_flags |= cpu_to_le32(0x000000900);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_AID.
*/
#define MWL8K_FRAME_PROT_DISABLED 0x00
#define MWL8K_FRAME_PROT_11G 0x07
#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
struct mwl8k_cmd_update_set_aid {
struct mwl8k_cmd_pkt header;
__le16 aid;
/* AP's MAC address (BSSID) */
__u8 bssid[ETH_ALEN];
__le16 protection_mode;
__u8 supp_rates[14];
} __packed;
static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
{
int i;
int j;
/*
* Clear nonstandard rates 4 and 13.
*/
mask &= 0x1fef;
for (i = 0, j = 0; i < 14; i++) {
if (mask & (1 << i))
rates[j++] = mwl8k_rates_24[i].hw_value;
}
}
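/*
* For example (illustrative only): a legacy_rate_mask of 0x0013 has
* bits 0, 1 and 4 set, but bit 4 is one of the nonstandard rates
* masked off above, so only mwl8k_rates_24[0] and mwl8k_rates_24[1]
* end up in the supported-rates array.
*/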
static int
mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u32 legacy_rate_mask)
{
struct mwl8k_cmd_update_set_aid *cmd;
u16 prot_mode;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->aid = cpu_to_le16(vif->bss_conf.aid);
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
if (vif->bss_conf.use_cts_prot) {
prot_mode = MWL8K_FRAME_PROT_11G;
} else {
switch (vif->bss_conf.ht_operation_mode &
IEEE80211_HT_OP_MODE_PROTECTION) {
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
break;
case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
break;
default:
prot_mode = MWL8K_FRAME_PROT_DISABLED;
break;
}
}
cmd->protection_mode = cpu_to_le16(prot_mode);
legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_RATE.
*/
struct mwl8k_cmd_set_rate {
struct mwl8k_cmd_pkt header;
__u8 legacy_rates[14];
/* Bitmap for supported MCS codes. */
__u8 mcs_set[16];
__u8 reserved[16];
} __packed;
static int
mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 legacy_rate_mask, u8 *mcs_rates)
{
struct mwl8k_cmd_set_rate *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask);
memcpy(cmd->mcs_set, mcs_rates, 16);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_FINALIZE_JOIN.
*/
#define MWL8K_FJ_BEACON_MAXLEN 128
struct mwl8k_cmd_finalize_join {
struct mwl8k_cmd_pkt header;
__le32 sleep_interval; /* Number of beacon periods to sleep */
__u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
} __packed;
static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
int framelen, int dtim)
{
struct mwl8k_cmd_finalize_join *cmd;
struct ieee80211_mgmt *payload = frame;
int payload_len;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
if (payload_len < 0)
payload_len = 0;
else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
payload_len = MWL8K_FJ_BEACON_MAXLEN;
memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_RTS_THRESHOLD.
*/
struct mwl8k_cmd_set_rts_threshold {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 threshold;
} __packed;
static int
mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
{
struct mwl8k_cmd_set_rts_threshold *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->threshold = cpu_to_le16(rts_thresh);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_SLOT.
*/
struct mwl8k_cmd_set_slot {
struct mwl8k_cmd_pkt header;
__le16 action;
__u8 short_slot;
} __packed;
static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
{
struct mwl8k_cmd_set_slot *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->short_slot = short_slot_time;
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_EDCA_PARAMS.
*/
struct mwl8k_cmd_set_edca_params {
struct mwl8k_cmd_pkt header;
/* See MWL8K_SET_EDCA_XXX below */
__le16 action;
/* TX opportunity in units of 32 us */
__le16 txop;
union {
struct {
/* Log exponent of max contention period: 0...15 */
__le32 log_cw_max;
/* Log exponent of min contention period: 0...15 */
__le32 log_cw_min;
/* Adaptive interframe spacing in units of 32us */
__u8 aifs;
/* TX queue to configure */
__u8 txq;
} ap;
struct {
/* Log exponent of max contention period: 0...15 */
__u8 log_cw_max;
/* Log exponent of min contention period: 0...15 */
__u8 log_cw_min;
/* Adaptive interframe spacing in units of 32us */
__u8 aifs;
/* TX queue to configure */
__u8 txq;
} sta;
};
} __packed;
#define MWL8K_SET_EDCA_CW 0x01
#define MWL8K_SET_EDCA_TXOP 0x02
#define MWL8K_SET_EDCA_AIFS 0x04
#define MWL8K_SET_EDCA_ALL (MWL8K_SET_EDCA_CW | \
MWL8K_SET_EDCA_TXOP | \
MWL8K_SET_EDCA_AIFS)
static int
mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
__u16 cw_min, __u16 cw_max,
__u8 aifs, __u16 txop)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_set_edca_params *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
cmd->txop = cpu_to_le16(txop);
if (priv->ap_fw) {
cmd->ap.log_cw_max = cpu_to_le32(ilog2(cw_max + 1));
cmd->ap.log_cw_min = cpu_to_le32(ilog2(cw_min + 1));
cmd->ap.aifs = aifs;
cmd->ap.txq = qnum;
} else {
cmd->sta.log_cw_max = (u8)ilog2(cw_max + 1);
cmd->sta.log_cw_min = (u8)ilog2(cw_min + 1);
cmd->sta.aifs = aifs;
cmd->sta.txq = qnum;
}
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
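/*
* Worked example (illustrative only): mac80211 hands over contention
* windows as 2^n - 1, so cw_min = 15 and cw_max = 1023 become
* log_cw_min = ilog2(16) = 4 and log_cw_max = ilog2(1024) = 10, which
* is the exponent form the firmware expects per the struct above.
*/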
/*
* CMD_SET_WMM_MODE.
*/
struct mwl8k_cmd_set_wmm_mode {
struct mwl8k_cmd_pkt header;
__le16 action;
} __packed;
static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_set_wmm_mode *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(!!enable);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
if (!rc)
priv->wmm_enabled = enable;
return rc;
}
/*
* CMD_MIMO_CONFIG.
*/
struct mwl8k_cmd_mimo_config {
struct mwl8k_cmd_pkt header;
__le32 action;
__u8 rx_antenna_map;
__u8 tx_antenna_map;
} __packed;
static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
{
struct mwl8k_cmd_mimo_config *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
cmd->rx_antenna_map = rx;
cmd->tx_antenna_map = tx;
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_USE_FIXED_RATE (STA version).
*/
struct mwl8k_cmd_use_fixed_rate_sta {
struct mwl8k_cmd_pkt header;
__le32 action;
__le32 allow_rate_drop;
__le32 num_rates;
struct {
__le32 is_ht_rate;
__le32 enable_retry;
__le32 rate;
__le32 retry_count;
} rate_entry[8];
__le32 rate_type;
__le32 reserved1;
__le32 reserved2;
} __packed;
#define MWL8K_USE_AUTO_RATE 0x0002
#define MWL8K_UCAST_RATE 0
static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
{
struct mwl8k_cmd_use_fixed_rate_sta *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_USE_FIXED_RATE (AP version).
*/
struct mwl8k_cmd_use_fixed_rate_ap {
struct mwl8k_cmd_pkt header;
__le32 action;
__le32 allow_rate_drop;
__le32 num_rates;
struct mwl8k_rate_entry_ap {
__le32 is_ht_rate;
__le32 enable_retry;
__le32 rate;
__le32 retry_count;
} rate_entry[4];
u8 multicast_rate;
u8 multicast_rate_type;
u8 management_rate;
} __packed;
static int
mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
{
struct mwl8k_cmd_use_fixed_rate_ap *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
cmd->multicast_rate = mcast;
cmd->management_rate = mgmt;
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_ENABLE_SNIFFER.
*/
struct mwl8k_cmd_enable_sniffer {
struct mwl8k_cmd_pkt header;
__le32 action;
} __packed;
static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
{
struct mwl8k_cmd_enable_sniffer *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32(!!enable);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_MAC_ADDR.
*/
struct mwl8k_cmd_set_mac_addr {
struct mwl8k_cmd_pkt header;
union {
struct {
__le16 mac_type;
__u8 mac_addr[ETH_ALEN];
} mbss;
__u8 mac_addr[ETH_ALEN];
};
} __packed;
#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
#define MWL8K_MAC_TYPE_PRIMARY_AP 2
#define MWL8K_MAC_TYPE_SECONDARY_AP 3
static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u8 *mac)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
struct mwl8k_cmd_set_mac_addr *cmd;
int mac_type;
int rc;
mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
else
mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
} else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported))
mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
else
mac_type = MWL8K_MAC_TYPE_SECONDARY_AP;
}
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
if (priv->ap_fw) {
cmd->mbss.mac_type = cpu_to_le16(mac_type);
memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
} else {
memcpy(cmd->mac_addr, mac, ETH_ALEN);
}
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_SET_RATEADAPT_MODE.
*/
struct mwl8k_cmd_set_rate_adapt_mode {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 mode;
} __packed;
static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
{
struct mwl8k_cmd_set_rate_adapt_mode *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->mode = cpu_to_le16(mode);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_GET_WATCHDOG_BITMAP.
*/
struct mwl8k_cmd_get_watchdog_bitmap {
struct mwl8k_cmd_pkt header;
u8 bitmap;
} __packed;
static int mwl8k_cmd_get_watchdog_bitmap(struct ieee80211_hw *hw, u8 *bitmap)
{
struct mwl8k_cmd_get_watchdog_bitmap *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_WATCHDOG_BITMAP);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
rc = mwl8k_post_cmd(hw, &cmd->header);
if (!rc)
*bitmap = cmd->bitmap;
kfree(cmd);
return rc;
}
#define INVALID_BA 0xAA
static void mwl8k_watchdog_ba_events(struct work_struct *work)
{
int rc;
u8 bitmap = 0, stream_index;
struct mwl8k_ampdu_stream *streams;
struct mwl8k_priv *priv =
container_of(work, struct mwl8k_priv, watchdog_ba_handle);
rc = mwl8k_cmd_get_watchdog_bitmap(priv->hw, &bitmap);
if (rc)
return;
if (bitmap == INVALID_BA)
return;
/* the bitmap is the hw queue number. Map it to the ampdu queue. */
stream_index = bitmap - MWL8K_TX_WMM_QUEUES;
BUG_ON(stream_index >= priv->num_ampdu_queues);
streams = &priv->ampdu[stream_index];
if (streams->state == AMPDU_STREAM_ACTIVE)
ieee80211_stop_tx_ba_session(streams->sta, streams->tid);
return;
}
/*
* CMD_BSS_START.
*/
struct mwl8k_cmd_bss_start {
struct mwl8k_cmd_pkt header;
__le32 enable;
} __packed;
static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int enable)
{
struct mwl8k_cmd_bss_start *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->enable = cpu_to_le32(enable);
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_BASTREAM.
*/
/*
* UPSTREAM is tx direction
*/
#define BASTREAM_FLAG_DIRECTION_UPSTREAM 0x00
#define BASTREAM_FLAG_IMMEDIATE_TYPE 0x01
enum ba_stream_action_type {
MWL8K_BA_CREATE,
MWL8K_BA_UPDATE,
MWL8K_BA_DESTROY,
MWL8K_BA_FLUSH,
MWL8K_BA_CHECK,
};
struct mwl8k_create_ba_stream {
__le32 flags;
__le32 idle_thrs;
__le32 bar_thrs;
__le32 window_size;
u8 peer_mac_addr[6];
u8 dialog_token;
u8 tid;
u8 queue_id;
u8 param_info;
__le32 ba_context;
u8 reset_seq_no_flag;
__le16 curr_seq_no;
u8 sta_src_mac_addr[6];
} __packed;
struct mwl8k_destroy_ba_stream {
__le32 flags;
__le32 ba_context;
} __packed;
struct mwl8k_cmd_bastream {
struct mwl8k_cmd_pkt header;
__le32 action;
union {
struct mwl8k_create_ba_stream create_params;
struct mwl8k_destroy_ba_stream destroy_params;
};
} __packed;
static int
mwl8k_check_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
{
struct mwl8k_cmd_bastream *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32(MWL8K_BA_CHECK);
cmd->create_params.queue_id = stream->idx;
memcpy(&cmd->create_params.peer_mac_addr[0], stream->sta->addr,
ETH_ALEN);
cmd->create_params.tid = stream->tid;
cmd->create_params.flags =
cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE) |
cpu_to_le32(BASTREAM_FLAG_DIRECTION_UPSTREAM);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
static int
mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
u8 buf_size)
{
struct mwl8k_cmd_bastream *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32(MWL8K_BA_CREATE);
cmd->create_params.bar_thrs = cpu_to_le32((u32)buf_size);
cmd->create_params.window_size = cpu_to_le32((u32)buf_size);
cmd->create_params.queue_id = stream->idx;
memcpy(cmd->create_params.peer_mac_addr, stream->sta->addr, ETH_ALEN);
cmd->create_params.tid = stream->tid;
cmd->create_params.curr_seq_no = cpu_to_le16(0);
cmd->create_params.reset_seq_no_flag = 1;
cmd->create_params.param_info =
(stream->sta->ht_cap.ampdu_factor &
IEEE80211_HT_AMPDU_PARM_FACTOR) |
((stream->sta->ht_cap.ampdu_density << 2) &
IEEE80211_HT_AMPDU_PARM_DENSITY);
cmd->create_params.flags =
cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE |
BASTREAM_FLAG_DIRECTION_UPSTREAM);
rc = mwl8k_post_cmd(hw, &cmd->header);
wiphy_debug(hw->wiphy, "Created a BA stream for %pM : tid %d\n",
stream->sta->addr, stream->tid);
kfree(cmd);
return rc;
}
static void mwl8k_destroy_ba(struct ieee80211_hw *hw,
struct mwl8k_ampdu_stream *stream)
{
struct mwl8k_cmd_bastream *cmd;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return;
cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32(MWL8K_BA_DESTROY);
cmd->destroy_params.ba_context = cpu_to_le32(stream->idx);
mwl8k_post_cmd(hw, &cmd->header);
wiphy_debug(hw->wiphy, "Deleted BA stream index %d\n", stream->idx);
kfree(cmd);
}
/*
* CMD_SET_NEW_STN.
*/
struct mwl8k_cmd_set_new_stn {
struct mwl8k_cmd_pkt header;
__le16 aid;
__u8 mac_addr[6];
__le16 stn_id;
__le16 action;
__le16 rsvd;
__le32 legacy_rates;
__u8 ht_rates[4];
__le16 cap_info;
__le16 ht_capabilities_info;
__u8 mac_ht_param_info;
__u8 rev;
__u8 control_channel;
__u8 add_channel;
__le16 op_mode;
__le16 stbc;
__u8 add_qos_info;
__u8 is_qos_sta;
__le32 fw_sta_ptr;
} __packed;
#define MWL8K_STA_ACTION_ADD 0
#define MWL8K_STA_ACTION_REMOVE 2
static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mwl8k_cmd_set_new_stn *cmd;
u32 rates;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->aid = cpu_to_le16(sta->aid);
memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
cmd->stn_id = cpu_to_le16(sta->aid);
cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
else
rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
cmd->legacy_rates = cpu_to_le32(rates);
if (sta->ht_cap.ht_supported) {
cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
((sta->ht_cap.ampdu_density & 7) << 2);
cmd->is_qos_sta = 1;
}
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mwl8k_cmd_set_new_stn *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
memcpy(cmd->mac_addr, vif->addr, ETH_ALEN);
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u8 *addr)
{
struct mwl8k_cmd_set_new_stn *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
memcpy(cmd->mac_addr, addr, ETH_ALEN);
cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE);
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
/*
* CMD_UPDATE_ENCRYPTION.
*/
#define MAX_ENCR_KEY_LENGTH 16
#define MIC_KEY_LENGTH 8
struct mwl8k_cmd_update_encryption {
struct mwl8k_cmd_pkt header;
__le32 action;
__le32 reserved;
__u8 mac_addr[6];
__u8 encr_type;
} __packed;
struct mwl8k_cmd_set_key {
struct mwl8k_cmd_pkt header;
__le32 action;
__le32 reserved;
__le16 length;
__le16 key_type_id;
__le32 key_info;
__le32 key_id;
__le16 key_len;
__u8 key_material[MAX_ENCR_KEY_LENGTH];
__u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
__u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
__le16 tkip_rsc_low;
__le32 tkip_rsc_high;
__le16 tkip_tsc_low;
__le32 tkip_tsc_high;
__u8 mac_addr[6];
} __packed;
enum {
MWL8K_ENCR_ENABLE,
MWL8K_ENCR_SET_KEY,
MWL8K_ENCR_REMOVE_KEY,
MWL8K_ENCR_SET_GROUP_KEY,
};
#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP 0
#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE 1
#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP 4
#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED 7
#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES 8
enum {
MWL8K_ALG_WEP,
MWL8K_ALG_TKIP,
MWL8K_ALG_CCMP,
};
#define MWL8K_KEY_FLAG_TXGROUPKEY 0x00000004
#define MWL8K_KEY_FLAG_PAIRWISE 0x00000008
#define MWL8K_KEY_FLAG_TSC_VALID 0x00000040
#define MWL8K_KEY_FLAG_WEP_TXKEY 0x01000000
#define MWL8K_KEY_FLAG_MICKEY_VALID 0x02000000
static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u8 *addr,
u8 encr_type)
{
struct mwl8k_cmd_update_encryption *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE);
memcpy(cmd->mac_addr, addr, ETH_ALEN);
cmd->encr_type = encr_type;
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
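/*
* Fill the common part of a SET_KEY command: header, key index/length and
* the per-cipher type and flag bits (WEP tx key, TKIP MIC/TSC valid,
* pairwise vs. group). Ciphers other than WEP40/104, TKIP and CCMP are
* rejected with -ENOTSUPP.
*/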
static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd,
u8 *addr,
struct ieee80211_key_conf *key)
{
cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->length = cpu_to_le16(sizeof(*cmd) -
offsetof(struct mwl8k_cmd_set_key, length));
cmd->key_id = cpu_to_le32(key->keyidx);
cmd->key_len = cpu_to_le16(key->keylen);
memcpy(cmd->mac_addr, addr, ETH_ALEN);
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP);
if (key->keyidx == 0)
cmd->key_info = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY);
break;
case WLAN_CIPHER_SUITE_TKIP:
cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP);
cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
: cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID
| MWL8K_KEY_FLAG_TSC_VALID);
break;
case WLAN_CIPHER_SUITE_CCMP:
cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP);
cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
: cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
break;
default:
return -ENOTSUPP;
}
return 0;
}
static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u8 *addr,
struct ieee80211_key_conf *key)
{
struct mwl8k_cmd_set_key *cmd;
int rc;
int keymlen;
u32 action;
u8 idx;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
if (rc < 0)
goto done;
idx = key->keyidx;
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
action = MWL8K_ENCR_SET_KEY;
else
action = MWL8K_ENCR_SET_GROUP_KEY;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
if (!mwl8k_vif->wep_key_conf[idx].enabled) {
memcpy(mwl8k_vif->wep_key_conf[idx].key, key,
sizeof(*key) + key->keylen);
mwl8k_vif->wep_key_conf[idx].enabled = 1;
}
keymlen = key->keylen;
action = MWL8K_ENCR_SET_KEY;
break;
case WLAN_CIPHER_SUITE_TKIP:
keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH;
break;
case WLAN_CIPHER_SUITE_CCMP:
keymlen = key->keylen;
break;
default:
rc = -ENOTSUPP;
goto done;
}
memcpy(cmd->key_material, key->key, keymlen);
cmd->action = cpu_to_le32(action);
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
done:
kfree(cmd);
return rc;
}
static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u8 *addr,
struct ieee80211_key_conf *key)
{
struct mwl8k_cmd_set_key *cmd;
int rc;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
if (rc < 0)
goto done;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104)
mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
done:
kfree(cmd);
return rc;
}
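/*
* mac80211 set_key callback. Hardware crypto is only offered on AP
* interfaces (STA interfaces return -EOPNOTSUPP). On SET_KEY the key is
* programmed and encryption is enabled for the peer as WEP or mixed
* TKIP/AES; on removal the key is deleted and hardware crypto is marked
* disabled for the vif.
*/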
static int mwl8k_set_key(struct ieee80211_hw *hw,
enum set_key_cmd cmd_param,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
int rc = 0;
u8 encr_type;
u8 *addr;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
if (vif->type == NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
if (sta == NULL)
addr = hw->wiphy->perm_addr;
else
addr = sta->addr;
if (cmd_param == SET_KEY) {
rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key);
if (rc)
goto out;
if ((key->cipher == WLAN_CIPHER_SUITE_WEP40)
|| (key->cipher == WLAN_CIPHER_SUITE_WEP104))
encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP;
else
encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED;
rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr,
encr_type);
if (rc)
goto out;
mwl8k_vif->is_hw_crypto_enabled = true;
} else {
rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key);
if (rc)
goto out;
mwl8k_vif->is_hw_crypto_enabled = false;
}
out:
return rc;
}
/*
* CMD_UPDATE_STADB.
*/
struct ewc_ht_info {
__le16 control1;
__le16 control2;
__le16 control3;
} __packed;
struct peer_capability_info {
/* Peer type - AP vs. STA. */
__u8 peer_type;
/* Basic 802.11 capabilities from assoc resp. */
__le16 basic_caps;
/* Set if peer supports 802.11n high throughput (HT). */
__u8 ht_support;
/* Valid if HT is supported. */
__le16 ht_caps;
__u8 extended_ht_caps;
struct ewc_ht_info ewc_info;
/* Legacy rate table. Intersection of our rates and peer rates. */
__u8 legacy_rates[12];
/* HT rate table. Intersection of our rates and peer rates. */
__u8 ht_rates[16];
__u8 pad[16];
/* If set, interoperability mode, no proprietary extensions. */
__u8 interop;
__u8 pad2;
__u8 station_id;
__le16 amsdu_enabled;
} __packed;
struct mwl8k_cmd_update_stadb {
struct mwl8k_cmd_pkt header;
/* See STADB_ACTION_TYPE */
__le32 action;
/* Peer MAC address */
__u8 peer_addr[ETH_ALEN];
__le32 reserved;
/* Peer info - valid during add/update. */
struct peer_capability_info peer_info;
} __packed;
#define MWL8K_STA_DB_MODIFY_ENTRY 1
#define MWL8K_STA_DB_DEL_ENTRY 2
/* Peer Entry flags - used to define the type of the peer node */
#define MWL8K_PEER_TYPE_ACCESSPOINT 2
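/*
* STA firmware path: push the associated AP's capabilities and rate sets
* into the firmware station database. Returns the station_id assigned by
* the firmware on success, or a negative error code.
*/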
static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mwl8k_cmd_update_stadb *cmd;
struct peer_capability_info *p;
u32 rates;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY);
memcpy(cmd->peer_addr, sta->addr, ETH_ALEN);
p = &cmd->peer_info;
p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
p->ht_support = sta->ht_cap.ht_supported;
p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
((sta->ht_cap.ampdu_density & 7) << 2);
if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
else
rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
legacy_rate_mask_to_array(p->legacy_rates, rates);
memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
p->interop = 1;
p->amsdu_enabled = 0;
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc ? rc : p->station_id;
}
static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u8 *addr)
{
struct mwl8k_cmd_update_stadb *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY);
memcpy(cmd->peer_addr, addr, ETH_ALEN);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
* Interrupt handling.
*/
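/*
* Top-half interrupt handler. TX-done, RX-ready and BA-watchdog events are
* handed off to the poll tasklets / watchdog worker and their bits are
* masked out of the ack below (the TX/RX poll routines write their own ack
* once the rings have been drained); any remaining bits are acked here by
* writing their complement back to the A2H status register.
*/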
static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *hw = dev_id;
struct mwl8k_priv *priv = hw->priv;
u32 status;
status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
if (!status)
return IRQ_NONE;
if (status & MWL8K_A2H_INT_TX_DONE) {
status &= ~MWL8K_A2H_INT_TX_DONE;
tasklet_schedule(&priv->poll_tx_task);
}
if (status & MWL8K_A2H_INT_RX_READY) {
status &= ~MWL8K_A2H_INT_RX_READY;
tasklet_schedule(&priv->poll_rx_task);
}
if (status & MWL8K_A2H_INT_BA_WATCHDOG) {
status &= ~MWL8K_A2H_INT_BA_WATCHDOG;
ieee80211_queue_work(hw, &priv->watchdog_ba_handle);
}
if (status)
iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
if (status & MWL8K_A2H_INT_OPC_DONE) {
if (priv->hostcmd_wait != NULL)
complete(priv->hostcmd_wait);
}
if (status & MWL8K_A2H_INT_QUEUE_EMPTY) {
if (!mutex_is_locked(&priv->fw_mutex) &&
priv->radio_on && priv->pending_tx_pkts)
mwl8k_tx_start(priv);
}
return IRQ_HANDLED;
}
static void mwl8k_tx_poll(unsigned long data)
{
struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
struct mwl8k_priv *priv = hw->priv;
int limit;
int i;
limit = 32;
spin_lock_bh(&priv->tx_lock);
for (i = 0; i < mwl8k_tx_queues(priv); i++)
limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
complete(priv->tx_wait);
priv->tx_wait = NULL;
}
spin_unlock_bh(&priv->tx_lock);
if (limit) {
writel(~MWL8K_A2H_INT_TX_DONE,
priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
} else {
tasklet_schedule(&priv->poll_tx_task);
}
}
static void mwl8k_rx_poll(unsigned long data)
{
struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
struct mwl8k_priv *priv = hw->priv;
int limit;
limit = 32;
limit -= rxq_process(hw, 0, limit);
limit -= rxq_refill(hw, 0, limit);
if (limit) {
writel(~MWL8K_A2H_INT_RX_READY,
priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
} else {
tasklet_schedule(&priv->poll_rx_task);
}
}
/*
* Core driver operations.
*/
static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct mwl8k_priv *priv = hw->priv;
int index = skb_get_queue_mapping(skb);
if (!priv->radio_on) {
wiphy_debug(hw->wiphy,
"dropped TX frame since radio disabled\n");
dev_kfree_skb(skb);
return;
}
mwl8k_txq_xmit(hw, index, skb);
}
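/*
* mac80211 start callback: grab the shared IRQ, enable the TX/RX tasklets
* and A2H interrupts, then bring the radio up. On STA firmware the sniffer
* is additionally disabled and the BSS filter reset via the pre/post scan
* commands; rate adaptation and WMM are set to their defaults for both
* firmware flavours.
*/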
static int mwl8k_start(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
int rc;
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
priv->irq = -1;
wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
return -EIO;
}
priv->irq = priv->pdev->irq;
/* Enable TX reclaim and RX tasklets. */
tasklet_enable(&priv->poll_tx_task);
tasklet_enable(&priv->poll_rx_task);
/* Enable interrupts */
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
iowrite32(MWL8K_A2H_EVENTS,
priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
rc = mwl8k_fw_lock(hw);
if (!rc) {
rc = mwl8k_cmd_radio_enable(hw);
if (!priv->ap_fw) {
if (!rc)
rc = mwl8k_cmd_enable_sniffer(hw, 0);
if (!rc)
rc = mwl8k_cmd_set_pre_scan(hw);
if (!rc)
rc = mwl8k_cmd_set_post_scan(hw,
"\x00\x00\x00\x00\x00\x00");
}
if (!rc)
rc = mwl8k_cmd_set_rateadapt_mode(hw, 0);
if (!rc)
rc = mwl8k_cmd_set_wmm_mode(hw, 0);
mwl8k_fw_unlock(hw);
}
if (rc) {
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
priv->irq = -1;
tasklet_disable(&priv->poll_tx_task);
tasklet_disable(&priv->poll_rx_task);
}
return rc;
}
static void mwl8k_stop(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
int i;
mwl8k_cmd_radio_disable(hw);
ieee80211_stop_queues(hw);
/* Disable interrupts */
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
if (priv->irq != -1) {
free_irq(priv->pdev->irq, hw);
priv->irq = -1;
}
/* Stop finalize join worker */
cancel_work_sync(&priv->finalize_join_worker);
cancel_work_sync(&priv->watchdog_ba_handle);
if (priv->beacon_skb != NULL)
dev_kfree_skb(priv->beacon_skb);
/* Stop TX reclaim and RX tasklets. */
tasklet_disable(&priv->poll_tx_task);
tasklet_disable(&priv->poll_rx_task);
/* Return all skbs to mac80211 */
for (i = 0; i < mwl8k_tx_queues(priv); i++)
mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
}
static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image);
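/*
* Adding an interface may require swapping firmware images: if an AP vif is
* requested while STA firmware is loaded (or vice versa) and an alternate
* image exists, the firmware is reloaded, which is only allowed while no
* other vifs are active. A free MAC id is then claimed for the new vif.
*/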
static int mwl8k_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif;
u32 macids_supported;
int macid, rc;
struct mwl8k_device_info *di;
/*
* Reject interface creation if sniffer mode is active, as
* STA operation is mutually exclusive with hardware sniffer
* mode. (Sniffer mode is only used on STA firmware.)
*/
if (priv->sniffer_enabled) {
wiphy_info(hw->wiphy,
"unable to create STA interface because sniffer mode is enabled\n");
return -EINVAL;
}
di = priv->device_info;
switch (vif->type) {
case NL80211_IFTYPE_AP:
if (!priv->ap_fw && di->fw_image_ap) {
/* we must load the ap fw to meet this request */
if (!list_empty(&priv->vif_list))
return -EBUSY;
rc = mwl8k_reload_firmware(hw, di->fw_image_ap);
if (rc)
return rc;
}
macids_supported = priv->ap_macids_supported;
break;
case NL80211_IFTYPE_STATION:
if (priv->ap_fw && di->fw_image_sta) {
/* we must load the sta fw to meet this request */
if (!list_empty(&priv->vif_list))
return -EBUSY;
rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
if (rc)
return rc;
}
macids_supported = priv->sta_macids_supported;
break;
default:
return -EINVAL;
}
macid = ffs(macids_supported & ~priv->macids_used);
if (!macid--)
return -EBUSY;
/* Setup driver private area. */
mwl8k_vif = MWL8K_VIF(vif);
memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
mwl8k_vif->vif = vif;
mwl8k_vif->macid = macid;
mwl8k_vif->seqno = 0;
memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN);
mwl8k_vif->is_hw_crypto_enabled = false;
/* Set the mac address. */
mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
if (priv->ap_fw)
mwl8k_cmd_set_new_stn_add_self(hw, vif);
priv->macids_used |= 1 << mwl8k_vif->macid;
list_add_tail(&mwl8k_vif->list, &priv->vif_list);
return 0;
}
static void mwl8k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
if (priv->ap_fw)
mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
priv->macids_used &= ~(1 << mwl8k_vif->macid);
list_del(&mwl8k_vif->list);
}
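/*
* mac80211 config callback. IDLE simply turns the radio off. Otherwise the
* radio is enabled, the RF channel programmed and TX power clamped to
* 18 dBm; AP firmware additionally gets its RX/TX antenna masks set, while
* STA firmware gets a MIMO configuration.
*/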
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ieee80211_conf *conf = &hw->conf;
struct mwl8k_priv *priv = hw->priv;
int rc;
if (conf->flags & IEEE80211_CONF_IDLE) {
mwl8k_cmd_radio_disable(hw);
return 0;
}
rc = mwl8k_fw_lock(hw);
if (rc)
return rc;
rc = mwl8k_cmd_radio_enable(hw);
if (rc)
goto out;
rc = mwl8k_cmd_set_rf_channel(hw, conf);
if (rc)
goto out;
if (conf->power_level > 18)
conf->power_level = 18;
if (priv->ap_fw) {
if (conf->flags & IEEE80211_CONF_CHANGE_POWER) {
rc = mwl8k_cmd_tx_power(hw, conf, conf->power_level);
if (rc)
goto out;
}
rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
if (rc)
wiphy_warn(hw->wiphy, "failed to set # of RX antennas\n");
rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
if (rc)
wiphy_warn(hw->wiphy, "failed to set # of TX antennas\n");
} else {
rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
if (rc)
goto out;
rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7);
}
out:
mwl8k_fw_unlock(hw);
return rc;
}
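/*
* STA-mode BSS info handler: look up the AP under RCU to capture its legacy
* and MCS rate sets, program rates/AID/preamble/slot as needed, and on
* (re)association arm beacon capture so that the next beacon from our BSSID
* finalizes the join (see mwl8k_finalize_join_worker()).
*/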
static void
mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
struct mwl8k_priv *priv = hw->priv;
u32 ap_legacy_rates = 0;
u8 ap_mcs_rates[16];
int rc;
if (mwl8k_fw_lock(hw))
return;
/*
* No need to capture a beacon if we're no longer associated.
*/
if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
priv->capture_beacon = false;
/*
* Get the AP's legacy and MCS rates.
*/
if (vif->bss_conf.assoc) {
struct ieee80211_sta *ap;
rcu_read_lock();
ap = ieee80211_find_sta(vif, vif->bss_conf.bssid);
if (ap == NULL) {
rcu_read_unlock();
goto out;
}
if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
} else {
ap_legacy_rates =
ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
}
memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
rcu_read_unlock();
}
if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
if (rc)
goto out;
rc = mwl8k_cmd_use_fixed_rate_sta(hw);
if (rc)
goto out;
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
rc = mwl8k_set_radio_preamble(hw,
vif->bss_conf.use_short_preamble);
if (rc)
goto out;
}
if (changed & BSS_CHANGED_ERP_SLOT) {
rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
if (rc)
goto out;
}
if (vif->bss_conf.assoc &&
(changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
BSS_CHANGED_HT))) {
rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
if (rc)
goto out;
}
if (vif->bss_conf.assoc &&
(changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
/*
* Finalize the join. Tell rx handler to process
* next beacon from our BSSID.
*/
memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN);
priv->capture_beacon = true;
}
out:
mwl8k_fw_unlock(hw);
}
static void
mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
int rc;
if (mwl8k_fw_lock(hw))
return;
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
rc = mwl8k_set_radio_preamble(hw,
vif->bss_conf.use_short_preamble);
if (rc)
goto out;
}
if (changed & BSS_CHANGED_BASIC_RATES) {
int idx;
int rate;
/*
* Use lowest supported basic rate for multicasts
* and management frames (such as probe responses --
* beacons will always go out at 1 Mb/s).
*/
idx = ffs(vif->bss_conf.basic_rates);
if (idx)
idx--;
if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
rate = mwl8k_rates_24[idx].hw_value;
else
rate = mwl8k_rates_50[idx].hw_value;
mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
}
if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
struct sk_buff *skb;
skb = ieee80211_beacon_get(hw, vif);
if (skb != NULL) {
mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
kfree_skb(skb);
}
}
if (changed & BSS_CHANGED_BEACON_ENABLED)
mwl8k_cmd_bss_start(hw, vif, info->enable_beacon);
out:
mwl8k_fw_unlock(hw);
}
static void
mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
struct mwl8k_priv *priv = hw->priv;
if (!priv->ap_fw)
mwl8k_bss_info_changed_sta(hw, vif, info, changed);
else
mwl8k_bss_info_changed_ap(hw, vif, info, changed);
}
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
struct mwl8k_cmd_pkt *cmd;
/*
* Synthesize and return a command packet that programs the
* hardware multicast address filter. At this point we don't
* know whether FIF_ALLMULTI is being requested, but if it is,
* we'll end up throwing this packet away and creating a new
* one in mwl8k_configure_filter().
*/
cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_list);
return (unsigned long)cmd;
}
static int
mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags)
{
struct mwl8k_priv *priv = hw->priv;
/*
* Hardware sniffer mode is mutually exclusive with STA
* operation, so refuse to enable sniffer mode if a STA
* interface is active.
*/
if (!list_empty(&priv->vif_list)) {
if (net_ratelimit())
wiphy_info(hw->wiphy,
"not enabling sniffer mode because STA interface is active\n");
return 0;
}
if (!priv->sniffer_enabled) {
if (mwl8k_cmd_enable_sniffer(hw, 1))
return 0;
priv->sniffer_enabled = true;
}
*total_flags &= FIF_PROMISC_IN_BSS | FIF_ALLMULTI |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL |
FIF_OTHER_BSS;
return 1;
}
static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv)
{
if (!list_empty(&priv->vif_list))
return list_entry(priv->vif_list.next, struct mwl8k_vif, list);
return NULL;
}
static void mwl8k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
u64 multicast)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_pkt *cmd = (void *)(unsigned long)multicast;
/*
* AP firmware doesn't allow fine-grained control over
* the receive filter.
*/
if (priv->ap_fw) {
*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
kfree(cmd);
return;
}
/*
* Enable hardware sniffer mode if FIF_CONTROL or
* FIF_OTHER_BSS is requested.
*/
if (*total_flags & (FIF_CONTROL | FIF_OTHER_BSS) &&
mwl8k_configure_filter_sniffer(hw, changed_flags, total_flags)) {
kfree(cmd);
return;
}
/* Clear unsupported feature flags */
*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
if (mwl8k_fw_lock(hw)) {
kfree(cmd);
return;
}
if (priv->sniffer_enabled) {
mwl8k_cmd_enable_sniffer(hw, 0);
priv->sniffer_enabled = false;
}
if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
/*
* Disable the BSS filter.
*/
mwl8k_cmd_set_pre_scan(hw);
} else {
struct mwl8k_vif *mwl8k_vif;
const u8 *bssid;
/*
* Enable the BSS filter.
*
* If there is an active STA interface, use that
* interface's BSSID, otherwise use a dummy one
* (where the OUI part needs to be nonzero for
* the BSSID to be accepted by POST_SCAN).
*/
mwl8k_vif = mwl8k_first_vif(priv);
if (mwl8k_vif != NULL)
bssid = mwl8k_vif->vif->bss_conf.bssid;
else
bssid = "\x01\x00\x00\x00\x00\x00";
mwl8k_cmd_set_post_scan(hw, bssid);
}
}
/*
* If FIF_ALLMULTI is being requested, throw away the command
* packet that ->prepare_multicast() built and replace it with
* a command packet that enables reception of all multicast
* packets.
*/
if (*total_flags & FIF_ALLMULTI) {
kfree(cmd);
cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, NULL);
}
if (cmd != NULL) {
mwl8k_post_cmd(hw, cmd);
kfree(cmd);
}
mwl8k_fw_unlock(hw);
}
static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
return mwl8k_cmd_set_rts_threshold(hw, value);
}
static int mwl8k_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mwl8k_priv *priv = hw->priv;
if (priv->ap_fw)
return mwl8k_cmd_set_new_stn_del(hw, vif, sta->addr);
else
return mwl8k_cmd_update_stadb_del(hw, vif, sta->addr);
}
static int mwl8k_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mwl8k_priv *priv = hw->priv;
int ret;
int i;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
struct ieee80211_key_conf *key;
if (!priv->ap_fw) {
ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
if (ret >= 0) {
MWL8K_STA(sta)->peer_id = ret;
if (sta->ht_cap.ht_supported)
MWL8K_STA(sta)->is_ampdu_allowed = true;
ret = 0;
}
} else {
ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta);
}
for (i = 0; i < NUM_WEP_KEYS; i++) {
key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key);
if (mwl8k_vif->wep_key_conf[i].enabled)
mwl8k_set_key(hw, SET_KEY, vif, sta, key);
}
return ret;
}
static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mwl8k_priv *priv = hw->priv;
int rc;
rc = mwl8k_fw_lock(hw);
if (!rc) {
BUG_ON(queue > MWL8K_TX_WMM_QUEUES - 1);
memcpy(&priv->wmm_params[queue], params, sizeof(*params));
if (!priv->wmm_enabled)
rc = mwl8k_cmd_set_wmm_mode(hw, 1);
if (!rc) {
int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
rc = mwl8k_cmd_set_edca_params(hw, q,
params->cw_min,
params->cw_max,
params->aifs,
params->txop);
}
mwl8k_fw_unlock(hw);
}
return rc;
}
static int mwl8k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
return mwl8k_cmd_get_stat(hw, stats);
}
static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct mwl8k_priv *priv = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
if (idx != 0)
return -ENOENT;
survey->channel = conf->channel;
survey->filled = SURVEY_INFO_NOISE_DBM;
survey->noise = priv->noise;
return 0;
}
#define MAX_AMPDU_ATTEMPTS 5
static int
mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn,
u8 buf_size)
{
int i, rc = 0;
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_ampdu_stream *stream;
u8 *addr = sta->addr;
if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
return -ENOTSUPP;
spin_lock(&priv->stream_lock);
stream = mwl8k_lookup_stream(hw, addr, tid);
switch (action) {
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
/* By the time we get here the hw queues may contain outgoing
* packets for this RA/TID that are not part of this BA
* session. The hw will assign sequence numbers to these
* packets as they go out. So if we query the hw for its next
* sequence number and use that for the SSN here, it may end up
* being wrong, which will lead to sequence number mismatch at
* the recipient. To avoid this, we reset the sequence number
* to 0 for the first MPDU in this BA stream.
*/
*ssn = 0;
if (stream == NULL) {
/* This means that somebody outside this driver called
* ieee80211_start_tx_ba_session. This is unexpected
* because we do our own rate control. Just warn and
* move on.
*/
wiphy_warn(hw->wiphy, "Unexpected call to %s. "
"Proceeding anyway.\n", __func__);
stream = mwl8k_add_stream(hw, sta, tid);
}
if (stream == NULL) {
wiphy_debug(hw->wiphy, "no free AMPDU streams\n");
rc = -EBUSY;
break;
}
stream->state = AMPDU_STREAM_IN_PROGRESS;
/* Release the lock before we do the time consuming stuff */
spin_unlock(&priv->stream_lock);
for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) {
rc = mwl8k_check_ba(hw, stream);
if (!rc)
break;
/*
* HW queues take time to be flushed, give them
* sufficient time
*/
msleep(1000);
}
spin_lock(&priv->stream_lock);
if (rc) {
wiphy_err(hw->wiphy, "Stream for tid %d busy after %d"
" attempts\n", tid, MAX_AMPDU_ATTEMPTS);
mwl8k_remove_stream(hw, stream);
rc = -EBUSY;
break;
}
ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP:
if (stream == NULL)
break;
if (stream->state == AMPDU_STREAM_ACTIVE) {
spin_unlock(&priv->stream_lock);
mwl8k_destroy_ba(hw, stream);
spin_lock(&priv->stream_lock);
}
mwl8k_remove_stream(hw, stream);
ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
BUG_ON(stream == NULL);
BUG_ON(stream->state != AMPDU_STREAM_IN_PROGRESS);
spin_unlock(&priv->stream_lock);
rc = mwl8k_create_ba(hw, stream, buf_size);
spin_lock(&priv->stream_lock);
if (!rc)
stream->state = AMPDU_STREAM_ACTIVE;
else {
spin_unlock(&priv->stream_lock);
mwl8k_destroy_ba(hw, stream);
spin_lock(&priv->stream_lock);
wiphy_debug(hw->wiphy,
"Failed adding stream for sta %pM tid %d\n",
addr, tid);
mwl8k_remove_stream(hw, stream);
}
break;
default:
rc = -ENOTSUPP;
}
spin_unlock(&priv->stream_lock);
return rc;
}
static const struct ieee80211_ops mwl8k_ops = {
.tx = mwl8k_tx,
.start = mwl8k_start,
.stop = mwl8k_stop,
.add_interface = mwl8k_add_interface,
.remove_interface = mwl8k_remove_interface,
.config = mwl8k_config,
.bss_info_changed = mwl8k_bss_info_changed,
.prepare_multicast = mwl8k_prepare_multicast,
.configure_filter = mwl8k_configure_filter,
.set_key = mwl8k_set_key,
.set_rts_threshold = mwl8k_set_rts_threshold,
.sta_add = mwl8k_sta_add,
.sta_remove = mwl8k_sta_remove,
.conf_tx = mwl8k_conf_tx,
.get_stats = mwl8k_get_stats,
.get_survey = mwl8k_get_survey,
.ampdu_action = mwl8k_ampdu_action,
};
static void mwl8k_finalize_join_worker(struct work_struct *work)
{
struct mwl8k_priv *priv =
container_of(work, struct mwl8k_priv, finalize_join_worker);
struct sk_buff *skb = priv->beacon_skb;
struct ieee80211_mgmt *mgmt = (void *)skb->data;
int len = skb->len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
const u8 *tim = cfg80211_find_ie(WLAN_EID_TIM,
mgmt->u.beacon.variable, len);
int dtim_period = 1;
if (tim && tim[1] >= 2)
dtim_period = tim[3];
mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len, dtim_period);
dev_kfree_skb(skb);
priv->beacon_skb = NULL;
}
enum {
MWL8363 = 0,
MWL8687,
MWL8366,
};
#define MWL8K_8366_AP_FW_API 2
#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
[MWL8363] = {
.part_name = "88w8363",
.helper_image = "mwl8k/helper_8363.fw",
.fw_image_sta = "mwl8k/fmimage_8363.fw",
},
[MWL8687] = {
.part_name = "88w8687",
.helper_image = "mwl8k/helper_8687.fw",
.fw_image_sta = "mwl8k/fmimage_8687.fw",
},
[MWL8366] = {
.part_name = "88w8366",
.helper_image = "mwl8k/helper_8366.fw",
.fw_image_sta = "mwl8k/fmimage_8366.fw",
.fw_image_ap = MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API),
.fw_api_ap = MWL8K_8366_AP_FW_API,
.ap_rxd_ops = &rxd_8366_ap_ops,
},
};
MODULE_FIRMWARE("mwl8k/helper_8363.fw");
MODULE_FIRMWARE("mwl8k/fmimage_8363.fw");
MODULE_FIRMWARE("mwl8k/helper_8687.fw");
MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
MODULE_FIRMWARE("mwl8k/helper_8366.fw");
MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
MODULE_FIRMWARE(MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API));
static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
{ PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, },
{ PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
{ PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
{ PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
{ },
};
MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
static int mwl8k_request_alt_fw(struct mwl8k_priv *priv)
{
int rc;
printk(KERN_ERR "%s: Error requesting preferred fw %s.\n"
"Trying alternative firmware %s\n", pci_name(priv->pdev),
priv->fw_pref, priv->fw_alt);
rc = mwl8k_request_fw(priv, priv->fw_alt, &priv->fw_ucode, true);
if (rc) {
printk(KERN_ERR "%s: Error requesting alt fw %s\n",
pci_name(priv->pdev), priv->fw_alt);
return rc;
}
return 0;
}
static int mwl8k_firmware_load_success(struct mwl8k_priv *priv);
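/*
* Completion callback for the asynchronous firmware loading sequence: the
* helper image arrives first, then the preferred microcode, falling back to
* the alternate image if the preferred one is missing. On any failure the
* completion is signalled, the driver releases itself from the device and
* the firmware buffers are freed.
*/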
static void mwl8k_fw_state_machine(const struct firmware *fw, void *context)
{
struct mwl8k_priv *priv = context;
struct mwl8k_device_info *di = priv->device_info;
int rc;
switch (priv->fw_state) {
case FW_STATE_INIT:
if (!fw) {
printk(KERN_ERR "%s: Error requesting helper fw %s\n",
pci_name(priv->pdev), di->helper_image);
goto fail;
}
priv->fw_helper = fw;
rc = mwl8k_request_fw(priv, priv->fw_pref, &priv->fw_ucode,
true);
if (rc && priv->fw_alt) {
rc = mwl8k_request_alt_fw(priv);
if (rc)
goto fail;
priv->fw_state = FW_STATE_LOADING_ALT;
} else if (rc)
goto fail;
else
priv->fw_state = FW_STATE_LOADING_PREF;
break;
case FW_STATE_LOADING_PREF:
if (!fw) {
if (priv->fw_alt) {
rc = mwl8k_request_alt_fw(priv);
if (rc)
goto fail;
priv->fw_state = FW_STATE_LOADING_ALT;
} else
goto fail;
} else {
priv->fw_ucode = fw;
rc = mwl8k_firmware_load_success(priv);
if (rc)
goto fail;
else
complete(&priv->firmware_loading_complete);
}
break;
case FW_STATE_LOADING_ALT:
if (!fw) {
printk(KERN_ERR "%s: Error requesting alt fw %s\n",
pci_name(priv->pdev), di->helper_image);
goto fail;
}
priv->fw_ucode = fw;
rc = mwl8k_firmware_load_success(priv);
if (rc)
goto fail;
else
complete(&priv->firmware_loading_complete);
break;
default:
printk(KERN_ERR "%s: Unexpected firmware loading state: %d\n",
MWL8K_NAME, priv->fw_state);
BUG_ON(1);
}
return;
fail:
priv->fw_state = FW_STATE_ERROR;
complete(&priv->firmware_loading_complete);
device_release_driver(&priv->pdev->dev);
mwl8k_release_firmware(priv);
}
static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
bool nowait)
{
struct mwl8k_priv *priv = hw->priv;
int rc;
/* Reset firmware and hardware */
mwl8k_hw_reset(priv);
/* Ask userland hotplug daemon for the device firmware */
rc = mwl8k_request_firmware(priv, fw_image, nowait);
if (rc) {
wiphy_err(hw->wiphy, "Firmware files not found\n");
return rc;
}
if (nowait)
return rc;
/* Load firmware into hardware */
rc = mwl8k_load_firmware(hw);
if (rc)
wiphy_err(hw->wiphy, "Cannot start firmware\n");
/* Reclaim memory once firmware is successfully loaded */
mwl8k_release_firmware(priv);
return rc;
}
static int mwl8k_init_txqs(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
int rc = 0;
int i;
for (i = 0; i < mwl8k_tx_queues(priv); i++) {
rc = mwl8k_txq_init(hw, i);
if (rc)
break;
if (priv->ap_fw)
iowrite32(priv->txq[i].txd_dma,
priv->sram + priv->txq_offset[i]);
}
return rc;
}
/* initialize hw after successfully loading a firmware image */
static int mwl8k_probe_hw(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
int rc = 0;
int i;
if (priv->ap_fw) {
priv->rxd_ops = priv->device_info->ap_rxd_ops;
if (priv->rxd_ops == NULL) {
wiphy_err(hw->wiphy,
"Driver does not have AP firmware image support for this hardware\n");
goto err_stop_firmware;
}
} else {
priv->rxd_ops = &rxd_sta_ops;
}
priv->sniffer_enabled = false;
priv->wmm_enabled = false;
priv->pending_tx_pkts = 0;
rc = mwl8k_rxq_init(hw, 0);
if (rc)
goto err_stop_firmware;
rxq_refill(hw, 0, INT_MAX);
/* For the sta firmware, we need to know the dma addresses of tx queues
* before sending MWL8K_CMD_GET_HW_SPEC. So we must initialize them
* prior to issuing this command. But for the AP case, we learn the
* total number of queues from the result CMD_GET_HW_SPEC, so for this
* case we must initialize the tx queues after.
*/
priv->num_ampdu_queues = 0;
if (!priv->ap_fw) {
rc = mwl8k_init_txqs(hw);
if (rc)
goto err_free_queues;
}
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
iowrite32(MWL8K_A2H_INT_TX_DONE|MWL8K_A2H_INT_RX_READY|
MWL8K_A2H_INT_BA_WATCHDOG,
priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
iowrite32(MWL8K_A2H_INT_OPC_DONE,
priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
goto err_free_queues;
}
memset(priv->ampdu, 0, sizeof(priv->ampdu));
/*
* Temporarily enable interrupts. Initial firmware host
* commands use interrupts and avoid polling. Disable
* interrupts when done.
*/
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
/* Get config data, mac addrs etc */
if (priv->ap_fw) {
rc = mwl8k_cmd_get_hw_spec_ap(hw);
if (!rc)
rc = mwl8k_init_txqs(hw);
if (!rc)
rc = mwl8k_cmd_set_hw_spec(hw);
} else {
rc = mwl8k_cmd_get_hw_spec_sta(hw);
}
if (rc) {
wiphy_err(hw->wiphy, "Cannot initialise firmware\n");
goto err_free_irq;
}
/* Turn radio off */
rc = mwl8k_cmd_radio_disable(hw);
if (rc) {
wiphy_err(hw->wiphy, "Cannot disable\n");
goto err_free_irq;
}
/* Clear MAC address */
rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
if (rc) {
wiphy_err(hw->wiphy, "Cannot clear MAC address\n");
goto err_free_irq;
}
/* Disable interrupts */
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
wiphy_info(hw->wiphy, "%s v%d, %pm, %s firmware %u.%u.%u.%u\n",
priv->device_info->part_name,
priv->hw_rev, hw->wiphy->perm_addr,
priv->ap_fw ? "AP" : "STA",
(priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff,
(priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff);
return 0;
err_free_irq:
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
err_free_queues:
for (i = 0; i < mwl8k_tx_queues(priv); i++)
mwl8k_txq_deinit(hw, i);
mwl8k_rxq_deinit(hw, 0);
err_stop_firmware:
mwl8k_hw_reset(priv);
return rc;
}
/*
* invoke mwl8k_reload_firmware to change the firmware image after the device
* has already been registered
*/
static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
{
int i, rc = 0;
struct mwl8k_priv *priv = hw->priv;
mwl8k_stop(hw);
mwl8k_rxq_deinit(hw, 0);
for (i = 0; i < mwl8k_tx_queues(priv); i++)
mwl8k_txq_deinit(hw, i);
rc = mwl8k_init_firmware(hw, fw_image, false);
if (rc)
goto fail;
rc = mwl8k_probe_hw(hw);
if (rc)
goto fail;
rc = mwl8k_start(hw);
if (rc)
goto fail;
rc = mwl8k_config(hw, ~0);
if (rc)
goto fail;
for (i = 0; i < MWL8K_TX_WMM_QUEUES; i++) {
rc = mwl8k_conf_tx(hw, i, &priv->wmm_params[i]);
if (rc)
goto fail;
}
return rc;
fail:
printk(KERN_WARNING "mwl8k: Failed to reload firmware image.\n");
return rc;
}
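/*
* Called once a microcode image has been obtained: load it into the
* hardware, fill in the mac80211 capabilities (headroom, queues, flags),
* set up tasklets, workers and the DMA command cookie, probe the hardware
* and finally register with mac80211.
*/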
static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
{
struct ieee80211_hw *hw = priv->hw;
int i, rc;
rc = mwl8k_load_firmware(hw);
mwl8k_release_firmware(priv);
if (rc) {
wiphy_err(hw->wiphy, "Cannot start firmware\n");
return rc;
}
/*
* Extra headroom is the size of the required DMA header
* minus the size of the smallest 802.11 frame (CTS frame).
*/
hw->extra_tx_headroom =
sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts);
hw->extra_tx_headroom -= priv->ap_fw ? REDUCED_TX_HEADROOM : 0;
hw->channel_change_time = 10;
hw->queues = MWL8K_TX_WMM_QUEUES;
/* Set rssi values to dBm */
hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
hw->vif_data_size = sizeof(struct mwl8k_vif);
hw->sta_data_size = sizeof(struct mwl8k_sta);
priv->macids_used = 0;
INIT_LIST_HEAD(&priv->vif_list);
/* Set default radio state and preamble */
priv->radio_on = 0;
priv->radio_short_preamble = 0;
/* Finalize join worker */
INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
/* Handle watchdog ba events */
INIT_WORK(&priv->watchdog_ba_handle, mwl8k_watchdog_ba_events);
/* TX reclaim and RX tasklets. */
tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
tasklet_disable(&priv->poll_tx_task);
tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
tasklet_disable(&priv->poll_rx_task);
/* Power management cookie */
priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
if (priv->cookie == NULL)
return -ENOMEM;
mutex_init(&priv->fw_mutex);
priv->fw_mutex_owner = NULL;
priv->fw_mutex_depth = 0;
priv->hostcmd_wait = NULL;
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->stream_lock);
priv->tx_wait = NULL;
rc = mwl8k_probe_hw(hw);
if (rc)
goto err_free_cookie;
hw->wiphy->interface_modes = 0;
if (priv->ap_macids_supported || priv->device_info->fw_image_ap)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
if (priv->sta_macids_supported || priv->device_info->fw_image_sta)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
rc = ieee80211_register_hw(hw);
if (rc) {
wiphy_err(hw->wiphy, "Cannot register device\n");
goto err_unprobe_hw;
}
return 0;
err_unprobe_hw:
for (i = 0; i < mwl8k_tx_queues(priv); i++)
mwl8k_txq_deinit(hw, i);
mwl8k_rxq_deinit(hw, 0);
err_free_cookie:
if (priv->cookie != NULL)
pci_free_consistent(priv->pdev, 4,
priv->cookie, priv->cookie_dma);
return rc;
}
static int __devinit mwl8k_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
static int printed_version;
struct ieee80211_hw *hw;
struct mwl8k_priv *priv;
struct mwl8k_device_info *di;
int rc;
if (!printed_version) {
printk(KERN_INFO "%s version %s\n", MWL8K_DESC, MWL8K_VERSION);
printed_version = 1;
}
rc = pci_enable_device(pdev);
if (rc) {
printk(KERN_ERR "%s: Cannot enable new PCI device\n",
MWL8K_NAME);
return rc;
}
rc = pci_request_regions(pdev, MWL8K_NAME);
if (rc) {
printk(KERN_ERR "%s: Cannot obtain PCI resources\n",
MWL8K_NAME);
goto err_disable_device;
}
pci_set_master(pdev);
hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
if (hw == NULL) {
printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
rc = -ENOMEM;
goto err_free_reg;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
pci_set_drvdata(pdev, hw);
priv = hw->priv;
priv->hw = hw;
priv->pdev = pdev;
priv->device_info = &mwl8k_info_tbl[id->driver_data];
priv->sram = pci_iomap(pdev, 0, 0x10000);
if (priv->sram == NULL) {
wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
rc = -EIO;
goto err_iounmap;
}
/*
* If BAR0 is a 32 bit BAR, the register BAR will be BAR1.
* If BAR0 is a 64 bit BAR, the register BAR will be BAR2.
*/
priv->regs = pci_iomap(pdev, 1, 0x10000);
if (priv->regs == NULL) {
priv->regs = pci_iomap(pdev, 2, 0x10000);
if (priv->regs == NULL) {
wiphy_err(hw->wiphy, "Cannot map device registers\n");
rc = -EIO;
goto err_iounmap;
}
}
/*
* Choose the initial fw image depending on user input. If a second
* image is available, make it the alternative image that will be
* loaded if the first one fails.
*/
init_completion(&priv->firmware_loading_complete);
di = priv->device_info;
if (ap_mode_default && di->fw_image_ap) {
priv->fw_pref = di->fw_image_ap;
priv->fw_alt = di->fw_image_sta;
} else if (!ap_mode_default && di->fw_image_sta) {
priv->fw_pref = di->fw_image_sta;
priv->fw_alt = di->fw_image_ap;
} else if (ap_mode_default && !di->fw_image_ap && di->fw_image_sta) {
printk(KERN_WARNING "AP fw is unavailable. Using STA fw.");
priv->fw_pref = di->fw_image_sta;
} else if (!ap_mode_default && !di->fw_image_sta && di->fw_image_ap) {
printk(KERN_WARNING "STA fw is unavailable. Using AP fw.");
priv->fw_pref = di->fw_image_ap;
}
rc = mwl8k_init_firmware(hw, priv->fw_pref, true);
if (rc)
goto err_stop_firmware;
return rc;
err_stop_firmware:
mwl8k_hw_reset(priv);
err_iounmap:
if (priv->regs != NULL)
pci_iounmap(pdev, priv->regs);
if (priv->sram != NULL)
pci_iounmap(pdev, priv->sram);
pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
err_free_reg:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
return rc;
}
static void __devexit mwl8k_shutdown(struct pci_dev *pdev)
{
printk(KERN_ERR "===>%s(%u)\n", __func__, __LINE__);
}
static void __devexit mwl8k_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct mwl8k_priv *priv;
int i;
if (hw == NULL)
return;
priv = hw->priv;
wait_for_completion(&priv->firmware_loading_complete);
if (priv->fw_state == FW_STATE_ERROR) {
mwl8k_hw_reset(priv);
goto unmap;
}
ieee80211_stop_queues(hw);
ieee80211_unregister_hw(hw);
/* Remove TX reclaim and RX tasklets. */
tasklet_kill(&priv->poll_tx_task);
tasklet_kill(&priv->poll_rx_task);
/* Stop hardware */
mwl8k_hw_reset(priv);
/* Return all skbs to mac80211 */
for (i = 0; i < mwl8k_tx_queues(priv); i++)
mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
for (i = 0; i < mwl8k_tx_queues(priv); i++)
mwl8k_txq_deinit(hw, i);
mwl8k_rxq_deinit(hw, 0);
pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma);
unmap:
pci_iounmap(pdev, priv->regs);
pci_iounmap(pdev, priv->sram);
pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static struct pci_driver mwl8k_driver = {
.name = MWL8K_NAME,
.id_table = mwl8k_pci_id_table,
.probe = mwl8k_probe,
.remove = __devexit_p(mwl8k_remove),
.shutdown = __devexit_p(mwl8k_shutdown),
};
static int __init mwl8k_init(void)
{
return pci_register_driver(&mwl8k_driver);
}
static void __exit mwl8k_exit(void)
{
pci_unregister_driver(&mwl8k_driver);
}
module_init(mwl8k_init);
module_exit(mwl8k_exit);
MODULE_DESCRIPTION(MWL8K_DESC);
MODULE_VERSION(MWL8K_VERSION);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
PerthCharles/tcpcomment | linux-3.10/arch/arm/mach-exynos/setup-fimd0.c | 2260 | 1126 | /* linux/arch/arm/mach-exynos4/setup-fimd0.c
*
* Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Base Exynos4 FIMD 0 configuration
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fb.h>
#include <linux/gpio.h>
#include <video/samsung_fimd.h>
#include <plat/gpio-cfg.h>
#include <mach/map.h>
void exynos4_fimd0_gpio_setup_24bpp(void)
{
unsigned int reg;
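/*
* Route the FIMD0 signals to the GPF banks: GPF0..GPF2 (8 pins each) plus
* GPF3(0..3) give the 28 pins needed for 24bpp output -- the 24 RGB data
* lines and the HSYNC/VSYNC/VDEN/VCLK control signals (exact per-pin
* mapping as per the Exynos4 pin mux documentation).
*/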
s3c_gpio_cfgrange_nopull(EXYNOS4_GPF0(0), 8, S3C_GPIO_SFN(2));
s3c_gpio_cfgrange_nopull(EXYNOS4_GPF1(0), 8, S3C_GPIO_SFN(2));
s3c_gpio_cfgrange_nopull(EXYNOS4_GPF2(0), 8, S3C_GPIO_SFN(2));
s3c_gpio_cfgrange_nopull(EXYNOS4_GPF3(0), 4, S3C_GPIO_SFN(2));
/*
* Set DISPLAY_CONTROL register for Display path selection.
*
* DISPLAY_CONTROL[1:0]
* ---------------------
* 00 | MIE
* 01 | MDINE
* 10 | FIMD : selected
* 11 | FIMD
*/
reg = __raw_readl(S3C_VA_SYS + 0x0210);
reg |= (1 << 1);
__raw_writel(reg, S3C_VA_SYS + 0x0210);
}
| gpl-2.0 |
Split-Screen/android_kernel_asus_grouper | drivers/tty/cyclades.c | 2772 | 114272 | #undef BLOCKMOVE
#define Z_WAKE
#undef Z_EXT_CHARS_IN_BUFFER
/*
* This file contains the driver for the Cyclades async multiport
* serial boards.
*
* Initially written by Randolph Bentson <bentson@grieg.seaslug.org>.
* Modified and maintained by Marcio Saito <marcio@cyclades.com>.
*
* Copyright (C) 2007-2009 Jiri Slaby <jirislaby@gmail.com>
*
* Much of the design and some of the code came from serial.c
* which was copyright (C) 1991, 1992 Linus Torvalds. It was
* extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92,
* and then fixed as suggested by Michael K. Johnson 12/12/92.
* Converted to pci probing and cleaned up by Jiri Slaby.
*
*/
#define CY_VERSION "2.6"
/* If you need to install more boards than NR_CARDS, change the constant
in the definition below. No other change is necessary to support up to
eight boards. Beyond that you'll have to extend cy_isa_addresses. */
#define NR_CARDS 4
/*
If the total number of ports is larger than NR_PORTS, change this
constant in the definition below. No other change is necessary to
support more boards/ports. */
#define NR_PORTS 256
#define ZO_V1 0
#define ZO_V2 1
#define ZE_V1 2
#define SERIAL_PARANOIA_CHECK
#undef CY_DEBUG_OPEN
#undef CY_DEBUG_THROTTLE
#undef CY_DEBUG_OTHER
#undef CY_DEBUG_IO
#undef CY_DEBUG_COUNT
#undef CY_DEBUG_DTR
#undef CY_DEBUG_WAIT_UNTIL_SENT
#undef CY_DEBUG_INTERRUPTS
#undef CY_16Y_HACK
#undef CY_ENABLE_MONITORING
#undef CY_PCI_DEBUG
/*
* Include section
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/cyclades.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
static void cy_send_xchar(struct tty_struct *tty, char ch);
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096))
#endif
#define STD_COM_FLAGS (0)
/* firmware stuff */
#define ZL_MAX_BLOCKS 16
#define DRIVER_VERSION 0x02010203
#define RAM_SIZE 0x80000
enum zblock_type {
ZBLOCK_PRG = 0,
ZBLOCK_FPGA = 1
};
struct zfile_header {
char name[64];
char date[32];
char aux[32];
u32 n_config;
u32 config_offset;
u32 n_blocks;
u32 block_offset;
u32 reserved[9];
} __attribute__ ((packed));
struct zfile_config {
char name[64];
u32 mailbox;
u32 function;
u32 n_blocks;
u32 block_list[ZL_MAX_BLOCKS];
} __attribute__ ((packed));
struct zfile_block {
u32 type;
u32 file_offset;
u32 ram_offset;
u32 size;
} __attribute__ ((packed));
static struct tty_driver *cy_serial_driver;
#ifdef CONFIG_ISA
/* This is the address lookup table. The driver will probe for
Cyclom-Y/ISA boards at all addresses in here. If you want the
driver to probe addresses at a different address, add it to
this table. If the driver is probing some other board and
causing problems, remove the offending address from this table.
*/
static unsigned int cy_isa_addresses[] = {
0xD0000,
0xD2000,
0xD4000,
0xD6000,
0xD8000,
0xDA000,
0xDC000,
0xDE000,
0, 0, 0, 0, 0, 0, 0, 0
};
#define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses)
static long maddr[NR_CARDS];
static int irq[NR_CARDS];
module_param_array(maddr, long, NULL, 0);
module_param_array(irq, int, NULL, 0);
#endif /* CONFIG_ISA */
/* This is the per-card data structure containing address, irq, number of
channels, etc. This driver supports a maximum of NR_CARDS cards.
*/
static struct cyclades_card cy_card[NR_CARDS];
static int cy_next_channel; /* next minor available */
/*
* This is used to look up the divisor speeds and the timeouts
* We're normally limited to 15 distinct baud rates. The extra
* are accessed via settings in info->port.flags.
* 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
* 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
* HI VHI
* 20
*/
static const int baud_table[] = {
0, 50, 75, 110, 134, 150, 200, 300, 600, 1200,
1800, 2400, 4800, 9600, 19200, 38400, 57600, 76800, 115200, 150000,
230400, 0
};
static const char baud_co_25[] = { /* 25 MHz clock option table */
/* value => 00 01 02 03 04 */
/* divide by 8 32 128 512 2048 */
0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x02,
0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static const char baud_bpr_25[] = { /* 25 MHz baud rate period table */
0x00, 0xf5, 0xa3, 0x6f, 0x5c, 0x51, 0xf5, 0xa3, 0x51, 0xa3,
0x6d, 0x51, 0xa3, 0x51, 0xa3, 0x51, 0x36, 0x29, 0x1b, 0x15
};
static const char baud_co_60[] = { /* 60 MHz clock option table (CD1400 J) */
/* value => 00 01 02 03 04 */
/* divide by 8 32 128 512 2048 */
0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03,
0x03, 0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00
};
static const char baud_bpr_60[] = { /* 60 MHz baud rate period table (CD1400 J) */
0x00, 0x82, 0x21, 0xff, 0xdb, 0xc3, 0x92, 0x62, 0xc3, 0x62,
0x41, 0xc3, 0x62, 0xc3, 0x62, 0xc3, 0x82, 0x62, 0x41, 0x32,
0x21
};
static const char baud_cor3[] = { /* receive threshold */
0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x08, 0x08, 0x08, 0x08, 0x07,
0x07
};
/*
* The Cyclades driver implements HW flow control as any serial driver does.
* The cyclades_port structure member rflow and the vector rflow_thr
* allow us to take advantage of a special feature in the CD1400 to avoid
* data loss even when the system interrupt latency is too high. These flags
* are to be used only with very special applications. Setting these flags
* requires the use of a special cable (DTR and RTS reversed). In the new
* CD1400-based boards (rev. 6.00 or later), there is no need for special
* cables.
*/
static const char rflow_thr[] = { /* rflow threshold */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x0a
};
/* The Cyclom-Ye has placed the sequential chips in non-sequential
* address order. This look-up table overcomes that problem.
*/
static const unsigned int cy_chip_offset[] = { 0x0000,
0x0400,
0x0800,
0x0C00,
0x0200,
0x0600,
0x0A00,
0x0E00
};
/* PCI related definitions */
#ifdef CONFIG_PCI
static const struct pci_device_id cy_pci_dev_id[] = {
/* PCI < 1Mb */
{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) },
/* PCI > 1Mb */
{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) },
/* 4Y PCI < 1Mb */
{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) },
/* 4Y PCI > 1Mb */
{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) },
/* 8Y PCI < 1Mb */
{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) },
/* 8Y PCI > 1Mb */
{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) },
/* Z PCI < 1Mb */
{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) },
/* Z PCI > 1Mb */
{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) },
{ } /* end of table */
};
MODULE_DEVICE_TABLE(pci, cy_pci_dev_id);
#endif
static void cy_start(struct tty_struct *);
static void cy_set_line_char(struct cyclades_port *, struct tty_struct *);
static int cyz_issue_cmd(struct cyclades_card *, __u32, __u8, __u32);
#ifdef CONFIG_ISA
static unsigned detect_isa_irq(void __iomem *);
#endif /* CONFIG_ISA */
#ifndef CONFIG_CYZ_INTR
static void cyz_poll(unsigned long);
/* The Cyclades-Z polling cycle is defined by this variable */
static long cyz_polling_cycle = CZ_DEF_POLL;
static DEFINE_TIMER(cyz_timerlist, cyz_poll, 0, 0);
#else /* CONFIG_CYZ_INTR */
static void cyz_rx_restart(unsigned long);
static struct timer_list cyz_rx_full_timer[NR_PORTS];
#endif /* CONFIG_CYZ_INTR */
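/*
* Per-port Cyclom-Y register accessors: the CD1400 register offset is
* shifted by the card's bus_index, which accounts for the different
* register spacing of ISA and PCI boards.
*/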
static inline void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val)
{
struct cyclades_card *card = port->card;
cy_writeb(port->u.cyy.base_addr + (reg << card->bus_index), val);
}
static inline u8 cyy_readb(struct cyclades_port *port, u32 reg)
{
struct cyclades_card *card = port->card;
return readb(port->u.cyy.base_addr + (reg << card->bus_index));
}
static inline bool cy_is_Z(struct cyclades_card *card)
{
return card->num_chips == (unsigned int)-1;
}
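/*
* Cyclades-Z state helpers: the FPGA-loaded flag is bit 17 of the PLX 9060
* init_ctrl register, and firmware is considered present only when the
* ZFIRM_ID signature is also found at ID_ADDRESS in board RAM.
*/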
static inline bool __cyz_fpga_loaded(struct RUNTIME_9060 __iomem *ctl_addr)
{
return readl(&ctl_addr->init_ctrl) & (1 << 17);
}
static inline bool cyz_fpga_loaded(struct cyclades_card *card)
{
return __cyz_fpga_loaded(card->ctl_addr.p9060);
}
static inline bool cyz_is_loaded(struct cyclades_card *card)
{
struct FIRM_ID __iomem *fw_id = card->base_addr + ID_ADDRESS;
return (card->hw_ver == ZO_V1 || cyz_fpga_loaded(card)) &&
readl(&fw_id->signature) == ZFIRM_ID;
}
static inline int serial_paranoia_check(struct cyclades_port *info,
const char *name, const char *routine)
{
#ifdef SERIAL_PARANOIA_CHECK
if (!info) {
printk(KERN_WARNING "cyc Warning: null cyclades_port for (%s) "
"in %s\n", name, routine);
return 1;
}
if (info->magic != CYCLADES_MAGIC) {
printk(KERN_WARNING "cyc Warning: bad magic number for serial "
"struct (%s) in %s\n", name, routine);
return 1;
}
#endif
return 0;
}
/***********************************************************/
/********* Start of block of Cyclom-Y specific code ********/
/* This routine waits up to 1000 micro-seconds for the previous
command to the Cirrus chip to complete and then issues the
new command. An error is returned if the previous command
didn't finish within the time limit.
This function is only called from inside spinlock-protected code.
*/
static int __cyy_issue_cmd(void __iomem *base_addr, u8 cmd, int index)
{
void __iomem *ccr = base_addr + (CyCCR << index);
unsigned int i;
/* Check to see that the previous command has completed */
for (i = 0; i < 100; i++) {
if (readb(ccr) == 0)
break;
udelay(10L);
}
/* if the CCR never cleared, the previous command
didn't finish within the "reasonable time" */
if (i == 100)
return -1;
/* Issue the new command */
cy_writeb(ccr, cmd);
return 0;
}
static inline int cyy_issue_cmd(struct cyclades_port *port, u8 cmd)
{
return __cyy_issue_cmd(port->u.cyy.base_addr, cmd,
port->card->bus_index);
}
#ifdef CONFIG_ISA
/* ISA interrupt detection code */
static unsigned detect_isa_irq(void __iomem *address)
{
int irq;
unsigned long irqs, flags;
int save_xir, save_car;
int index = 0; /* IRQ probing is only for ISA */
/* forget possible initially masked and pending IRQ */
irq = probe_irq_off(probe_irq_on());
/* Clear interrupts on the board first */
cy_writeb(address + (Cy_ClrIntr << index), 0);
/* Cy_ClrIntr is 0x1800 */
irqs = probe_irq_on();
/* Wait ... */
msleep(5);
/* Enable the Tx interrupts on the CD1400 */
local_irq_save(flags);
cy_writeb(address + (CyCAR << index), 0);
__cyy_issue_cmd(address, CyCHAN_CTL | CyENB_XMTR, index);
cy_writeb(address + (CyCAR << index), 0);
cy_writeb(address + (CySRER << index),
readb(address + (CySRER << index)) | CyTxRdy);
local_irq_restore(flags);
/* Wait ... */
msleep(5);
/* Check which interrupt is in use */
irq = probe_irq_off(irqs);
/* Clean up */
save_xir = (u_char) readb(address + (CyTIR << index));
save_car = readb(address + (CyCAR << index));
cy_writeb(address + (CyCAR << index), (save_xir & 0x3));
cy_writeb(address + (CySRER << index),
readb(address + (CySRER << index)) & ~CyTxRdy);
cy_writeb(address + (CyTIR << index), (save_xir & 0x3f));
cy_writeb(address + (CyCAR << index), (save_car));
cy_writeb(address + (Cy_ClrIntr << index), 0);
/* Cy_ClrIntr is 0x1800 */
return (irq > 0) ? irq : 0;
}
#endif /* CONFIG_ISA */
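/*
* Receive service routine for one CD1400 chip: switch to the interrupting
* channel's context, then either read the exception status or drain the
* receive FIFO into the tty flip buffer (updating error and idle
* statistics along the way), and finally restore the saved context.
*/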
static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
void __iomem *base_addr)
{
struct cyclades_port *info;
struct tty_struct *tty;
int len, index = cinfo->bus_index;
u8 ivr, save_xir, channel, save_car, data, char_count;
#ifdef CY_DEBUG_INTERRUPTS
printk(KERN_DEBUG "cyy_interrupt: rcvd intr, chip %d\n", chip);
#endif
/* determine the channel & change to that context */
save_xir = readb(base_addr + (CyRIR << index));
channel = save_xir & CyIRChannel;
info = &cinfo->ports[channel + chip * 4];
save_car = cyy_readb(info, CyCAR);
cyy_writeb(info, CyCAR, save_xir);
ivr = cyy_readb(info, CyRIVR) & CyIVRMask;
tty = tty_port_tty_get(&info->port);
/* if there is nowhere to put the data, discard it */
if (tty == NULL) {
if (ivr == CyIVRRxEx) { /* exception */
data = cyy_readb(info, CyRDSR);
} else { /* normal character reception */
char_count = cyy_readb(info, CyRDCR);
while (char_count--)
data = cyy_readb(info, CyRDSR);
}
goto end;
}
/* there is an open port for this data */
if (ivr == CyIVRRxEx) { /* exception */
data = cyy_readb(info, CyRDSR);
/* For statistics only */
if (data & CyBREAK)
info->icount.brk++;
else if (data & CyFRAME)
info->icount.frame++;
else if (data & CyPARITY)
info->icount.parity++;
else if (data & CyOVERRUN)
info->icount.overrun++;
if (data & info->ignore_status_mask) {
info->icount.rx++;
tty_kref_put(tty);
return;
}
if (tty_buffer_request_room(tty, 1)) {
if (data & info->read_status_mask) {
if (data & CyBREAK) {
tty_insert_flip_char(tty,
cyy_readb(info, CyRDSR),
TTY_BREAK);
info->icount.rx++;
if (info->port.flags & ASYNC_SAK)
do_SAK(tty);
} else if (data & CyFRAME) {
tty_insert_flip_char(tty,
cyy_readb(info, CyRDSR),
TTY_FRAME);
info->icount.rx++;
info->idle_stats.frame_errs++;
} else if (data & CyPARITY) {
/* Pieces of seven... */
tty_insert_flip_char(tty,
cyy_readb(info, CyRDSR),
TTY_PARITY);
info->icount.rx++;
info->idle_stats.parity_errs++;
} else if (data & CyOVERRUN) {
tty_insert_flip_char(tty, 0,
TTY_OVERRUN);
info->icount.rx++;
/* If the flip buffer itself is
overflowing, we still lose
the next incoming character.
*/
tty_insert_flip_char(tty,
cyy_readb(info, CyRDSR),
TTY_FRAME);
info->icount.rx++;
info->idle_stats.overruns++;
/* These two conditions may imply */
/* a normal read should be done. */
/* } else if(data & CyTIMEOUT) { */
/* } else if(data & CySPECHAR) { */
} else {
tty_insert_flip_char(tty, 0,
TTY_NORMAL);
info->icount.rx++;
}
} else {
tty_insert_flip_char(tty, 0, TTY_NORMAL);
info->icount.rx++;
}
} else {
/* there was a software buffer overrun and nothing
* could be done about it!!! */
info->icount.buf_overrun++;
info->idle_stats.overruns++;
}
} else { /* normal character reception */
/* load # chars available from the chip */
char_count = cyy_readb(info, CyRDCR);
#ifdef CY_ENABLE_MONITORING
++info->mon.int_count;
info->mon.char_count += char_count;
if (char_count > info->mon.char_max)
info->mon.char_max = char_count;
info->mon.char_last = char_count;
#endif
len = tty_buffer_request_room(tty, char_count);
while (len--) {
data = cyy_readb(info, CyRDSR);
tty_insert_flip_char(tty, data, TTY_NORMAL);
info->idle_stats.recv_bytes++;
info->icount.rx++;
#ifdef CY_16Y_HACK
udelay(10L);
#endif
}
info->idle_stats.recv_idle = jiffies;
}
tty_schedule_flip(tty);
tty_kref_put(tty);
end:
/* end of service */
cyy_writeb(info, CyRIR, save_xir & 0x3f);
cyy_writeb(info, CyCAR, save_car);
}
static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
void __iomem *base_addr)
{
struct cyclades_port *info;
struct tty_struct *tty;
int char_count, index = cinfo->bus_index;
u8 save_xir, channel, save_car, outch;
/* Since we only get here when the transmit buffer
is empty, we know we can always stuff a dozen
characters. */
#ifdef CY_DEBUG_INTERRUPTS
printk(KERN_DEBUG "cyy_interrupt: xmit intr, chip %d\n", chip);
#endif
/* determine the channel & change to that context */
save_xir = readb(base_addr + (CyTIR << index));
channel = save_xir & CyIRChannel;
save_car = readb(base_addr + (CyCAR << index));
cy_writeb(base_addr + (CyCAR << index), save_xir);
info = &cinfo->ports[channel + chip * 4];
tty = tty_port_tty_get(&info->port);
if (tty == NULL) {
cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy);
goto end;
}
/* load the on-chip space for outbound data */
char_count = info->xmit_fifo_size;
if (info->x_char) { /* send special char */
outch = info->x_char;
cyy_writeb(info, CyTDR, outch);
char_count--;
info->icount.tx++;
info->x_char = 0;
}
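/* Break handling relies on the CD1400's embedded transmit commands
(enabled via CyETC in COR2, see cy_set_line_char): a NUL in the data
stream escapes the next byte, 0x81 starts a break and 0x83 ends it,
so each request consumes two FIFO slots. */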
if (info->breakon || info->breakoff) {
if (info->breakon) {
cyy_writeb(info, CyTDR, 0);
cyy_writeb(info, CyTDR, 0x81);
info->breakon = 0;
char_count -= 2;
}
if (info->breakoff) {
cyy_writeb(info, CyTDR, 0);
cyy_writeb(info, CyTDR, 0x83);
info->breakoff = 0;
char_count -= 2;
}
}
while (char_count-- > 0) {
if (!info->xmit_cnt) {
if (cyy_readb(info, CySRER) & CyTxMpty) {
cyy_writeb(info, CySRER,
cyy_readb(info, CySRER) & ~CyTxMpty);
} else {
cyy_writeb(info, CySRER, CyTxMpty |
(cyy_readb(info, CySRER) & ~CyTxRdy));
}
goto done;
}
if (info->port.xmit_buf == NULL) {
cyy_writeb(info, CySRER,
cyy_readb(info, CySRER) & ~CyTxRdy);
goto done;
}
if (tty->stopped || tty->hw_stopped) {
cyy_writeb(info, CySRER,
cyy_readb(info, CySRER) & ~CyTxRdy);
goto done;
}
/* Because the Embedded Transmit Commands have been enabled,
* we must check to see if the escape character, NULL, is being
* sent. If it is, we must ensure that there is room for it to
* be doubled in the output stream. Therefore we no longer
* advance the pointer when the character is fetched, but
* rather wait until after the check for a NULL output
* character. This is necessary because there may not be room
* for the two chars needed to send a NULL.
*/
outch = info->port.xmit_buf[info->xmit_tail];
if (outch) {
info->xmit_cnt--;
info->xmit_tail = (info->xmit_tail + 1) &
(SERIAL_XMIT_SIZE - 1);
cyy_writeb(info, CyTDR, outch);
info->icount.tx++;
} else {
if (char_count > 1) {
info->xmit_cnt--;
info->xmit_tail = (info->xmit_tail + 1) &
(SERIAL_XMIT_SIZE - 1);
cyy_writeb(info, CyTDR, outch);
cyy_writeb(info, CyTDR, 0);
info->icount.tx++;
char_count--;
}
}
}
done:
tty_wakeup(tty);
tty_kref_put(tty);
end:
/* end of service */
cyy_writeb(info, CyTIR, save_xir & 0x3f);
cyy_writeb(info, CyCAR, save_car);
}
static void cyy_chip_modem(struct cyclades_card *cinfo, int chip,
void __iomem *base_addr)
{
struct cyclades_port *info;
struct tty_struct *tty;
int index = cinfo->bus_index;
u8 save_xir, channel, save_car, mdm_change, mdm_status;
/* determine the channel & change to that context */
save_xir = readb(base_addr + (CyMIR << index));
channel = save_xir & CyIRChannel;
info = &cinfo->ports[channel + chip * 4];
save_car = cyy_readb(info, CyCAR);
cyy_writeb(info, CyCAR, save_xir);
mdm_change = cyy_readb(info, CyMISR);
mdm_status = cyy_readb(info, CyMSVR1);
tty = tty_port_tty_get(&info->port);
if (!tty)
goto end;
if (mdm_change & CyANY_DELTA) {
/* For statistics only */
if (mdm_change & CyDCD)
info->icount.dcd++;
if (mdm_change & CyCTS)
info->icount.cts++;
if (mdm_change & CyDSR)
info->icount.dsr++;
if (mdm_change & CyRI)
info->icount.rng++;
wake_up_interruptible(&info->port.delta_msr_wait);
}
if ((mdm_change & CyDCD) && (info->port.flags & ASYNC_CHECK_CD)) {
if (mdm_status & CyDCD)
wake_up_interruptible(&info->port.open_wait);
else
tty_hangup(tty);
}
if ((mdm_change & CyCTS) && (info->port.flags & ASYNC_CTS_FLOW)) {
if (tty->hw_stopped) {
if (mdm_status & CyCTS) {
/* cy_start isn't used
because... !!! */
tty->hw_stopped = 0;
cyy_writeb(info, CySRER,
cyy_readb(info, CySRER) | CyTxRdy);
tty_wakeup(tty);
}
} else {
if (!(mdm_status & CyCTS)) {
/* cy_stop isn't used
because ... !!! */
tty->hw_stopped = 1;
cyy_writeb(info, CySRER,
cyy_readb(info, CySRER) & ~CyTxRdy);
}
}
}
/* if (mdm_change & CyDSR) {
}
if (mdm_change & CyRI) {
}*/
tty_kref_put(tty);
end:
/* end of service */
cyy_writeb(info, CyMIR, save_xir & 0x3f);
cyy_writeb(info, CyCAR, save_car);
}
/* The real interrupt service routine is called
whenever the card wants its hand held--chars
received, out buffer empty, modem change, etc.
*/
static irqreturn_t cyy_interrupt(int irq, void *dev_id)
{
int status;
struct cyclades_card *cinfo = dev_id;
void __iomem *base_addr, *card_base_addr;
unsigned int chip, too_many, had_work;
int index;
if (unlikely(cinfo == NULL)) {
#ifdef CY_DEBUG_INTERRUPTS
printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n",
irq);
#endif
return IRQ_NONE; /* spurious interrupt */
}
card_base_addr = cinfo->base_addr;
index = cinfo->bus_index;
/* card was not initialized yet (e.g. DEBUG_SHIRQ) */
if (unlikely(card_base_addr == NULL))
return IRQ_HANDLED;
/* This loop checks all chips in the card. Make a note whenever
_any_ chip had some work to do, as this is considered an
indication that there will be more to do. Only when no chip
has any work does this outermost loop exit.
*/
do {
had_work = 0;
for (chip = 0; chip < cinfo->num_chips; chip++) {
base_addr = cinfo->base_addr +
(cy_chip_offset[chip] << index);
too_many = 0;
while ((status = readb(base_addr +
(CySVRR << index))) != 0x00) {
had_work++;
/* The purpose of the following test is to ensure that
no chip can monopolize the driver. This forces the
chips to be checked in a round-robin fashion (after
draining each of a bunch (1000) of characters).
*/
if (1000 < too_many++)
break;
spin_lock(&cinfo->card_lock);
if (status & CySRReceive) /* rx intr */
cyy_chip_rx(cinfo, chip, base_addr);
if (status & CySRTransmit) /* tx intr */
cyy_chip_tx(cinfo, chip, base_addr);
if (status & CySRModem) /* modem intr */
cyy_chip_modem(cinfo, chip, base_addr);
spin_unlock(&cinfo->card_lock);
}
}
} while (had_work);
/* clear interrupts */
spin_lock(&cinfo->card_lock);
cy_writeb(card_base_addr + (Cy_ClrIntr << index), 0);
/* Cy_ClrIntr is 0x1800 */
spin_unlock(&cinfo->card_lock);
return IRQ_HANDLED;
} /* cyy_interrupt */
static void cyy_change_rts_dtr(struct cyclades_port *info, unsigned int set,
unsigned int clear)
{
struct cyclades_card *card = info->card;
int channel = info->line - card->first_line;
u32 rts, dtr, msvrr, msvrd;
channel &= 0x03;
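/* Some boards have the RTS and DTR outputs wired the other way around;
rtsdtr_inv selects which MSVR register/bit actually drives each signal. */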
if (info->rtsdtr_inv) {
msvrr = CyMSVR2;
msvrd = CyMSVR1;
rts = CyDTR;
dtr = CyRTS;
} else {
msvrr = CyMSVR1;
msvrd = CyMSVR2;
rts = CyRTS;
dtr = CyDTR;
}
if (set & TIOCM_RTS) {
cyy_writeb(info, CyCAR, channel);
cyy_writeb(info, msvrr, rts);
}
if (clear & TIOCM_RTS) {
cyy_writeb(info, CyCAR, channel);
cyy_writeb(info, msvrr, ~rts);
}
if (set & TIOCM_DTR) {
cyy_writeb(info, CyCAR, channel);
cyy_writeb(info, msvrd, dtr);
#ifdef CY_DEBUG_DTR
printk(KERN_DEBUG "cyc:set_modem_info raising DTR\n");
printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
cyy_readb(info, CyMSVR1),
cyy_readb(info, CyMSVR2));
#endif
}
if (clear & TIOCM_DTR) {
cyy_writeb(info, CyCAR, channel);
cyy_writeb(info, msvrd, ~dtr);
#ifdef CY_DEBUG_DTR
printk(KERN_DEBUG "cyc:set_modem_info dropping DTR\n");
printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
cyy_readb(info, CyMSVR1),
cyy_readb(info, CyMSVR2));
#endif
}
}
/***********************************************************/
/********* End of block of Cyclom-Y specific code **********/
/******** Start of block of Cyclades-Z specific code *******/
/***********************************************************/
static int
cyz_fetch_msg(struct cyclades_card *cinfo,
__u32 *channel, __u8 *cmd, __u32 *param)
{
struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
unsigned long loc_doorbell;
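/* A non-zero local doorbell means the firmware has posted a message: the
low byte carries the command code, channel and parameter are read from
BOARD_CTRL, and writing all-ones acknowledges the bell. */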
loc_doorbell = readl(&cinfo->ctl_addr.p9060->loc_doorbell);
if (loc_doorbell) {
*cmd = (char)(0xff & loc_doorbell);
*channel = readl(&board_ctrl->fwcmd_channel);
*param = (__u32) readl(&board_ctrl->fwcmd_param);
cy_writel(&cinfo->ctl_addr.p9060->loc_doorbell, 0xffffffff);
return 1;
}
return 0;
} /* cyz_fetch_msg */
static int
cyz_issue_cmd(struct cyclades_card *cinfo,
__u32 channel, __u8 cmd, __u32 param)
{
struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
__u32 __iomem *pci_doorbell;
unsigned int index;
if (!cyz_is_loaded(cinfo))
return -1;
index = 0;
pci_doorbell = &cinfo->ctl_addr.p9060->pci_doorbell;
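/* Wait up to 1000 * 50us for the firmware to consume the previous
command; if the doorbell never clears, return the still-pending
command byte as a non-zero error code. */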
while ((readl(pci_doorbell) & 0xff) != 0) {
if (index++ == 1000)
return (int)(readl(pci_doorbell) & 0xff);
udelay(50L);
}
cy_writel(&board_ctrl->hcmd_channel, channel);
cy_writel(&board_ctrl->hcmd_param, param);
cy_writel(pci_doorbell, (long)cmd);
return 0;
} /* cyz_issue_cmd */
static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty)
{
struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
struct cyclades_card *cinfo = info->card;
unsigned int char_count;
int len;
#ifdef BLOCKMOVE
unsigned char *buf;
#else
char data;
#endif
__u32 rx_put, rx_get, new_rx_get, rx_bufsize, rx_bufaddr;
rx_get = new_rx_get = readl(&buf_ctrl->rx_get);
rx_put = readl(&buf_ctrl->rx_put);
rx_bufsize = readl(&buf_ctrl->rx_bufsize);
rx_bufaddr = readl(&buf_ctrl->rx_bufaddr);
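/* rx_put is advanced on the board side as characters arrive, rx_get by
the driver as it drains them; their difference (modulo rx_bufsize) is
the number of bytes waiting in the on-board ring. */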
if (rx_put >= rx_get)
char_count = rx_put - rx_get;
else
char_count = rx_put - rx_get + rx_bufsize;
if (char_count) {
#ifdef CY_ENABLE_MONITORING
info->mon.int_count++;
info->mon.char_count += char_count;
if (char_count > info->mon.char_max)
info->mon.char_max = char_count;
info->mon.char_last = char_count;
#endif
if (tty == NULL) {
/* flush received characters */
new_rx_get = (new_rx_get + char_count) &
(rx_bufsize - 1);
info->rflush_count++;
} else {
#ifdef BLOCKMOVE
/* we'd like to use memcpy(t, f, n) and memset(s, c, count)
for performance, but because of buffer boundaries, there
may be several steps to the operation */
while (1) {
len = tty_prepare_flip_string(tty, &buf,
char_count);
if (!len)
break;
len = min_t(unsigned int, min(len, char_count),
rx_bufsize - new_rx_get);
memcpy_fromio(buf, cinfo->base_addr +
rx_bufaddr + new_rx_get, len);
new_rx_get = (new_rx_get + len) &
(rx_bufsize - 1);
char_count -= len;
info->icount.rx += len;
info->idle_stats.recv_bytes += len;
}
#else
len = tty_buffer_request_room(tty, char_count);
while (len--) {
data = readb(cinfo->base_addr + rx_bufaddr +
new_rx_get);
new_rx_get = (new_rx_get + 1) &
(rx_bufsize - 1);
tty_insert_flip_char(tty, data, TTY_NORMAL);
info->idle_stats.recv_bytes++;
info->icount.rx++;
}
#endif
#ifdef CONFIG_CYZ_INTR
/* Recalculate the number of chars in the RX buffer and issue
a cmd in case it's higher than the RX high water mark */
rx_put = readl(&buf_ctrl->rx_put);
if (rx_put >= rx_get)
char_count = rx_put - rx_get;
else
char_count = rx_put - rx_get + rx_bufsize;
if (char_count >= readl(&buf_ctrl->rx_threshold) &&
!timer_pending(&cyz_rx_full_timer[
info->line]))
mod_timer(&cyz_rx_full_timer[info->line],
jiffies + 1);
#endif
info->idle_stats.recv_idle = jiffies;
tty_schedule_flip(tty);
}
/* Update rx_get */
cy_writel(&buf_ctrl->rx_get, new_rx_get);
}
}
static void cyz_handle_tx(struct cyclades_port *info, struct tty_struct *tty)
{
struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
struct cyclades_card *cinfo = info->card;
u8 data;
unsigned int char_count;
#ifdef BLOCKMOVE
int small_count;
#endif
__u32 tx_put, tx_get, tx_bufsize, tx_bufaddr;
if (info->xmit_cnt <= 0) /* Nothing to transmit */
return;
tx_get = readl(&buf_ctrl->tx_get);
tx_put = readl(&buf_ctrl->tx_put);
tx_bufsize = readl(&buf_ctrl->tx_bufsize);
tx_bufaddr = readl(&buf_ctrl->tx_bufaddr);
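/* Compute the free space in the on-board TX ring; one slot is kept
unused so that tx_put == tx_get unambiguously means "empty". */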
if (tx_put >= tx_get)
char_count = tx_get - tx_put - 1 + tx_bufsize;
else
char_count = tx_get - tx_put - 1;
if (char_count) {
if (tty == NULL)
goto ztxdone;
if (info->x_char) { /* send special char */
data = info->x_char;
cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
tx_put = (tx_put + 1) & (tx_bufsize - 1);
info->x_char = 0;
char_count--;
info->icount.tx++;
}
#ifdef BLOCKMOVE
while (0 < (small_count = min_t(unsigned int,
tx_bufsize - tx_put, min_t(unsigned int,
(SERIAL_XMIT_SIZE - info->xmit_tail),
min_t(unsigned int, info->xmit_cnt,
char_count))))) {
memcpy_toio((char *)(cinfo->base_addr + tx_bufaddr +
tx_put),
&info->port.xmit_buf[info->xmit_tail],
small_count);
tx_put = (tx_put + small_count) & (tx_bufsize - 1);
char_count -= small_count;
info->icount.tx += small_count;
info->xmit_cnt -= small_count;
info->xmit_tail = (info->xmit_tail + small_count) &
(SERIAL_XMIT_SIZE - 1);
}
#else
while (info->xmit_cnt && char_count) {
data = info->port.xmit_buf[info->xmit_tail];
info->xmit_cnt--;
info->xmit_tail = (info->xmit_tail + 1) &
(SERIAL_XMIT_SIZE - 1);
cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
tx_put = (tx_put + 1) & (tx_bufsize - 1);
char_count--;
info->icount.tx++;
}
#endif
tty_wakeup(tty);
ztxdone:
/* Update tx_put */
cy_writel(&buf_ctrl->tx_put, tx_put);
}
}
static void cyz_handle_cmd(struct cyclades_card *cinfo)
{
struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
struct tty_struct *tty;
struct cyclades_port *info;
__u32 channel, param, fw_ver;
__u8 cmd;
int special_count;
int delta_count;
fw_ver = readl(&board_ctrl->fw_version);
while (cyz_fetch_msg(cinfo, &channel, &cmd, &param) == 1) {
special_count = 0;
delta_count = 0;
info = &cinfo->ports[channel];
tty = tty_port_tty_get(&info->port);
if (tty == NULL)
continue;
switch (cmd) {
case C_CM_PR_ERROR:
tty_insert_flip_char(tty, 0, TTY_PARITY);
info->icount.rx++;
special_count++;
break;
case C_CM_FR_ERROR:
tty_insert_flip_char(tty, 0, TTY_FRAME);
info->icount.rx++;
special_count++;
break;
case C_CM_RXBRK:
tty_insert_flip_char(tty, 0, TTY_BREAK);
info->icount.rx++;
special_count++;
break;
case C_CM_MDCD:
info->icount.dcd++;
delta_count++;
if (info->port.flags & ASYNC_CHECK_CD) {
u32 dcd = fw_ver > 241 ? param :
readl(&info->u.cyz.ch_ctrl->rs_status);
if (dcd & C_RS_DCD)
wake_up_interruptible(&info->port.open_wait);
else
tty_hangup(tty);
}
break;
case C_CM_MCTS:
info->icount.cts++;
delta_count++;
break;
case C_CM_MRI:
info->icount.rng++;
delta_count++;
break;
case C_CM_MDSR:
info->icount.dsr++;
delta_count++;
break;
#ifdef Z_WAKE
case C_CM_IOCTLW:
complete(&info->shutdown_wait);
break;
#endif
#ifdef CONFIG_CYZ_INTR
case C_CM_RXHIWM:
case C_CM_RXNNDT:
case C_CM_INTBACK2:
/* Reception Interrupt */
#ifdef CY_DEBUG_INTERRUPTS
printk(KERN_DEBUG "cyz_interrupt: rcvd intr, card %d, "
"port %ld\n", info->card, channel);
#endif
cyz_handle_rx(info, tty);
break;
case C_CM_TXBEMPTY:
case C_CM_TXLOWWM:
case C_CM_INTBACK:
/* Transmission Interrupt */
#ifdef CY_DEBUG_INTERRUPTS
printk(KERN_DEBUG "cyz_interrupt: xmit intr, card %d, "
"port %ld\n", info->card, channel);
#endif
cyz_handle_tx(info, tty);
break;
#endif /* CONFIG_CYZ_INTR */
case C_CM_FATAL:
/* should do something with this !!! */
break;
default:
break;
}
if (delta_count)
wake_up_interruptible(&info->port.delta_msr_wait);
if (special_count)
tty_schedule_flip(tty);
tty_kref_put(tty);
}
}
#ifdef CONFIG_CYZ_INTR
static irqreturn_t cyz_interrupt(int irq, void *dev_id)
{
struct cyclades_card *cinfo = dev_id;
if (unlikely(!cyz_is_loaded(cinfo))) {
#ifdef CY_DEBUG_INTERRUPTS
printk(KERN_DEBUG "cyz_interrupt: board not yet loaded "
"(IRQ%d).\n", irq);
#endif
return IRQ_NONE;
}
/* Handle the interrupts */
cyz_handle_cmd(cinfo);
return IRQ_HANDLED;
} /* cyz_interrupt */
static void cyz_rx_restart(unsigned long arg)
{
struct cyclades_port *info = (struct cyclades_port *)arg;
struct cyclades_card *card = info->card;
int retval;
__u32 channel = info->line - card->first_line;
unsigned long flags;
spin_lock_irqsave(&card->card_lock, flags);
retval = cyz_issue_cmd(card, channel, C_CM_INTBACK2, 0L);
if (retval != 0) {
printk(KERN_ERR "cyc:cyz_rx_restart retval on ttyC%d was %x\n",
info->line, retval);
}
spin_unlock_irqrestore(&card->card_lock, flags);
}
#else /* CONFIG_CYZ_INTR */
static void cyz_poll(unsigned long arg)
{
struct cyclades_card *cinfo;
struct cyclades_port *info;
unsigned long expires = jiffies + HZ;
unsigned int port, card;
for (card = 0; card < NR_CARDS; card++) {
cinfo = &cy_card[card];
if (!cy_is_Z(cinfo))
continue;
if (!cyz_is_loaded(cinfo))
continue;
/* Skip first polling cycle to avoid racing conditions with the FW */
if (!cinfo->intr_enabled) {
cinfo->intr_enabled = 1;
continue;
}
cyz_handle_cmd(cinfo);
for (port = 0; port < cinfo->nports; port++) {
struct tty_struct *tty;
info = &cinfo->ports[port];
tty = tty_port_tty_get(&info->port);
/* OK to pass NULL to the handle functions below.
They need to drop the data in that case. */
if (!info->throttle)
cyz_handle_rx(info, tty);
cyz_handle_tx(info, tty);
tty_kref_put(tty);
}
/* poll every 'cyz_polling_cycle' period */
expires = jiffies + cyz_polling_cycle;
}
mod_timer(&cyz_timerlist, expires);
} /* cyz_poll */
#endif /* CONFIG_CYZ_INTR */
/********** End of block of Cyclades-Z specific code *********/
/***********************************************************/
/* This is called whenever a port becomes active;
interrupts are enabled and DTR & RTS are turned on.
*/
static int cy_startup(struct cyclades_port *info, struct tty_struct *tty)
{
struct cyclades_card *card;
unsigned long flags;
int retval = 0;
int channel;
unsigned long page;
card = info->card;
channel = info->line - card->first_line;
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
spin_lock_irqsave(&card->card_lock, flags);
if (info->port.flags & ASYNC_INITIALIZED)
goto errout;
if (!info->type) {
set_bit(TTY_IO_ERROR, &tty->flags);
goto errout;
}
if (info->port.xmit_buf)
free_page(page);
else
info->port.xmit_buf = (unsigned char *)page;
spin_unlock_irqrestore(&card->card_lock, flags);
cy_set_line_char(info, tty);
if (!cy_is_Z(card)) {
channel &= 0x03;
spin_lock_irqsave(&card->card_lock, flags);
cyy_writeb(info, CyCAR, channel);
cyy_writeb(info, CyRTPR,
(info->default_timeout ? info->default_timeout : 0x02));
/* 10ms rx timeout */
cyy_issue_cmd(info, CyCHAN_CTL | CyENB_RCVR | CyENB_XMTR);
cyy_change_rts_dtr(info, TIOCM_RTS | TIOCM_DTR, 0);
cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyRxData);
} else {
struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
if (!cyz_is_loaded(card))
return -ENODEV;
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc startup Z card %d, channel %d, "
"base_addr %p\n", card, channel, card->base_addr);
#endif
spin_lock_irqsave(&card->card_lock, flags);
cy_writel(&ch_ctrl->op_mode, C_CH_ENABLE);
#ifdef Z_WAKE
#ifdef CONFIG_CYZ_INTR
cy_writel(&ch_ctrl->intr_enable,
C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM |
C_IN_RXNNDT | C_IN_IOCTLW | C_IN_MDCD);
#else
cy_writel(&ch_ctrl->intr_enable,
C_IN_IOCTLW | C_IN_MDCD);
#endif /* CONFIG_CYZ_INTR */
#else
#ifdef CONFIG_CYZ_INTR
cy_writel(&ch_ctrl->intr_enable,
C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM |
C_IN_RXNNDT | C_IN_MDCD);
#else
cy_writel(&ch_ctrl->intr_enable, C_IN_MDCD);
#endif /* CONFIG_CYZ_INTR */
#endif /* Z_WAKE */
retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L);
if (retval != 0) {
printk(KERN_ERR "cyc:startup(1) retval on ttyC%d was "
"%x\n", info->line, retval);
}
/* Flush RX buffers before raising DTR and RTS */
retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_RX, 0L);
if (retval != 0) {
printk(KERN_ERR "cyc:startup(2) retval on ttyC%d was "
"%x\n", info->line, retval);
}
/* set timeout !!! */
/* set RTS and DTR !!! */
tty_port_raise_dtr_rts(&info->port);
/* enable send, recv, modem !!! */
}
info->port.flags |= ASYNC_INITIALIZED;
clear_bit(TTY_IO_ERROR, &tty->flags);
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
info->breakon = info->breakoff = 0;
memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats));
info->idle_stats.in_use =
info->idle_stats.recv_idle =
info->idle_stats.xmit_idle = jiffies;
spin_unlock_irqrestore(&card->card_lock, flags);
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc startup done\n");
#endif
return 0;
errout:
spin_unlock_irqrestore(&card->card_lock, flags);
free_page(page);
return retval;
} /* startup */
static void start_xmit(struct cyclades_port *info)
{
struct cyclades_card *card = info->card;
unsigned long flags;
int channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
spin_lock_irqsave(&card->card_lock, flags);
cyy_writeb(info, CyCAR, channel & 0x03);
cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy);
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
#ifdef CONFIG_CYZ_INTR
int retval;
spin_lock_irqsave(&card->card_lock, flags);
retval = cyz_issue_cmd(card, channel, C_CM_INTBACK, 0L);
if (retval != 0) {
printk(KERN_ERR "cyc:start_xmit retval on ttyC%d was "
"%x\n", info->line, retval);
}
spin_unlock_irqrestore(&card->card_lock, flags);
#else /* CONFIG_CYZ_INTR */
/* Don't have to do anything at this time */
#endif /* CONFIG_CYZ_INTR */
}
} /* start_xmit */
/*
* This routine shuts down a serial port; interrupts are disabled,
* and DTR is dropped if the hangup on close termio flag is on.
*/
static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
{
struct cyclades_card *card;
unsigned long flags;
if (!(info->port.flags & ASYNC_INITIALIZED))
return;
card = info->card;
if (!cy_is_Z(card)) {
spin_lock_irqsave(&card->card_lock, flags);
/* Clear delta_msr_wait queue to avoid mem leaks. */
wake_up_interruptible(&info->port.delta_msr_wait);
if (info->port.xmit_buf) {
unsigned char *temp;
temp = info->port.xmit_buf;
info->port.xmit_buf = NULL;
free_page((unsigned long)temp);
}
if (tty->termios->c_cflag & HUPCL)
cyy_change_rts_dtr(info, 0, TIOCM_RTS | TIOCM_DTR);
cyy_issue_cmd(info, CyCHAN_CTL | CyDIS_RCVR);
/* it may be appropriate to clear _XMIT at
some later date (after testing)!!! */
set_bit(TTY_IO_ERROR, &tty->flags);
info->port.flags &= ~ASYNC_INITIALIZED;
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
#ifdef CY_DEBUG_OPEN
int channel = info->line - card->first_line;
printk(KERN_DEBUG "cyc shutdown Z card %d, channel %d, "
"base_addr %p\n", card, channel, card->base_addr);
#endif
if (!cyz_is_loaded(card))
return;
spin_lock_irqsave(&card->card_lock, flags);
if (info->port.xmit_buf) {
unsigned char *temp;
temp = info->port.xmit_buf;
info->port.xmit_buf = NULL;
free_page((unsigned long)temp);
}
if (tty->termios->c_cflag & HUPCL)
tty_port_lower_dtr_rts(&info->port);
set_bit(TTY_IO_ERROR, &tty->flags);
info->port.flags &= ~ASYNC_INITIALIZED;
spin_unlock_irqrestore(&card->card_lock, flags);
}
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc shutdown done\n");
#endif
} /* shutdown */
/*
* ------------------------------------------------------------
* cy_open() and friends
* ------------------------------------------------------------
*/
/*
* This routine is called whenever a serial port is opened. It
* performs the serial-specific initialization for the tty structure.
*/
static int cy_open(struct tty_struct *tty, struct file *filp)
{
struct cyclades_port *info;
unsigned int i, line;
int retval;
line = tty->index;
if (tty->index < 0 || NR_PORTS <= line)
return -ENODEV;
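/* Find the card whose range [first_line, first_line + nports) contains
this line. */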
for (i = 0; i < NR_CARDS; i++)
if (line < cy_card[i].first_line + cy_card[i].nports &&
line >= cy_card[i].first_line)
break;
if (i >= NR_CARDS)
return -ENODEV;
info = &cy_card[i].ports[line - cy_card[i].first_line];
if (info->line < 0)
return -ENODEV;
/* If the card's firmware hasn't been loaded,
treat it as absent from the system. This
will make the user pay attention.
*/
if (cy_is_Z(info->card)) {
struct cyclades_card *cinfo = info->card;
struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS;
if (!cyz_is_loaded(cinfo)) {
if (cinfo->hw_ver == ZE_V1 && cyz_fpga_loaded(cinfo) &&
readl(&firm_id->signature) ==
ZFIRM_HLT) {
printk(KERN_ERR "cyc:Cyclades-Z Error: you "
"need an external power supply for "
"this number of ports.\nFirmware "
"halted.\n");
} else {
printk(KERN_ERR "cyc:Cyclades-Z firmware not "
"yet loaded\n");
}
return -ENODEV;
}
#ifdef CONFIG_CYZ_INTR
else {
/* In case this Z board is operating in interrupt mode, its
interrupts should be enabled as soon as the first open
happens to one of its ports. */
if (!cinfo->intr_enabled) {
u16 intr;
/* Enable interrupts on the PLX chip */
intr = readw(&cinfo->ctl_addr.p9060->
intr_ctrl_stat) | 0x0900;
cy_writew(&cinfo->ctl_addr.p9060->
intr_ctrl_stat, intr);
/* Enable interrupts on the FW */
retval = cyz_issue_cmd(cinfo, 0,
C_CM_IRQ_ENBL, 0L);
if (retval != 0) {
printk(KERN_ERR "cyc:IRQ enable retval "
"was %x\n", retval);
}
cinfo->intr_enabled = 1;
}
}
#endif /* CONFIG_CYZ_INTR */
/* Make sure this Z port really exists in hardware */
if (info->line > (cinfo->first_line + cinfo->nports - 1))
return -ENODEV;
}
#ifdef CY_DEBUG_OTHER
printk(KERN_DEBUG "cyc:cy_open ttyC%d\n", info->line);
#endif
tty->driver_data = info;
if (serial_paranoia_check(info, tty->name, "cy_open"))
return -ENODEV;
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
info->port.count);
#endif
info->port.count++;
#ifdef CY_DEBUG_COUNT
printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
current->pid, info->port.count);
#endif
/*
* If the port is the middle of closing, bail out now
*/
if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
wait_event_interruptible_tty(info->port.close_wait,
!(info->port.flags & ASYNC_CLOSING));
return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS;
}
/*
* Start up serial port
*/
retval = cy_startup(info, tty);
if (retval)
return retval;
retval = tty_port_block_til_ready(&info->port, tty, filp);
if (retval) {
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc:cy_open returning after block_til_ready "
"with %d\n", retval);
#endif
return retval;
}
info->throttle = 0;
tty_port_tty_set(&info->port, tty);
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc:cy_open done\n");
#endif
return 0;
} /* cy_open */
/*
* cy_wait_until_sent() --- wait until the transmitter is empty
*/
static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct cyclades_card *card;
struct cyclades_port *info = tty->driver_data;
unsigned long orig_jiffies;
int char_time;
if (serial_paranoia_check(info, tty->name, "cy_wait_until_sent"))
return;
if (info->xmit_fifo_size == 0)
return; /* Just in case.... */
orig_jiffies = jiffies;
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
* interval should also be less than the timeout.
*
* Note: we have to use pretty tight timings here to satisfy
* the NIST-PCTS.
*/
char_time = (info->timeout - HZ / 50) / info->xmit_fifo_size;
char_time = char_time / 5;
if (char_time <= 0)
char_time = 1;
if (timeout < 0)
timeout = 0;
if (timeout)
char_time = min(char_time, timeout);
/*
* If the transmitter hasn't cleared in twice the approximate
* amount of time to send the entire FIFO, it probably won't
* ever clear. This assumes the UART isn't doing flow
* control, which is currently the case. Hence, if it ever
* takes longer than info->timeout, this is probably due to a
* UART bug of some kind. So, we clamp the timeout parameter at
* 2*info->timeout.
*/
if (!timeout || timeout > 2 * info->timeout)
timeout = 2 * info->timeout;
#ifdef CY_DEBUG_WAIT_UNTIL_SENT
printk(KERN_DEBUG "In cy_wait_until_sent(%d) check=%d, jiff=%lu...",
timeout, char_time, jiffies);
#endif
card = info->card;
if (!cy_is_Z(card)) {
while (cyy_readb(info, CySRER) & CyTxRdy) {
#ifdef CY_DEBUG_WAIT_UNTIL_SENT
printk(KERN_DEBUG "Not clean (jiff=%lu)...", jiffies);
#endif
if (msleep_interruptible(jiffies_to_msecs(char_time)))
break;
if (timeout && time_after(jiffies, orig_jiffies +
timeout))
break;
}
}
/* Run one more char cycle */
msleep_interruptible(jiffies_to_msecs(char_time * 5));
#ifdef CY_DEBUG_WAIT_UNTIL_SENT
printk(KERN_DEBUG "Clean (jiff=%lu)...done\n", jiffies);
#endif
}
static void cy_flush_buffer(struct tty_struct *tty)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
int channel, retval;
unsigned long flags;
#ifdef CY_DEBUG_IO
printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_flush_buffer"))
return;
card = info->card;
channel = info->line - card->first_line;
spin_lock_irqsave(&card->card_lock, flags);
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
if (cy_is_Z(card)) { /* If it is a Z card, flush the on-board
buffers as well */
spin_lock_irqsave(&card->card_lock, flags);
retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L);
if (retval != 0) {
printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d "
"was %x\n", info->line, retval);
}
spin_unlock_irqrestore(&card->card_lock, flags);
}
tty_wakeup(tty);
} /* cy_flush_buffer */
static void cy_do_close(struct tty_port *port)
{
struct cyclades_port *info = container_of(port, struct cyclades_port,
port);
struct cyclades_card *card;
unsigned long flags;
int channel;
card = info->card;
channel = info->line - card->first_line;
spin_lock_irqsave(&card->card_lock, flags);
if (!cy_is_Z(card)) {
/* Stop accepting input */
cyy_writeb(info, CyCAR, channel & 0x03);
cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyRxData);
if (info->port.flags & ASYNC_INITIALIZED) {
/* Waiting for on-board buffers to be empty before
closing the port */
spin_unlock_irqrestore(&card->card_lock, flags);
cy_wait_until_sent(port->tty, info->timeout);
spin_lock_irqsave(&card->card_lock, flags);
}
} else {
#ifdef Z_WAKE
/* Waiting for on-board buffers to be empty before closing
the port */
struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
int retval;
if (readl(&ch_ctrl->flow_status) != C_FS_TXIDLE) {
retval = cyz_issue_cmd(card, channel, C_CM_IOCTLW, 0L);
if (retval != 0) {
printk(KERN_DEBUG "cyc:cy_close retval on "
"ttyC%d was %x\n", info->line, retval);
}
spin_unlock_irqrestore(&card->card_lock, flags);
wait_for_completion_interruptible(&info->shutdown_wait);
spin_lock_irqsave(&card->card_lock, flags);
}
#endif
}
spin_unlock_irqrestore(&card->card_lock, flags);
cy_shutdown(info, port->tty);
}
/*
* This routine is called when a particular tty device is closed.
*/
static void cy_close(struct tty_struct *tty, struct file *filp)
{
struct cyclades_port *info = tty->driver_data;
if (!info || serial_paranoia_check(info, tty->name, "cy_close"))
return;
tty_port_close(&info->port, tty, filp);
} /* cy_close */
/* This routine gets called when tty_write has put something into
* the write_queue. The characters may come from user space or
* kernel space.
*
* This routine will return the number of characters actually
* accepted for writing.
*
* If the port is not already transmitting stuff, start it off by
* enabling interrupts. The interrupt service routine will then
* ensure that the characters are sent.
* If the port is already active, there is no need to kick it.
*
*/
static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
struct cyclades_port *info = tty->driver_data;
unsigned long flags;
int c, ret = 0;
#ifdef CY_DEBUG_IO
printk(KERN_DEBUG "cyc:cy_write ttyC%d\n", info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_write"))
return 0;
if (!info->port.xmit_buf)
return 0;
spin_lock_irqsave(&info->card->card_lock, flags);
while (1) {
c = min(count, (int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1));
c = min(c, (int)(SERIAL_XMIT_SIZE - info->xmit_head));
if (c <= 0)
break;
memcpy(info->port.xmit_buf + info->xmit_head, buf, c);
info->xmit_head = (info->xmit_head + c) &
(SERIAL_XMIT_SIZE - 1);
info->xmit_cnt += c;
buf += c;
count -= c;
ret += c;
}
spin_unlock_irqrestore(&info->card->card_lock, flags);
info->idle_stats.xmit_bytes += ret;
info->idle_stats.xmit_idle = jiffies;
if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped)
start_xmit(info);
return ret;
} /* cy_write */
/*
* This routine is called by the kernel to write a single
* character to the tty device. If the kernel uses this routine,
* it must call the flush_chars() routine (if defined) when it is
* done stuffing characters into the driver. If there is no room
* in the queue, the character is ignored.
*/
static int cy_put_char(struct tty_struct *tty, unsigned char ch)
{
struct cyclades_port *info = tty->driver_data;
unsigned long flags;
#ifdef CY_DEBUG_IO
printk(KERN_DEBUG "cyc:cy_put_char ttyC%d\n", info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_put_char"))
return 0;
if (!info->port.xmit_buf)
return 0;
spin_lock_irqsave(&info->card->card_lock, flags);
if (info->xmit_cnt >= (int)(SERIAL_XMIT_SIZE - 1)) {
spin_unlock_irqrestore(&info->card->card_lock, flags);
return 0;
}
info->port.xmit_buf[info->xmit_head++] = ch;
info->xmit_head &= SERIAL_XMIT_SIZE - 1;
info->xmit_cnt++;
info->idle_stats.xmit_bytes++;
info->idle_stats.xmit_idle = jiffies;
spin_unlock_irqrestore(&info->card->card_lock, flags);
return 1;
} /* cy_put_char */
/*
* This routine is called by the kernel after it has written a
* series of characters to the tty device using put_char().
*/
static void cy_flush_chars(struct tty_struct *tty)
{
struct cyclades_port *info = tty->driver_data;
#ifdef CY_DEBUG_IO
printk(KERN_DEBUG "cyc:cy_flush_chars ttyC%d\n", info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_flush_chars"))
return;
if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
!info->port.xmit_buf)
return;
start_xmit(info);
} /* cy_flush_chars */
/*
* This routine returns the numbers of characters the tty driver
* will accept for queuing to be written. This number is subject
* to change as output buffers get emptied, or if the output flow
* control is activated.
*/
static int cy_write_room(struct tty_struct *tty)
{
struct cyclades_port *info = tty->driver_data;
int ret;
#ifdef CY_DEBUG_IO
printk(KERN_DEBUG "cyc:cy_write_room ttyC%d\n", info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_write_room"))
return 0;
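/* One byte of the circular xmit buffer is kept unused, matching the
limit enforced in cy_write(). */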
ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
if (ret < 0)
ret = 0;
return ret;
} /* cy_write_room */
static int cy_chars_in_buffer(struct tty_struct *tty)
{
struct cyclades_port *info = tty->driver_data;
if (serial_paranoia_check(info, tty->name, "cy_chars_in_buffer"))
return 0;
#ifdef Z_EXT_CHARS_IN_BUFFER
if (!cy_is_Z(info->card)) {
#endif /* Z_EXT_CHARS_IN_BUFFER */
#ifdef CY_DEBUG_IO
printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
info->line, info->xmit_cnt);
#endif
return info->xmit_cnt;
#ifdef Z_EXT_CHARS_IN_BUFFER
} else {
struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
int char_count;
__u32 tx_put, tx_get, tx_bufsize;
tx_get = readl(&buf_ctrl->tx_get);
tx_put = readl(&buf_ctrl->tx_put);
tx_bufsize = readl(&buf_ctrl->tx_bufsize);
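/* Add the bytes still queued in the on-board TX ring (put - get,
modulo the buffer size) to the software count. */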
if (tx_put >= tx_get)
char_count = tx_put - tx_get;
else
char_count = tx_put - tx_get + tx_bufsize;
#ifdef CY_DEBUG_IO
printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
info->line, info->xmit_cnt + char_count);
#endif
return info->xmit_cnt + char_count;
}
#endif /* Z_EXT_CHARS_IN_BUFFER */
} /* cy_chars_in_buffer */
/*
* ------------------------------------------------------------
* cy_ioctl() and friends
* ------------------------------------------------------------
*/
static void cyy_baud_calc(struct cyclades_port *info, __u32 baud)
{
int co, co_val, bpr;
__u32 cy_clock = ((info->chip_rev >= CD1400_REV_J) ? 60000000 :
25000000);
if (baud == 0) {
info->tbpr = info->tco = info->rbpr = info->rco = 0;
return;
}
/* determine which prescaler to use */
for (co = 4, co_val = 2048; co; co--, co_val >>= 2) {
if (cy_clock / co_val / baud > 63)
break;
}
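/* bpr is the bit-rate divisor for the chosen clock option, rounded to
the nearest integer. For example, with the 60MHz clock at 9600 baud
the loop above stops at co = 1 (divide by 32) and bpr works out to
195, i.e. 60000000 / (32 * 195) ~ 9615 baud. */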
bpr = (cy_clock / co_val * 2 / baud + 1) / 2;
if (bpr > 255)
bpr = 255;
info->tbpr = info->rbpr = bpr;
info->tco = info->rco = co;
}
/*
* This routine finds or computes the various line characteristics.
* It used to be called config_setup
*/
static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty)
{
struct cyclades_card *card;
unsigned long flags;
int channel;
unsigned cflag, iflag;
int baud, baud_rate = 0;
int i;
if (!tty->termios) /* XXX can this happen at all? */
return;
if (info->line == -1)
return;
cflag = tty->termios->c_cflag;
iflag = tty->termios->c_iflag;
/*
* Set up the tty->alt_speed kludge
*/
if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
tty->alt_speed = 57600;
if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
tty->alt_speed = 115200;
if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
tty->alt_speed = 230400;
if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
tty->alt_speed = 460800;
card = info->card;
channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
u32 cflags;
/* baud rate */
baud = tty_get_baud_rate(tty);
if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
ASYNC_SPD_CUST) {
if (info->custom_divisor)
baud_rate = info->baud / info->custom_divisor;
else
baud_rate = info->baud;
} else if (baud > CD1400_MAX_SPEED) {
baud = CD1400_MAX_SPEED;
}
/* find the baud index */
for (i = 0; i < 20; i++) {
if (baud == baud_table[i])
break;
}
if (i == 20)
i = 19; /* CD1400_MAX_SPEED */
if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
ASYNC_SPD_CUST) {
cyy_baud_calc(info, baud_rate);
} else {
if (info->chip_rev >= CD1400_REV_J) {
/* It is a CD1400 rev. J or later */
info->tbpr = baud_bpr_60[i]; /* Tx BPR */
info->tco = baud_co_60[i]; /* Tx CO */
info->rbpr = baud_bpr_60[i]; /* Rx BPR */
info->rco = baud_co_60[i]; /* Rx CO */
} else {
info->tbpr = baud_bpr_25[i]; /* Tx BPR */
info->tco = baud_co_25[i]; /* Tx CO */
info->rbpr = baud_bpr_25[i]; /* Rx BPR */
info->rco = baud_co_25[i]; /* Rx CO */
}
}
if (baud_table[i] == 134) {
/* get it right for 134.5 baud */
info->timeout = (info->xmit_fifo_size * HZ * 30 / 269) +
2;
} else if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
ASYNC_SPD_CUST) {
info->timeout = (info->xmit_fifo_size * HZ * 15 /
baud_rate) + 2;
} else if (baud_table[i]) {
info->timeout = (info->xmit_fifo_size * HZ * 15 /
baud_table[i]) + 2;
/* this needs to be propagated into the card info */
} else {
info->timeout = 0;
}
/* By tradition (is it a standard?) a baud rate of zero
implies the line should be/has been closed. A bit
later in this routine such a test is performed. */
/* byte size and parity */
info->cor5 = 0;
info->cor4 = 0;
/* receive threshold */
info->cor3 = (info->default_threshold ?
info->default_threshold : baud_cor3[i]);
info->cor2 = CyETC;
switch (cflag & CSIZE) {
case CS5:
info->cor1 = Cy_5_BITS;
break;
case CS6:
info->cor1 = Cy_6_BITS;
break;
case CS7:
info->cor1 = Cy_7_BITS;
break;
case CS8:
info->cor1 = Cy_8_BITS;
break;
}
if (cflag & CSTOPB)
info->cor1 |= Cy_2_STOP;
if (cflag & PARENB) {
if (cflag & PARODD)
info->cor1 |= CyPARITY_O;
else
info->cor1 |= CyPARITY_E;
} else
info->cor1 |= CyPARITY_NONE;
/* CTS flow control flag */
if (cflag & CRTSCTS) {
info->port.flags |= ASYNC_CTS_FLOW;
info->cor2 |= CyCtsAE;
} else {
info->port.flags &= ~ASYNC_CTS_FLOW;
info->cor2 &= ~CyCtsAE;
}
if (cflag & CLOCAL)
info->port.flags &= ~ASYNC_CHECK_CD;
else
info->port.flags |= ASYNC_CHECK_CD;
/***********************************************
The hardware option, CyRtsAO, presents RTS when
the chip has characters to send. Since most modems
use RTS as reverse (inbound) flow control, this
option is not used. If inbound flow control is
necessary, DTR can be programmed to provide the
appropriate signals for use with a non-standard
cable. Contact Marcio Saito for details.
***********************************************/
channel &= 0x03;
spin_lock_irqsave(&card->card_lock, flags);
cyy_writeb(info, CyCAR, channel);
/* tx and rx baud rate */
cyy_writeb(info, CyTCOR, info->tco);
cyy_writeb(info, CyTBPR, info->tbpr);
cyy_writeb(info, CyRCOR, info->rco);
cyy_writeb(info, CyRBPR, info->rbpr);
/* set line characteristics according configuration */
cyy_writeb(info, CySCHR1, START_CHAR(tty));
cyy_writeb(info, CySCHR2, STOP_CHAR(tty));
cyy_writeb(info, CyCOR1, info->cor1);
cyy_writeb(info, CyCOR2, info->cor2);
cyy_writeb(info, CyCOR3, info->cor3);
cyy_writeb(info, CyCOR4, info->cor4);
cyy_writeb(info, CyCOR5, info->cor5);
cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch |
CyCOR3ch);
/* !!! Is this needed? */
cyy_writeb(info, CyCAR, channel);
cyy_writeb(info, CyRTPR,
(info->default_timeout ? info->default_timeout : 0x02));
/* 10ms rx timeout */
cflags = CyCTS;
if (!C_CLOCAL(tty))
cflags |= CyDSR | CyRI | CyDCD;
/* without modem intr */
cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyMdmCh);
/* act on 1->0 modem transitions */
if ((cflag & CRTSCTS) && info->rflow)
cyy_writeb(info, CyMCOR1, cflags | rflow_thr[i]);
else
cyy_writeb(info, CyMCOR1, cflags);
/* act on 0->1 modem transitions */
cyy_writeb(info, CyMCOR2, cflags);
if (i == 0) /* baud rate is zero, turn off line */
cyy_change_rts_dtr(info, 0, TIOCM_DTR);
else
cyy_change_rts_dtr(info, TIOCM_DTR, 0);
clear_bit(TTY_IO_ERROR, &tty->flags);
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
__u32 sw_flow;
int retval;
if (!cyz_is_loaded(card))
return;
/* baud rate */
baud = tty_get_baud_rate(tty);
if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
ASYNC_SPD_CUST) {
if (info->custom_divisor)
baud_rate = info->baud / info->custom_divisor;
else
baud_rate = info->baud;
} else if (baud > CYZ_MAX_SPEED) {
baud = CYZ_MAX_SPEED;
}
cy_writel(&ch_ctrl->comm_baud, baud);
if (baud == 134) {
/* get it right for 134.5 baud */
info->timeout = (info->xmit_fifo_size * HZ * 30 / 269) +
2;
} else if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
ASYNC_SPD_CUST) {
info->timeout = (info->xmit_fifo_size * HZ * 15 /
baud_rate) + 2;
} else if (baud) {
info->timeout = (info->xmit_fifo_size * HZ * 15 /
baud) + 2;
/* this needs to be propagated into the card info */
} else {
info->timeout = 0;
}
/* byte size and parity */
switch (cflag & CSIZE) {
case CS5:
cy_writel(&ch_ctrl->comm_data_l, C_DL_CS5);
break;
case CS6:
cy_writel(&ch_ctrl->comm_data_l, C_DL_CS6);
break;
case CS7:
cy_writel(&ch_ctrl->comm_data_l, C_DL_CS7);
break;
case CS8:
cy_writel(&ch_ctrl->comm_data_l, C_DL_CS8);
break;
}
if (cflag & CSTOPB) {
cy_writel(&ch_ctrl->comm_data_l,
readl(&ch_ctrl->comm_data_l) | C_DL_2STOP);
} else {
cy_writel(&ch_ctrl->comm_data_l,
readl(&ch_ctrl->comm_data_l) | C_DL_1STOP);
}
if (cflag & PARENB) {
if (cflag & PARODD)
cy_writel(&ch_ctrl->comm_parity, C_PR_ODD);
else
cy_writel(&ch_ctrl->comm_parity, C_PR_EVEN);
} else
cy_writel(&ch_ctrl->comm_parity, C_PR_NONE);
/* CTS flow control flag */
if (cflag & CRTSCTS) {
cy_writel(&ch_ctrl->hw_flow,
readl(&ch_ctrl->hw_flow) | C_RS_CTS | C_RS_RTS);
} else {
cy_writel(&ch_ctrl->hw_flow, readl(&ch_ctrl->hw_flow) &
~(C_RS_CTS | C_RS_RTS));
}
/* As the HW flow control is done in firmware, the driver
doesn't need to care about it */
info->port.flags &= ~ASYNC_CTS_FLOW;
/* XON/XOFF/XANY flow control flags */
sw_flow = 0;
if (iflag & IXON) {
sw_flow |= C_FL_OXX;
if (iflag & IXANY)
sw_flow |= C_FL_OIXANY;
}
cy_writel(&ch_ctrl->sw_flow, sw_flow);
retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L);
if (retval != 0) {
printk(KERN_ERR "cyc:set_line_char retval on ttyC%d "
"was %x\n", info->line, retval);
}
/* CD sensitivity */
if (cflag & CLOCAL)
info->port.flags &= ~ASYNC_CHECK_CD;
else
info->port.flags |= ASYNC_CHECK_CD;
if (baud == 0) { /* baud rate is zero, turn off line */
cy_writel(&ch_ctrl->rs_control,
readl(&ch_ctrl->rs_control) & ~C_RS_DTR);
#ifdef CY_DEBUG_DTR
printk(KERN_DEBUG "cyc:set_line_char dropping Z DTR\n");
#endif
} else {
cy_writel(&ch_ctrl->rs_control,
readl(&ch_ctrl->rs_control) | C_RS_DTR);
#ifdef CY_DEBUG_DTR
printk(KERN_DEBUG "cyc:set_line_char raising Z DTR\n");
#endif
}
retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L);
if (retval != 0) {
printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d "
"was %x\n", info->line, retval);
}
clear_bit(TTY_IO_ERROR, &tty->flags);
}
} /* set_line_char */
static int cy_get_serial_info(struct cyclades_port *info,
struct serial_struct __user *retinfo)
{
struct cyclades_card *cinfo = info->card;
struct serial_struct tmp = {
.type = info->type,
.line = info->line,
.port = (info->card - cy_card) * 0x100 + info->line -
cinfo->first_line,
.irq = cinfo->irq,
.flags = info->port.flags,
.close_delay = info->port.close_delay,
.closing_wait = info->port.closing_wait,
.baud_base = info->baud,
.custom_divisor = info->custom_divisor,
.hub6 = 0, /*!!! */
};
return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
}
static int
cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
struct serial_struct __user *new_info)
{
struct serial_struct new_serial;
int ret;
if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
return -EFAULT;
mutex_lock(&info->port.mutex);
if (!capable(CAP_SYS_ADMIN)) {
if (new_serial.close_delay != info->port.close_delay ||
new_serial.baud_base != info->baud ||
(new_serial.flags & ASYNC_FLAGS &
~ASYNC_USR_MASK) !=
(info->port.flags & ASYNC_FLAGS & ~ASYNC_USR_MASK))
{
mutex_unlock(&info->port.mutex);
return -EPERM;
}
info->port.flags = (info->port.flags & ~ASYNC_USR_MASK) |
(new_serial.flags & ASYNC_USR_MASK);
info->baud = new_serial.baud_base;
info->custom_divisor = new_serial.custom_divisor;
goto check_and_exit;
}
/*
* OK, past this point, all the error checking has been done.
* At this point, we start making changes.....
*/
info->baud = new_serial.baud_base;
info->custom_divisor = new_serial.custom_divisor;
info->port.flags = (info->port.flags & ~ASYNC_FLAGS) |
(new_serial.flags & ASYNC_FLAGS);
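/* close_delay and closing_wait arrive in hundredths of a second;
convert them to jiffies. */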
info->port.close_delay = new_serial.close_delay * HZ / 100;
info->port.closing_wait = new_serial.closing_wait * HZ / 100;
check_and_exit:
if (info->port.flags & ASYNC_INITIALIZED) {
cy_set_line_char(info, tty);
ret = 0;
} else {
ret = cy_startup(info, tty);
}
mutex_unlock(&info->port.mutex);
return ret;
} /* set_serial_info */
/*
* get_lsr_info - get line status register info
*
* Purpose: Let user call ioctl() to get info when the UART physically
* is emptied. On bus types like RS485, the transmitter must
* release the bus after transmitting. This must be done when
* the transmit shift register is empty, not be done when the
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value)
{
struct cyclades_card *card = info->card;
unsigned int result;
unsigned long flags;
u8 status;
if (!cy_is_Z(card)) {
spin_lock_irqsave(&card->card_lock, flags);
status = cyy_readb(info, CySRER) & (CyTxRdy | CyTxMpty);
spin_unlock_irqrestore(&card->card_lock, flags);
result = (status ? 0 : TIOCSER_TEMT);
} else {
/* Not supported yet */
return -EINVAL;
}
return put_user(result, value);
}
static int cy_tiocmget(struct tty_struct *tty)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
int result;
if (serial_paranoia_check(info, tty->name, __func__))
return -ENODEV;
card = info->card;
if (!cy_is_Z(card)) {
unsigned long flags;
int channel = info->line - card->first_line;
u8 status;
spin_lock_irqsave(&card->card_lock, flags);
cyy_writeb(info, CyCAR, channel & 0x03);
status = cyy_readb(info, CyMSVR1);
status |= cyy_readb(info, CyMSVR2);
spin_unlock_irqrestore(&card->card_lock, flags);
if (info->rtsdtr_inv) {
result = ((status & CyRTS) ? TIOCM_DTR : 0) |
((status & CyDTR) ? TIOCM_RTS : 0);
} else {
result = ((status & CyRTS) ? TIOCM_RTS : 0) |
((status & CyDTR) ? TIOCM_DTR : 0);
}
result |= ((status & CyDCD) ? TIOCM_CAR : 0) |
((status & CyRI) ? TIOCM_RNG : 0) |
((status & CyDSR) ? TIOCM_DSR : 0) |
((status & CyCTS) ? TIOCM_CTS : 0);
} else {
u32 lstatus;
if (!cyz_is_loaded(card)) {
result = -ENODEV;
goto end;
}
lstatus = readl(&info->u.cyz.ch_ctrl->rs_status);
result = ((lstatus & C_RS_RTS) ? TIOCM_RTS : 0) |
((lstatus & C_RS_DTR) ? TIOCM_DTR : 0) |
((lstatus & C_RS_DCD) ? TIOCM_CAR : 0) |
((lstatus & C_RS_RI) ? TIOCM_RNG : 0) |
((lstatus & C_RS_DSR) ? TIOCM_DSR : 0) |
((lstatus & C_RS_CTS) ? TIOCM_CTS : 0);
}
end:
return result;
} /* cy_tiocmget */
static int
cy_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, __func__))
return -ENODEV;
card = info->card;
if (!cy_is_Z(card)) {
spin_lock_irqsave(&card->card_lock, flags);
cyy_change_rts_dtr(info, set, clear);
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
int retval, channel = info->line - card->first_line;
u32 rs;
if (!cyz_is_loaded(card))
return -ENODEV;
spin_lock_irqsave(&card->card_lock, flags);
rs = readl(&ch_ctrl->rs_control);
if (set & TIOCM_RTS)
rs |= C_RS_RTS;
if (clear & TIOCM_RTS)
rs &= ~C_RS_RTS;
if (set & TIOCM_DTR) {
rs |= C_RS_DTR;
#ifdef CY_DEBUG_DTR
printk(KERN_DEBUG "cyc:set_modem_info raising Z DTR\n");
#endif
}
if (clear & TIOCM_DTR) {
rs &= ~C_RS_DTR;
#ifdef CY_DEBUG_DTR
printk(KERN_DEBUG "cyc:set_modem_info clearing "
"Z DTR\n");
#endif
}
cy_writel(&ch_ctrl->rs_control, rs);
retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L);
spin_unlock_irqrestore(&card->card_lock, flags);
if (retval != 0) {
printk(KERN_ERR "cyc:set_modem_info retval on ttyC%d "
"was %x\n", info->line, retval);
}
}
return 0;
}
/*
* cy_break() --- routine which turns the break handling on or off
*/
static int cy_break(struct tty_struct *tty, int break_state)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
unsigned long flags;
int retval = 0;
if (serial_paranoia_check(info, tty->name, "cy_break"))
return -EINVAL;
card = info->card;
spin_lock_irqsave(&card->card_lock, flags);
if (!cy_is_Z(card)) {
/* Let the transmit ISR take care of this (since it
requires stuffing characters into the output stream).
*/
if (break_state == -1) {
if (!info->breakon) {
info->breakon = 1;
if (!info->xmit_cnt) {
spin_unlock_irqrestore(&card->card_lock, flags);
start_xmit(info);
spin_lock_irqsave(&card->card_lock, flags);
}
}
} else {
if (!info->breakoff) {
info->breakoff = 1;
if (!info->xmit_cnt) {
spin_unlock_irqrestore(&card->card_lock, flags);
start_xmit(info);
spin_lock_irqsave(&card->card_lock, flags);
}
}
}
} else {
if (break_state == -1) {
retval = cyz_issue_cmd(card,
info->line - card->first_line,
C_CM_SET_BREAK, 0L);
if (retval != 0) {
printk(KERN_ERR "cyc:cy_break (set) retval on "
"ttyC%d was %x\n", info->line, retval);
}
} else {
retval = cyz_issue_cmd(card,
info->line - card->first_line,
C_CM_CLR_BREAK, 0L);
if (retval != 0) {
printk(KERN_DEBUG "cyc:cy_break (clr) retval "
"on ttyC%d was %x\n", info->line,
retval);
}
}
}
spin_unlock_irqrestore(&card->card_lock, flags);
return retval;
} /* cy_break */
static int set_threshold(struct cyclades_port *info, unsigned long value)
{
struct cyclades_card *card = info->card;
unsigned long flags;
if (!cy_is_Z(card)) {
info->cor3 &= ~CyREC_FIFO;
info->cor3 |= value & CyREC_FIFO;
spin_lock_irqsave(&card->card_lock, flags);
cyy_writeb(info, CyCOR3, info->cor3);
cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR3ch);
spin_unlock_irqrestore(&card->card_lock, flags);
}
return 0;
} /* set_threshold */
static int get_threshold(struct cyclades_port *info,
unsigned long __user *value)
{
struct cyclades_card *card = info->card;
if (!cy_is_Z(card)) {
u8 tmp = cyy_readb(info, CyCOR3) & CyREC_FIFO;
return put_user(tmp, value);
}
return 0;
} /* get_threshold */
static int set_timeout(struct cyclades_port *info, unsigned long value)
{
struct cyclades_card *card = info->card;
unsigned long flags;
if (!cy_is_Z(card)) {
spin_lock_irqsave(&card->card_lock, flags);
cyy_writeb(info, CyRTPR, value & 0xff);
spin_unlock_irqrestore(&card->card_lock, flags);
}
return 0;
} /* set_timeout */
static int get_timeout(struct cyclades_port *info,
unsigned long __user *value)
{
struct cyclades_card *card = info->card;
if (!cy_is_Z(card)) {
u8 tmp = cyy_readb(info, CyRTPR);
return put_user(tmp, value);
}
return 0;
} /* get_timeout */
static int cy_cflags_changed(struct cyclades_port *info, unsigned long arg,
struct cyclades_icount *cprev)
{
struct cyclades_icount cnow;
unsigned long flags;
int ret;
spin_lock_irqsave(&info->card->card_lock, flags);
cnow = info->icount; /* atomic copy */
spin_unlock_irqrestore(&info->card->card_lock, flags);
ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev->cts));
*cprev = cnow;
return ret;
}
/*
* This routine allows the tty driver to implement device-
* specific ioctl's. If the ioctl number passed in cmd is
* not recognized by the driver, it should return ENOIOCTLCMD.
*/
static int
cy_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_icount cnow; /* kernel counter temps */
int ret_val = 0;
unsigned long flags;
void __user *argp = (void __user *)arg;
if (serial_paranoia_check(info, tty->name, "cy_ioctl"))
return -ENODEV;
#ifdef CY_DEBUG_OTHER
printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n",
info->line, cmd, arg);
#endif
switch (cmd) {
case CYGETMON:
if (copy_to_user(argp, &info->mon, sizeof(info->mon))) {
ret_val = -EFAULT;
break;
}
memset(&info->mon, 0, sizeof(info->mon));
break;
case CYGETTHRESH:
ret_val = get_threshold(info, argp);
break;
case CYSETTHRESH:
ret_val = set_threshold(info, arg);
break;
case CYGETDEFTHRESH:
ret_val = put_user(info->default_threshold,
(unsigned long __user *)argp);
break;
case CYSETDEFTHRESH:
info->default_threshold = arg & 0x0f;
break;
case CYGETTIMEOUT:
ret_val = get_timeout(info, argp);
break;
case CYSETTIMEOUT:
ret_val = set_timeout(info, arg);
break;
case CYGETDEFTIMEOUT:
ret_val = put_user(info->default_timeout,
(unsigned long __user *)argp);
break;
case CYSETDEFTIMEOUT:
info->default_timeout = arg & 0xff;
break;
case CYSETRFLOW:
info->rflow = (int)arg;
break;
case CYGETRFLOW:
ret_val = info->rflow;
break;
case CYSETRTSDTR_INV:
info->rtsdtr_inv = (int)arg;
break;
case CYGETRTSDTR_INV:
ret_val = info->rtsdtr_inv;
break;
case CYGETCD1400VER:
ret_val = info->chip_rev;
break;
#ifndef CONFIG_CYZ_INTR
case CYZSETPOLLCYCLE:
cyz_polling_cycle = (arg * HZ) / 1000;
break;
case CYZGETPOLLCYCLE:
ret_val = (cyz_polling_cycle * 1000) / HZ;
break;
#endif /* CONFIG_CYZ_INTR */
case CYSETWAIT:
info->port.closing_wait = (unsigned short)arg * HZ / 100;
break;
case CYGETWAIT:
ret_val = info->port.closing_wait / (HZ / 100);
break;
case TIOCGSERIAL:
ret_val = cy_get_serial_info(info, argp);
break;
case TIOCSSERIAL:
ret_val = cy_set_serial_info(info, tty, argp);
break;
case TIOCSERGETLSR: /* Get line status register */
ret_val = get_lsr_info(info, argp);
break;
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
* (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
* Caller should use TIOCGICOUNT to see which one it was
*/
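/*
 * A minimal user-space sketch (fd is assumed to be an open ttyC device;
 * the variable names are illustrative only):
 *
 *	ioctl(fd, TIOCMIWAIT, TIOCM_CD);	wait for a DCD transition
 *	ioctl(fd, TIOCGICOUNT, &icount);	then see which counter moved
 *
 * where icount is a struct serial_icounter_struct from <linux/serial.h>.
 */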
case TIOCMIWAIT:
spin_lock_irqsave(&info->card->card_lock, flags);
/* note the counters on entry */
cnow = info->icount;
spin_unlock_irqrestore(&info->card->card_lock, flags);
ret_val = wait_event_interruptible(info->port.delta_msr_wait,
cy_cflags_changed(info, arg, &cnow));
break;
/*
 * The counters of input serial line interrupts (DCD,RI,DSR,CTS) are
 * reported through TIOCGICOUNT, which the tty core dispatches to
 * cy_get_icount() below rather than through this switch.
 * NB: both 1->0 and 0->1 transitions are counted except for
 * RI where only 0->1 is counted.
 */
default:
ret_val = -ENOIOCTLCMD;
}
#ifdef CY_DEBUG_OTHER
printk(KERN_DEBUG "cyc:cy_ioctl done\n");
#endif
return ret_val;
} /* cy_ioctl */
static int cy_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *sic)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_icount cnow; /* Used to snapshot */
unsigned long flags;
spin_lock_irqsave(&info->card->card_lock, flags);
cnow = info->icount;
spin_unlock_irqrestore(&info->card->card_lock, flags);
sic->cts = cnow.cts;
sic->dsr = cnow.dsr;
sic->rng = cnow.rng;
sic->dcd = cnow.dcd;
sic->rx = cnow.rx;
sic->tx = cnow.tx;
sic->frame = cnow.frame;
sic->overrun = cnow.overrun;
sic->parity = cnow.parity;
sic->brk = cnow.brk;
sic->buf_overrun = cnow.buf_overrun;
return 0;
}
/*
* This routine allows the tty driver to be notified when
* device's termios settings have changed. Note that a
* well-designed tty driver should be prepared to accept the case
* where old == NULL, and try to do something rational.
*/
static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
struct cyclades_port *info = tty->driver_data;
#ifdef CY_DEBUG_OTHER
printk(KERN_DEBUG "cyc:cy_set_termios ttyC%d\n", info->line);
#endif
cy_set_line_char(info, tty);
if ((old_termios->c_cflag & CRTSCTS) &&
!(tty->termios->c_cflag & CRTSCTS)) {
tty->hw_stopped = 0;
cy_start(tty);
}
#if 0
/*
* No need to wake up processes in open wait, since they
* sample the CLOCAL flag once, and don't recheck it.
* XXX It's not clear whether the current behavior is correct
* or not. Hence, this may change.....
*/
if (!(old_termios->c_cflag & CLOCAL) &&
(tty->termios->c_cflag & CLOCAL))
wake_up_interruptible(&info->port.open_wait);
#endif
} /* cy_set_termios */
/* This function is used to send a high-priority XON/XOFF character to
the device.
*/
static void cy_send_xchar(struct tty_struct *tty, char ch)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
int channel;
if (serial_paranoia_check(info, tty->name, "cy_send_xchar"))
return;
info->x_char = ch;
if (ch)
cy_start(tty);
card = info->card;
channel = info->line - card->first_line;
if (cy_is_Z(card)) {
if (ch == STOP_CHAR(tty))
cyz_issue_cmd(card, channel, C_CM_SENDXOFF, 0L);
else if (ch == START_CHAR(tty))
cyz_issue_cmd(card, channel, C_CM_SENDXON, 0L);
}
}
/* This routine is called by the upper-layer tty layer to signal
that incoming characters should be throttled because the input
buffers are close to full.
*/
static void cy_throttle(struct tty_struct *tty)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
unsigned long flags;
#ifdef CY_DEBUG_THROTTLE
char buf[64];
printk(KERN_DEBUG "cyc:throttle %s: %ld...ttyC%d\n", tty_name(tty, buf),
tty->ldisc.chars_in_buffer(tty), info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_throttle"))
return;
card = info->card;
if (I_IXOFF(tty)) {
if (!cy_is_Z(card))
cy_send_xchar(tty, STOP_CHAR(tty));
else
info->throttle = 1;
}
if (tty->termios->c_cflag & CRTSCTS) {
if (!cy_is_Z(card)) {
spin_lock_irqsave(&card->card_lock, flags);
cyy_change_rts_dtr(info, 0, TIOCM_RTS);
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
info->throttle = 1;
}
}
} /* cy_throttle */
/*
* This routine notifies the tty driver that it should signal
* that characters can now be sent to the tty without fear of
* overrunning the input buffers of the line disciplines.
*/
static void cy_unthrottle(struct tty_struct *tty)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
unsigned long flags;
#ifdef CY_DEBUG_THROTTLE
char buf[64];
printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n",
tty_name(tty, buf), tty_chars_in_buffer(tty), info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_unthrottle"))
return;
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else
cy_send_xchar(tty, START_CHAR(tty));
}
if (tty->termios->c_cflag & CRTSCTS) {
card = info->card;
if (!cy_is_Z(card)) {
spin_lock_irqsave(&card->card_lock, flags);
cyy_change_rts_dtr(info, TIOCM_RTS, 0);
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
info->throttle = 0;
}
}
} /* cy_unthrottle */
/* cy_start and cy_stop provide software output flow control as a
function of XON/XOFF, software CTS, and other such stuff.
*/
static void cy_stop(struct tty_struct *tty)
{
struct cyclades_card *cinfo;
struct cyclades_port *info = tty->driver_data;
int channel;
unsigned long flags;
#ifdef CY_DEBUG_OTHER
printk(KERN_DEBUG "cyc:cy_stop ttyC%d\n", info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_stop"))
return;
cinfo = info->card;
channel = info->line - cinfo->first_line;
if (!cy_is_Z(cinfo)) {
spin_lock_irqsave(&cinfo->card_lock, flags);
cyy_writeb(info, CyCAR, channel & 0x03);
cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy);
spin_unlock_irqrestore(&cinfo->card_lock, flags);
}
} /* cy_stop */
static void cy_start(struct tty_struct *tty)
{
struct cyclades_card *cinfo;
struct cyclades_port *info = tty->driver_data;
int channel;
unsigned long flags;
#ifdef CY_DEBUG_OTHER
printk(KERN_DEBUG "cyc:cy_start ttyC%d\n", info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_start"))
return;
cinfo = info->card;
channel = info->line - cinfo->first_line;
if (!cy_is_Z(cinfo)) {
spin_lock_irqsave(&cinfo->card_lock, flags);
cyy_writeb(info, CyCAR, channel & 0x03);
cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy);
spin_unlock_irqrestore(&cinfo->card_lock, flags);
}
} /* cy_start */
/*
* cy_hangup() --- called by tty_hangup() when a hangup is signaled.
*/
static void cy_hangup(struct tty_struct *tty)
{
struct cyclades_port *info = tty->driver_data;
#ifdef CY_DEBUG_OTHER
printk(KERN_DEBUG "cyc:cy_hangup ttyC%d\n", info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_hangup"))
return;
cy_flush_buffer(tty);
cy_shutdown(info, tty);
tty_port_hangup(&info->port);
} /* cy_hangup */
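/* tty_port_operations helpers: the tty core uses these to sample DCD
 * and to raise or drop DTR+RTS during open, close and hangup. The
 * cyy_* variants poke the CD1400 channel registers under card_lock,
 * while the cyz_* variants go through the firmware channel-control
 * block and an IOCTLM command. */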
static int cyy_carrier_raised(struct tty_port *port)
{
struct cyclades_port *info = container_of(port, struct cyclades_port,
port);
struct cyclades_card *cinfo = info->card;
unsigned long flags;
int channel = info->line - cinfo->first_line;
u32 cd;
spin_lock_irqsave(&cinfo->card_lock, flags);
cyy_writeb(info, CyCAR, channel & 0x03);
cd = cyy_readb(info, CyMSVR1) & CyDCD;
spin_unlock_irqrestore(&cinfo->card_lock, flags);
return cd;
}
static void cyy_dtr_rts(struct tty_port *port, int raise)
{
struct cyclades_port *info = container_of(port, struct cyclades_port,
port);
struct cyclades_card *cinfo = info->card;
unsigned long flags;
spin_lock_irqsave(&cinfo->card_lock, flags);
cyy_change_rts_dtr(info, raise ? TIOCM_RTS | TIOCM_DTR : 0,
raise ? 0 : TIOCM_RTS | TIOCM_DTR);
spin_unlock_irqrestore(&cinfo->card_lock, flags);
}
static int cyz_carrier_raised(struct tty_port *port)
{
struct cyclades_port *info = container_of(port, struct cyclades_port,
port);
return readl(&info->u.cyz.ch_ctrl->rs_status) & C_RS_DCD;
}
static void cyz_dtr_rts(struct tty_port *port, int raise)
{
struct cyclades_port *info = container_of(port, struct cyclades_port,
port);
struct cyclades_card *cinfo = info->card;
struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
int ret, channel = info->line - cinfo->first_line;
u32 rs;
rs = readl(&ch_ctrl->rs_control);
if (raise)
rs |= C_RS_RTS | C_RS_DTR;
else
rs &= ~(C_RS_RTS | C_RS_DTR);
cy_writel(&ch_ctrl->rs_control, rs);
ret = cyz_issue_cmd(cinfo, channel, C_CM_IOCTLM, 0L);
if (ret != 0)
printk(KERN_ERR "%s: retval on ttyC%d was %x\n",
__func__, info->line, ret);
#ifdef CY_DEBUG_DTR
printk(KERN_DEBUG "%s: raising Z DTR\n", __func__);
#endif
}
static const struct tty_port_operations cyy_port_ops = {
.carrier_raised = cyy_carrier_raised,
.dtr_rts = cyy_dtr_rts,
.shutdown = cy_do_close,
};
static const struct tty_port_operations cyz_port_ops = {
.carrier_raised = cyz_carrier_raised,
.dtr_rts = cyz_dtr_rts,
.shutdown = cy_do_close,
};
/*
* ---------------------------------------------------------------------
* cy_init() and friends
*
* cy_init() is called at boot-time to initialize the serial driver.
* ---------------------------------------------------------------------
*/
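/* cy_init_card() allocates the per-port array for a probed card and
 * fills in the chip-specific defaults for every channel: firmware
 * channel/buffer control pointers and FIFO size on Cyclades-Z, or the
 * CD1400 base address, baud-rate tables and read_status_mask on
 * Cyclom-Y. */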
static int __devinit cy_init_card(struct cyclades_card *cinfo)
{
struct cyclades_port *info;
unsigned int channel, port;
spin_lock_init(&cinfo->card_lock);
cinfo->intr_enabled = 0;
cinfo->ports = kcalloc(cinfo->nports, sizeof(*cinfo->ports),
GFP_KERNEL);
if (cinfo->ports == NULL) {
printk(KERN_ERR "Cyclades: cannot allocate ports\n");
return -ENOMEM;
}
for (channel = 0, port = cinfo->first_line; channel < cinfo->nports;
channel++, port++) {
info = &cinfo->ports[channel];
tty_port_init(&info->port);
info->magic = CYCLADES_MAGIC;
info->card = cinfo;
info->line = port;
info->port.closing_wait = CLOSING_WAIT_DELAY;
info->port.close_delay = 5 * HZ / 10;
info->port.flags = STD_COM_FLAGS;
init_completion(&info->shutdown_wait);
if (cy_is_Z(cinfo)) {
struct FIRM_ID *firm_id = cinfo->base_addr + ID_ADDRESS;
struct ZFW_CTRL *zfw_ctrl;
info->port.ops = &cyz_port_ops;
info->type = PORT_STARTECH;
zfw_ctrl = cinfo->base_addr +
(readl(&firm_id->zfwctrl_addr) & 0xfffff);
info->u.cyz.ch_ctrl = &zfw_ctrl->ch_ctrl[channel];
info->u.cyz.buf_ctrl = &zfw_ctrl->buf_ctrl[channel];
if (cinfo->hw_ver == ZO_V1)
info->xmit_fifo_size = CYZ_FIFO_SIZE;
else
info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE;
#ifdef CONFIG_CYZ_INTR
setup_timer(&cyz_rx_full_timer[port],
cyz_rx_restart, (unsigned long)info);
#endif
} else {
unsigned short chip_number;
int index = cinfo->bus_index;
info->port.ops = &cyy_port_ops;
info->type = PORT_CIRRUS;
info->xmit_fifo_size = CyMAX_CHAR_FIFO;
info->cor1 = CyPARITY_NONE | Cy_1_STOP | Cy_8_BITS;
info->cor2 = CyETC;
info->cor3 = 0x08; /* _very_ small rcv threshold */
chip_number = channel / CyPORTS_PER_CHIP;
info->u.cyy.base_addr = cinfo->base_addr +
(cy_chip_offset[chip_number] << index);
info->chip_rev = cyy_readb(info, CyGFRCR);
if (info->chip_rev >= CD1400_REV_J) {
/* It is a CD1400 rev. J or later */
info->tbpr = baud_bpr_60[13]; /* Tx BPR */
info->tco = baud_co_60[13]; /* Tx CO */
info->rbpr = baud_bpr_60[13]; /* Rx BPR */
info->rco = baud_co_60[13]; /* Rx CO */
info->rtsdtr_inv = 1;
} else {
info->tbpr = baud_bpr_25[13]; /* Tx BPR */
info->tco = baud_co_25[13]; /* Tx CO */
info->rbpr = baud_bpr_25[13]; /* Rx BPR */
info->rco = baud_co_25[13]; /* Rx CO */
info->rtsdtr_inv = 0;
}
info->read_status_mask = CyTIMEOUT | CySPECHAR |
CyBREAK | CyPARITY | CyFRAME | CyOVERRUN;
}
}
#ifndef CONFIG_CYZ_INTR
if (cy_is_Z(cinfo) && !timer_pending(&cyz_timerlist)) {
mod_timer(&cyz_timerlist, jiffies + 1);
#ifdef CY_PCI_DEBUG
printk(KERN_DEBUG "Cyclades-Z polling initialized\n");
#endif
}
#endif
return 0;
}
/* initialize chips on a Cyclom-Y card -- returns the number of valid
chips, i.e. the number of ports divided by CyPORTS_PER_CHIP (4) */
static unsigned short __devinit cyy_init_card(void __iomem *true_base_addr,
int index)
{
unsigned int chip_number;
void __iomem *base_addr;
cy_writeb(true_base_addr + (Cy_HwReset << index), 0);
/* Cy_HwReset is 0x1400 */
cy_writeb(true_base_addr + (Cy_ClrIntr << index), 0);
/* Cy_ClrIntr is 0x1800 */
udelay(500L);
for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD;
chip_number++) {
base_addr =
true_base_addr + (cy_chip_offset[chip_number] << index);
mdelay(1);
if (readb(base_addr + (CyCCR << index)) != 0x00) {
/*************
printk(" chip #%d at %#6lx is never idle (CCR != 0)\n",
chip_number, (unsigned long)base_addr);
*************/
return chip_number;
}
cy_writeb(base_addr + (CyGFRCR << index), 0);
udelay(10L);
/* The Cyclom-16Y does not decode address bit 9 and therefore
cannot distinguish between references to chip 0 and a non-
existent chip 4. If the preceding clearing of the supposed
chip 4 GFRCR register appears at chip 0, there is no chip 4
and this must be a Cyclom-16Y, not a Cyclom-32Ye.
*/
if (chip_number == 4 && readb(true_base_addr +
(cy_chip_offset[0] << index) +
(CyGFRCR << index)) == 0) {
return chip_number;
}
cy_writeb(base_addr + (CyCCR << index), CyCHIP_RESET);
mdelay(1);
if (readb(base_addr + (CyGFRCR << index)) == 0x00) {
/*
printk(" chip #%d at %#6lx is not responding ",
chip_number, (unsigned long)base_addr);
printk("(GFRCR stayed 0)\n",
*/
return chip_number;
}
if ((0xf0 & (readb(base_addr + (CyGFRCR << index)))) !=
0x40) {
/*
printk(" chip #%d at %#6lx is not valid (GFRCR == "
"%#2x)\n",
chip_number, (unsigned long)base_addr,
base_addr[CyGFRCR<<index]);
*/
return chip_number;
}
cy_writeb(base_addr + (CyGCR << index), CyCH0_SERIAL);
if (readb(base_addr + (CyGFRCR << index)) >= CD1400_REV_J) {
/* It is a CD1400 rev. J or later */
/* Impossible to reach 5ms with this chip.
Changed to 2ms instead (f = 500 Hz). */
cy_writeb(base_addr + (CyPPR << index), CyCLOCK_60_2MS);
} else {
/* f = 200 Hz */
cy_writeb(base_addr + (CyPPR << index), CyCLOCK_25_5MS);
}
/*
printk(" chip #%d at %#6lx is rev 0x%2x\n",
chip_number, (unsigned long)base_addr,
readb(base_addr+(CyGFRCR<<index)));
*/
}
return chip_number;
} /* cyy_init_card */
/*
* ---------------------------------------------------------------------
* cy_detect_isa() - Probe for Cyclom-Y/ISA boards.
* sets global variables and return the number of ISA boards found.
* ---------------------------------------------------------------------
*/
static int __init cy_detect_isa(void)
{
#ifdef CONFIG_ISA
unsigned short cy_isa_irq, nboard;
void __iomem *cy_isa_address;
unsigned short i, j, cy_isa_nchan;
int isparam = 0;
nboard = 0;
/* Check for module parameters */
for (i = 0; i < NR_CARDS; i++) {
if (maddr[i] || i) {
isparam = 1;
cy_isa_addresses[i] = maddr[i];
}
if (!maddr[i])
break;
}
/* scan the address table probing for Cyclom-Y/ISA boards */
for (i = 0; i < NR_ISA_ADDRS; i++) {
unsigned int isa_address = cy_isa_addresses[i];
if (isa_address == 0x0000)
return nboard;
/* probe for CD1400... */
cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin);
if (cy_isa_address == NULL) {
printk(KERN_ERR "Cyclom-Y/ISA: can't remap base "
"address\n");
continue;
}
cy_isa_nchan = CyPORTS_PER_CHIP *
cyy_init_card(cy_isa_address, 0);
if (cy_isa_nchan == 0) {
iounmap(cy_isa_address);
continue;
}
if (isparam && i < NR_CARDS && irq[i])
cy_isa_irq = irq[i];
else
/* find out the board's irq by probing */
cy_isa_irq = detect_isa_irq(cy_isa_address);
if (cy_isa_irq == 0) {
printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but the "
"IRQ could not be detected.\n",
(unsigned long)cy_isa_address);
iounmap(cy_isa_address);
continue;
}
if ((cy_next_channel + cy_isa_nchan) > NR_PORTS) {
printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but no "
"more channels are available. Change NR_PORTS "
"in cyclades.c and recompile kernel.\n",
(unsigned long)cy_isa_address);
iounmap(cy_isa_address);
return nboard;
}
/* fill the next cy_card structure available */
for (j = 0; j < NR_CARDS; j++) {
if (cy_card[j].base_addr == NULL)
break;
}
if (j == NR_CARDS) { /* no more cy_cards available */
printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but no "
"more cards can be used. Change NR_CARDS in "
"cyclades.c and recompile kernel.\n",
(unsigned long)cy_isa_address);
iounmap(cy_isa_address);
return nboard;
}
/* allocate IRQ */
if (request_irq(cy_isa_irq, cyy_interrupt,
IRQF_DISABLED, "Cyclom-Y", &cy_card[j])) {
printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but "
"could not allocate IRQ#%d.\n",
(unsigned long)cy_isa_address, cy_isa_irq);
iounmap(cy_isa_address);
return nboard;
}
/* set cy_card */
cy_card[j].base_addr = cy_isa_address;
cy_card[j].ctl_addr.p9050 = NULL;
cy_card[j].irq = (int)cy_isa_irq;
cy_card[j].bus_index = 0;
cy_card[j].first_line = cy_next_channel;
cy_card[j].num_chips = cy_isa_nchan / CyPORTS_PER_CHIP;
cy_card[j].nports = cy_isa_nchan;
if (cy_init_card(&cy_card[j])) {
cy_card[j].base_addr = NULL;
free_irq(cy_isa_irq, &cy_card[j]);
iounmap(cy_isa_address);
continue;
}
nboard++;
printk(KERN_INFO "Cyclom-Y/ISA #%d: 0x%lx-0x%lx, IRQ%d found: "
"%d channels starting from port %d\n",
j + 1, (unsigned long)cy_isa_address,
(unsigned long)(cy_isa_address + (CyISA_Ywin - 1)),
cy_isa_irq, cy_isa_nchan, cy_next_channel);
for (j = cy_next_channel;
j < cy_next_channel + cy_isa_nchan; j++)
tty_register_device(cy_serial_driver, j, NULL);
cy_next_channel += cy_isa_nchan;
}
return nboard;
#else
return 0;
#endif /* CONFIG_ISA */
} /* cy_detect_isa */
#ifdef CONFIG_PCI
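/* cyc_isfwstr() sanity-checks a fixed-size string field from the
 * firmware header: it must be 7-bit ASCII up to the first NUL and
 * zero-padded to the end of the field. cyz_fpga_copy() feeds the FPGA
 * bitstream one byte at a time, with a short delay between writes. */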
static inline int __devinit cyc_isfwstr(const char *str, unsigned int size)
{
unsigned int a;
for (a = 0; a < size && *str; a++, str++)
if (*str & 0x80)
return -EINVAL;
for (; a < size; a++, str++)
if (*str)
return -EINVAL;
return 0;
}
static inline void __devinit cyz_fpga_copy(void __iomem *fpga, const u8 *data,
unsigned int size)
{
for (; size > 0; size--) {
cy_writel(fpga, *data++);
udelay(10);
}
}
static void __devinit plx_init(struct pci_dev *pdev, int irq,
struct RUNTIME_9060 __iomem *addr)
{
/* Reset PLX */
cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x40000000);
udelay(100L);
cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x40000000);
/* Reload Config. Registers from EEPROM */
cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x20000000);
udelay(100L);
cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x20000000);
/* For some yet unknown reason, once the PLX9060 reloads the EEPROM,
* the IRQ is lost and, thus, we have to re-write it to the PCI config.
* registers. This will remain here until we find a permanent fix.
*/
pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
}
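/* Layout of cyzfirm.bin as consumed by __cyz_load_fw(): a zfile_header
 * followed by zfile_config and zfile_block tables. The config entry
 * whose mailbox/function pair matches the board is selected and each
 * referenced block is copied either to the FPGA (ZBLOCK_FPGA) or into
 * board RAM at its ram_offset. */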
static int __devinit __cyz_load_fw(const struct firmware *fw,
const char *name, const u32 mailbox, void __iomem *base,
void __iomem *fpga)
{
const void *ptr = fw->data;
const struct zfile_header *h = ptr;
const struct zfile_config *c, *cs;
const struct zfile_block *b, *bs;
unsigned int a, tmp, len = fw->size;
#define BAD_FW KERN_ERR "Bad firmware: "
if (len < sizeof(*h)) {
printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h));
return -EINVAL;
}
cs = ptr + h->config_offset;
bs = ptr + h->block_offset;
if ((void *)(cs + h->n_config) > ptr + len ||
(void *)(bs + h->n_blocks) > ptr + len) {
printk(BAD_FW "too short");
return -EINVAL;
}
if (cyc_isfwstr(h->name, sizeof(h->name)) ||
cyc_isfwstr(h->date, sizeof(h->date))) {
printk(BAD_FW "bad formatted header string\n");
return -EINVAL;
}
if (strncmp(name, h->name, sizeof(h->name))) {
printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name);
return -EINVAL;
}
tmp = 0;
for (c = cs; c < cs + h->n_config; c++) {
for (a = 0; a < c->n_blocks; a++)
if (c->block_list[a] >= h->n_blocks) {
printk(BAD_FW "bad block ref number in cfgs\n");
return -EINVAL;
}
if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */
tmp++;
}
if (!tmp) {
printk(BAD_FW "nothing appropriate\n");
return -EINVAL;
}
for (b = bs; b < bs + h->n_blocks; b++)
if (b->file_offset + b->size > len) {
printk(BAD_FW "bad block data offset\n");
return -EINVAL;
}
/* everything is OK, let's seek'n'load it */
for (c = cs; c < cs + h->n_config; c++)
if (c->mailbox == mailbox && c->function == 0)
break;
for (a = 0; a < c->n_blocks; a++) {
b = &bs[c->block_list[a]];
if (b->type == ZBLOCK_FPGA) {
if (fpga != NULL)
cyz_fpga_copy(fpga, ptr + b->file_offset,
b->size);
} else {
if (base != NULL)
memcpy_toio(base + b->ram_offset,
ptr + b->file_offset, b->size);
}
}
#undef BAD_FW
return 0;
}
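/* cyz_load_fw() drives the whole boot sequence of a Cyclades-Z board:
 * fetch cyzfirm.bin, skip the board if its firmware counter is already
 * ticking, reset the PLX bridge, optionally load the FPGA, clear RAM,
 * copy in the RAM image, start the CPU and then poll fid->signature,
 * giving up after roughly 20 seconds. It returns the channel count
 * reported by the firmware, 0 to skip the board, or a negative errno. */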
static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
struct RUNTIME_9060 __iomem *ctl_addr, int irq)
{
const struct firmware *fw;
struct FIRM_ID __iomem *fid = base_addr + ID_ADDRESS;
struct CUSTOM_REG __iomem *cust = base_addr;
struct ZFW_CTRL __iomem *pt_zfwctrl;
void __iomem *tmp;
u32 mailbox, status, nchan;
unsigned int i;
int retval;
retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev);
if (retval) {
dev_err(&pdev->dev, "can't get firmware\n");
goto err;
}
/* Check whether the firmware is already loaded and running. If
positive, skip this board */
if (__cyz_fpga_loaded(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
u32 cntval = readl(base_addr + 0x190);
udelay(100);
if (cntval != readl(base_addr + 0x190)) {
/* FW counter is working, FW is running */
dev_dbg(&pdev->dev, "Cyclades-Z FW already loaded. "
"Skipping board.\n");
retval = 0;
goto err_rel;
}
}
/* start boot */
cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) &
~0x00030800UL);
mailbox = readl(&ctl_addr->mail_box_0);
if (mailbox == 0 || __cyz_fpga_loaded(ctl_addr)) {
/* stops CPU and set window to beginning of RAM */
cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
cy_writel(&cust->cpu_stop, 0);
cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
udelay(100);
}
plx_init(pdev, irq, ctl_addr);
if (mailbox != 0) {
/* load FPGA */
retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, NULL,
base_addr);
if (retval)
goto err_rel;
if (!__cyz_fpga_loaded(ctl_addr)) {
dev_err(&pdev->dev, "fw upload successful, but fw is "
"not loaded\n");
goto err_rel;
}
}
/* stops CPU and set window to beginning of RAM */
cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
cy_writel(&cust->cpu_stop, 0);
cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
udelay(100);
/* clear memory */
for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
cy_writeb(tmp, 255);
if (mailbox != 0) {
/* set window to last 512K of RAM */
cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE);
for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
cy_writeb(tmp, 255);
/* set window to beginning of RAM */
cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
}
retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL);
release_firmware(fw);
if (retval)
goto err;
/* finish boot and start boards */
cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
cy_writel(&cust->cpu_start, 0);
cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
i = 0;
while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 40)
msleep(100);
if (status != ZFIRM_ID) {
if (status == ZFIRM_HLT) {
dev_err(&pdev->dev, "you need an external power supply "
"for this number of ports. Firmware halted and "
"board reset.\n");
retval = -EIO;
goto err;
}
dev_warn(&pdev->dev, "fid->signature = 0x%x... Waiting "
"some more time\n", status);
while ((status = readl(&fid->signature)) != ZFIRM_ID &&
i++ < 200)
msleep(100);
if (status != ZFIRM_ID) {
dev_err(&pdev->dev, "Board not started in 20 seconds! "
"Giving up. (fid->signature = 0x%x)\n",
status);
dev_info(&pdev->dev, "*** Warning ***: if you are "
"upgrading the FW, please power cycle the "
"system before loading the new FW to the "
"Cyclades-Z.\n");
if (__cyz_fpga_loaded(ctl_addr))
plx_init(pdev, irq, ctl_addr);
retval = -EIO;
goto err;
}
dev_dbg(&pdev->dev, "Firmware started after %d seconds.\n",
i / 10);
}
pt_zfwctrl = base_addr + readl(&fid->zfwctrl_addr);
dev_dbg(&pdev->dev, "fid=> %p, zfwctrl_addr=> %x, npt_zfwctrl=> %p\n",
base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
base_addr + readl(&fid->zfwctrl_addr));
nchan = readl(&pt_zfwctrl->board_ctrl.n_channel);
dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
readl(&pt_zfwctrl->board_ctrl.fw_version), nchan);
if (nchan == 0) {
dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
"check the connection between the Z host card and the "
"serial expanders.\n");
if (__cyz_fpga_loaded(ctl_addr))
plx_init(pdev, irq, ctl_addr);
dev_info(&pdev->dev, "Null number of ports detected. Board "
"reset.\n");
retval = 0;
goto err;
}
cy_writel(&pt_zfwctrl->board_ctrl.op_system, C_OS_LINUX);
cy_writel(&pt_zfwctrl->board_ctrl.dr_version, DRIVER_VERSION);
/*
Early firmware failed to start looking for commands.
This enables firmware interrupts for those commands.
*/
cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
(1 << 17));
cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
0x00030800UL);
return nchan;
err_rel:
release_firmware(fw);
err:
return retval;
}
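/* cy_pci_probe() distinguishes Cyclom-Y from Cyclades-Z by PCI device
 * id, maps the control and window BARs, counts channels (loading the Z
 * firmware if needed), claims a free cy_card slot and IRQ, and finally
 * registers one tty device per channel. */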
static int __devinit cy_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
void __iomem *addr0 = NULL, *addr2 = NULL;
char *card_name = NULL;
u32 uninitialized_var(mailbox);
unsigned int device_id, nchan = 0, card_no, i;
unsigned char plx_ver;
int retval, irq;
retval = pci_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "cannot enable device\n");
goto err;
}
/* read PCI configuration area */
irq = pdev->irq;
device_id = pdev->device & ~PCI_DEVICE_ID_MASK;
#if defined(__alpha__)
if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo) { /* below 1M? */
dev_err(&pdev->dev, "Cyclom-Y/PCI not supported for low "
"addresses on Alpha systems.\n");
retval = -EIO;
goto err_dis;
}
#endif
if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Lo) {
dev_err(&pdev->dev, "Cyclades-Z/PCI not supported for low "
"addresses\n");
retval = -EIO;
goto err_dis;
}
if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) {
dev_warn(&pdev->dev, "PCI I/O bit incorrectly set. Ignoring "
"it...\n");
pdev->resource[2].flags &= ~IORESOURCE_IO;
}
retval = pci_request_regions(pdev, "cyclades");
if (retval) {
dev_err(&pdev->dev, "failed to reserve resources\n");
goto err_dis;
}
retval = -EIO;
if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo ||
device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
card_name = "Cyclom-Y";
addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
CyPCI_Yctl);
if (addr0 == NULL) {
dev_err(&pdev->dev, "can't remap ctl region\n");
goto err_reg;
}
addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
CyPCI_Ywin);
if (addr2 == NULL) {
dev_err(&pdev->dev, "can't remap base region\n");
goto err_unmap;
}
nchan = CyPORTS_PER_CHIP * cyy_init_card(addr2, 1);
if (nchan == 0) {
dev_err(&pdev->dev, "Cyclom-Y PCI host card with no "
"Serial-Modules\n");
goto err_unmap;
}
} else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) {
struct RUNTIME_9060 __iomem *ctl_addr;
ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
CyPCI_Zctl);
if (addr0 == NULL) {
dev_err(&pdev->dev, "can't remap ctl region\n");
goto err_reg;
}
/* Disable interrupts on the PLX before resetting it */
cy_writew(&ctl_addr->intr_ctrl_stat,
readw(&ctl_addr->intr_ctrl_stat) & ~0x0900);
plx_init(pdev, irq, addr0);
mailbox = readl(&ctl_addr->mail_box_0);
addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin);
if (addr2 == NULL) {
dev_err(&pdev->dev, "can't remap base region\n");
goto err_unmap;
}
if (mailbox == ZE_V1) {
card_name = "Cyclades-Ze";
} else {
card_name = "Cyclades-8Zo";
#ifdef CY_PCI_DEBUG
if (mailbox == ZO_V1) {
cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
dev_info(&pdev->dev, "Cyclades-8Zo/PCI: FPGA "
"id %lx, ver %lx\n", (ulong)(0xff &
readl(&((struct CUSTOM_REG *)addr2)->
fpga_id)), (ulong)(0xff &
readl(&((struct CUSTOM_REG *)addr2)->
fpga_version)));
cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
} else {
dev_info(&pdev->dev, "Cyclades-Z/PCI: New "
"Cyclades-Z board. FPGA not loaded\n");
}
#endif
/* The following clears the firmware id word. This
ensures that the driver will not attempt to talk to
the board until it has been properly initialized.
*/
if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
cy_writel(addr2 + ID_ADDRESS, 0L);
}
retval = cyz_load_fw(pdev, addr2, addr0, irq);
if (retval <= 0)
goto err_unmap;
nchan = retval;
}
if ((cy_next_channel + nchan) > NR_PORTS) {
dev_err(&pdev->dev, "Cyclades-8Zo/PCI found, but no "
"channels are available. Change NR_PORTS in "
"cyclades.c and recompile kernel.\n");
goto err_unmap;
}
/* fill the next cy_card structure available */
for (card_no = 0; card_no < NR_CARDS; card_no++) {
if (cy_card[card_no].base_addr == NULL)
break;
}
if (card_no == NR_CARDS) { /* no more cy_cards available */
dev_err(&pdev->dev, "Cyclades-8Zo/PCI found, but no "
"more cards can be used. Change NR_CARDS in "
"cyclades.c and recompile kernel.\n");
goto err_unmap;
}
if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo ||
device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
/* allocate IRQ */
retval = request_irq(irq, cyy_interrupt,
IRQF_SHARED, "Cyclom-Y", &cy_card[card_no]);
if (retval) {
dev_err(&pdev->dev, "could not allocate IRQ\n");
goto err_unmap;
}
cy_card[card_no].num_chips = nchan / CyPORTS_PER_CHIP;
} else {
struct FIRM_ID __iomem *firm_id = addr2 + ID_ADDRESS;
struct ZFW_CTRL __iomem *zfw_ctrl;
zfw_ctrl = addr2 + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
cy_card[card_no].hw_ver = mailbox;
cy_card[card_no].num_chips = (unsigned int)-1;
cy_card[card_no].board_ctrl = &zfw_ctrl->board_ctrl;
#ifdef CONFIG_CYZ_INTR
/* allocate IRQ only if board has an IRQ */
if (irq != 0 && irq != 255) {
retval = request_irq(irq, cyz_interrupt,
IRQF_SHARED, "Cyclades-Z",
&cy_card[card_no]);
if (retval) {
dev_err(&pdev->dev, "could not allocate IRQ\n");
goto err_unmap;
}
}
#endif /* CONFIG_CYZ_INTR */
}
/* set cy_card */
cy_card[card_no].base_addr = addr2;
cy_card[card_no].ctl_addr.p9050 = addr0;
cy_card[card_no].irq = irq;
cy_card[card_no].bus_index = 1;
cy_card[card_no].first_line = cy_next_channel;
cy_card[card_no].nports = nchan;
retval = cy_init_card(&cy_card[card_no]);
if (retval)
goto err_null;
pci_set_drvdata(pdev, &cy_card[card_no]);
if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo ||
device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
/* enable interrupts in the PCI interface */
plx_ver = readb(addr2 + CyPLX_VER) & 0x0f;
switch (plx_ver) {
case PLX_9050:
cy_writeb(addr0 + 0x4c, 0x43);
break;
case PLX_9060:
case PLX_9080:
default: /* Old boards, use PLX_9060 */
{
struct RUNTIME_9060 __iomem *ctl_addr = addr0;
plx_init(pdev, irq, ctl_addr);
cy_writew(&ctl_addr->intr_ctrl_stat,
readw(&ctl_addr->intr_ctrl_stat) | 0x0900);
break;
}
}
}
dev_info(&pdev->dev, "%s/PCI #%d found: %d channels starting from "
"port %d.\n", card_name, card_no + 1, nchan, cy_next_channel);
for (i = cy_next_channel; i < cy_next_channel + nchan; i++)
tty_register_device(cy_serial_driver, i, &pdev->dev);
cy_next_channel += nchan;
return 0;
err_null:
cy_card[card_no].base_addr = NULL;
free_irq(irq, &cy_card[card_no]);
err_unmap:
iounmap(addr0);
if (addr2)
iounmap(addr2);
err_reg:
pci_release_regions(pdev);
err_dis:
pci_disable_device(pdev);
err:
return retval;
}
static void __devexit cy_pci_remove(struct pci_dev *pdev)
{
struct cyclades_card *cinfo = pci_get_drvdata(pdev);
unsigned int i;
/* non-Z with old PLX */
if (!cy_is_Z(cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) ==
PLX_9050)
cy_writeb(cinfo->ctl_addr.p9050 + 0x4c, 0);
else
#ifndef CONFIG_CYZ_INTR
if (!cy_is_Z(cinfo))
#endif
cy_writew(&cinfo->ctl_addr.p9060->intr_ctrl_stat,
readw(&cinfo->ctl_addr.p9060->intr_ctrl_stat) &
~0x0900);
iounmap(cinfo->base_addr);
if (cinfo->ctl_addr.p9050)
iounmap(cinfo->ctl_addr.p9050);
if (cinfo->irq
#ifndef CONFIG_CYZ_INTR
&& !cy_is_Z(cinfo)
#endif /* CONFIG_CYZ_INTR */
)
free_irq(cinfo->irq, cinfo);
pci_release_regions(pdev);
cinfo->base_addr = NULL;
for (i = cinfo->first_line; i < cinfo->first_line +
cinfo->nports; i++)
tty_unregister_device(cy_serial_driver, i);
cinfo->nports = 0;
kfree(cinfo->ports);
}
static struct pci_driver cy_pci_driver = {
.name = "cyclades",
.id_table = cy_pci_dev_id,
.probe = cy_pci_probe,
.remove = __devexit_p(cy_pci_remove)
};
#endif
static int cyclades_proc_show(struct seq_file *m, void *v)
{
struct cyclades_port *info;
unsigned int i, j;
__u32 cur_jifs = jiffies;
seq_puts(m, "Dev TimeOpen BytesOut IdleOut BytesIn "
"IdleIn Overruns Ldisc\n");
/* Output one line for each known port */
for (i = 0; i < NR_CARDS; i++)
for (j = 0; j < cy_card[i].nports; j++) {
info = &cy_card[i].ports[j];
if (info->port.count) {
/* XXX is the ldisc num worth this? */
struct tty_struct *tty;
struct tty_ldisc *ld;
int num = 0;
tty = tty_port_tty_get(&info->port);
if (tty) {
ld = tty_ldisc_ref(tty);
if (ld) {
num = ld->ops->num;
tty_ldisc_deref(ld);
}
tty_kref_put(tty);
}
seq_printf(m, "%3d %8lu %10lu %8lu "
"%10lu %8lu %9lu %6d\n", info->line,
(cur_jifs - info->idle_stats.in_use) /
HZ, info->idle_stats.xmit_bytes,
(cur_jifs - info->idle_stats.xmit_idle)/
HZ, info->idle_stats.recv_bytes,
(cur_jifs - info->idle_stats.recv_idle)/
HZ, info->idle_stats.overruns,
num);
} else
seq_printf(m, "%3d %8lu %10lu %8lu "
"%10lu %8lu %9lu %6ld\n",
info->line, 0L, 0L, 0L, 0L, 0L, 0L, 0L);
}
return 0;
}
static int cyclades_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, cyclades_proc_show, NULL);
}
static const struct file_operations cyclades_proc_fops = {
.owner = THIS_MODULE,
.open = cyclades_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/* The serial driver boot-time initialization code!
Hardware I/O ports are mapped to character special devices on a
first found, first allocated manner. That is, this code searches
for Cyclom cards in the system. As each is found, it is probed
to discover how many chips (and thus how many ports) are present.
These ports are mapped to the tty ports 32 and upward in monotonic
fashion. If an 8-port card is replaced with a 16-port card, the
port mapping on a following card will shift.
This approach is different from what is used in the other serial
device driver because the Cyclom is more properly a multiplexer,
not just an aggregation of serial ports on one card.
If there are more cards with more ports than have been
statically allocated above, a warning is printed and the
extra ports are ignored.
*/
static const struct tty_operations cy_ops = {
.open = cy_open,
.close = cy_close,
.write = cy_write,
.put_char = cy_put_char,
.flush_chars = cy_flush_chars,
.write_room = cy_write_room,
.chars_in_buffer = cy_chars_in_buffer,
.flush_buffer = cy_flush_buffer,
.ioctl = cy_ioctl,
.throttle = cy_throttle,
.unthrottle = cy_unthrottle,
.set_termios = cy_set_termios,
.stop = cy_stop,
.start = cy_start,
.hangup = cy_hangup,
.break_ctl = cy_break,
.wait_until_sent = cy_wait_until_sent,
.tiocmget = cy_tiocmget,
.tiocmset = cy_tiocmset,
.get_icount = cy_get_icount,
.proc_fops = &cyclades_proc_fops,
};
static int __init cy_init(void)
{
unsigned int nboards;
int retval = -ENOMEM;
cy_serial_driver = alloc_tty_driver(NR_PORTS);
if (!cy_serial_driver)
goto err;
printk(KERN_INFO "Cyclades driver " CY_VERSION "\n");
/* Initialize the tty_driver structure */
cy_serial_driver->owner = THIS_MODULE;
cy_serial_driver->driver_name = "cyclades";
cy_serial_driver->name = "ttyC";
cy_serial_driver->major = CYCLADES_MAJOR;
cy_serial_driver->minor_start = 0;
cy_serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
cy_serial_driver->subtype = SERIAL_TYPE_NORMAL;
cy_serial_driver->init_termios = tty_std_termios;
cy_serial_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
cy_serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
tty_set_operations(cy_serial_driver, &cy_ops);
retval = tty_register_driver(cy_serial_driver);
if (retval) {
printk(KERN_ERR "Couldn't register Cyclades serial driver\n");
goto err_frtty;
}
/* the code below is responsible to find the boards. Each different
type of board has its own detection routine. If a board is found,
the next cy_card structure available is set by the detection
routine. These functions are responsible for checking the
availability of cy_card and cy_port data structures and updating
the cy_next_channel. */
/* look for isa boards */
nboards = cy_detect_isa();
#ifdef CONFIG_PCI
/* look for pci boards */
retval = pci_register_driver(&cy_pci_driver);
if (retval && !nboards) {
tty_unregister_driver(cy_serial_driver);
goto err_frtty;
}
#endif
return 0;
err_frtty:
put_tty_driver(cy_serial_driver);
err:
return retval;
} /* cy_init */
static void __exit cy_cleanup_module(void)
{
struct cyclades_card *card;
unsigned int i, e1;
#ifndef CONFIG_CYZ_INTR
del_timer_sync(&cyz_timerlist);
#endif /* CONFIG_CYZ_INTR */
e1 = tty_unregister_driver(cy_serial_driver);
if (e1)
printk(KERN_ERR "failed to unregister Cyclades serial "
"driver(%d)\n", e1);
#ifdef CONFIG_PCI
pci_unregister_driver(&cy_pci_driver);
#endif
for (i = 0; i < NR_CARDS; i++) {
card = &cy_card[i];
if (card->base_addr) {
/* clear interrupt */
cy_writeb(card->base_addr + Cy_ClrIntr, 0);
iounmap(card->base_addr);
if (card->ctl_addr.p9050)
iounmap(card->ctl_addr.p9050);
if (card->irq
#ifndef CONFIG_CYZ_INTR
&& !cy_is_Z(card)
#endif /* CONFIG_CYZ_INTR */
)
free_irq(card->irq, card);
for (e1 = card->first_line; e1 < card->first_line +
card->nports; e1++)
tty_unregister_device(cy_serial_driver, e1);
kfree(card->ports);
}
}
put_tty_driver(cy_serial_driver);
} /* cy_cleanup_module */
module_init(cy_init);
module_exit(cy_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(CY_VERSION);
MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR);
MODULE_FIRMWARE("cyzfirm.bin");
| gpl-2.0 |
superatmos/i9100g_kernel_ics | sound/soc/atmel/playpaq_wm8510.c | 3028 | 10774 | /* sound/soc/at32/playpaq_wm8510.c
* ASoC machine driver for PlayPaq using WM8510 codec
*
* Copyright (C) 2008 Long Range Systems
* Geoffrey Wossum <gwossum@acm.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This code is largely inspired by sound/soc/at91/eti_b1_wm8731.c
*
* NOTE: If you don't have the AT32 enhanced portmux configured (which
* isn't currently in the mainline or Atmel patched kernel), you will
* need to set the MCLK pin (PA30) to peripheral A in your board initialization
* code. Something like:
* at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0);
*
*/
/* #define DEBUG */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <mach/at32ap700x.h>
#include <mach/portmux.h>
#include "../codecs/wm8510.h"
#include "atmel-pcm.h"
#include "atmel_ssc_dai.h"
/*-------------------------------------------------------------------------*\
* constants
\*-------------------------------------------------------------------------*/
#define MCLK_PIN GPIO_PIN_PA(30)
#define MCLK_PERIPH GPIO_PERIPH_A
/*-------------------------------------------------------------------------*\
* data types
\*-------------------------------------------------------------------------*/
/* SSC clocking data */
struct ssc_clock_data {
/* CMR div */
unsigned int cmr_div;
/* Frame period (as needed by xCMR.PERIOD) */
unsigned int period;
/* The SSC clock rate these settings were calculated for */
unsigned long ssc_rate;
};
/*-------------------------------------------------------------------------*\
* module data
\*-------------------------------------------------------------------------*/
static struct clk *_gclk0;
static struct clk *_pll0;
#define CODEC_CLK (_gclk0)
/*-------------------------------------------------------------------------*\
* Sound SOC operations
\*-------------------------------------------------------------------------*/
#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock(
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
struct ssc_device *ssc = ssc_p->ssc;
struct ssc_clock_data cd;
unsigned int rate, width_bits, channels;
unsigned int bitrate, ssc_div;
unsigned actual_rate;
/*
* Figure out required bitrate
*/
rate = params_rate(params);
channels = params_channels(params);
width_bits = snd_pcm_format_physical_width(params_format(params));
bitrate = rate * width_bits * channels;
/*
* Figure out required SSC divider and period for required bitrate
*/
cd.ssc_rate = clk_get_rate(ssc->clk);
ssc_div = cd.ssc_rate / bitrate;
cd.cmr_div = ssc_div / 2;
if (ssc_div & 1) {
/* round cmr_div up */
cd.cmr_div++;
}
cd.period = width_bits - 1;
/*
* Find actual rate, compare to requested rate
*/
actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1));
pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n",
rate, actual_rate);
return cd;
}
#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
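/* A worked example for the slave-clocking math above, assuming (for
 * illustration only) a 66 MHz SSC input clock and a 48 kHz, 16-bit,
 * stereo stream: bitrate = 48000 * 16 * 2 = 1536000, ssc_div = 42,
 * cmr_div = 21, period = 15, so the achieved rate is
 * (66000000 / 42) / 32 ~= 49.1 kHz. The real input rate comes from
 * clk_get_rate(ssc->clk) and is board dependent. */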
static int playpaq_wm8510_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
struct ssc_device *ssc = ssc_p->ssc;
unsigned int pll_out = 0, bclk = 0, mclk_div = 0;
int ret;
/* Due to difficulties with getting the correct clocks from the AT32's
* PLL0, we're going to let the CODEC be in charge of all the clocks
*/
#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
#else
struct ssc_clock_data cd;
const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS);
#endif
if (ssc == NULL) {
pr_warning("playpaq_wm8510_hw_params: ssc is NULL!\n");
return -EINVAL;
}
/*
* Figure out PLL and BCLK dividers for WM8510
*/
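/* In every case below the PLL runs at 24.576 MHz (8/16/48 kHz family)
 * or 22.5792 MHz (11.025/22.05/44.1 kHz family) and MCLKDIV is chosen
 * so that MCLK = 256 * fs; BCLKDIV_8 then leaves BCLK = 32 * fs, i.e.
 * one 16-bit stereo I2S frame per sample. */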
switch (params_rate(params)) {
case 48000:
pll_out = 24576000;
mclk_div = WM8510_MCLKDIV_2;
bclk = WM8510_BCLKDIV_8;
break;
case 44100:
pll_out = 22579200;
mclk_div = WM8510_MCLKDIV_2;
bclk = WM8510_BCLKDIV_8;
break;
case 22050:
pll_out = 22579200;
mclk_div = WM8510_MCLKDIV_4;
bclk = WM8510_BCLKDIV_8;
break;
case 16000:
pll_out = 24576000;
mclk_div = WM8510_MCLKDIV_6;
bclk = WM8510_BCLKDIV_8;
break;
case 11025:
pll_out = 22579200;
mclk_div = WM8510_MCLKDIV_8;
bclk = WM8510_BCLKDIV_8;
break;
case 8000:
pll_out = 24576000;
mclk_div = WM8510_MCLKDIV_12;
bclk = WM8510_BCLKDIV_8;
break;
default:
pr_warning("playpaq_wm8510: Unsupported sample rate %d\n",
params_rate(params));
return -EINVAL;
}
/*
* set CPU and CODEC DAI configuration
*/
ret = snd_soc_dai_set_fmt(codec_dai, fmt);
if (ret < 0) {
pr_warning("playpaq_wm8510: "
"Failed to set CODEC DAI format (%d)\n",
ret);
return ret;
}
ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
if (ret < 0) {
pr_warning("playpaq_wm8510: "
"Failed to set CPU DAI format (%d)\n",
ret);
return ret;
}
/*
* Set CPU clock configuration
*/
#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
cd = playpaq_wm8510_calc_ssc_clock(params, cpu_dai);
pr_debug("playpaq_wm8510: cmr_div = %d, period = %d\n",
cd.cmr_div, cd.period);
ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_CMR_DIV, cd.cmr_div);
if (ret < 0) {
pr_warning("playpaq_wm8510: Failed to set CPU CMR_DIV (%d)\n",
ret);
return ret;
}
ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_TCMR_PERIOD,
cd.period);
if (ret < 0) {
pr_warning("playpaq_wm8510: "
"Failed to set CPU transmit period (%d)\n",
ret);
return ret;
}
#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
/*
* Set CODEC clock configuration
*/
pr_debug("playpaq_wm8510: "
"pll_in = %ld, pll_out = %u, bclk = %x, mclk = %x\n",
clk_get_rate(CODEC_CLK), pll_out, bclk, mclk_div);
#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_BCLKDIV, bclk);
if (ret < 0) {
pr_warning
("playpaq_wm8510: Failed to set CODEC DAI BCLKDIV (%d)\n",
ret);
return ret;
}
#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
ret = snd_soc_dai_set_pll(codec_dai, 0, 0,
clk_get_rate(CODEC_CLK), pll_out);
if (ret < 0) {
pr_warning("playpaq_wm8510: Failed to set CODEC DAI PLL (%d)\n",
ret);
return ret;
}
ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_MCLKDIV, mclk_div);
if (ret < 0) {
pr_warning("playpaq_wm8510: Failed to set CODEC MCLKDIV (%d)\n",
ret);
return ret;
}
return 0;
}
static struct snd_soc_ops playpaq_wm8510_ops = {
.hw_params = playpaq_wm8510_hw_params,
};
static const struct snd_soc_dapm_widget playpaq_dapm_widgets[] = {
SND_SOC_DAPM_MIC("Int Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
};
static const struct snd_soc_dapm_route intercon[] = {
/* speaker connected to SPKOUT */
{"Ext Spk", NULL, "SPKOUTP"},
{"Ext Spk", NULL, "SPKOUTN"},
{"Mic Bias", NULL, "Int Mic"},
{"MICN", NULL, "Mic Bias"},
{"MICP", NULL, "Mic Bias"},
};
static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
int i;
/*
* Add DAPM widgets
*/
for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
/*
* Setup audio path interconnects
*/
snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
/* always connected pins */
snd_soc_dapm_enable_pin(dapm, "Int Mic");
snd_soc_dapm_enable_pin(dapm, "Ext Spk");
snd_soc_dapm_sync(dapm);
/* Make CSB show PLL rate */
snd_soc_dai_set_clkdiv(rtd->codec_dai, WM8510_OPCLKDIV,
WM8510_OPCLKDIV_1 | 4);
return 0;
}
static struct snd_soc_dai_link playpaq_wm8510_dai = {
.name = "WM8510",
.stream_name = "WM8510 PCM",
.cpu_dai_name = "atmel-ssc-dai.0",
.platform_name = "atmel-pcm-audio",
.codec_name = "wm8510-codec.0-0x1a",
.codec_dai_name = "wm8510-hifi",
.init = playpaq_wm8510_init,
.ops = &playpaq_wm8510_ops,
};
static struct snd_soc_card snd_soc_playpaq = {
.name = "LRS_PlayPaq_WM8510",
.dai_link = &playpaq_wm8510_dai,
.num_links = 1,
};
static struct platform_device *playpaq_snd_device;
static int __init playpaq_asoc_init(void)
{
int ret = 0;
/*
* Configure MCLK for WM8510
*/
_gclk0 = clk_get(NULL, "gclk0");
if (IS_ERR(_gclk0)) {
ret = PTR_ERR(_gclk0);
_gclk0 = NULL;
goto err_gclk0;
}
_pll0 = clk_get(NULL, "pll0");
if (IS_ERR(_pll0)) {
ret = PTR_ERR(_pll0);
_pll0 = NULL;
goto err_pll0;
}
if (clk_set_parent(_gclk0, _pll0)) {
pr_warning("snd-soc-playpaq: "
"Failed to set PLL0 as parent for DAC clock\n");
ret = -ENODEV;
goto err_set_clk;
}
clk_set_rate(CODEC_CLK, 12000000);
clk_enable(CODEC_CLK);
#if defined CONFIG_AT32_ENHANCED_PORTMUX
at32_select_periph(MCLK_PIN, MCLK_PERIPH, 0);
#endif
/*
* Create and register platform device
*/
playpaq_snd_device = platform_device_alloc("soc-audio", 0);
if (playpaq_snd_device == NULL) {
ret = -ENOMEM;
goto err_device_alloc;
}
platform_set_drvdata(playpaq_snd_device, &snd_soc_playpaq);
ret = platform_device_add(playpaq_snd_device);
if (ret) {
pr_warning("playpaq_wm8510: platform_device_add failed (%d)\n",
ret);
goto err_device_add;
}
return 0;
err_device_add:
if (playpaq_snd_device != NULL) {
platform_device_put(playpaq_snd_device);
playpaq_snd_device = NULL;
}
err_device_alloc:
err_set_clk:
if (_pll0 != NULL) {
clk_put(_pll0);
_pll0 = NULL;
}
err_pll0:
if (_gclk0 != NULL) {
clk_put(_gclk0);
_gclk0 = NULL;
}
err_gclk0:
return ret;
}
static void __exit playpaq_asoc_exit(void)
{
if (_gclk0 != NULL) {
clk_put(_gclk0);
_gclk0 = NULL;
}
if (_pll0 != NULL) {
clk_put(_pll0);
_pll0 = NULL;
}
#if defined CONFIG_AT32_ENHANCED_PORTMUX
at32_free_pin(MCLK_PIN);
#endif
platform_device_unregister(playpaq_snd_device);
playpaq_snd_device = NULL;
}
module_init(playpaq_asoc_init);
module_exit(playpaq_asoc_exit);
MODULE_AUTHOR("Geoffrey Wossum <gwossum@acm.org>");
MODULE_DESCRIPTION("ASoC machine driver for LRS PlayPaq");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mdeejay/android_kernel_dlxub1 | kernel/debug/kdb/kdb_main.c | 4308 | 71380 | /*
* Kernel Debugger Architecture Independent Main Code
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
* Xscale (R) modifications copyright (C) 2003 Intel Corporation.
* Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
*/
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/smp.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/nmi.h>
#include <linux/time.h>
#include <linux/ptrace.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/kdebug.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include "kdb_private.h"
#define GREP_LEN 256
char kdb_grep_string[GREP_LEN];
int kdb_grepping_flag;
EXPORT_SYMBOL(kdb_grepping_flag);
int kdb_grep_leading;
int kdb_grep_trailing;
/*
* Kernel debugger state flags
*/
int kdb_flags;
atomic_t kdb_event;
/*
* kdb_lock protects updates to kdb_initial_cpu. Used to
* single thread processors through the kernel debugger.
*/
int kdb_initial_cpu = -1; /* cpu number that owns kdb */
int kdb_nextline = 1;
int kdb_state; /* General KDB state */
struct task_struct *kdb_current_task;
EXPORT_SYMBOL(kdb_current_task);
struct pt_regs *kdb_current_regs;
const char *kdb_diemsg;
static int kdb_go_count;
#ifdef CONFIG_KDB_CONTINUE_CATASTROPHIC
static unsigned int kdb_continue_catastrophic =
CONFIG_KDB_CONTINUE_CATASTROPHIC;
#else
static unsigned int kdb_continue_catastrophic;
#endif
/* kdb_commands describes the available commands. */
static kdbtab_t *kdb_commands;
#define KDB_BASE_CMD_MAX 50
static int kdb_max_commands = KDB_BASE_CMD_MAX;
static kdbtab_t kdb_base_commands[KDB_BASE_CMD_MAX];
#define for_each_kdbcmd(cmd, num) \
for ((cmd) = kdb_base_commands, (num) = 0; \
num < kdb_max_commands; \
num++, num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++)
typedef struct _kdbmsg {
int km_diag; /* kdb diagnostic */
char *km_msg; /* Corresponding message text */
} kdbmsg_t;
#define KDBMSG(msgnum, text) \
{ KDB_##msgnum, text }
static kdbmsg_t kdbmsgs[] = {
KDBMSG(NOTFOUND, "Command Not Found"),
KDBMSG(ARGCOUNT, "Improper argument count, see usage."),
KDBMSG(BADWIDTH, "Illegal value for BYTESPERWORD use 1, 2, 4 or 8, "
"8 is only allowed on 64 bit systems"),
KDBMSG(BADRADIX, "Illegal value for RADIX use 8, 10 or 16"),
KDBMSG(NOTENV, "Cannot find environment variable"),
KDBMSG(NOENVVALUE, "Environment variable should have value"),
KDBMSG(NOTIMP, "Command not implemented"),
KDBMSG(ENVFULL, "Environment full"),
KDBMSG(ENVBUFFULL, "Environment buffer full"),
KDBMSG(TOOMANYBPT, "Too many breakpoints defined"),
#ifdef CONFIG_CPU_XSCALE
KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"),
#else
KDBMSG(TOOMANYDBREGS, "More breakpoints than db registers defined"),
#endif
KDBMSG(DUPBPT, "Duplicate breakpoint address"),
KDBMSG(BPTNOTFOUND, "Breakpoint not found"),
KDBMSG(BADMODE, "Invalid IDMODE"),
KDBMSG(BADINT, "Illegal numeric value"),
KDBMSG(INVADDRFMT, "Invalid symbolic address format"),
KDBMSG(BADREG, "Invalid register name"),
KDBMSG(BADCPUNUM, "Invalid cpu number"),
KDBMSG(BADLENGTH, "Invalid length field"),
KDBMSG(NOBP, "No Breakpoint exists"),
KDBMSG(BADADDR, "Invalid address"),
};
#undef KDBMSG
static const int __nkdb_err = ARRAY_SIZE(kdbmsgs);
/*
* Initial environment. This is all kept static and local to
* this file. We don't want to rely on the memory allocation
* mechanisms in the kernel, so we use a very limited allocate-only
* heap for new and altered environment variables. The entire
* environment is limited to a fixed number of entries (add more
* to __env[] if required) and a fixed amount of heap (add more to
* KDB_ENVBUFSIZE if required).
*/
static char *__env[] = {
#if defined(CONFIG_SMP)
"PROMPT=[%d]kdb> ",
"MOREPROMPT=[%d]more> ",
#else
"PROMPT=kdb> ",
"MOREPROMPT=more> ",
#endif
"RADIX=16",
"MDCOUNT=8", /* lines of md output */
KDB_PLATFORM_ENV,
"DTABCOUNT=30",
"NOSECT=1",
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
};
static const int __nenv = ARRAY_SIZE(__env);
struct task_struct *kdb_curr_task(int cpu)
{
struct task_struct *p = curr_task(cpu);
#ifdef _TIF_MCA_INIT
if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && KDB_TSK(cpu))
p = krp->p;
#endif
return p;
}
/*
* kdbgetenv - This function will return the character string value of
* an environment variable.
* Parameters:
* match A character string representing an environment variable.
* Returns:
* NULL No environment variable matches 'match'
* char* Pointer to string value of environment variable.
*/
char *kdbgetenv(const char *match)
{
char **ep = __env;
int matchlen = strlen(match);
int i;
for (i = 0; i < __nenv; i++) {
char *e = *ep++;
if (!e)
continue;
if ((strncmp(match, e, matchlen) == 0)
&& ((e[matchlen] == '\0')
|| (e[matchlen] == '='))) {
char *cp = strchr(e, '=');
return cp ? ++cp : "";
}
}
return NULL;
}
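/* For example, with the default environment above, kdbgetenv("RADIX")
 * returns the string "16", while an unknown name returns NULL. */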
/*
* kdballocenv - This function is used to allocate bytes for
* environment entries.
* Parameters:
* bytes The number of bytes to allocate from the static buffer.
* Returns:
* A pointer to the allocated space on success, NULL if the buffer
* is exhausted.
* Remarks:
* We use a static environment buffer (envbuffer) to hold the values
* of dynamically generated environment variables (see kdb_set). Buffer
* space once allocated is never free'd, so over time, the amount of space
* (currently 512 bytes) will be exhausted if env variables are changed
* frequently.
*/
static char *kdballocenv(size_t bytes)
{
#define KDB_ENVBUFSIZE 512
static char envbuffer[KDB_ENVBUFSIZE];
static int envbufsize;
char *ep = NULL;
if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) {
ep = &envbuffer[envbufsize];
envbufsize += bytes;
}
return ep;
}
/*
* kdbgetulenv - This function will return the value of an unsigned
* long-valued environment variable.
* Parameters:
* match The name of an environment variable with a numeric value
* Outputs:
* *value the unsigned long representation of the env variable 'match'
* Returns:
* Zero on success, a kdb diagnostic on failure.
*/
static int kdbgetulenv(const char *match, unsigned long *value)
{
char *ep;
ep = kdbgetenv(match);
if (!ep)
return KDB_NOTENV;
if (strlen(ep) == 0)
return KDB_NOENVVALUE;
*value = simple_strtoul(ep, NULL, 0);
return 0;
}
/*
* kdbgetintenv - This function will return the value of an
* integer-valued environment variable.
* Parameters:
* match A character string representing an integer-valued env variable
* Outputs:
* *value the integer representation of the environment variable 'match'
* Returns:
* Zero on success, a kdb diagnostic on failure.
*/
int kdbgetintenv(const char *match, int *value)
{
unsigned long val;
int diag;
diag = kdbgetulenv(match, &val);
if (!diag)
*value = (int) val;
return diag;
}
/*
* kdbgetularg - This function will convert a numeric string into an
* unsigned long value.
* Parameters:
* arg A character string representing a numeric value
* Outputs:
* *value the unsigned long representation of arg.
* Returns:
* Zero on success, a kdb diagnostic on failure.
*/
int kdbgetularg(const char *arg, unsigned long *value)
{
char *endp;
unsigned long val;
val = simple_strtoul(arg, &endp, 0);
if (endp == arg) {
/*
* Also try base 16, for us folks too lazy to type the
* leading 0x...
*/
val = simple_strtoul(arg, &endp, 16);
if (endp == arg)
return KDB_BADINT;
}
*value = val;
return 0;
}
int kdbgetu64arg(const char *arg, u64 *value)
{
char *endp;
u64 val;
val = simple_strtoull(arg, &endp, 0);
if (endp == arg) {
val = simple_strtoull(arg, &endp, 16);
if (endp == arg)
return KDB_BADINT;
}
*value = val;
return 0;
}
/*
* kdb_set - This function implements the 'set' command. Alter an
* existing environment variable or create a new one.
*/
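/* For example, "set MDCOUNT=16" at the kdb prompt overrides the
 * default of 8 lines of md output, while a "set KDBDEBUG=<flags>"
 * assignment is intercepted below and folded into kdb_flags instead
 * of the environment. */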
int kdb_set(int argc, const char **argv)
{
int i;
char *ep;
size_t varlen, vallen;
/*
* we can be invoked two ways:
* set var=value argv[1]="var", argv[2]="value"
* set var = value argv[1]="var", argv[2]="=", argv[3]="value"
* - if the latter, shift 'em down.
*/
if (argc == 3) {
argv[2] = argv[3];
argc--;
}
if (argc != 2)
return KDB_ARGCOUNT;
/*
* Check for internal variables
*/
if (strcmp(argv[1], "KDBDEBUG") == 0) {
unsigned int debugflags;
char *cp;
debugflags = simple_strtoul(argv[2], &cp, 0);
if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) {
kdb_printf("kdb: illegal debug flags '%s'\n",
argv[2]);
return 0;
}
kdb_flags = (kdb_flags &
~(KDB_DEBUG_FLAG_MASK << KDB_DEBUG_FLAG_SHIFT))
| (debugflags << KDB_DEBUG_FLAG_SHIFT);
return 0;
}
/*
* Tokenizer squashed the '=' sign. argv[1] is variable
* name, argv[2] = value.
*/
varlen = strlen(argv[1]);
vallen = strlen(argv[2]);
ep = kdballocenv(varlen + vallen + 2);
if (ep == (char *)0)
return KDB_ENVBUFFULL;
sprintf(ep, "%s=%s", argv[1], argv[2]);
ep[varlen+vallen+1] = '\0';
for (i = 0; i < __nenv; i++) {
if (__env[i]
&& ((strncmp(__env[i], argv[1], varlen) == 0)
&& ((__env[i][varlen] == '\0')
|| (__env[i][varlen] == '=')))) {
__env[i] = ep;
return 0;
}
}
/*
* Not an existing variable; fit it into a free slot.
*/
for (i = 0; i < __nenv-1; i++) {
if (__env[i] == (char *)0) {
__env[i] = ep;
return 0;
}
}
return KDB_ENVFULL;
}
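/*
 * For illustration, both spellings below reach the same code path, because
 * the tokenizer in kdb_parse treats '=' as a delimiter and kdb_set shifts
 * the arguments when the '=' arrives as its own token:
 *   kdb> set MDCOUNT=32
 *   kdb> set MDCOUNT = 32
 *   kdb> set KDBDEBUG=0x1   (debug flags, checked against KDB_DEBUG_FLAG_MASK)
 */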
static int kdb_check_regs(void)
{
if (!kdb_current_regs) {
kdb_printf("No current kdb registers."
" You may need to select another task\n");
return KDB_BADREG;
}
return 0;
}
/*
* kdbgetaddrarg - This function is responsible for parsing an
* address-expression and returning the value of the expression,
* symbol name, and offset to the caller.
*
* The argument may consist of a numeric value (decimal or
* hexadecimal), a symbol name, a register name (preceded by the
* percent sign), an environment variable with a numeric value
* (preceded by a dollar sign) or a simple arithmetic expression
* consisting of a symbol name, +/-, and a numeric constant value
* (offset).
* Parameters:
* argc - count of arguments in argv
* argv - argument vector
* *nextarg - index to next unparsed argument in argv[]
* regs - Register state at time of KDB entry
* Outputs:
* *value - receives the value of the address-expression
* *offset - receives the offset specified, if any
* *name - receives the symbol name, if any
* *nextarg - index to next unparsed argument in argv[]
* Returns:
* zero is returned on success, a kdb diagnostic code is
* returned on error.
*/
int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
unsigned long *value, long *offset,
char **name)
{
unsigned long addr;
unsigned long off = 0;
int positive;
int diag;
int found = 0;
char *symname;
char symbol = '\0';
char *cp;
kdb_symtab_t symtab;
/*
* Process arguments which follow the following syntax:
*
* symbol | numeric-address [+/- numeric-offset]
* %register
* $environment-variable
*/
if (*nextarg > argc)
return KDB_ARGCOUNT;
symname = (char *)argv[*nextarg];
/*
* If there is no whitespace between the symbol
* or address and the '+' or '-' symbols, we
* remember the character and replace it with a
* null so the symbol/value can be properly parsed
*/
cp = strpbrk(symname, "+-");
if (cp != NULL) {
symbol = *cp;
*cp++ = '\0';
}
if (symname[0] == '$') {
diag = kdbgetulenv(&symname[1], &addr);
if (diag)
return diag;
} else if (symname[0] == '%') {
diag = kdb_check_regs();
if (diag)
return diag;
/* Implement register values with % at a later time as it is
* arch optional.
*/
return KDB_NOTIMP;
} else {
found = kdbgetsymval(symname, &symtab);
if (found) {
addr = symtab.sym_start;
} else {
diag = kdbgetularg(argv[*nextarg], &addr);
if (diag)
return diag;
}
}
if (!found)
found = kdbnearsym(addr, &symtab);
(*nextarg)++;
if (name)
*name = symname;
if (value)
*value = addr;
if (offset && name && *name)
*offset = addr - symtab.sym_start;
if ((*nextarg > argc)
&& (symbol == '\0'))
return 0;
/*
* check for +/- and offset
*/
if (symbol == '\0') {
if ((argv[*nextarg][0] != '+')
&& (argv[*nextarg][0] != '-')) {
/*
* Not our argument. Return.
*/
return 0;
} else {
positive = (argv[*nextarg][0] == '+');
(*nextarg)++;
}
} else
positive = (symbol == '+');
/*
* Now there must be an offset!
*/
if ((*nextarg > argc)
&& (symbol == '\0')) {
return KDB_INVADDRFMT;
}
if (!symbol) {
cp = (char *)argv[*nextarg];
(*nextarg)++;
}
diag = kdbgetularg(cp, &off);
if (diag)
return diag;
if (!positive)
off = -off;
if (offset)
*offset += off;
if (value)
*value += off;
return 0;
}
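/*
 * Examples of address-expressions this parser accepts (symbol names are
 * illustrative):
 *   md schedule             symbol
 *   md schedule+0x40        symbol plus offset, no whitespace
 *   md schedule + 0x40      symbol plus offset, whitespace-separated
 *   md 0xc0123456-8         numeric address minus offset
 *   md $MDCOUNT             numeric environment variable
 *   md %rip                 register names are recognised but return KDB_NOTIMP here
 */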
static void kdb_cmderror(int diag)
{
int i;
if (diag >= 0) {
kdb_printf("no error detected (diagnostic is %d)\n", diag);
return;
}
for (i = 0; i < __nkdb_err; i++) {
if (kdbmsgs[i].km_diag == diag) {
kdb_printf("diag: %d: %s\n", diag, kdbmsgs[i].km_msg);
return;
}
}
kdb_printf("Unknown diag %d\n", -diag);
}
/*
* kdb_defcmd, kdb_defcmd2 - This function implements the 'defcmd'
* command which defines one command as a set of other commands,
* terminated by endefcmd. kdb_defcmd processes the initial
* 'defcmd' command, kdb_defcmd2 is invoked from kdb_parse for
* the following commands until 'endefcmd'.
* Inputs:
* argc argument count
* argv argument vector
* Returns:
* zero for success, a kdb diagnostic if error
*/
struct defcmd_set {
int count;
int usable;
char *name;
char *usage;
char *help;
char **command;
};
static struct defcmd_set *defcmd_set;
static int defcmd_set_count;
static int defcmd_in_progress;
/* Forward references */
static int kdb_exec_defcmd(int argc, const char **argv);
static int kdb_defcmd2(const char *cmdstr, const char *argv0)
{
struct defcmd_set *s = defcmd_set + defcmd_set_count - 1;
char **save_command = s->command;
if (strcmp(argv0, "endefcmd") == 0) {
defcmd_in_progress = 0;
if (!s->count)
s->usable = 0;
if (s->usable)
kdb_register(s->name, kdb_exec_defcmd,
s->usage, s->help, 0);
return 0;
}
if (!s->usable)
return KDB_NOTIMP;
s->command = kzalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
if (!s->command) {
kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
cmdstr);
s->usable = 0;
return KDB_NOTIMP;
}
memcpy(s->command, save_command, s->count * sizeof(*(s->command)));
s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB);
kfree(save_command);
return 0;
}
static int kdb_defcmd(int argc, const char **argv)
{
struct defcmd_set *save_defcmd_set = defcmd_set, *s;
if (defcmd_in_progress) {
kdb_printf("kdb: nested defcmd detected, assuming missing "
"endefcmd\n");
kdb_defcmd2("endefcmd", "endefcmd");
}
if (argc == 0) {
int i;
for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) {
kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name,
s->usage, s->help);
for (i = 0; i < s->count; ++i)
kdb_printf("%s", s->command[i]);
kdb_printf("endefcmd\n");
}
return 0;
}
if (argc != 3)
return KDB_ARGCOUNT;
defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
GFP_KDB);
if (!defcmd_set) {
kdb_printf("Could not allocate new defcmd_set entry for %s\n",
argv[1]);
defcmd_set = save_defcmd_set;
return KDB_NOTIMP;
}
memcpy(defcmd_set, save_defcmd_set,
defcmd_set_count * sizeof(*defcmd_set));
kfree(save_defcmd_set);
s = defcmd_set + defcmd_set_count;
memset(s, 0, sizeof(*s));
s->usable = 1;
s->name = kdb_strdup(argv[1], GFP_KDB);
s->usage = kdb_strdup(argv[2], GFP_KDB);
s->help = kdb_strdup(argv[3], GFP_KDB);
if (s->usage[0] == '"') {
strcpy(s->usage, s->usage+1);
s->usage[strlen(s->usage)-1] = '\0';
}
if (s->help[0] == '"') {
strcpy(s->help, s->help+1);
s->help[strlen(s->help)-1] = '\0';
}
++defcmd_set_count;
defcmd_in_progress = 1;
return 0;
}
/*
* kdb_exec_defcmd - Execute the set of commands associated with this
* defcmd name.
* Inputs:
* argc argument count
* argv argument vector
* Returns:
* zero for success, a kdb diagnostic if error
*/
static int kdb_exec_defcmd(int argc, const char **argv)
{
int i, ret;
struct defcmd_set *s;
if (argc != 0)
return KDB_ARGCOUNT;
for (s = defcmd_set, i = 0; i < defcmd_set_count; ++i, ++s) {
if (strcmp(s->name, argv[0]) == 0)
break;
}
if (i == defcmd_set_count) {
kdb_printf("kdb_exec_defcmd: could not find commands for %s\n",
argv[0]);
return KDB_NOTIMP;
}
for (i = 0; i < s->count; ++i) {
/* Recursive use of kdb_parse, do not use argv after
* this point */
argv = NULL;
kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]);
ret = kdb_parse(s->command[i]);
if (ret)
return ret;
}
return 0;
}
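/*
 * A typical defcmd session (command names are illustrative); the recorded
 * body is replayed by kdb_exec_defcmd whenever the new command is invoked:
 *   kdb> defcmd diag "" "Collect a standard set of diagnostics"
 *   [defcmd]kdb> ps
 *   [defcmd]kdb> lsmod
 *   [defcmd]kdb> endefcmd
 *   kdb> diag
 */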
/* Command history */
#define KDB_CMD_HISTORY_COUNT 32
#define CMD_BUFLEN 200 /* kdb_printf: max printline
* size == 256 */
static unsigned int cmd_head, cmd_tail;
static unsigned int cmdptr;
static char cmd_hist[KDB_CMD_HISTORY_COUNT][CMD_BUFLEN];
static char cmd_cur[CMD_BUFLEN];
/*
* The "str" argument may point to something like | grep xyz
*/
static void parse_grep(const char *str)
{
int len;
char *cp = (char *)str, *cp2;
/* sanity check: we should have been called with the '|' first */
if (*cp != '|')
return;
cp++;
while (isspace(*cp))
cp++;
if (strncmp(cp, "grep ", 5)) {
kdb_printf("invalid 'pipe', see grephelp\n");
return;
}
cp += 5;
while (isspace(*cp))
cp++;
cp2 = strchr(cp, '\n');
if (cp2)
*cp2 = '\0'; /* remove the trailing newline */
len = strlen(cp);
if (len == 0) {
kdb_printf("invalid 'pipe', see grephelp\n");
return;
}
/* now cp points to a nonzero length search string */
if (*cp == '"') {
/* allow it to be "x y z" by removing the "'s - there must
be two of them */
cp++;
cp2 = strchr(cp, '"');
if (!cp2) {
kdb_printf("invalid quoted string, see grephelp\n");
return;
}
*cp2 = '\0'; /* end the string where the 2nd " was */
}
kdb_grep_leading = 0;
if (*cp == '^') {
kdb_grep_leading = 1;
cp++;
}
len = strlen(cp);
kdb_grep_trailing = 0;
if (*(cp+len-1) == '$') {
kdb_grep_trailing = 1;
*(cp+len-1) = '\0';
}
len = strlen(cp);
if (!len)
return;
if (len >= GREP_LEN) {
kdb_printf("search string too long\n");
return;
}
strcpy(kdb_grep_string, cp);
kdb_grepping_flag++;
return;
}
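/*
 * Example pipe usage accepted by this parser (the command on the left is
 * arbitrary):
 *   kdb> ps | grep bash           substring match
 *   kdb> lsmod | grep ^usb        anchored to the start of the line
 *   kdb> env | grep "BYTES"       quoted pattern, may contain spaces
 *   kdb> dmesg | grep error$      anchored to the end of the line
 */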
/*
* kdb_parse - Parse the command line, search the command table for a
* matching command and invoke the command function. This
* function may be called recursively, if it is, the second call
* will overwrite argv and cbuf. It is the caller's
* responsibility to save their argv if they recursively call
* kdb_parse().
* Parameters:
* cmdstr The input command line to be parsed.
* regs The registers at the time kdb was entered.
* Returns:
* Zero for success, a kdb diagnostic if failure.
* Remarks:
* Limited to 20 tokens.
*
* Real rudimentary tokenization. Basically only whitespace
* is considered a token delimiter (but special consideration
* is taken of the '=' sign as used by the 'set' command).
*
* The algorithm used to tokenize the input string relies on
* there being at least one whitespace (or otherwise useless)
* character between tokens as the character immediately following
* the token is altered in-place to a null-byte to terminate the
* token string.
*/
#define MAXARGC 20
int kdb_parse(const char *cmdstr)
{
static char *argv[MAXARGC];
static int argc;
static char cbuf[CMD_BUFLEN+2];
char *cp;
char *cpp, quoted;
kdbtab_t *tp;
int i, escaped, ignore_errors = 0, check_grep;
/*
* First tokenize the command string.
*/
cp = (char *)cmdstr;
kdb_grepping_flag = check_grep = 0;
if (KDB_FLAG(CMD_INTERRUPT)) {
/* Previous command was interrupted, newline must not
* repeat the command */
KDB_FLAG_CLEAR(CMD_INTERRUPT);
KDB_STATE_SET(PAGER);
argc = 0; /* no repeat */
}
if (*cp != '\n' && *cp != '\0') {
argc = 0;
cpp = cbuf;
while (*cp) {
/* skip whitespace */
while (isspace(*cp))
cp++;
if ((*cp == '\0') || (*cp == '\n') ||
(*cp == '#' && !defcmd_in_progress))
break;
/* special case: check for | grep pattern */
if (*cp == '|') {
check_grep++;
break;
}
if (cpp >= cbuf + CMD_BUFLEN) {
kdb_printf("kdb_parse: command buffer "
"overflow, command ignored\n%s\n",
cmdstr);
return KDB_NOTFOUND;
}
if (argc >= MAXARGC - 1) {
kdb_printf("kdb_parse: too many arguments, "
"command ignored\n%s\n", cmdstr);
return KDB_NOTFOUND;
}
argv[argc++] = cpp;
escaped = 0;
quoted = '\0';
/* Copy to next unquoted and unescaped
* whitespace or '=' */
while (*cp && *cp != '\n' &&
(escaped || quoted || !isspace(*cp))) {
if (cpp >= cbuf + CMD_BUFLEN)
break;
if (escaped) {
escaped = 0;
*cpp++ = *cp++;
continue;
}
if (*cp == '\\') {
escaped = 1;
++cp;
continue;
}
if (*cp == quoted)
quoted = '\0';
else if (*cp == '\'' || *cp == '"')
quoted = *cp;
*cpp = *cp++;
if (*cpp == '=' && !quoted)
break;
++cpp;
}
*cpp++ = '\0'; /* Squash a ws or '=' character */
}
}
if (!argc)
return 0;
if (check_grep)
parse_grep(cp);
if (defcmd_in_progress) {
int result = kdb_defcmd2(cmdstr, argv[0]);
if (!defcmd_in_progress) {
argc = 0; /* avoid repeat on endefcmd */
*(argv[0]) = '\0';
}
return result;
}
if (argv[0][0] == '-' && argv[0][1] &&
(argv[0][1] < '0' || argv[0][1] > '9')) {
ignore_errors = 1;
++argv[0];
}
for_each_kdbcmd(tp, i) {
if (tp->cmd_name) {
/*
* If this command is allowed to be abbreviated,
* check to see if this is it.
*/
if (tp->cmd_minlen
&& (strlen(argv[0]) <= tp->cmd_minlen)) {
if (strncmp(argv[0],
tp->cmd_name,
tp->cmd_minlen) == 0) {
break;
}
}
if (strcmp(argv[0], tp->cmd_name) == 0)
break;
}
}
/*
* If we don't find a command by this name, see if the first
* few characters of this match any of the known commands.
* e.g., md1c20 should match md.
*/
if (i == kdb_max_commands) {
for_each_kdbcmd(tp, i) {
if (tp->cmd_name) {
if (strncmp(argv[0],
tp->cmd_name,
strlen(tp->cmd_name)) == 0) {
break;
}
}
}
}
if (i < kdb_max_commands) {
int result;
KDB_STATE_SET(CMD);
result = (*tp->cmd_func)(argc-1, (const char **)argv);
if (result && ignore_errors && result > KDB_CMD_GO)
result = 0;
KDB_STATE_CLEAR(CMD);
switch (tp->cmd_repeat) {
case KDB_REPEAT_NONE:
argc = 0;
if (argv[0])
*(argv[0]) = '\0';
break;
case KDB_REPEAT_NO_ARGS:
argc = 1;
if (argv[1])
*(argv[1]) = '\0';
break;
case KDB_REPEAT_WITH_ARGS:
break;
}
return result;
}
/*
* If the input with which we were presented does not
* map to an existing command, attempt to parse it as an
* address argument and display the result. Useful for
* obtaining the address of a variable, or the nearest symbol
* to an address contained in a register.
*/
{
unsigned long value;
char *name = NULL;
long offset;
int nextarg = 0;
if (kdbgetaddrarg(0, (const char **)argv, &nextarg,
&value, &offset, &name)) {
return KDB_NOTFOUND;
}
kdb_printf("%s = ", argv[0]);
kdb_symbol_print(value, NULL, KDB_SP_DEFAULT);
kdb_printf("\n");
return 0;
}
}
static int handle_ctrl_cmd(char *cmd)
{
#define CTRL_P 16
#define CTRL_N 14
/* initial situation */
if (cmd_head == cmd_tail)
return 0;
switch (*cmd) {
case CTRL_P:
if (cmdptr != cmd_tail)
cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT;
strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
return 1;
case CTRL_N:
if (cmdptr != cmd_head)
cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT;
strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
return 1;
}
return 0;
}
/*
* kdb_reboot - This function implements the 'reboot' command. Reboot
* the system immediately, or loop forever on failure.
*/
static int kdb_reboot(int argc, const char **argv)
{
emergency_restart();
kdb_printf("Hmm, kdb_reboot did not reboot, spinning here\n");
while (1)
cpu_relax();
/* NOTREACHED */
return 0;
}
static void kdb_dumpregs(struct pt_regs *regs)
{
int old_lvl = console_loglevel;
console_loglevel = 15;
kdb_trap_printk++;
show_regs(regs);
kdb_trap_printk--;
kdb_printf("\n");
console_loglevel = old_lvl;
}
void kdb_set_current_task(struct task_struct *p)
{
kdb_current_task = p;
if (kdb_task_has_cpu(p)) {
kdb_current_regs = KDB_TSKREGS(kdb_process_cpu(p));
return;
}
kdb_current_regs = NULL;
}
/*
* kdb_local - The main code for kdb. This routine is invoked on a
* specific processor, it is not global. The main kdb() routine
* ensures that only one processor at a time is in this routine.
* This code is called with the real reason code on the first
* entry to a kdb session, thereafter it is called with reason
* SWITCH, even if the user goes back to the original cpu.
* Inputs:
* reason The reason KDB was invoked
* error The hardware-defined error code
* regs The exception frame at time of fault/breakpoint.
* db_result Result code from the break or debug point.
* Returns:
* 0 KDB was invoked for an event for which it was not responsible
* 1 KDB handled the event for which it was invoked.
* KDB_CMD_GO User typed 'go'.
* KDB_CMD_CPU User switched to another cpu.
* KDB_CMD_SS Single step.
* KDB_CMD_SSB Single step until branch.
*/
static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
kdb_dbtrap_t db_result)
{
char *cmdbuf;
int diag;
struct task_struct *kdb_current =
kdb_curr_task(raw_smp_processor_id());
KDB_DEBUG_STATE("kdb_local 1", reason);
kdb_go_count = 0;
if (reason == KDB_REASON_DEBUG) {
/* special case below */
} else {
kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
kdb_current, kdb_current ? kdb_current->pid : 0);
#if defined(CONFIG_SMP)
kdb_printf("on processor %d ", raw_smp_processor_id());
#endif
}
switch (reason) {
case KDB_REASON_DEBUG:
{
/*
* If re-entering kdb after a single step
* command, don't print the message.
*/
switch (db_result) {
case KDB_DB_BPT:
kdb_printf("\nEntering kdb (0x%p, pid %d) ",
kdb_current, kdb_current->pid);
#if defined(CONFIG_SMP)
kdb_printf("on processor %d ", raw_smp_processor_id());
#endif
kdb_printf("due to Debug @ " kdb_machreg_fmt "\n",
instruction_pointer(regs));
break;
case KDB_DB_SSB:
/*
* In the midst of ssb command. Just return.
*/
KDB_DEBUG_STATE("kdb_local 3", reason);
return KDB_CMD_SSB; /* Continue with SSB command */
break;
case KDB_DB_SS:
break;
case KDB_DB_SSBPT:
KDB_DEBUG_STATE("kdb_local 4", reason);
return 1; /* kdba_db_trap did the work */
default:
kdb_printf("kdb: Bad result from kdba_db_trap: %d\n",
db_result);
break;
}
}
break;
case KDB_REASON_ENTER:
if (KDB_STATE(KEYBOARD))
kdb_printf("due to Keyboard Entry\n");
else
kdb_printf("due to KDB_ENTER()\n");
break;
case KDB_REASON_KEYBOARD:
KDB_STATE_SET(KEYBOARD);
kdb_printf("due to Keyboard Entry\n");
break;
case KDB_REASON_ENTER_SLAVE:
/* drop through, slaves only get released via cpu switch */
case KDB_REASON_SWITCH:
kdb_printf("due to cpu switch\n");
break;
case KDB_REASON_OOPS:
kdb_printf("Oops: %s\n", kdb_diemsg);
kdb_printf("due to oops @ " kdb_machreg_fmt "\n",
instruction_pointer(regs));
kdb_dumpregs(regs);
break;
case KDB_REASON_NMI:
kdb_printf("due to NonMaskable Interrupt @ "
kdb_machreg_fmt "\n",
instruction_pointer(regs));
kdb_dumpregs(regs);
break;
case KDB_REASON_SSTEP:
case KDB_REASON_BREAK:
kdb_printf("due to %s @ " kdb_machreg_fmt "\n",
reason == KDB_REASON_BREAK ?
"Breakpoint" : "SS trap", instruction_pointer(regs));
/*
* Determine if this breakpoint is one that we
* are interested in.
*/
if (db_result != KDB_DB_BPT) {
kdb_printf("kdb: error return from kdba_bp_trap: %d\n",
db_result);
KDB_DEBUG_STATE("kdb_local 6", reason);
return 0; /* Not for us, dismiss it */
}
break;
case KDB_REASON_RECURSE:
kdb_printf("due to Recursion @ " kdb_machreg_fmt "\n",
instruction_pointer(regs));
break;
default:
kdb_printf("kdb: unexpected reason code: %d\n", reason);
KDB_DEBUG_STATE("kdb_local 8", reason);
return 0; /* Not for us, dismiss it */
}
while (1) {
/*
* Initialize pager context.
*/
kdb_nextline = 1;
KDB_STATE_CLEAR(SUPPRESS);
cmdbuf = cmd_cur;
*cmdbuf = '\0';
*(cmd_hist[cmd_head]) = '\0';
if (KDB_FLAG(ONLY_DO_DUMP)) {
/* kdb is off but a catastrophic error requires a dump.
* Take the dump and reboot.
* Turn on logging so the kdb output appears in the log
* buffer in the dump.
*/
const char *setargs[] = { "set", "LOGGING", "1" };
kdb_set(2, setargs);
kdb_reboot(0, NULL);
/*NOTREACHED*/
}
do_full_getstr:
#if defined(CONFIG_SMP)
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
raw_smp_processor_id());
#else
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"));
#endif
if (defcmd_in_progress)
strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
/*
* Fetch command from keyboard
*/
cmdbuf = kdb_getstr(cmdbuf, CMD_BUFLEN, kdb_prompt_str);
if (*cmdbuf != '\n') {
if (*cmdbuf < 32) {
if (cmdptr == cmd_head) {
strncpy(cmd_hist[cmd_head], cmd_cur,
CMD_BUFLEN);
*(cmd_hist[cmd_head] +
strlen(cmd_hist[cmd_head])-1) = '\0';
}
if (!handle_ctrl_cmd(cmdbuf))
*(cmd_cur+strlen(cmd_cur)-1) = '\0';
cmdbuf = cmd_cur;
goto do_full_getstr;
} else {
strncpy(cmd_hist[cmd_head], cmd_cur,
CMD_BUFLEN);
}
cmd_head = (cmd_head+1) % KDB_CMD_HISTORY_COUNT;
if (cmd_head == cmd_tail)
cmd_tail = (cmd_tail+1) % KDB_CMD_HISTORY_COUNT;
}
cmdptr = cmd_head;
diag = kdb_parse(cmdbuf);
if (diag == KDB_NOTFOUND) {
kdb_printf("Unknown kdb command: '%s'\n", cmdbuf);
diag = 0;
}
if (diag == KDB_CMD_GO
|| diag == KDB_CMD_CPU
|| diag == KDB_CMD_SS
|| diag == KDB_CMD_SSB
|| diag == KDB_CMD_KGDB)
break;
if (diag)
kdb_cmderror(diag);
}
KDB_DEBUG_STATE("kdb_local 9", diag);
return diag;
}
/*
* kdb_print_state - Print the state data for the current processor
* for debugging.
* Inputs:
* text Identifies the debug point
* value Any integer value to be printed, e.g. reason code.
*/
void kdb_print_state(const char *text, int value)
{
kdb_printf("state: %s cpu %d value %d initial %d state %x\n",
text, raw_smp_processor_id(), value, kdb_initial_cpu,
kdb_state);
}
/*
* kdb_main_loop - After initial setup and assignment of the
* controlling cpu, all cpus are in this loop. One cpu is in
* control and will issue the kdb prompt, the others will spin
* until 'go' or cpu switch.
*
* To get a consistent view of the kernel stacks for all
* processes, this routine is invoked from the main kdb code via
* an architecture specific routine. kdba_main_loop is
* responsible for making the kernel stacks consistent for all
* processes, there should be no difference between a blocked
* process and a running process as far as kdb is concerned.
* Inputs:
* reason The reason KDB was invoked
* error The hardware-defined error code
* reason2 kdb's current reason code.
* Initially error but can change
* according to kdb state.
* db_result Result code from break or debug point.
* regs The exception frame at time of fault/breakpoint.
* should always be valid.
* Returns:
* 0 KDB was invoked for an event for which it was not responsible
* 1 KDB handled the event for which it was invoked.
*/
int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
kdb_dbtrap_t db_result, struct pt_regs *regs)
{
int result = 1;
/* Stay in kdb() until 'go', 'ss[b]' or an error */
while (1) {
/*
* All processors except the one that is in control
* will spin here.
*/
KDB_DEBUG_STATE("kdb_main_loop 1", reason);
while (KDB_STATE(HOLD_CPU)) {
/* state KDB is turned off by kdb_cpu to see if the
* other cpus are still live, each cpu in this loop
* turns it back on.
*/
if (!KDB_STATE(KDB))
KDB_STATE_SET(KDB);
}
KDB_STATE_CLEAR(SUPPRESS);
KDB_DEBUG_STATE("kdb_main_loop 2", reason);
if (KDB_STATE(LEAVING))
break; /* Another cpu said 'go' */
/* Still using kdb, this processor is in control */
result = kdb_local(reason2, error, regs, db_result);
KDB_DEBUG_STATE("kdb_main_loop 3", result);
if (result == KDB_CMD_CPU)
break;
if (result == KDB_CMD_SS) {
KDB_STATE_SET(DOING_SS);
break;
}
if (result == KDB_CMD_SSB) {
KDB_STATE_SET(DOING_SS);
KDB_STATE_SET(DOING_SSB);
break;
}
if (result == KDB_CMD_KGDB) {
if (!KDB_STATE(DOING_KGDB))
kdb_printf("Entering please attach debugger "
"or use $D#44+ or $3#33\n");
break;
}
if (result && result != 1 && result != KDB_CMD_GO)
kdb_printf("\nUnexpected kdb_local return code %d\n",
result);
KDB_DEBUG_STATE("kdb_main_loop 4", reason);
break;
}
if (KDB_STATE(DOING_SS))
KDB_STATE_CLEAR(SSBPT);
/* Clean up any keyboard devices before leaving */
kdb_kbd_cleanup_state();
return result;
}
/*
* kdb_mdr - This function implements the guts of the 'mdr', memory
* read command.
* mdr <addr arg>,<byte count>
* Inputs:
* addr Start address
* count Number of bytes
* Returns:
* Always 0. Any errors are detected and printed by kdb_getarea.
*/
static int kdb_mdr(unsigned long addr, unsigned int count)
{
unsigned char c;
while (count--) {
if (kdb_getarea(c, addr))
return 0;
kdb_printf("%02x", c);
addr++;
}
kdb_printf("\n");
return 0;
}
/*
* kdb_md - This function implements the 'md', 'md1', 'md2', 'md4',
* 'md8' 'mdr' and 'mds' commands.
*
* md|mds [<addr arg> [<line count> [<radix>]]]
* mdWcN [<addr arg> [<line count> [<radix>]]]
* where W is the width (1, 2, 4 or 8) and N is the count,
* e.g., md1c20 reads 20 bytes, 1 at a time.
* mdr <addr arg>,<byte count>
*/
static void kdb_md_line(const char *fmtstr, unsigned long addr,
int symbolic, int nosect, int bytesperword,
int num, int repeat, int phys)
{
/* print just one line of data */
kdb_symtab_t symtab;
char cbuf[32];
char *c = cbuf;
int i;
unsigned long word;
memset(cbuf, '\0', sizeof(cbuf));
if (phys)
kdb_printf("phys " kdb_machreg_fmt0 " ", addr);
else
kdb_printf(kdb_machreg_fmt0 " ", addr);
for (i = 0; i < num && repeat--; i++) {
if (phys) {
if (kdb_getphysword(&word, addr, bytesperword))
break;
} else if (kdb_getword(&word, addr, bytesperword))
break;
kdb_printf(fmtstr, word);
if (symbolic)
kdbnearsym(word, &symtab);
else
memset(&symtab, 0, sizeof(symtab));
if (symtab.sym_name) {
kdb_symbol_print(word, &symtab, 0);
if (!nosect) {
kdb_printf("\n");
kdb_printf(" %s %s "
kdb_machreg_fmt " "
kdb_machreg_fmt " "
kdb_machreg_fmt, symtab.mod_name,
symtab.sec_name, symtab.sec_start,
symtab.sym_start, symtab.sym_end);
}
addr += bytesperword;
} else {
union {
u64 word;
unsigned char c[8];
} wc;
unsigned char *cp;
#ifdef __BIG_ENDIAN
cp = wc.c + 8 - bytesperword;
#else
cp = wc.c;
#endif
wc.word = word;
#define printable_char(c) \
({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
switch (bytesperword) {
case 8:
*c++ = printable_char(*cp++);
*c++ = printable_char(*cp++);
*c++ = printable_char(*cp++);
*c++ = printable_char(*cp++);
addr += 4;
case 4:
*c++ = printable_char(*cp++);
*c++ = printable_char(*cp++);
addr += 2;
case 2:
*c++ = printable_char(*cp++);
addr++;
case 1:
*c++ = printable_char(*cp++);
addr++;
break;
}
#undef printable_char
}
}
kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1),
" ", cbuf);
}
static int kdb_md(int argc, const char **argv)
{
static unsigned long last_addr;
static int last_radix, last_bytesperword, last_repeat;
int radix = 16, mdcount = 8, bytesperword = KDB_WORD_SIZE, repeat;
int nosect = 0;
char fmtchar, fmtstr[64];
unsigned long addr;
unsigned long word;
long offset = 0;
int symbolic = 0;
int valid = 0;
int phys = 0;
kdbgetintenv("MDCOUNT", &mdcount);
kdbgetintenv("RADIX", &radix);
kdbgetintenv("BYTESPERWORD", &bytesperword);
/* Assume 'md <addr>' and start with environment values */
repeat = mdcount * 16 / bytesperword;
if (strcmp(argv[0], "mdr") == 0) {
if (argc != 2)
return KDB_ARGCOUNT;
valid = 1;
} else if (isdigit(argv[0][2])) {
bytesperword = (int)(argv[0][2] - '0');
if (bytesperword == 0) {
bytesperword = last_bytesperword;
if (bytesperword == 0)
bytesperword = 4;
}
last_bytesperword = bytesperword;
repeat = mdcount * 16 / bytesperword;
if (!argv[0][3])
valid = 1;
else if (argv[0][3] == 'c' && argv[0][4]) {
char *p;
repeat = simple_strtoul(argv[0] + 4, &p, 10);
mdcount = ((repeat * bytesperword) + 15) / 16;
valid = !*p;
}
last_repeat = repeat;
} else if (strcmp(argv[0], "md") == 0)
valid = 1;
else if (strcmp(argv[0], "mds") == 0)
valid = 1;
else if (strcmp(argv[0], "mdp") == 0) {
phys = valid = 1;
}
if (!valid)
return KDB_NOTFOUND;
if (argc == 0) {
if (last_addr == 0)
return KDB_ARGCOUNT;
addr = last_addr;
radix = last_radix;
bytesperword = last_bytesperword;
repeat = last_repeat;
mdcount = ((repeat * bytesperword) + 15) / 16;
}
if (argc) {
unsigned long val;
int diag, nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
&offset, NULL);
if (diag)
return diag;
if (argc > nextarg+2)
return KDB_ARGCOUNT;
if (argc >= nextarg) {
diag = kdbgetularg(argv[nextarg], &val);
if (!diag) {
mdcount = (int) val;
repeat = mdcount * 16 / bytesperword;
}
}
if (argc >= nextarg+1) {
diag = kdbgetularg(argv[nextarg+1], &val);
if (!diag)
radix = (int) val;
}
}
if (strcmp(argv[0], "mdr") == 0)
return kdb_mdr(addr, mdcount);
switch (radix) {
case 10:
fmtchar = 'd';
break;
case 16:
fmtchar = 'x';
break;
case 8:
fmtchar = 'o';
break;
default:
return KDB_BADRADIX;
}
last_radix = radix;
if (bytesperword > KDB_WORD_SIZE)
return KDB_BADWIDTH;
switch (bytesperword) {
case 8:
sprintf(fmtstr, "%%16.16l%c ", fmtchar);
break;
case 4:
sprintf(fmtstr, "%%8.8l%c ", fmtchar);
break;
case 2:
sprintf(fmtstr, "%%4.4l%c ", fmtchar);
break;
case 1:
sprintf(fmtstr, "%%2.2l%c ", fmtchar);
break;
default:
return KDB_BADWIDTH;
}
last_repeat = repeat;
last_bytesperword = bytesperword;
if (strcmp(argv[0], "mds") == 0) {
symbolic = 1;
/* Do not save these changes as last_*, they are temporary mds
* overrides.
*/
bytesperword = KDB_WORD_SIZE;
repeat = mdcount;
kdbgetintenv("NOSECT", &nosect);
}
/* Round address down modulo BYTESPERWORD */
addr &= ~(bytesperword-1);
while (repeat > 0) {
unsigned long a;
int n, z, num = (symbolic ? 1 : (16 / bytesperword));
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
for (a = addr, z = 0; z < repeat; a += bytesperword, ++z) {
if (phys) {
if (kdb_getphysword(&word, a, bytesperword)
|| word)
break;
} else if (kdb_getword(&word, a, bytesperword) || word)
break;
}
n = min(num, repeat);
kdb_md_line(fmtstr, addr, symbolic, nosect, bytesperword,
num, repeat, phys);
addr += bytesperword * n;
repeat -= n;
z = (z + num - 1) / num;
if (z > 2) {
int s = num * (z-2);
kdb_printf(kdb_machreg_fmt0 "-" kdb_machreg_fmt0
" zero suppressed\n",
addr, addr + bytesperword * s - 1);
addr += bytesperword * s;
repeat -= s;
}
}
last_addr = addr;
return 0;
}
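/*
 * Example invocations of the md family (addresses are illustrative):
 *   kdb> md 0xc0100000        dump using the MDCOUNT/RADIX/BYTESPERWORD env values
 *   kdb> md8 0xc0100000       8-byte words
 *   kdb> md1c20 0xc0100000    20 bytes, one byte at a time
 *   kdb> mds schedule         one word per line with symbolic decode
 *   kdb> mdr 0xc0100000 16    16 raw bytes, no formatting
 */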
/*
* kdb_mm - This function implements the 'mm' command.
* mm address-expression new-value
* Remarks:
* mm works on machine words, mmW works on bytes.
*/
static int kdb_mm(int argc, const char **argv)
{
int diag;
unsigned long addr;
long offset = 0;
unsigned long contents;
int nextarg;
int width;
if (argv[0][2] && !isdigit(argv[0][2]))
return KDB_NOTFOUND;
if (argc < 2)
return KDB_ARGCOUNT;
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
if (diag)
return diag;
if (nextarg > argc)
return KDB_ARGCOUNT;
diag = kdbgetaddrarg(argc, argv, &nextarg, &contents, NULL, NULL);
if (diag)
return diag;
if (nextarg != argc + 1)
return KDB_ARGCOUNT;
width = argv[0][2] ? (argv[0][2] - '0') : (KDB_WORD_SIZE);
diag = kdb_putword(addr, contents, width);
if (diag)
return diag;
kdb_printf(kdb_machreg_fmt " = " kdb_machreg_fmt "\n", addr, contents);
return 0;
}
/*
* kdb_go - This function implements the 'go' command.
* go [address-expression]
*/
static int kdb_go(int argc, const char **argv)
{
unsigned long addr;
int diag;
int nextarg;
long offset;
if (raw_smp_processor_id() != kdb_initial_cpu) {
kdb_printf("go must execute on the entry cpu, "
"please use \"cpu %d\" and then execute go\n",
kdb_initial_cpu);
return KDB_BADCPUNUM;
}
if (argc == 1) {
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg,
&addr, &offset, NULL);
if (diag)
return diag;
} else if (argc) {
return KDB_ARGCOUNT;
}
diag = KDB_CMD_GO;
if (KDB_FLAG(CATASTROPHIC)) {
kdb_printf("Catastrophic error detected\n");
kdb_printf("kdb_continue_catastrophic=%d, ",
kdb_continue_catastrophic);
if (kdb_continue_catastrophic == 0 && kdb_go_count++ == 0) {
kdb_printf("type go a second time if you really want "
"to continue\n");
return 0;
}
if (kdb_continue_catastrophic == 2) {
kdb_printf("forcing reboot\n");
kdb_reboot(0, NULL);
}
kdb_printf("attempting to continue\n");
}
return diag;
}
/*
* kdb_rd - This function implements the 'rd' command.
*/
static int kdb_rd(int argc, const char **argv)
{
int len = kdb_check_regs();
#if DBG_MAX_REG_NUM > 0
int i;
char *rname;
int rsize;
u64 reg64;
u32 reg32;
u16 reg16;
u8 reg8;
if (len)
return len;
for (i = 0; i < DBG_MAX_REG_NUM; i++) {
rsize = dbg_reg_def[i].size * 2;
if (rsize > 16)
rsize = 2;
if (len + strlen(dbg_reg_def[i].name) + 4 + rsize > 80) {
len = 0;
kdb_printf("\n");
}
if (len)
len += kdb_printf(" ");
switch(dbg_reg_def[i].size * 8) {
case 8:
rname = dbg_get_reg(i, &reg8, kdb_current_regs);
if (!rname)
break;
len += kdb_printf("%s: %02x", rname, reg8);
break;
case 16:
rname = dbg_get_reg(i, &reg16, kdb_current_regs);
if (!rname)
break;
len += kdb_printf("%s: %04x", rname, reg16);
break;
case 32:
rname = dbg_get_reg(i, &reg32, kdb_current_regs);
if (!rname)
break;
len += kdb_printf("%s: %08x", rname, reg32);
break;
case 64:
rname = dbg_get_reg(i, &reg64, kdb_current_regs);
if (!rname)
break;
len += kdb_printf("%s: %016llx", rname, reg64);
break;
default:
len += kdb_printf("%s: ??", dbg_reg_def[i].name);
}
}
kdb_printf("\n");
#else
if (len)
return len;
kdb_dumpregs(kdb_current_regs);
#endif
return 0;
}
/*
* kdb_rm - This function implements the 'rm' (register modify) command.
* rm register-name new-contents
* Remarks:
* Allows register modification with the same restrictions as gdb
*/
static int kdb_rm(int argc, const char **argv)
{
#if DBG_MAX_REG_NUM > 0
int diag;
const char *rname;
int i;
u64 reg64;
u32 reg32;
u16 reg16;
u8 reg8;
if (argc != 2)
return KDB_ARGCOUNT;
/*
* Allow presence or absence of leading '%' symbol.
*/
rname = argv[1];
if (*rname == '%')
rname++;
diag = kdbgetu64arg(argv[2], &reg64);
if (diag)
return diag;
diag = kdb_check_regs();
if (diag)
return diag;
diag = KDB_BADREG;
for (i = 0; i < DBG_MAX_REG_NUM; i++) {
if (strcmp(rname, dbg_reg_def[i].name) == 0) {
diag = 0;
break;
}
}
if (!diag) {
switch(dbg_reg_def[i].size * 8) {
case 8:
reg8 = reg64;
dbg_set_reg(i, &reg8, kdb_current_regs);
break;
case 16:
reg16 = reg64;
dbg_set_reg(i, &reg16, kdb_current_regs);
break;
case 32:
reg32 = reg64;
dbg_set_reg(i, &reg32, kdb_current_regs);
break;
case 64:
dbg_set_reg(i, &reg64, kdb_current_regs);
break;
}
}
return diag;
#else
kdb_printf("ERROR: Register set currently not implemented\n");
return 0;
#endif
}
#if defined(CONFIG_MAGIC_SYSRQ)
/*
* kdb_sr - This function implements the 'sr' (SYSRQ key) command
* which interfaces to the soi-disant MAGIC SYSRQ functionality.
* sr <magic-sysrq-code>
*/
static int kdb_sr(int argc, const char **argv)
{
if (argc != 1)
return KDB_ARGCOUNT;
kdb_trap_printk++;
__handle_sysrq(*argv[1], false);
kdb_trap_printk--;
return 0;
}
#endif /* CONFIG_MAGIC_SYSRQ */
/*
* kdb_ef - This function implements the 'ef' (display exception
* frame) command. This command takes an address and expects to
* find an exception frame at that address, formats and prints
* it.
* ef address-expression
* Remarks:
* Not done yet.
*/
static int kdb_ef(int argc, const char **argv)
{
int diag;
unsigned long addr;
long offset;
int nextarg;
if (argc != 1)
return KDB_ARGCOUNT;
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
if (diag)
return diag;
show_regs((struct pt_regs *)addr);
return 0;
}
#if defined(CONFIG_MODULES)
/*
* kdb_lsmod - This function implements the 'lsmod' command. Lists
* currently loaded kernel modules.
* Mostly taken from userland lsmod.
*/
static int kdb_lsmod(int argc, const char **argv)
{
struct module *mod;
if (argc != 0)
return KDB_ARGCOUNT;
kdb_printf("Module Size modstruct Used by\n");
list_for_each_entry(mod, kdb_modules, list) {
kdb_printf("%-20s%8u 0x%p ", mod->name,
mod->core_size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
kdb_printf("%4ld ", module_refcount(mod));
#endif
if (mod->state == MODULE_STATE_GOING)
kdb_printf(" (Unloading)");
else if (mod->state == MODULE_STATE_COMING)
kdb_printf(" (Loading)");
else
kdb_printf(" (Live)");
kdb_printf(" 0x%p", mod->module_core);
#ifdef CONFIG_MODULE_UNLOAD
{
struct module_use *use;
kdb_printf(" [ ");
list_for_each_entry(use, &mod->source_list,
source_list)
kdb_printf("%s ", use->target->name);
kdb_printf("]\n");
}
#endif
}
return 0;
}
#endif /* CONFIG_MODULES */
/*
* kdb_env - This function implements the 'env' command. Display the
* current environment variables.
*/
static int kdb_env(int argc, const char **argv)
{
int i;
for (i = 0; i < __nenv; i++) {
if (__env[i])
kdb_printf("%s\n", __env[i]);
}
if (KDB_DEBUG(MASK))
kdb_printf("KDBFLAGS=0x%x\n", kdb_flags);
return 0;
}
#ifdef CONFIG_PRINTK
/*
* kdb_dmesg - This function implements the 'dmesg' command to display
* the contents of the syslog buffer.
* dmesg [lines] [adjust]
*/
static int kdb_dmesg(int argc, const char **argv)
{
char *syslog_data[4], *start, *end, c = '\0', *p;
int diag, logging, logsize, lines = 0, adjust = 0, n;
if (argc > 2)
return KDB_ARGCOUNT;
if (argc) {
char *cp;
lines = simple_strtol(argv[1], &cp, 0);
if (*cp)
lines = 0;
if (argc > 1) {
adjust = simple_strtoul(argv[2], &cp, 0);
if (*cp || adjust < 0)
adjust = 0;
}
}
/* disable LOGGING if set */
diag = kdbgetintenv("LOGGING", &logging);
if (!diag && logging) {
const char *setargs[] = { "set", "LOGGING", "0" };
kdb_set(2, setargs);
}
/* syslog_data[0,1] physical start, end+1. syslog_data[2,3]
* logical start, end+1. */
kdb_syslog_data(syslog_data);
if (syslog_data[2] == syslog_data[3])
return 0;
logsize = syslog_data[1] - syslog_data[0];
start = syslog_data[2];
end = syslog_data[3];
#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
for (n = 0, p = start; p < end; ++p) {
c = *KDB_WRAP(p);
if (c == '\n')
++n;
}
if (c != '\n')
++n;
if (lines < 0) {
if (adjust >= n)
kdb_printf("buffer only contains %d lines, nothing "
"printed\n", n);
else if (adjust - lines >= n)
kdb_printf("buffer only contains %d lines, last %d "
"lines printed\n", n, n - adjust);
if (adjust) {
for (; start < end && adjust; ++start) {
if (*KDB_WRAP(start) == '\n')
--adjust;
}
if (start < end)
++start;
}
for (p = start; p < end && lines; ++p) {
if (*KDB_WRAP(p) == '\n')
++lines;
}
end = p;
} else if (lines > 0) {
int skip = n - (adjust + lines);
if (adjust >= n) {
kdb_printf("buffer only contains %d lines, "
"nothing printed\n", n);
skip = n;
} else if (skip < 0) {
lines += skip;
skip = 0;
kdb_printf("buffer only contains %d lines, first "
"%d lines printed\n", n, lines);
}
for (; start < end && skip; ++start) {
if (*KDB_WRAP(start) == '\n')
--skip;
}
for (p = start; p < end && lines; ++p) {
if (*KDB_WRAP(p) == '\n')
--lines;
}
end = p;
}
/* Do a line at a time (max 200 chars) to reduce protocol overhead */
c = '\n';
while (start != end) {
char buf[201];
p = buf;
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
while (start < end && (c = *KDB_WRAP(start)) &&
(p - buf) < sizeof(buf)-1) {
++start;
*p++ = c;
if (c == '\n')
break;
}
*p = '\0';
kdb_printf("%s", buf);
}
if (c != '\n')
kdb_printf("\n");
return 0;
}
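/*
 * Example dmesg invocations (see the [lines] [adjust] handling above):
 *   kdb> dmesg            whole buffer
 *   kdb> dmesg 10         roughly the last 10 lines
 *   kdb> dmesg 10 100     10 lines, ending 100 lines back from the end
 *   kdb> dmesg -10        roughly the first 10 lines
 */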
#endif /* CONFIG_PRINTK */
/*
* kdb_cpu - This function implements the 'cpu' command.
* cpu [<cpunum>]
* Returns:
* KDB_CMD_CPU for success, a kdb diagnostic if error
*/
static void kdb_cpu_status(void)
{
int i, start_cpu, first_print = 1;
char state, prev_state = '?';
kdb_printf("Currently on cpu %d\n", raw_smp_processor_id());
kdb_printf("Available cpus: ");
for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i)) {
state = 'F'; /* cpu is offline */
} else {
state = ' '; /* cpu is responding to kdb */
if (kdb_task_state_char(KDB_TSK(i)) == 'I')
state = 'I'; /* idle task */
}
if (state != prev_state) {
if (prev_state != '?') {
if (!first_print)
kdb_printf(", ");
first_print = 0;
kdb_printf("%d", start_cpu);
if (start_cpu < i-1)
kdb_printf("-%d", i-1);
if (prev_state != ' ')
kdb_printf("(%c)", prev_state);
}
prev_state = state;
start_cpu = i;
}
}
/* print the trailing cpus, ignoring them if they are all offline */
if (prev_state != 'F') {
if (!first_print)
kdb_printf(", ");
kdb_printf("%d", start_cpu);
if (start_cpu < i-1)
kdb_printf("-%d", i-1);
if (prev_state != ' ')
kdb_printf("(%c)", prev_state);
}
kdb_printf("\n");
}
static int kdb_cpu(int argc, const char **argv)
{
unsigned long cpunum;
int diag;
if (argc == 0) {
kdb_cpu_status();
return 0;
}
if (argc != 1)
return KDB_ARGCOUNT;
diag = kdbgetularg(argv[1], &cpunum);
if (diag)
return diag;
/*
* Validate cpunum
*/
if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
return KDB_BADCPUNUM;
dbg_switch_cpu = cpunum;
/*
* Switch to other cpu
*/
return KDB_CMD_CPU;
}
/* The user may not realize that ps/bta with no parameters does not print idle
* or sleeping system daemon processes, so tell them how many were suppressed.
*/
void kdb_ps_suppressed(void)
{
int idle = 0, daemon = 0;
unsigned long mask_I = kdb_task_state_string("I"),
mask_M = kdb_task_state_string("M");
unsigned long cpu;
const struct task_struct *p, *g;
for_each_online_cpu(cpu) {
p = kdb_curr_task(cpu);
if (kdb_task_state(p, mask_I))
++idle;
}
kdb_do_each_thread(g, p) {
if (kdb_task_state(p, mask_M))
++daemon;
} kdb_while_each_thread(g, p);
if (idle || daemon) {
if (idle)
kdb_printf("%d idle process%s (state I)%s\n",
idle, idle == 1 ? "" : "es",
daemon ? " and " : "");
if (daemon)
kdb_printf("%d sleeping system daemon (state M) "
"process%s", daemon,
daemon == 1 ? "" : "es");
kdb_printf(" suppressed,\nuse 'ps A' to see all.\n");
}
}
/*
* kdb_ps - This function implements the 'ps' command which shows a
* list of the active processes.
* ps [DRSTCZEUIMA] All processes, optionally filtered by state
*/
void kdb_ps1(const struct task_struct *p)
{
int cpu;
unsigned long tmp;
if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
return;
cpu = kdb_process_cpu(p);
kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n",
(void *)p, p->pid, p->parent->pid,
kdb_task_has_cpu(p), kdb_process_cpu(p),
kdb_task_state_char(p),
(void *)(&p->thread),
p == kdb_curr_task(raw_smp_processor_id()) ? '*' : ' ',
p->comm);
if (kdb_task_has_cpu(p)) {
if (!KDB_TSK(cpu)) {
kdb_printf(" Error: no saved data for this cpu\n");
} else {
if (KDB_TSK(cpu) != p)
kdb_printf(" Error: does not match running "
"process table (0x%p)\n", KDB_TSK(cpu));
}
}
}
static int kdb_ps(int argc, const char **argv)
{
struct task_struct *g, *p;
unsigned long mask, cpu;
if (argc == 0)
kdb_ps_suppressed();
kdb_printf("%-*s Pid Parent [*] cpu State %-*s Command\n",
(int)(2*sizeof(void *))+2, "Task Addr",
(int)(2*sizeof(void *))+2, "Thread");
mask = kdb_task_state_string(argc ? argv[1] : NULL);
/* Run the active tasks first */
for_each_online_cpu(cpu) {
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
p = kdb_curr_task(cpu);
if (kdb_task_state(p, mask))
kdb_ps1(p);
}
kdb_printf("\n");
/* Now the real tasks */
kdb_do_each_thread(g, p) {
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
if (kdb_task_state(p, mask))
kdb_ps1(p);
} kdb_while_each_thread(g, p);
return 0;
}
/*
* kdb_pid - This function implements the 'pid' command which switches
* the currently active process.
* pid [<pid> | R]
*/
static int kdb_pid(int argc, const char **argv)
{
struct task_struct *p;
unsigned long val;
int diag;
if (argc > 1)
return KDB_ARGCOUNT;
if (argc) {
if (strcmp(argv[1], "R") == 0) {
p = KDB_TSK(kdb_initial_cpu);
} else {
diag = kdbgetularg(argv[1], &val);
if (diag)
return KDB_BADINT;
p = find_task_by_pid_ns((pid_t)val, &init_pid_ns);
if (!p) {
kdb_printf("No task with pid=%d\n", (pid_t)val);
return 0;
}
}
kdb_set_current_task(p);
}
kdb_printf("KDB current process is %s(pid=%d)\n",
kdb_current_task->comm,
kdb_current_task->pid);
return 0;
}
/*
* kdb_ll - This function implements the 'll' command which follows a
* linked list and executes an arbitrary command for each
* element.
*/
static int kdb_ll(int argc, const char **argv)
{
int diag = 0;
unsigned long addr;
long offset = 0;
unsigned long va;
unsigned long linkoffset;
int nextarg;
const char *command;
if (argc != 3)
return KDB_ARGCOUNT;
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
if (diag)
return diag;
diag = kdbgetularg(argv[2], &linkoffset);
if (diag)
return diag;
/*
* Using the starting address as
* the first element in the list, and assuming that
* the list ends with a null pointer.
*/
va = addr;
command = kdb_strdup(argv[3], GFP_KDB);
if (!command) {
kdb_printf("%s: cannot duplicate command\n", __func__);
return 0;
}
/* Recursive use of kdb_parse, do not use argv after this point */
argv = NULL;
while (va) {
char buf[80];
if (KDB_FLAG(CMD_INTERRUPT))
goto out;
sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
diag = kdb_parse(buf);
if (diag)
goto out;
addr = va + linkoffset;
if (kdb_getword(&va, addr, sizeof(va)))
goto out;
}
out:
kfree(command);
return diag;
}
static int kdb_kgdb(int argc, const char **argv)
{
return KDB_CMD_KGDB;
}
/*
* kdb_help - This function implements the 'help' and '?' commands.
*/
static int kdb_help(int argc, const char **argv)
{
kdbtab_t *kt;
int i;
kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description");
kdb_printf("-----------------------------"
"-----------------------------\n");
for_each_kdbcmd(kt, i) {
if (kt->cmd_name)
kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name,
kt->cmd_usage, kt->cmd_help);
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
}
return 0;
}
/*
* kdb_kill - This function implements the 'kill' commands.
*/
static int kdb_kill(int argc, const char **argv)
{
long sig, pid;
char *endp;
struct task_struct *p;
struct siginfo info;
if (argc != 2)
return KDB_ARGCOUNT;
sig = simple_strtol(argv[1], &endp, 0);
if (*endp)
return KDB_BADINT;
if (sig >= 0) {
kdb_printf("Invalid signal parameter.<-signal>\n");
return 0;
}
sig = -sig;
pid = simple_strtol(argv[2], &endp, 0);
if (*endp)
return KDB_BADINT;
if (pid <= 0) {
kdb_printf("Process ID must be large than 0.\n");
return 0;
}
/* Find the process. */
p = find_task_by_pid_ns(pid, &init_pid_ns);
if (!p) {
kdb_printf("The specified process isn't found.\n");
return 0;
}
p = p->group_leader;
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_USER;
info.si_pid = pid; /* same capabilities as process being signalled */
info.si_uid = 0; /* kdb has root authority */
kdb_send_sig_info(p, &info);
return 0;
}
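/*
 * Example: send SIGKILL to pid 1234 (note the leading '-' on the signal
 * number, which is what the sign check above expects):
 *   kdb> kill -9 1234
 */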
struct kdb_tm {
int tm_sec; /* seconds */
int tm_min; /* minutes */
int tm_hour; /* hours */
int tm_mday; /* day of the month */
int tm_mon; /* month */
int tm_year; /* year */
};
static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
{
/* This will work from 1970-2099, 2100 is not a leap year */
static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31,
31, 30, 31, 30, 31 };
memset(tm, 0, sizeof(*tm));
tm->tm_sec = tv->tv_sec % (24 * 60 * 60);
tm->tm_mday = tv->tv_sec / (24 * 60 * 60) +
(2 * 365 + 1); /* shift base from 1970 to 1968 */
tm->tm_min = tm->tm_sec / 60 % 60;
tm->tm_hour = tm->tm_sec / 60 / 60;
tm->tm_sec = tm->tm_sec % 60;
tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1));
tm->tm_mday %= (4*365+1);
mon_day[1] = 29;
while (tm->tm_mday >= mon_day[tm->tm_mon]) {
tm->tm_mday -= mon_day[tm->tm_mon];
if (++tm->tm_mon == 12) {
tm->tm_mon = 0;
++tm->tm_year;
mon_day[1] = 28;
}
}
++tm->tm_mday;
}
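/*
 * Worked example of the conversion above: tv->tv_sec == 0 walks the month
 * table from the 1968 base and ends with tm_year 70, tm_mon 0, tm_mday 1,
 * i.e. 1970-01-01 00:00:00, which kdb_summary prints as 1900+70 = 1970.
 */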
/*
* Most of this code has been lifted from kernel/timer.c::sys_sysinfo().
* I cannot call that code directly from kdb, it has an unconditional
* cli()/sti() and calls routines that take locks which can stop the debugger.
*/
static void kdb_sysinfo(struct sysinfo *val)
{
struct timespec uptime;
do_posix_clock_monotonic_gettime(&uptime);
memset(val, 0, sizeof(*val));
val->uptime = uptime.tv_sec;
val->loads[0] = avenrun[0];
val->loads[1] = avenrun[1];
val->loads[2] = avenrun[2];
val->procs = nr_threads-1;
si_meminfo(val);
return;
}
/*
* kdb_summary - This function implements the 'summary' command.
*/
static int kdb_summary(int argc, const char **argv)
{
struct timespec now;
struct kdb_tm tm;
struct sysinfo val;
if (argc)
return KDB_ARGCOUNT;
kdb_printf("sysname %s\n", init_uts_ns.name.sysname);
kdb_printf("release %s\n", init_uts_ns.name.release);
kdb_printf("version %s\n", init_uts_ns.name.version);
kdb_printf("machine %s\n", init_uts_ns.name.machine);
kdb_printf("nodename %s\n", init_uts_ns.name.nodename);
kdb_printf("domainname %s\n", init_uts_ns.name.domainname);
kdb_printf("ccversion %s\n", __stringify(CCVERSION));
now = __current_kernel_time();
kdb_gmtime(&now, &tm);
kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d "
"tz_minuteswest %d\n",
1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec,
sys_tz.tz_minuteswest);
kdb_sysinfo(&val);
kdb_printf("uptime ");
if (val.uptime > (24*60*60)) {
int days = val.uptime / (24*60*60);
val.uptime %= (24*60*60);
kdb_printf("%d day%s ", days, days == 1 ? "" : "s");
}
kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);
/* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n",
LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),
LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),
LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));
#undef LOAD_INT
#undef LOAD_FRAC
/* Display in kilobytes */
#define K(x) ((x) << (PAGE_SHIFT - 10))
kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
"Buffers: %8lu kB\n",
val.totalram, val.freeram, val.bufferram);
return 0;
}
/*
* kdb_per_cpu - This function implements the 'per_cpu' command.
*/
static int kdb_per_cpu(int argc, const char **argv)
{
char fmtstr[64];
int cpu, diag, nextarg = 1;
unsigned long addr, symaddr, val, bytesperword = 0, whichcpu = ~0UL;
if (argc < 1 || argc > 3)
return KDB_ARGCOUNT;
diag = kdbgetaddrarg(argc, argv, &nextarg, &symaddr, NULL, NULL);
if (diag)
return diag;
if (argc >= 2) {
diag = kdbgetularg(argv[2], &bytesperword);
if (diag)
return diag;
}
if (!bytesperword)
bytesperword = KDB_WORD_SIZE;
else if (bytesperword > KDB_WORD_SIZE)
return KDB_BADWIDTH;
sprintf(fmtstr, "%%0%dlx ", (int)(2*bytesperword));
if (argc >= 3) {
diag = kdbgetularg(argv[3], &whichcpu);
if (diag)
return diag;
if (!cpu_online(whichcpu)) {
kdb_printf("cpu %ld is not online\n", whichcpu);
return KDB_BADCPUNUM;
}
}
/* Most architectures use __per_cpu_offset[cpu], some use
* __per_cpu_offset(cpu), smp has no __per_cpu_offset.
*/
#ifdef __per_cpu_offset
#define KDB_PCU(cpu) __per_cpu_offset(cpu)
#else
#ifdef CONFIG_SMP
#define KDB_PCU(cpu) __per_cpu_offset[cpu]
#else
#define KDB_PCU(cpu) 0
#endif
#endif
for_each_online_cpu(cpu) {
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
if (whichcpu != ~0UL && whichcpu != cpu)
continue;
addr = symaddr + KDB_PCU(cpu);
diag = kdb_getword(&val, addr, bytesperword);
if (diag) {
kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to "
"read, diag=%d\n", cpu, addr, diag);
continue;
}
kdb_printf("%5d ", cpu);
kdb_md_line(fmtstr, addr,
bytesperword == KDB_WORD_SIZE,
1, bytesperword, 1, 1, 0);
}
#undef KDB_PCU
return 0;
}
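/*
 * Example (the symbol name is illustrative, any per-cpu symbol works):
 * display 4 bytes of a per-cpu variable on every online cpu, or only on
 * cpu 2:
 *   kdb> per_cpu my_percpu_counter 4
 *   kdb> per_cpu my_percpu_counter 4 2
 */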
/*
* display help for the use of cmd | grep pattern
*/
static int kdb_grep_help(int argc, const char **argv)
{
kdb_printf("Usage of cmd args | grep pattern:\n");
kdb_printf(" Any command's output may be filtered through an ");
kdb_printf("emulated 'pipe'.\n");
kdb_printf(" 'grep' is just a key word.\n");
kdb_printf(" The pattern may include a very limited set of "
"metacharacters:\n");
kdb_printf(" pattern or ^pattern or pattern$ or ^pattern$\n");
kdb_printf(" And if there are spaces in the pattern, you may "
"quote it:\n");
kdb_printf(" \"pat tern\" or \"^pat tern\" or \"pat tern$\""
" or \"^pat tern$\"\n");
return 0;
}
/*
* kdb_register_repeat - This function is used to register a kernel
* debugger command.
* Inputs:
* cmd Command name
* func Function to execute the command
* usage A simple usage string showing arguments
* help A simple help string describing command
* repeat Does the command auto repeat on enter?
* Returns:
* zero for success, one if a duplicate command.
*/
#define kdb_command_extend 50 /* arbitrary */
int kdb_register_repeat(char *cmd,
kdb_func_t func,
char *usage,
char *help,
short minlen,
kdb_repeat_t repeat)
{
int i;
kdbtab_t *kp;
/*
* Brute force method to determine duplicates
*/
for_each_kdbcmd(kp, i) {
if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
kdb_printf("Duplicate kdb command registered: "
"%s, func %p help %s\n", cmd, func, help);
return 1;
}
}
/*
* Insert command into first available location in table
*/
for_each_kdbcmd(kp, i) {
if (kp->cmd_name == NULL)
break;
}
if (i >= kdb_max_commands) {
kdbtab_t *new = kmalloc((kdb_max_commands - KDB_BASE_CMD_MAX +
kdb_command_extend) * sizeof(*new), GFP_KDB);
if (!new) {
kdb_printf("Could not allocate new kdb_command "
"table\n");
return 1;
}
if (kdb_commands) {
memcpy(new, kdb_commands,
(kdb_max_commands - KDB_BASE_CMD_MAX) * sizeof(*new));
kfree(kdb_commands);
}
memset(new + kdb_max_commands, 0,
kdb_command_extend * sizeof(*new));
kdb_commands = new;
kp = kdb_commands + kdb_max_commands - KDB_BASE_CMD_MAX;
kdb_max_commands += kdb_command_extend;
}
kp->cmd_name = cmd;
kp->cmd_func = func;
kp->cmd_usage = usage;
kp->cmd_help = help;
kp->cmd_flags = 0;
kp->cmd_minlen = minlen;
kp->cmd_repeat = repeat;
return 0;
}
EXPORT_SYMBOL_GPL(kdb_register_repeat);
/*
* kdb_register - Compatibility register function for commands that do
* not need to specify a repeat state. Equivalent to
* kdb_register_repeat with KDB_REPEAT_NONE.
* Inputs:
* cmd Command name
* func Function to execute the command
* usage A simple usage string showing arguments
* help A simple help string describing command
* Returns:
* zero for success, one if a duplicate command.
*/
int kdb_register(char *cmd,
kdb_func_t func,
char *usage,
char *help,
short minlen)
{
return kdb_register_repeat(cmd, func, usage, help, minlen,
KDB_REPEAT_NONE);
}
EXPORT_SYMBOL_GPL(kdb_register);
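/*
 * A minimal sketch of how an external module might hook in its own command
 * through the registration API above; the command name "hello" and the
 * handler are illustrative only, and the block is compiled out.
 */
#if 0
static int kdb_hello(int argc, const char **argv)
{
/* argc counts arguments after the command name; argv[0] is "hello" */
if (argc > 1)
return KDB_ARGCOUNT;
kdb_printf("hello%s%s\n", argc ? " " : "", argc ? argv[1] : "");
return 0;
}
static int __init kdb_hello_register(void)
{
/* returns 0 on success, 1 if "hello" was already registered */
return kdb_register("hello", kdb_hello, "[text]",
"Print a greeting", 0);
}
#endif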
/*
* kdb_unregister - This function is used to unregister a kernel
* debugger command. It is generally called when a module which
* implements kdb commands is unloaded.
* Inputs:
* cmd Command name
* Returns:
* zero for success, one command not registered.
*/
int kdb_unregister(char *cmd)
{
int i;
kdbtab_t *kp;
/*
* find the command.
*/
for_each_kdbcmd(kp, i) {
if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
kp->cmd_name = NULL;
return 0;
}
}
/* Couldn't find it. */
return 1;
}
EXPORT_SYMBOL_GPL(kdb_unregister);
/* Initialize the kdb command table. */
static void __init kdb_inittab(void)
{
int i;
kdbtab_t *kp;
for_each_kdbcmd(kp, i)
kp->cmd_name = NULL;
kdb_register_repeat("md", kdb_md, "<vaddr>",
"Display Memory Contents, also mdWcN, e.g. md8c1", 1,
KDB_REPEAT_NO_ARGS);
kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
"Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
"Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("mds", kdb_md, "<vaddr>",
"Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
"Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("go", kdb_go, "[<vaddr>]",
"Continue Execution", 1, KDB_REPEAT_NONE);
kdb_register_repeat("rd", kdb_rd, "",
"Display Registers", 0, KDB_REPEAT_NONE);
kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
"Modify Registers", 0, KDB_REPEAT_NONE);
kdb_register_repeat("ef", kdb_ef, "<vaddr>",
"Display exception frame", 0, KDB_REPEAT_NONE);
kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
"Stack traceback", 1, KDB_REPEAT_NONE);
kdb_register_repeat("btp", kdb_bt, "<pid>",
"Display stack for process <pid>", 0, KDB_REPEAT_NONE);
kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]",
"Display stack all processes", 0, KDB_REPEAT_NONE);
kdb_register_repeat("btc", kdb_bt, "",
"Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
kdb_register_repeat("btt", kdb_bt, "<vaddr>",
"Backtrace process given its struct task address", 0,
KDB_REPEAT_NONE);
kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>",
"Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE);
kdb_register_repeat("env", kdb_env, "",
"Show environment variables", 0, KDB_REPEAT_NONE);
kdb_register_repeat("set", kdb_set, "",
"Set environment variables", 0, KDB_REPEAT_NONE);
kdb_register_repeat("help", kdb_help, "",
"Display Help Message", 1, KDB_REPEAT_NONE);
kdb_register_repeat("?", kdb_help, "",
"Display Help Message", 0, KDB_REPEAT_NONE);
kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
"Switch to new cpu", 0, KDB_REPEAT_NONE);
kdb_register_repeat("kgdb", kdb_kgdb, "",
"Enter kgdb mode", 0, KDB_REPEAT_NONE);
kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
"Display active task list", 0, KDB_REPEAT_NONE);
kdb_register_repeat("pid", kdb_pid, "<pidnum>",
"Switch to another task", 0, KDB_REPEAT_NONE);
kdb_register_repeat("reboot", kdb_reboot, "",
"Reboot the machine immediately", 0, KDB_REPEAT_NONE);
#if defined(CONFIG_MODULES)
kdb_register_repeat("lsmod", kdb_lsmod, "",
"List loaded kernel modules", 0, KDB_REPEAT_NONE);
#endif
#if defined(CONFIG_MAGIC_SYSRQ)
kdb_register_repeat("sr", kdb_sr, "<key>",
"Magic SysRq key", 0, KDB_REPEAT_NONE);
#endif
#if defined(CONFIG_PRINTK)
kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
"Display syslog buffer", 0, KDB_REPEAT_NONE);
#endif
kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
"Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
"Send a signal to a process", 0, KDB_REPEAT_NONE);
kdb_register_repeat("summary", kdb_summary, "",
"Summarize the system", 4, KDB_REPEAT_NONE);
kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
"Display per_cpu variables", 3, KDB_REPEAT_NONE);
kdb_register_repeat("grephelp", kdb_grep_help, "",
"Display help on | grep", 0, KDB_REPEAT_NONE);
}
/* Execute any commands defined in kdb_cmds. */
static void __init kdb_cmd_init(void)
{
int i, diag;
for (i = 0; kdb_cmds[i]; ++i) {
diag = kdb_parse(kdb_cmds[i]);
if (diag)
kdb_printf("kdb command %s failed, kdb diag %d\n",
kdb_cmds[i], diag);
}
if (defcmd_in_progress) {
kdb_printf("Incomplete 'defcmd' set, forcing endefcmd\n");
kdb_parse("endefcmd");
}
}
/* Initialize kdb_printf, breakpoint tables and kdb state */
void __init kdb_init(int lvl)
{
static int kdb_init_lvl = KDB_NOT_INITIALIZED;
int i;
if (kdb_init_lvl == KDB_INIT_FULL || lvl <= kdb_init_lvl)
return;
for (i = kdb_init_lvl; i < lvl; i++) {
switch (i) {
case KDB_NOT_INITIALIZED:
kdb_inittab(); /* Initialize Command Table */
kdb_initbptab(); /* Initialize Breakpoints */
break;
case KDB_INIT_EARLY:
kdb_cmd_init(); /* Build kdb_cmds tables */
break;
}
}
kdb_init_lvl = lvl;
}
| gpl-2.0 |
kaber/netlink-mmap | sound/soc/blackfin/bf5xx-tdm-pcm.c | 5076 | 9211 | /*
* File: sound/soc/blackfin/bf5xx-tdm-pcm.c
* Author: Barry Song <Barry.Song@analog.com>
*
* Created: Tue June 06 2009
* Description: DMA driver for tdm codec
*
* Modified:
* Copyright 2009 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <asm/dma.h>
#include "bf5xx-tdm-pcm.h"
#include "bf5xx-tdm.h"
#include "bf5xx-sport.h"
#define PCM_BUFFER_MAX 0x8000
#define FRAGMENT_SIZE_MIN (4*1024)
#define FRAGMENTS_MIN 2
#define FRAGMENTS_MAX 32
static void bf5xx_dma_irq(void *data)
{
struct snd_pcm_substream *pcm = data;
snd_pcm_period_elapsed(pcm);
}
static const struct snd_pcm_hardware bf5xx_pcm_hardware = {
.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_RESUME),
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 8,
.buffer_bytes_max = PCM_BUFFER_MAX,
.period_bytes_min = FRAGMENT_SIZE_MIN,
.period_bytes_max = PCM_BUFFER_MAX/2,
.periods_min = FRAGMENTS_MIN,
.periods_max = FRAGMENTS_MAX,
};
static int bf5xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
snd_pcm_lib_malloc_pages(substream, size * 4);
return 0;
}
static int bf5xx_pcm_hw_free(struct snd_pcm_substream *substream)
{
snd_pcm_lib_free_pages(substream);
return 0;
}
static int bf5xx_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
int fragsize_bytes = frames_to_bytes(runtime, runtime->period_size);
fragsize_bytes /= runtime->channels;
/* inflate the fragsize to match the dma width of SPORT */
fragsize_bytes *= 8;
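/* frames_to_bytes() counts only the channels the application opened, so
* dividing by channels leaves the bytes of one channel per period
* (4 bytes/sample) and multiplying by 8 gives the period size in the DMA
* buffer, which always carries all 8 TDM slots per frame (see
* bf5xx_pcm_copy() and bf5xx_pcm_pointer()).
*/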
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
sport_set_tx_callback(sport, bf5xx_dma_irq, substream);
sport_config_tx_dma(sport, runtime->dma_area,
runtime->periods, fragsize_bytes);
} else {
sport_set_rx_callback(sport, bf5xx_dma_irq, substream);
sport_config_rx_dma(sport, runtime->dma_area,
runtime->periods, fragsize_bytes);
}
return 0;
}
static int bf5xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
int ret = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
sport_tx_start(sport);
else
sport_rx_start(sport);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
sport_tx_stop(sport);
else
sport_rx_stop(sport);
break;
default:
ret = -EINVAL;
}
return ret;
}
static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
unsigned int diff;
snd_pcm_uframes_t frames;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
diff = sport_curr_offset_tx(sport);
frames = diff / (8*4); /* 32 bytes per frame */
} else {
diff = sport_curr_offset_rx(sport);
frames = diff / (8*4);
}
return frames;
}
static int bf5xx_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_dma_buffer *buf = &substream->dma_buffer;
int ret = 0;
snd_soc_set_runtime_hwparams(substream, &bf5xx_pcm_hardware);
ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0)
goto out;
if (sport_handle != NULL) {
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
sport_handle->tx_buf = buf->area;
else
sport_handle->rx_buf = buf->area;
runtime->private_data = sport_handle;
} else {
pr_err("sport_handle is NULL\n");
ret = -ENODEV;
}
out:
return ret;
}
static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel,
snd_pcm_uframes_t pos, void *buf, snd_pcm_uframes_t count)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
struct bf5xx_tdm_port *tdm_port = sport->private_data;
unsigned int *src;
unsigned int *dst;
int i;
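/* The DMA area is laid out as frames of eight 32-bit TDM slots. On
* playback the caller's interleaved samples are scattered into the slot
* positions listed in tdm_port->tx_map[]; on capture the slots selected
* by rx_map[] are gathered back into the caller's buffer.
*/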
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
src = buf;
dst = (unsigned int *)substream->runtime->dma_area;
dst += pos * 8;
while (count--) {
for (i = 0; i < substream->runtime->channels; i++)
*(dst + tdm_port->tx_map[i]) = *src++;
dst += 8;
}
} else {
src = (unsigned int *)substream->runtime->dma_area;
dst = buf;
src += pos * 8;
while (count--) {
for (i = 0; i < substream->runtime->channels; i++)
*dst++ = *(src + tdm_port->rx_map[i]);
src += 8;
}
}
return 0;
}
static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
int channel, snd_pcm_uframes_t pos, snd_pcm_uframes_t count)
{
unsigned char *buf = substream->runtime->dma_area;
buf += pos * 8 * 4;
memset(buf, '\0', count * 8 * 4);
return 0;
}
struct snd_pcm_ops bf5xx_pcm_tdm_ops = {
.open = bf5xx_pcm_open,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = bf5xx_pcm_hw_params,
.hw_free = bf5xx_pcm_hw_free,
.prepare = bf5xx_pcm_prepare,
.trigger = bf5xx_pcm_trigger,
.pointer = bf5xx_pcm_pointer,
.copy = bf5xx_pcm_copy,
.silence = bf5xx_pcm_silence,
};
static int bf5xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
struct snd_dma_buffer *buf = &substream->dma_buffer;
size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
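/* buf->bytes reports buffer_bytes_max to ALSA, but the backing DMA area
* is allocated four times larger - presumably headroom for the fixed
* 8-slot hardware frame when fewer application channels are open; the
* original driver does not spell out the factor.
*/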
buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev;
buf->private_data = NULL;
buf->area = dma_alloc_coherent(pcm->card->dev, size * 4,
&buf->addr, GFP_KERNEL);
if (!buf->area) {
pr_err("Failed to allocate dma memory - Please increase uncached DMA memory region\n");
return -ENOMEM;
}
buf->bytes = size;
return 0;
}
static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
int stream;
for (stream = 0; stream < 2; stream++) {
substream = pcm->streams[stream].substream;
if (!substream)
continue;
buf = &substream->dma_buffer;
if (!buf->area)
continue;
dma_free_coherent(NULL, buf->bytes, buf->area, 0);
buf->area = NULL;
}
}
static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
static int bf5xx_pcm_tdm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
if (!card->dev->dma_mask)
card->dev->dma_mask = &bf5xx_pcm_dmamask;
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_PLAYBACK);
if (ret)
goto out;
}
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_CAPTURE);
if (ret)
goto out;
}
out:
return ret;
}
static struct snd_soc_platform_driver bf5xx_tdm_soc_platform = {
.ops = &bf5xx_pcm_tdm_ops,
.pcm_new = bf5xx_pcm_tdm_new,
.pcm_free = bf5xx_pcm_free_dma_buffers,
};
static int __devinit bf5xx_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &bf5xx_tdm_soc_platform);
}
static int __devexit bf5xx_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
}
static struct platform_driver bfin_tdm_driver = {
.driver = {
.name = "bfin-tdm-pcm-audio",
.owner = THIS_MODULE,
},
.probe = bf5xx_soc_platform_probe,
.remove = __devexit_p(bf5xx_soc_platform_remove),
};
module_platform_driver(bfin_tdm_driver);
MODULE_AUTHOR("Barry Song");
MODULE_DESCRIPTION("ADI Blackfin TDM PCM DMA module");
MODULE_LICENSE("GPL");
| gpl-2.0 |
omega-roms/I9505G_Stock_Kernel_JB_4.3 | sound/pci/asihpi/hpioctl.c | 5076 | 12505 | /*******************************************************************************
AudioScience HPI driver
Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com>
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation;
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Common Linux HPI ioctl and module probe/remove functions
*******************************************************************************/
#define SOURCEFILE_NAME "hpioctl.c"
#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpidebug.h"
#include "hpimsgx.h"
#include "hpioctl.h"
#include "hpicmn.h"
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <asm/uaccess.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/module.h>
#ifdef MODULE_FIRMWARE
MODULE_FIRMWARE("asihpi/dsp5000.bin");
MODULE_FIRMWARE("asihpi/dsp6200.bin");
MODULE_FIRMWARE("asihpi/dsp6205.bin");
MODULE_FIRMWARE("asihpi/dsp6400.bin");
MODULE_FIRMWARE("asihpi/dsp6600.bin");
MODULE_FIRMWARE("asihpi/dsp8700.bin");
MODULE_FIRMWARE("asihpi/dsp8900.bin");
#endif
static int prealloc_stream_buf;
module_param(prealloc_stream_buf, int, S_IRUGO);
MODULE_PARM_DESC(prealloc_stream_buf,
"Preallocate size for per-adapter stream buffer");
/* Allow the debug level to be changed after module load.
E.g. echo 2 > /sys/module/asihpi/parameters/hpiDebugLevel
*/
module_param(hpi_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(hpi_debug_level, "debug verbosity 0..5");
/* List of adapters found */
static struct hpi_adapter adapters[HPI_MAX_ADAPTERS];
/* Wrapper function to HPI_Message to enable dumping of the
message and response types.
*/
static void hpi_send_recv_f(struct hpi_message *phm, struct hpi_response *phr,
struct file *file)
{
if ((phm->adapter_index >= HPI_MAX_ADAPTERS)
&& (phm->object != HPI_OBJ_SUBSYSTEM))
phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
else
hpi_send_recv_ex(phm, phr, file);
}
/* This is called from hpifunc.c functions, called by ALSA
* (or other kernel process). In this case there is no file descriptor
* available for the message cache code
*/
void hpi_send_recv(struct hpi_message *phm, struct hpi_response *phr)
{
hpi_send_recv_f(phm, phr, HOWNER_KERNEL);
}
EXPORT_SYMBOL(hpi_send_recv);
/* for radio-asihpi */
int asihpi_hpi_release(struct file *file)
{
struct hpi_message hm;
struct hpi_response hr;
/* HPI_DEBUG_LOG(INFO,"hpi_release file %p, pid %d\n", file, current->pid); */
/* close the subsystem just in case the application forgot to. */
hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_CLOSE);
hpi_send_recv_ex(&hm, &hr, file);
return 0;
}
long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct hpi_ioctl_linux __user *phpi_ioctl_data;
void __user *puhm;
void __user *puhr;
union hpi_message_buffer_v1 *hm;
union hpi_response_buffer_v1 *hr;
u16 res_max_size;
u32 uncopied_bytes;
int err = 0;
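/* ioctl protocol: userspace passes a struct hpi_ioctl_linux holding
* pointers to a message buffer and a response buffer. The first u16 of
* each buffer is its size, which is read first so only that many bytes
* are copied in or out.
*/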
if (cmd != HPI_IOCTL_LINUX)
return -EINVAL;
hm = kmalloc(sizeof(*hm), GFP_KERNEL);
hr = kmalloc(sizeof(*hr), GFP_KERNEL);
if (!hm || !hr) {
err = -ENOMEM;
goto out;
}
phpi_ioctl_data = (struct hpi_ioctl_linux __user *)arg;
/* Read the message and response pointers from user space. */
if (get_user(puhm, &phpi_ioctl_data->phm)
|| get_user(puhr, &phpi_ioctl_data->phr)) {
err = -EFAULT;
goto out;
}
/* Now read the message size and data from user space. */
if (get_user(hm->h.size, (u16 __user *)puhm)) {
err = -EFAULT;
goto out;
}
if (hm->h.size > sizeof(*hm))
hm->h.size = sizeof(*hm);
/* printk(KERN_INFO "message size %d\n", hm->h.wSize); */
uncopied_bytes = copy_from_user(hm, puhm, hm->h.size);
if (uncopied_bytes) {
HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
err = -EFAULT;
goto out;
}
if (get_user(res_max_size, (u16 __user *)puhr)) {
err = -EFAULT;
goto out;
}
/* printk(KERN_INFO "user response size %d\n", res_max_size); */
if (res_max_size < sizeof(struct hpi_response_header)) {
HPI_DEBUG_LOG(WARNING, "small res size %d\n", res_max_size);
err = -EFAULT;
goto out;
}
switch (hm->h.function) {
case HPI_SUBSYS_CREATE_ADAPTER:
case HPI_ADAPTER_DELETE:
/* Application must not use these functions! */
hr->h.size = sizeof(hr->h);
hr->h.error = HPI_ERROR_INVALID_OPERATION;
hr->h.function = hm->h.function;
uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
if (uncopied_bytes)
err = -EFAULT;
else
err = 0;
goto out;
}
hr->h.size = res_max_size;
if (hm->h.object == HPI_OBJ_SUBSYSTEM) {
hpi_send_recv_f(&hm->m0, &hr->r0, file);
} else {
u16 __user *ptr = NULL;
u32 size = 0;
/* -1=no data 0=read from user mem, 1=write to user mem */
int wrflag = -1;
struct hpi_adapter *pa = NULL;
if (hm->h.adapter_index < ARRAY_SIZE(adapters))
pa = &adapters[hm->h.adapter_index];
if (!pa || !pa->adapter || !pa->adapter->type) {
hpi_init_response(&hr->r0, hm->h.object,
hm->h.function, HPI_ERROR_BAD_ADAPTER_NUMBER);
uncopied_bytes =
copy_to_user(puhr, hr, sizeof(hr->h));
if (uncopied_bytes)
err = -EFAULT;
else
err = 0;
goto out;
}
if (mutex_lock_interruptible(&pa->mutex)) {
err = -EINTR;
goto out;
}
/* Dig out any pointers embedded in the message. */
switch (hm->h.function) {
case HPI_OSTREAM_WRITE:
case HPI_ISTREAM_READ:{
/* Yes, sparse, this is correct. */
ptr = (u16 __user *)hm->m0.u.d.u.data.pb_data;
size = hm->m0.u.d.u.data.data_size;
/* Allocate buffer according to application request.
?Is it better to alloc/free for the duration
of the transaction?
*/
if (pa->buffer_size < size) {
HPI_DEBUG_LOG(DEBUG,
"Realloc adapter %d stream "
"buffer from %zd to %d\n",
hm->h.adapter_index,
pa->buffer_size, size);
if (pa->p_buffer) {
pa->buffer_size = 0;
vfree(pa->p_buffer);
}
pa->p_buffer = vmalloc(size);
if (pa->p_buffer)
pa->buffer_size = size;
else {
HPI_DEBUG_LOG(ERROR,
"HPI could not allocate "
"stream buffer size %d\n",
size);
mutex_unlock(&pa->mutex);
err = -EINVAL;
goto out;
}
}
hm->m0.u.d.u.data.pb_data = pa->p_buffer;
if (hm->h.function == HPI_ISTREAM_READ)
/* from card, WRITE to user mem */
wrflag = 1;
else
wrflag = 0;
break;
}
default:
size = 0;
break;
}
if (size && (wrflag == 0)) {
uncopied_bytes =
copy_from_user(pa->p_buffer, ptr, size);
if (uncopied_bytes)
HPI_DEBUG_LOG(WARNING,
"Missed %d of %d "
"bytes from user\n", uncopied_bytes,
size);
}
hpi_send_recv_f(&hm->m0, &hr->r0, file);
if (size && (wrflag == 1)) {
uncopied_bytes =
copy_to_user(ptr, pa->p_buffer, size);
if (uncopied_bytes)
HPI_DEBUG_LOG(WARNING,
"Missed %d of %d " "bytes to user\n",
uncopied_bytes, size);
}
mutex_unlock(&pa->mutex);
}
/* on return response size must be set */
/*printk(KERN_INFO "response size %d\n", hr->h.wSize); */
if (!hr->h.size) {
HPI_DEBUG_LOG(ERROR, "response zero size\n");
err = -EFAULT;
goto out;
}
if (hr->h.size > res_max_size) {
HPI_DEBUG_LOG(ERROR, "response too big %d %d\n", hr->h.size,
res_max_size);
hr->h.error = HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL;
hr->h.specific_error = hr->h.size;
hr->h.size = sizeof(hr->h);
}
uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
if (uncopied_bytes) {
HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
err = -EFAULT;
goto out;
}
out:
kfree(hm);
kfree(hr);
return err;
}
int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
int idx, nm;
int adapter_index;
unsigned int memlen;
struct hpi_message hm;
struct hpi_response hr;
struct hpi_adapter adapter;
struct hpi_pci pci;
memset(&adapter, 0, sizeof(adapter));
dev_printk(KERN_DEBUG, &pci_dev->dev,
"probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
pci_dev->device, pci_dev->subsystem_vendor,
pci_dev->subsystem_device, pci_dev->devfn);
if (pci_enable_device(pci_dev) < 0) {
dev_printk(KERN_ERR, &pci_dev->dev,
"pci_enable_device failed, disabling device\n");
return -EIO;
}
pci_set_master(pci_dev); /* also sets latency timer if < 16 */
hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_CREATE_ADAPTER);
hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
HPI_ERROR_PROCESSING_MESSAGE);
hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;
nm = HPI_MAX_ADAPTER_MEM_SPACES;
for (idx = 0; idx < nm; idx++) {
HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
&pci_dev->resource[idx]);
if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
memlen = pci_resource_len(pci_dev, idx);
pci.ap_mem_base[idx] =
ioremap(pci_resource_start(pci_dev, idx),
memlen);
if (!pci.ap_mem_base[idx]) {
HPI_DEBUG_LOG(ERROR,
"ioremap failed, aborting\n");
/* unmap previously mapped pci mem space */
goto err;
}
}
}
pci.pci_dev = pci_dev;
hm.u.s.resource.bus_type = HPI_BUS_PCI;
hm.u.s.resource.r.pci = &pci;
/* call CreateAdapterObject on the relevant hpi module */
hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
if (hr.error)
goto err;
adapter_index = hr.u.s.adapter_index;
adapter.adapter = hpi_find_adapter(adapter_index);
if (prealloc_stream_buf) {
adapter.p_buffer = vmalloc(prealloc_stream_buf);
if (!adapter.p_buffer) {
HPI_DEBUG_LOG(ERROR,
"HPI could not allocate "
"kernel buffer size %d\n",
prealloc_stream_buf);
goto err;
}
}
hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
HPI_ADAPTER_OPEN);
hm.adapter_index = adapter.adapter->index;
hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
if (hr.error)
goto err;
/* WARNING can't init mutex in 'adapter'
* and then copy it to adapters[] ?!?!
*/
adapters[adapter_index] = adapter;
mutex_init(&adapters[adapter_index].mutex);
pci_set_drvdata(pci_dev, &adapters[adapter_index]);
dev_printk(KERN_INFO, &pci_dev->dev,
"probe succeeded for ASI%04X HPI index %d\n",
adapter.adapter->type, adapter_index);
return 0;
err:
for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
if (pci.ap_mem_base[idx]) {
iounmap(pci.ap_mem_base[idx]);
pci.ap_mem_base[idx] = NULL;
}
}
if (adapter.p_buffer) {
adapter.buffer_size = 0;
vfree(adapter.p_buffer);
}
HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
return -ENODEV;
}
void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev)
{
int idx;
struct hpi_message hm;
struct hpi_response hr;
struct hpi_adapter *pa;
struct hpi_pci pci;
pa = pci_get_drvdata(pci_dev);
pci = pa->adapter->pci;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
HPI_ADAPTER_DELETE);
hm.adapter_index = pa->adapter->index;
hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
/* unmap PCI memory space, mapped during device init. */
for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
if (pci.ap_mem_base[idx])
iounmap(pci.ap_mem_base[idx]);
}
if (pa->p_buffer)
vfree(pa->p_buffer);
pci_set_drvdata(pci_dev, NULL);
if (1)
dev_printk(KERN_INFO, &pci_dev->dev,
"remove %04x:%04x,%04x:%04x,%04x," " HPI index %d.\n",
pci_dev->vendor, pci_dev->device,
pci_dev->subsystem_vendor, pci_dev->subsystem_device,
pci_dev->devfn, pa->adapter->index);
memset(pa, 0, sizeof(*pa));
}
void __init asihpi_init(void)
{
struct hpi_message hm;
struct hpi_response hr;
memset(adapters, 0, sizeof(adapters));
printk(KERN_INFO "ASIHPI driver " HPI_VER_STRING "\n");
hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_DRIVER_LOAD);
hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
}
void asihpi_exit(void)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_DRIVER_UNLOAD);
hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
}
| gpl-2.0 |
MinimalOS-AOSP/kernel_lge_hammerhead | drivers/hid/hid-petalynx.c | 7380 | 3001 | /*
* HID driver for some petalynx "special" devices
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2007 Paul Walmsley
* Copyright (c) 2008 Jiri Slaby
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
/* Petalynx Maxter Remote has maximum for consumer page set too low */
static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
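/* rdesc[39..41] and rdesc[59..61] are the two-byte Usage Maximum (0x2a)
* and Logical Maximum (0x26) items of the consumer-page collection;
* raising both from 0xf5/0xf9 to 0xfa lets usage 0x0fa (mapped to
* KEY_BACK in pl_input_mapping() below) get through.
*/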
rdesc[60] = 0xfa;
rdesc[40] = 0xfa;
}
return rdesc;
}
#define pl_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
EV_KEY, (c))
static int pl_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_LOGIVENDOR) {
switch (usage->hid & HID_USAGE) {
case 0x05a: pl_map_key_clear(KEY_TEXT); break;
case 0x05b: pl_map_key_clear(KEY_RED); break;
case 0x05c: pl_map_key_clear(KEY_GREEN); break;
case 0x05d: pl_map_key_clear(KEY_YELLOW); break;
case 0x05e: pl_map_key_clear(KEY_BLUE); break;
default:
return 0;
}
return 1;
}
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) {
switch (usage->hid & HID_USAGE) {
case 0x0f6: pl_map_key_clear(KEY_NEXT); break;
case 0x0fa: pl_map_key_clear(KEY_BACK); break;
default:
return 0;
}
return 1;
}
return 0;
}
static int pl_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
hdev->quirks |= HID_QUIRK_NOGET;
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
goto err_free;
}
return 0;
err_free:
return ret;
}
static const struct hid_device_id pl_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ }
};
MODULE_DEVICE_TABLE(hid, pl_devices);
static struct hid_driver pl_driver = {
.name = "petalynx",
.id_table = pl_devices,
.report_fixup = pl_report_fixup,
.input_mapping = pl_input_mapping,
.probe = pl_probe,
};
static int __init pl_init(void)
{
return hid_register_driver(&pl_driver);
}
static void __exit pl_exit(void)
{
hid_unregister_driver(&pl_driver);
}
module_init(pl_init);
module_exit(pl_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
SamsungGalaxyS6/kernel_samsung_exynos7420 | drivers/media/rc/keymaps/rc-encore-enltv.c | 7636 | 2927 | /* encore-enltv.h - Keytable for encore_enltv Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Encore ENLTV-FM - black plastic, white front cover with white glowing buttons
Juan Pablo Sormani <sorman@gmail.com> */
static struct rc_map_table encore_enltv[] = {
/* Power button does nothing, not even in the Windows app,
although it sends data (used for BIOS wakeup?) */
{ 0x0d, KEY_MUTE },
{ 0x1e, KEY_TV },
{ 0x00, KEY_VIDEO },
{ 0x01, KEY_AUDIO }, /* music */
{ 0x02, KEY_CAMERA }, /* picture */
{ 0x1f, KEY_1 },
{ 0x03, KEY_2 },
{ 0x04, KEY_3 },
{ 0x05, KEY_4 },
{ 0x1c, KEY_5 },
{ 0x06, KEY_6 },
{ 0x07, KEY_7 },
{ 0x08, KEY_8 },
{ 0x1d, KEY_9 },
{ 0x0a, KEY_0 },
{ 0x09, KEY_LIST }, /* -/-- */
{ 0x0b, KEY_LAST }, /* recall */
{ 0x14, KEY_HOME }, /* win start menu */
{ 0x15, KEY_EXIT }, /* exit */
{ 0x16, KEY_CHANNELUP }, /* UP */
{ 0x12, KEY_CHANNELDOWN }, /* DOWN */
{ 0x0c, KEY_VOLUMEUP }, /* RIGHT */
{ 0x17, KEY_VOLUMEDOWN }, /* LEFT */
{ 0x18, KEY_ENTER }, /* OK */
{ 0x0e, KEY_ESC },
{ 0x13, KEY_CYCLEWINDOWS }, /* desktop */
{ 0x11, KEY_TAB },
{ 0x19, KEY_SWITCHVIDEOMODE }, /* switch */
{ 0x1a, KEY_MENU },
{ 0x1b, KEY_ZOOM }, /* fullscreen */
{ 0x44, KEY_TIME }, /* time shift */
{ 0x40, KEY_MODE }, /* source */
{ 0x5a, KEY_RECORD },
{ 0x42, KEY_PLAY }, /* play/pause */
{ 0x45, KEY_STOP },
{ 0x43, KEY_CAMERA }, /* camera icon */
{ 0x48, KEY_REWIND },
{ 0x4a, KEY_FASTFORWARD },
{ 0x49, KEY_PREVIOUS },
{ 0x4b, KEY_NEXT },
{ 0x4c, KEY_FAVORITES }, /* tv wall */
{ 0x4d, KEY_SOUND }, /* DVD sound */
{ 0x4e, KEY_LANGUAGE }, /* DVD lang */
{ 0x4f, KEY_TEXT }, /* DVD text */
{ 0x50, KEY_SLEEP }, /* shutdown */
{ 0x51, KEY_MODE }, /* stereo > main */
{ 0x52, KEY_SELECT }, /* stereo > sap */
{ 0x53, KEY_TEXT }, /* teletext */
{ 0x59, KEY_RED }, /* AP1 */
{ 0x41, KEY_GREEN }, /* AP2 */
{ 0x47, KEY_YELLOW }, /* AP3 */
{ 0x57, KEY_BLUE }, /* AP4 */
};
static struct rc_map_list encore_enltv_map = {
.map = {
.scan = encore_enltv,
.size = ARRAY_SIZE(encore_enltv),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_ENCORE_ENLTV,
}
};
static int __init init_rc_map_encore_enltv(void)
{
return rc_map_register(&encore_enltv_map);
}
static void __exit exit_rc_map_encore_enltv(void)
{
rc_map_unregister(&encore_enltv_map);
}
module_init(init_rc_map_encore_enltv)
module_exit(exit_rc_map_encore_enltv)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
longqzh/chronnOS | drivers/staging/comedi/drivers/aio_iiro_16.c | 8148 | 5051 | /*
comedi/drivers/aio_iiro_16.c
Driver for Access I/O Products PC-104 AIO-IIRO-16 Digital I/O board
Copyright (C) 2006 C&C Technologies, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: aio_iiro_16
Description: Access I/O Products PC-104 IIRO16 Relay And Isolated Input Board
Author: Zachary Ware <zach.ware@cctechnol.com>
Devices:
[Access I/O] PC-104 AIO12-8
Status: experimental
Configuration Options:
[0] - I/O port base address
*/
#include "../comedidev.h"
#include <linux/ioport.h>
#define AIO_IIRO_16_SIZE 0x08
#define AIO_IIRO_16_RELAY_0_7 0x00
#define AIO_IIRO_16_INPUT_0_7 0x01
#define AIO_IIRO_16_IRQ 0x02
#define AIO_IIRO_16_RELAY_8_15 0x04
#define AIO_IIRO_16_INPUT_8_15 0x05
struct aio_iiro_16_board {
const char *name;
int do_;
int di;
};
static const struct aio_iiro_16_board aio_iiro_16_boards[] = {
{
.name = "aio_iiro_16",
.di = 16,
.do_ = 16},
};
#define thisboard ((const struct aio_iiro_16_board *) dev->board_ptr)
struct aio_iiro_16_private {
int data;
struct pci_dev *pci_dev;
unsigned int ao_readback[2];
};
#define devpriv ((struct aio_iiro_16_private *) dev->private)
static int aio_iiro_16_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int aio_iiro_16_detach(struct comedi_device *dev);
static struct comedi_driver driver_aio_iiro_16 = {
.driver_name = "aio_iiro_16",
.module = THIS_MODULE,
.attach = aio_iiro_16_attach,
.detach = aio_iiro_16_detach,
.board_name = &aio_iiro_16_boards[0].name,
.offset = sizeof(struct aio_iiro_16_board),
.num_names = ARRAY_SIZE(aio_iiro_16_boards),
};
static int aio_iiro_16_dio_insn_bits_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data);
static int aio_iiro_16_dio_insn_bits_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data);
static int aio_iiro_16_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
int iobase;
struct comedi_subdevice *s;
printk(KERN_INFO "comedi%d: aio_iiro_16: ", dev->minor);
dev->board_name = thisboard->name;
iobase = it->options[0];
if (!request_region(iobase, AIO_IIRO_16_SIZE, dev->board_name)) {
printk("I/O port conflict");
return -EIO;
}
dev->iobase = iobase;
if (alloc_private(dev, sizeof(struct aio_iiro_16_private)) < 0)
return -ENOMEM;
if (alloc_subdevices(dev, 2) < 0)
return -ENOMEM;
s = dev->subdevices + 0;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = aio_iiro_16_dio_insn_bits_write;
s = dev->subdevices + 1;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = aio_iiro_16_dio_insn_bits_read;
printk("attached\n");
return 1;
}
static int aio_iiro_16_detach(struct comedi_device *dev)
{
printk(KERN_INFO "comedi%d: aio_iiro_16: remove\n", dev->minor);
if (dev->iobase)
release_region(dev->iobase, AIO_IIRO_16_SIZE);
return 0;
}
static int aio_iiro_16_dio_insn_bits_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
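/* comedi insn_bits convention: data[0] is a mask of channels to update
* and data[1] carries their new values; the cached s->state is updated
* and mirrored out to the two 8-bit relay registers.
*/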
if (insn->n != 2)
return -EINVAL;
if (data[0]) {
s->state &= ~data[0];
s->state |= data[0] & data[1];
outb(s->state & 0xff, dev->iobase + AIO_IIRO_16_RELAY_0_7);
outb((s->state >> 8) & 0xff,
dev->iobase + AIO_IIRO_16_RELAY_8_15);
}
data[1] = s->state;
return 2;
}
static int aio_iiro_16_dio_insn_bits_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (insn->n != 2)
return -EINVAL;
data[1] = 0;
data[1] |= inb(dev->iobase + AIO_IIRO_16_INPUT_0_7);
data[1] |= inb(dev->iobase + AIO_IIRO_16_INPUT_8_15) << 8;
return 2;
}
static int __init driver_aio_iiro_16_init_module(void)
{
return comedi_driver_register(&driver_aio_iiro_16);
}
static void __exit driver_aio_iiro_16_cleanup_module(void)
{
comedi_driver_unregister(&driver_aio_iiro_16);
}
module_init(driver_aio_iiro_16_init_module);
module_exit(driver_aio_iiro_16_cleanup_module);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Nothing-Dev/android_kernel_samsung_royss | arch/x86/um/sysrq_32.c | 9684 | 2651 | /*
* Copyright (C) 2001 - 2003 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#include "linux/kernel.h"
#include "linux/smp.h"
#include "linux/sched.h"
#include "linux/kallsyms.h"
#include "asm/ptrace.h"
#include "sysrq.h"
/* This is declared by <linux/sched.h> */
void show_regs(struct pt_regs *regs)
{
printk("\n");
printk("EIP: %04lx:[<%08lx>] CPU: %d %s",
0xffff & PT_REGS_CS(regs), PT_REGS_IP(regs),
smp_processor_id(), print_tainted());
if (PT_REGS_CS(regs) & 3)
printk(" ESP: %04lx:%08lx", 0xffff & PT_REGS_SS(regs),
PT_REGS_SP(regs));
printk(" EFLAGS: %08lx\n %s\n", PT_REGS_EFLAGS(regs),
print_tainted());
printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
PT_REGS_EAX(regs), PT_REGS_EBX(regs),
PT_REGS_ECX(regs),
PT_REGS_EDX(regs));
printk("ESI: %08lx EDI: %08lx EBP: %08lx",
PT_REGS_ESI(regs), PT_REGS_EDI(regs),
PT_REGS_EBP(regs));
printk(" DS: %04lx ES: %04lx\n",
0xffff & PT_REGS_DS(regs),
0xffff & PT_REGS_ES(regs));
show_trace(NULL, (unsigned long *) &regs);
}
/* Copied from i386. */
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
return p > (void *)tinfo &&
p < (void *)tinfo + THREAD_SIZE - 3;
}
/* Adapted from i386 (we also print the address we read from). */
static inline unsigned long print_context_stack(struct thread_info *tinfo,
unsigned long *stack, unsigned long ebp)
{
unsigned long addr;
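/* With CONFIG_FRAME_POINTER the saved-EBP chain is walked: each frame
* keeps the caller's EBP at *ebp and the return address at ebp + 4.
* Without frame pointers every stack word is scanned and anything that
* looks like a kernel text address is printed.
*/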
#ifdef CONFIG_FRAME_POINTER
while (valid_stack_ptr(tinfo, (void *)ebp)) {
addr = *(unsigned long *)(ebp + 4);
printk("%08lx: [<%08lx>]", ebp + 4, addr);
print_symbol(" %s", addr);
printk("\n");
ebp = *(unsigned long *)ebp;
}
#else
while (valid_stack_ptr(tinfo, stack)) {
addr = *stack;
if (__kernel_text_address(addr)) {
printk("%08lx: [<%08lx>]", (unsigned long) stack, addr);
print_symbol(" %s", addr);
printk("\n");
}
stack++;
}
#endif
return ebp;
}
void show_trace(struct task_struct* task, unsigned long * stack)
{
unsigned long ebp;
struct thread_info *context;
/* Turn this into BUG_ON if possible. */
if (!stack) {
stack = (unsigned long*) &stack;
printk("show_trace: got NULL stack, implicit assumption task == current");
WARN_ON(1);
}
if (!task)
task = current;
if (task != current) {
ebp = (unsigned long) KSTK_EBP(task);
} else {
asm ("movl %%ebp, %0" : "=r" (ebp) : );
}
context = (struct thread_info *)
((unsigned long)stack & (~(THREAD_SIZE - 1)));
print_context_stack(context, stack, ebp);
printk("\n");
}
| gpl-2.0 |
Smartandroidtech/platform_kernel_lge_hammerhead | drivers/pci/hotplug/ibmphp_hpc.c | 10708 | 33511 | /*
* IBM Hot Plug Controller Driver
*
* Written By: Jyoti Shah, IBM Corporation
*
* Copyright (C) 2001-2003 IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <gregkh@us.ibm.com>
* <jshah@us.ibm.com>
*
*/
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/kthread.h>
#include "ibmphp.h"
static int to_debug = 0;
#define debug_polling(fmt, arg...) do { if (to_debug) debug (fmt, arg); } while (0)
//----------------------------------------------------------------------------
// timeout values
//----------------------------------------------------------------------------
#define CMD_COMPLETE_TOUT_SEC 60 // give HPC 60 sec to finish cmd
#define HPC_CTLR_WORKING_TOUT 60 // give HPC 60 sec to finish cmd
#define HPC_GETACCESS_TIMEOUT 60 // seconds
#define POLL_INTERVAL_SEC 2 // poll HPC every 2 seconds
#define POLL_LATCH_CNT 5 // poll latch 5 times, then poll slots
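// The polling thread wakes every POLL_INTERVAL_SEC seconds; on most
// passes it only reads each controller's slot latch register and calls
// process_changeinlatch() when the value changed, and every
// POLL_LATCH_CNT passes it does a full READ_ALLSTAT sweep of the slots
// instead (see poll_hpc() below).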
//----------------------------------------------------------------------------
// Winnipeg Architected Register Offsets
//----------------------------------------------------------------------------
#define WPG_I2CMBUFL_OFFSET 0x08 // I2C Message Buffer Low
#define WPG_I2CMOSUP_OFFSET 0x10 // I2C Master Operation Setup Reg
#define WPG_I2CMCNTL_OFFSET 0x20 // I2C Master Control Register
#define WPG_I2CPARM_OFFSET 0x40 // I2C Parameter Register
#define WPG_I2CSTAT_OFFSET 0x70 // I2C Status Register
//----------------------------------------------------------------------------
// Winnipeg Store Type commands (Add this commands to the register offset)
//----------------------------------------------------------------------------
#define WPG_I2C_AND 0x1000 // I2C AND operation
#define WPG_I2C_OR 0x2000 // I2C OR operation
//----------------------------------------------------------------------------
// Command set for I2C Master Operation Setup Register
//----------------------------------------------------------------------------
#define WPG_READATADDR_MASK 0x00010000 // read,bytes,I2C shifted,index
#define WPG_WRITEATADDR_MASK 0x40010000 // write,bytes,I2C shifted,index
#define WPG_READDIRECT_MASK 0x10010000
#define WPG_WRITEDIRECT_MASK 0x60010000
//----------------------------------------------------------------------------
// bit masks for I2C Master Control Register
//----------------------------------------------------------------------------
#define WPG_I2CMCNTL_STARTOP_MASK 0x00000002 // Start the Operation
//----------------------------------------------------------------------------
//
//----------------------------------------------------------------------------
#define WPG_I2C_IOREMAP_SIZE 0x2044 // size of linear address interval
//----------------------------------------------------------------------------
// command index
//----------------------------------------------------------------------------
#define WPG_1ST_SLOT_INDEX 0x01 // index - 1st slot for ctlr
#define WPG_CTLR_INDEX 0x0F // index - ctlr
#define WPG_1ST_EXTSLOT_INDEX 0x10 // index - 1st ext slot for ctlr
#define WPG_1ST_BUS_INDEX 0x1F // index - 1st bus for ctlr
//----------------------------------------------------------------------------
// macro utilities
//----------------------------------------------------------------------------
// if bits 20,22,25,26,27,29,30 are OFF return 1
#define HPC_I2CSTATUS_CHECK(s) ((u8)((s & 0x00000A76) ? 0 : 1))
//----------------------------------------------------------------------------
// global variables
//----------------------------------------------------------------------------
static struct mutex sem_hpcaccess; // lock access to HPC
static struct semaphore semOperations; // lock all operations and
// access to data structures
static struct semaphore sem_exit; // make sure polling thread goes away
static struct task_struct *ibmphp_poll_thread;
//----------------------------------------------------------------------------
// local function prototypes
//----------------------------------------------------------------------------
static u8 i2c_ctrl_read (struct controller *, void __iomem *, u8);
static u8 i2c_ctrl_write (struct controller *, void __iomem *, u8, u8);
static u8 hpc_writecmdtoindex (u8, u8);
static u8 hpc_readcmdtoindex (u8, u8);
static void get_hpc_access (void);
static void free_hpc_access (void);
static int poll_hpc(void *data);
static int process_changeinstatus (struct slot *, struct slot *);
static int process_changeinlatch (u8, u8, struct controller *);
static int hpc_wait_ctlr_notworking (int, struct controller *, void __iomem *, u8 *);
//----------------------------------------------------------------------------
/*----------------------------------------------------------------------
* Name: ibmphp_hpc_initvars
*
* Action: initialize semaphores and variables
*---------------------------------------------------------------------*/
void __init ibmphp_hpc_initvars (void)
{
debug ("%s - Entry\n", __func__);
mutex_init(&sem_hpcaccess);
sema_init(&semOperations, 1);
sema_init(&sem_exit, 0);
to_debug = 0;
debug ("%s - Exit\n", __func__);
}
/*----------------------------------------------------------------------
* Name: i2c_ctrl_read
*
* Action: read from HPC over I2C
*
*---------------------------------------------------------------------*/
static u8 i2c_ctrl_read (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 index)
{
u8 status;
int i;
void __iomem *wpg_addr; // base addr + offset
unsigned long wpg_data; // data to/from WPG LOHI format
unsigned long ultemp;
unsigned long data; // actual data HILO format
debug_polling ("%s - Entry WPGBbar[%p] index[%x] \n", __func__, WPGBbar, index);
//--------------------------------------------------------------------
// READ - step 1
// read at address, byte length, I2C address (shifted), index
// or read direct, byte length, index
if (ctlr_ptr->ctlr_type == 0x02) {
data = WPG_READATADDR_MASK;
// fill in I2C address
ultemp = (unsigned long)ctlr_ptr->u.wpeg_ctlr.i2c_addr;
ultemp = ultemp >> 1;
data |= (ultemp << 8);
// fill in index
data |= (unsigned long)index;
} else if (ctlr_ptr->ctlr_type == 0x04) {
data = WPG_READDIRECT_MASK;
// fill in index
ultemp = (unsigned long)index;
ultemp = ultemp << 8;
data |= ultemp;
} else {
err ("this controller type is not supported \n");
return HPC_ERROR;
}
wpg_data = swab32 (data); // swap data before writing
wpg_addr = WPGBbar + WPG_I2CMOSUP_OFFSET;
writel (wpg_data, wpg_addr);
//--------------------------------------------------------------------
// READ - step 2 : clear the message buffer
data = 0x00000000;
wpg_data = swab32 (data);
wpg_addr = WPGBbar + WPG_I2CMBUFL_OFFSET;
writel (wpg_data, wpg_addr);
//--------------------------------------------------------------------
// READ - step 3 : issue start operation, I2C master control bit 30:ON
// 2020 : [20] OR operation at [20] offset 0x20
data = WPG_I2CMCNTL_STARTOP_MASK;
wpg_data = swab32 (data);
wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET + WPG_I2C_OR;
writel (wpg_data, wpg_addr);
//--------------------------------------------------------------------
// READ - step 4 : wait until start operation bit clears
i = CMD_COMPLETE_TOUT_SEC;
while (i) {
msleep(10);
wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET;
wpg_data = readl (wpg_addr);
data = swab32 (wpg_data);
if (!(data & WPG_I2CMCNTL_STARTOP_MASK))
break;
i--;
}
if (i == 0) {
debug ("%s - Error : WPG timeout\n", __func__);
return HPC_ERROR;
}
//--------------------------------------------------------------------
// READ - step 5 : read I2C status register
i = CMD_COMPLETE_TOUT_SEC;
while (i) {
msleep(10);
wpg_addr = WPGBbar + WPG_I2CSTAT_OFFSET;
wpg_data = readl (wpg_addr);
data = swab32 (wpg_data);
if (HPC_I2CSTATUS_CHECK (data))
break;
i--;
}
if (i == 0) {
debug ("ctrl_read - Exit Error:I2C timeout\n");
return HPC_ERROR;
}
//--------------------------------------------------------------------
// READ - step 6 : get DATA
wpg_addr = WPGBbar + WPG_I2CMBUFL_OFFSET;
wpg_data = readl (wpg_addr);
data = swab32 (wpg_data);
status = (u8) data;
debug_polling ("%s - Exit index[%x] status[%x]\n", __func__, index, status);
return (status);
}
/*----------------------------------------------------------------------
* Name: i2c_ctrl_write
*
* Action: write to HPC over I2C
*
* Return 0 or error codes
*---------------------------------------------------------------------*/
static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 index, u8 cmd)
{
u8 rc;
void __iomem *wpg_addr; // base addr + offset
unsigned long wpg_data; // data to/from WPG LOHI format
unsigned long ultemp;
unsigned long data; // actual data HILO format
int i;
debug_polling ("%s - Entry WPGBbar[%p] index[%x] cmd[%x]\n", __func__, WPGBbar, index, cmd);
rc = 0;
//--------------------------------------------------------------------
// WRITE - step 1
// write at address, byte length, I2C address (shifted), index
// or write direct, byte length, index
data = 0x00000000;
if (ctlr_ptr->ctlr_type == 0x02) {
data = WPG_WRITEATADDR_MASK;
// fill in I2C address
ultemp = (unsigned long)ctlr_ptr->u.wpeg_ctlr.i2c_addr;
ultemp = ultemp >> 1;
data |= (ultemp << 8);
// fill in index
data |= (unsigned long)index;
} else if (ctlr_ptr->ctlr_type == 0x04) {
data = WPG_WRITEDIRECT_MASK;
// fill in index
ultemp = (unsigned long)index;
ultemp = ultemp << 8;
data |= ultemp;
} else {
err ("this controller type is not supported \n");
return HPC_ERROR;
}
wpg_data = swab32 (data); // swap data before writing
wpg_addr = WPGBbar + WPG_I2CMOSUP_OFFSET;
writel (wpg_data, wpg_addr);
//--------------------------------------------------------------------
// WRITE - step 2 : clear the message buffer
data = 0x00000000 | (unsigned long)cmd;
wpg_data = swab32 (data);
wpg_addr = WPGBbar + WPG_I2CMBUFL_OFFSET;
writel (wpg_data, wpg_addr);
//--------------------------------------------------------------------
// WRITE - step 3 : issue start operation,I2C master control bit 30:ON
// 2020 : [20] OR operation at [20] offset 0x20
data = WPG_I2CMCNTL_STARTOP_MASK;
wpg_data = swab32 (data);
wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET + WPG_I2C_OR;
writel (wpg_data, wpg_addr);
//--------------------------------------------------------------------
// WRITE - step 4 : wait until start operation bit clears
i = CMD_COMPLETE_TOUT_SEC;
while (i) {
msleep(10);
wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET;
wpg_data = readl (wpg_addr);
data = swab32 (wpg_data);
if (!(data & WPG_I2CMCNTL_STARTOP_MASK))
break;
i--;
}
if (i == 0) {
debug ("%s - Exit Error:WPG timeout\n", __func__);
rc = HPC_ERROR;
}
//--------------------------------------------------------------------
// WRITE - step 5 : read I2C status register
i = CMD_COMPLETE_TOUT_SEC;
while (i) {
msleep(10);
wpg_addr = WPGBbar + WPG_I2CSTAT_OFFSET;
wpg_data = readl (wpg_addr);
data = swab32 (wpg_data);
if (HPC_I2CSTATUS_CHECK (data))
break;
i--;
}
if (i == 0) {
debug ("ctrl_read - Error : I2C timeout\n");
rc = HPC_ERROR;
}
debug_polling ("%s Exit rc[%x]\n", __func__, rc);
return (rc);
}
//------------------------------------------------------------
// Read from ISA type HPC
//------------------------------------------------------------
static u8 isa_ctrl_read (struct controller *ctlr_ptr, u8 offset)
{
u16 start_address;
u16 end_address;
u8 data;
start_address = ctlr_ptr->u.isa_ctlr.io_start;
end_address = ctlr_ptr->u.isa_ctlr.io_end;
data = inb (start_address + offset);
return data;
}
//--------------------------------------------------------------
// Write to ISA type HPC
//--------------------------------------------------------------
static void isa_ctrl_write (struct controller *ctlr_ptr, u8 offset, u8 data)
{
u16 start_address;
u16 port_address;
start_address = ctlr_ptr->u.isa_ctlr.io_start;
port_address = start_address + (u16) offset;
outb (data, port_address);
}
static u8 pci_ctrl_read (struct controller *ctrl, u8 offset)
{
u8 data = 0x00;
debug ("inside pci_ctrl_read\n");
if (ctrl->ctrl_dev)
pci_read_config_byte (ctrl->ctrl_dev, HPC_PCI_OFFSET + offset, &data);
return data;
}
static u8 pci_ctrl_write (struct controller *ctrl, u8 offset, u8 data)
{
u8 rc = -ENODEV;
debug ("inside pci_ctrl_write\n");
if (ctrl->ctrl_dev) {
pci_write_config_byte (ctrl->ctrl_dev, HPC_PCI_OFFSET + offset, data);
rc = 0;
}
return rc;
}
static u8 ctrl_read (struct controller *ctlr, void __iomem *base, u8 offset)
{
u8 rc;
switch (ctlr->ctlr_type) {
case 0:
rc = isa_ctrl_read (ctlr, offset);
break;
case 1:
rc = pci_ctrl_read (ctlr, offset);
break;
case 2:
case 4:
rc = i2c_ctrl_read (ctlr, base, offset);
break;
default:
return -ENODEV;
}
return rc;
}
static u8 ctrl_write (struct controller *ctlr, void __iomem *base, u8 offset, u8 data)
{
u8 rc = 0;
switch (ctlr->ctlr_type) {
case 0:
isa_ctrl_write(ctlr, offset, data);
break;
case 1:
rc = pci_ctrl_write (ctlr, offset, data);
break;
case 2:
case 4:
rc = i2c_ctrl_write(ctlr, base, offset, data);
break;
default:
return -ENODEV;
}
return rc;
}
/*----------------------------------------------------------------------
* Name: hpc_writecmdtoindex()
*
* Action: convert a write command to proper index within a controller
*
* Return index, HPC_ERROR
*---------------------------------------------------------------------*/
static u8 hpc_writecmdtoindex (u8 cmd, u8 index)
{
u8 rc;
switch (cmd) {
case HPC_CTLR_ENABLEIRQ: // 0x00.N.15
case HPC_CTLR_CLEARIRQ: // 0x06.N.15
case HPC_CTLR_RESET: // 0x07.N.15
case HPC_CTLR_IRQSTEER: // 0x08.N.15
case HPC_CTLR_DISABLEIRQ: // 0x01.N.15
case HPC_ALLSLOT_ON: // 0x11.N.15
case HPC_ALLSLOT_OFF: // 0x12.N.15
rc = 0x0F;
break;
case HPC_SLOT_OFF: // 0x02.Y.0-14
case HPC_SLOT_ON: // 0x03.Y.0-14
case HPC_SLOT_ATTNOFF: // 0x04.N.0-14
case HPC_SLOT_ATTNON: // 0x05.N.0-14
case HPC_SLOT_BLINKLED: // 0x13.N.0-14
rc = index;
break;
case HPC_BUS_33CONVMODE:
case HPC_BUS_66CONVMODE:
case HPC_BUS_66PCIXMODE:
case HPC_BUS_100PCIXMODE:
case HPC_BUS_133PCIXMODE:
rc = index + WPG_1ST_BUS_INDEX - 1;
break;
default:
err ("hpc_writecmdtoindex - Error invalid cmd[%x]\n", cmd);
rc = HPC_ERROR;
}
return rc;
}
/*----------------------------------------------------------------------
* Name: hpc_readcmdtoindex()
*
* Action: convert a read command to proper index within a controller
*
* Return index, HPC_ERROR
*---------------------------------------------------------------------*/
static u8 hpc_readcmdtoindex (u8 cmd, u8 index)
{
u8 rc;
switch (cmd) {
case READ_CTLRSTATUS:
rc = 0x0F;
break;
case READ_SLOTSTATUS:
case READ_ALLSTAT:
rc = index;
break;
case READ_EXTSLOTSTATUS:
rc = index + WPG_1ST_EXTSLOT_INDEX;
break;
case READ_BUSSTATUS:
rc = index + WPG_1ST_BUS_INDEX - 1;
break;
case READ_SLOTLATCHLOWREG:
rc = 0x28;
break;
case READ_REVLEVEL:
rc = 0x25;
break;
case READ_HPCOPTIONS:
rc = 0x27;
break;
default:
rc = HPC_ERROR;
}
return rc;
}
/*----------------------------------------------------------------------
* Name: HPCreadslot()
*
* Action: issue a READ command to HPC
*
* Input: pslot - cannot be NULL for READ_ALLSTAT
* pstatus - can be NULL for READ_ALLSTAT
*
* Return 0 or error codes
*---------------------------------------------------------------------*/
int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus)
{
void __iomem *wpg_bbar = NULL;
struct controller *ctlr_ptr;
struct list_head *pslotlist;
u8 index, status;
int rc = 0;
int busindex;
debug_polling ("%s - Entry pslot[%p] cmd[%x] pstatus[%p]\n", __func__, pslot, cmd, pstatus);
if ((pslot == NULL)
|| ((pstatus == NULL) && (cmd != READ_ALLSTAT) && (cmd != READ_BUSSTATUS))) {
rc = -EINVAL;
err ("%s - Error invalid pointer, rc[%d]\n", __func__, rc);
return rc;
}
if (cmd == READ_BUSSTATUS) {
busindex = ibmphp_get_bus_index (pslot->bus);
if (busindex < 0) {
rc = -EINVAL;
err ("%s - Exit Error:invalid bus, rc[%d]\n", __func__, rc);
return rc;
} else
index = (u8) busindex;
} else
index = pslot->ctlr_index;
index = hpc_readcmdtoindex (cmd, index);
if (index == HPC_ERROR) {
rc = -EINVAL;
err ("%s - Exit Error:invalid index, rc[%d]\n", __func__, rc);
return rc;
}
ctlr_ptr = pslot->ctrl;
get_hpc_access ();
//--------------------------------------------------------------------
// map physical address to logical address
//--------------------------------------------------------------------
if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4))
wpg_bbar = ioremap (ctlr_ptr->u.wpeg_ctlr.wpegbbar, WPG_I2C_IOREMAP_SIZE);
//--------------------------------------------------------------------
// check controller status before reading
//--------------------------------------------------------------------
rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status);
if (!rc) {
switch (cmd) {
case READ_ALLSTAT:
// update the slot structure
pslot->ctrl->status = status;
pslot->status = ctrl_read (ctlr_ptr, wpg_bbar, index);
rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar,
&status);
if (!rc)
pslot->ext_status = ctrl_read (ctlr_ptr, wpg_bbar, index + WPG_1ST_EXTSLOT_INDEX);
break;
case READ_SLOTSTATUS:
// DO NOT update the slot structure
*pstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
break;
case READ_EXTSLOTSTATUS:
// DO NOT update the slot structure
*pstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
break;
case READ_CTLRSTATUS:
// DO NOT update the slot structure
*pstatus = status;
break;
case READ_BUSSTATUS:
pslot->busstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
break;
case READ_REVLEVEL:
*pstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
break;
case READ_HPCOPTIONS:
*pstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
break;
case READ_SLOTLATCHLOWREG:
// DO NOT update the slot structure
*pstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
break;
// Not used
case READ_ALLSLOT:
list_for_each (pslotlist, &ibmphp_slot_head) {
pslot = list_entry (pslotlist, struct slot, ibm_slot_list);
index = pslot->ctlr_index;
rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr,
wpg_bbar, &status);
if (!rc) {
pslot->status = ctrl_read (ctlr_ptr, wpg_bbar, index);
rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT,
ctlr_ptr, wpg_bbar, &status);
if (!rc)
pslot->ext_status =
ctrl_read (ctlr_ptr, wpg_bbar,
index + WPG_1ST_EXTSLOT_INDEX);
} else {
err ("%s - Error ctrl_read failed\n", __func__);
rc = -EINVAL;
break;
}
}
break;
default:
rc = -EINVAL;
break;
}
}
//--------------------------------------------------------------------
// cleanup
//--------------------------------------------------------------------
// remove physical to logical address mapping
if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4))
iounmap (wpg_bbar);
free_hpc_access ();
debug_polling ("%s - Exit rc[%d]\n", __func__, rc);
return rc;
}
/*----------------------------------------------------------------------
* Name: ibmphp_hpc_writeslot()
*
* Action: issue a WRITE command to HPC
*---------------------------------------------------------------------*/
int ibmphp_hpc_writeslot (struct slot * pslot, u8 cmd)
{
void __iomem *wpg_bbar = NULL;
struct controller *ctlr_ptr;
u8 index, status;
int busindex;
u8 done;
int rc = 0;
int timeout;
debug_polling ("%s - Entry pslot[%p] cmd[%x]\n", __func__, pslot, cmd);
if (pslot == NULL) {
rc = -EINVAL;
err ("%s - Error Exit rc[%d]\n", __func__, rc);
return rc;
}
if ((cmd == HPC_BUS_33CONVMODE) || (cmd == HPC_BUS_66CONVMODE) ||
(cmd == HPC_BUS_66PCIXMODE) || (cmd == HPC_BUS_100PCIXMODE) ||
(cmd == HPC_BUS_133PCIXMODE)) {
busindex = ibmphp_get_bus_index (pslot->bus);
if (busindex < 0) {
rc = -EINVAL;
err ("%s - Exit Error:invalid bus, rc[%d]\n", __func__, rc);
return rc;
} else
index = (u8) busindex;
} else
index = pslot->ctlr_index;
index = hpc_writecmdtoindex (cmd, index);
if (index == HPC_ERROR) {
rc = -EINVAL;
err ("%s - Error Exit rc[%d]\n", __func__, rc);
return rc;
}
ctlr_ptr = pslot->ctrl;
get_hpc_access ();
//--------------------------------------------------------------------
// map physical address to logical address
//--------------------------------------------------------------------
if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4)) {
wpg_bbar = ioremap (ctlr_ptr->u.wpeg_ctlr.wpegbbar, WPG_I2C_IOREMAP_SIZE);
debug ("%s - ctlr id[%x] physical[%lx] logical[%lx] i2c[%x]\n", __func__,
ctlr_ptr->ctlr_id, (ulong) (ctlr_ptr->u.wpeg_ctlr.wpegbbar), (ulong) wpg_bbar,
ctlr_ptr->u.wpeg_ctlr.i2c_addr);
}
//--------------------------------------------------------------------
// check controller status before writing
//--------------------------------------------------------------------
rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status);
if (!rc) {
ctrl_write (ctlr_ptr, wpg_bbar, index, cmd);
//--------------------------------------------------------------------
// check controller is still not working on the command
//--------------------------------------------------------------------
timeout = CMD_COMPLETE_TOUT_SEC;
done = 0;
while (!done) {
rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar,
&status);
if (!rc) {
if (NEEDTOCHECK_CMDSTATUS (cmd)) {
if (CTLR_FINISHED (status) == HPC_CTLR_FINISHED_YES)
done = 1;
} else
done = 1;
}
if (!done) {
msleep(1000);
if (timeout < 1) {
done = 1;
err ("%s - Error command complete timeout\n", __func__);
rc = -EFAULT;
} else
timeout--;
}
}
ctlr_ptr->status = status;
}
// cleanup
// remove physical to logical address mapping
if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4))
iounmap (wpg_bbar);
free_hpc_access ();
debug_polling ("%s - Exit rc[%d]\n", __func__, rc);
return rc;
}
/*----------------------------------------------------------------------
* Name: get_hpc_access()
*
* Action: make sure only one process can access HPC at one time
*---------------------------------------------------------------------*/
static void get_hpc_access (void)
{
mutex_lock(&sem_hpcaccess);
}
/*----------------------------------------------------------------------
* Name: free_hpc_access()
*---------------------------------------------------------------------*/
void free_hpc_access (void)
{
mutex_unlock(&sem_hpcaccess);
}
/*----------------------------------------------------------------------
* Name: ibmphp_lock_operations()
*
* Action: make sure only one process can change the data structure
*---------------------------------------------------------------------*/
void ibmphp_lock_operations (void)
{
down (&semOperations);
to_debug = 1;
}
/*----------------------------------------------------------------------
* Name: ibmphp_unlock_operations()
*---------------------------------------------------------------------*/
void ibmphp_unlock_operations (void)
{
debug ("%s - Entry\n", __func__);
up (&semOperations);
to_debug = 0;
debug ("%s - Exit\n", __func__);
}
/*----------------------------------------------------------------------
* Name: poll_hpc()
*---------------------------------------------------------------------*/
#define POLL_LATCH_REGISTER 0
#define POLL_SLOTS 1
#define POLL_SLEEP 2
static int poll_hpc(void *data)
{
struct slot myslot;
struct slot *pslot = NULL;
struct list_head *pslotlist;
int rc;
int poll_state = POLL_LATCH_REGISTER;
u8 oldlatchlow = 0x00;
u8 curlatchlow = 0x00;
int poll_count = 0;
u8 ctrl_count = 0x00;
debug ("%s - Entry\n", __func__);
while (!kthread_should_stop()) {
/* try to get the lock to do some kind of hardware access */
down (&semOperations);
switch (poll_state) {
case POLL_LATCH_REGISTER:
oldlatchlow = curlatchlow;
ctrl_count = 0x00;
list_for_each (pslotlist, &ibmphp_slot_head) {
if (ctrl_count >= ibmphp_get_total_controllers())
break;
pslot = list_entry (pslotlist, struct slot, ibm_slot_list);
if (pslot->ctrl->ctlr_relative_id == ctrl_count) {
ctrl_count++;
if (READ_SLOT_LATCH (pslot->ctrl)) {
rc = ibmphp_hpc_readslot (pslot,
READ_SLOTLATCHLOWREG,
&curlatchlow);
if (oldlatchlow != curlatchlow)
process_changeinlatch (oldlatchlow,
curlatchlow,
pslot->ctrl);
}
}
}
++poll_count;
poll_state = POLL_SLEEP;
break;
case POLL_SLOTS:
list_for_each (pslotlist, &ibmphp_slot_head) {
pslot = list_entry (pslotlist, struct slot, ibm_slot_list);
// make a copy of the old status
memcpy ((void *) &myslot, (void *) pslot,
sizeof (struct slot));
rc = ibmphp_hpc_readslot (pslot, READ_ALLSTAT, NULL);
if ((myslot.status != pslot->status)
|| (myslot.ext_status != pslot->ext_status))
process_changeinstatus (pslot, &myslot);
}
ctrl_count = 0x00;
list_for_each (pslotlist, &ibmphp_slot_head) {
if (ctrl_count >= ibmphp_get_total_controllers())
break;
pslot = list_entry (pslotlist, struct slot, ibm_slot_list);
if (pslot->ctrl->ctlr_relative_id == ctrl_count) {
ctrl_count++;
if (READ_SLOT_LATCH (pslot->ctrl))
rc = ibmphp_hpc_readslot (pslot,
READ_SLOTLATCHLOWREG,
&curlatchlow);
}
}
++poll_count;
poll_state = POLL_SLEEP;
break;
case POLL_SLEEP:
/* don't sleep with a lock on the hardware */
up (&semOperations);
msleep(POLL_INTERVAL_SEC * 1000);
if (kthread_should_stop())
goto out_sleep;
down (&semOperations);
if (poll_count >= POLL_LATCH_CNT) {
poll_count = 0;
poll_state = POLL_SLOTS;
} else
poll_state = POLL_LATCH_REGISTER;
break;
}
/* give up the hardware semaphore */
up (&semOperations);
/* sleep for a short time just for good measure */
out_sleep:
msleep(100);
}
up (&sem_exit);
debug ("%s - Exit\n", __func__);
return 0;
}
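/*
* Editor's sketch of the loop above (derived from the code, not part of the
* original source): poll_hpc() cycles through three states while holding
* semOperations:
* POLL_LATCH_REGISTER - read each controller's latch-low register and call
* process_changeinlatch() on any change;
* POLL_SLOTS - re-read the full status of every slot and call
* process_changeinstatus() on a difference;
* POLL_SLEEP - drop the semaphore, msleep(POLL_INTERVAL_SEC * 1000), then
* pick POLL_SLOTS once every POLL_LATCH_CNT passes and
* POLL_LATCH_REGISTER otherwise.
*/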
/*----------------------------------------------------------------------
* Name: process_changeinstatus
*
* Action: compare old and new slot status, process the change in status
*
* Input: pointer to slot struct, old slot struct
*
* Return 0 or error codes
* Value:
*
* Side
* Effects: None.
*
* Notes:
*---------------------------------------------------------------------*/
static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
{
u8 status;
int rc = 0;
u8 disable = 0;
u8 update = 0;
debug ("process_changeinstatus - Entry pslot[%p], poldslot[%p]\n", pslot, poldslot);
// bit 0 - HPC_SLOT_POWER
if ((pslot->status & 0x01) != (poldslot->status & 0x01))
update = 1;
// bit 1 - HPC_SLOT_CONNECT
// ignore
// bit 2 - HPC_SLOT_ATTN
if ((pslot->status & 0x04) != (poldslot->status & 0x04))
update = 1;
// bit 3 - HPC_SLOT_PRSNT2
// bit 4 - HPC_SLOT_PRSNT1
if (((pslot->status & 0x08) != (poldslot->status & 0x08))
|| ((pslot->status & 0x10) != (poldslot->status & 0x10)))
update = 1;
// bit 5 - HPC_SLOT_PWRGD
if ((pslot->status & 0x20) != (poldslot->status & 0x20))
// OFF -> ON: ignore, ON -> OFF: disable slot
if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status)))
disable = 1;
// bit 6 - HPC_SLOT_BUS_SPEED
// ignore
// bit 7 - HPC_SLOT_LATCH
if ((pslot->status & 0x80) != (poldslot->status & 0x80)) {
update = 1;
// OPEN -> CLOSE
if (pslot->status & 0x80) {
if (SLOT_PWRGD (pslot->status)) {
// power goes on and off after closing latch
// check again to make sure power is still ON
msleep(1000);
rc = ibmphp_hpc_readslot (pslot, READ_SLOTSTATUS, &status);
if (SLOT_PWRGD (status))
update = 1;
else // overwrite power in pslot to OFF
pslot->status &= ~HPC_SLOT_POWER;
}
}
// CLOSE -> OPEN
else if ((SLOT_PWRGD (poldslot->status) == HPC_SLOT_PWRGD_GOOD)
&& (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) {
disable = 1;
}
// else - ignore
}
// bit 3 - HPC_SLOT_BLINK_ATTN
if ((pslot->ext_status & 0x08) != (poldslot->ext_status & 0x08))
update = 1;
if (disable) {
debug ("process_changeinstatus - disable slot\n");
pslot->flag = 0;
rc = ibmphp_do_disable_slot (pslot);
}
if (update || disable) {
ibmphp_update_slot_info (pslot);
}
debug ("%s - Exit rc[%d] disable[%x] update[%x]\n", __func__, rc, disable, update);
return rc;
}
/*----------------------------------------------------------------------
* Name: process_changeinlatch
*
* Action: compare old and new latch reg status, process the change
*
* Input: old and current latch register status
*
* Return 0 or error codes
* Value:
*---------------------------------------------------------------------*/
static int process_changeinlatch (u8 old, u8 new, struct controller *ctrl)
{
struct slot myslot, *pslot;
u8 i;
u8 mask;
int rc = 0;
debug ("%s - Entry old[%x], new[%x]\n", __func__, old, new);
// bit 0 reserved, 0 is LSB, check bit 1-6 for 6 slots
for (i = ctrl->starting_slot_num; i <= ctrl->ending_slot_num; i++) {
mask = 0x01 << i;
if ((mask & old) != (mask & new)) {
pslot = ibmphp_get_slot_from_physical_num (i);
if (pslot) {
memcpy ((void *) &myslot, (void *) pslot, sizeof (struct slot));
rc = ibmphp_hpc_readslot (pslot, READ_ALLSTAT, NULL);
debug ("%s - call process_changeinstatus for slot[%d]\n", __func__, i);
process_changeinstatus (pslot, &myslot);
} else {
rc = -EINVAL;
err ("%s - Error bad pointer for slot[%d]\n", __func__, i);
}
}
}
debug ("%s - Exit rc[%d]\n", __func__, rc);
return rc;
}
/*----------------------------------------------------------------------
* Name: ibmphp_hpc_start_poll_thread
*
* Action: start polling thread
*---------------------------------------------------------------------*/
int __init ibmphp_hpc_start_poll_thread (void)
{
debug ("%s - Entry\n", __func__);
ibmphp_poll_thread = kthread_run(poll_hpc, NULL, "hpc_poll");
if (IS_ERR(ibmphp_poll_thread)) {
err ("%s - Error, thread not started\n", __func__);
return PTR_ERR(ibmphp_poll_thread);
}
return 0;
}
/*----------------------------------------------------------------------
* Name: ibmphp_hpc_stop_poll_thread
*
* Action: stop polling thread and cleanup
*---------------------------------------------------------------------*/
void __exit ibmphp_hpc_stop_poll_thread (void)
{
debug ("%s - Entry\n", __func__);
kthread_stop(ibmphp_poll_thread);
debug ("before locking operations \n");
ibmphp_lock_operations ();
debug ("after locking operations \n");
// wait for poll thread to exit
debug ("before sem_exit down \n");
down (&sem_exit);
debug ("after sem_exit down \n");
// cleanup
debug ("before free_hpc_access \n");
free_hpc_access ();
debug ("after free_hpc_access \n");
ibmphp_unlock_operations ();
debug ("after unlock operations \n");
up (&sem_exit);
debug ("after sem exit up\n");
debug ("%s - Exit\n", __func__);
}
/*----------------------------------------------------------------------
* Name: hpc_wait_ctlr_notworking
*
* Action: wait until the controller is no longer busy (not in the "working" state)
*
* Return 0, HPC_ERROR
* Value:
*---------------------------------------------------------------------*/
static int hpc_wait_ctlr_notworking (int timeout, struct controller *ctlr_ptr, void __iomem *wpg_bbar,
u8 * pstatus)
{
int rc = 0;
u8 done = 0;
debug_polling ("hpc_wait_ctlr_notworking - Entry timeout[%d]\n", timeout);
while (!done) {
*pstatus = ctrl_read (ctlr_ptr, wpg_bbar, WPG_CTLR_INDEX);
if (*pstatus == HPC_ERROR) {
rc = HPC_ERROR;
done = 1;
}
if (CTLR_WORKING (*pstatus) == HPC_CTLR_WORKING_NO)
done = 1;
if (!done) {
msleep(1000);
if (timeout < 1) {
done = 1;
err ("HPCreadslot - Error ctlr timeout\n");
rc = HPC_ERROR;
} else
timeout--;
}
}
debug_polling ("hpc_wait_ctlr_notworking - Exit rc[%x] status[%x]\n", rc, *pstatus);
return rc;
}
| gpl-2.0 |
KINGbabasula/android_kernel_oneplus_msm8974-kexec | arch/powerpc/boot/ugecon.c | 13268 | 2911 | /*
* arch/powerpc/boot/ugecon.c
*
* USB Gecko bootwrapper console.
* Copyright (C) 2008-2009 The GameCube Linux Team
* Copyright (C) 2008,2009 Albert Herranz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
*/
#include <stddef.h>
#include "stdio.h"
#include "types.h"
#include "io.h"
#include "ops.h"
#define EXI_CLK_32MHZ 5
#define EXI_CSR 0x00
#define EXI_CSR_CLKMASK (0x7<<4)
#define EXI_CSR_CLK_32MHZ (EXI_CLK_32MHZ<<4)
#define EXI_CSR_CSMASK (0x7<<7)
#define EXI_CSR_CS_0 (0x1<<7) /* Chip Select 001 */
#define EXI_CR 0x0c
#define EXI_CR_TSTART (1<<0)
#define EXI_CR_WRITE (1<<2)
#define EXI_CR_READ_WRITE (2<<2)
#define EXI_CR_TLEN(len) (((len)-1)<<4)
#define EXI_DATA 0x10
/* virtual address base for input/output, retrieved from device tree */
static void *ug_io_base;
static u32 ug_io_transaction(u32 in)
{
u32 *csr_reg = ug_io_base + EXI_CSR;
u32 *data_reg = ug_io_base + EXI_DATA;
u32 *cr_reg = ug_io_base + EXI_CR;
u32 csr, data, cr;
/* select */
csr = EXI_CSR_CLK_32MHZ | EXI_CSR_CS_0;
out_be32(csr_reg, csr);
/* read/write */
data = in;
out_be32(data_reg, data);
cr = EXI_CR_TLEN(2) | EXI_CR_READ_WRITE | EXI_CR_TSTART;
out_be32(cr_reg, cr);
while (in_be32(cr_reg) & EXI_CR_TSTART)
barrier();
/* deselect */
out_be32(csr_reg, 0);
data = in_be32(data_reg);
return data;
}
static int ug_is_txfifo_ready(void)
{
return ug_io_transaction(0xc0000000) & 0x04000000;
}
static void ug_raw_putc(char ch)
{
ug_io_transaction(0xb0000000 | (ch << 20));
}
static void ug_putc(char ch)
{
int count = 16;
if (!ug_io_base)
return;
while (!ug_is_txfifo_ready() && count--)
barrier();
if (count >= 0)
ug_raw_putc(ch);
}
void ug_console_write(const char *buf, int len)
{
char *b = (char *)buf;
while (len--) {
if (*b == '\n')
ug_putc('\r');
ug_putc(*b++);
}
}
static int ug_is_adapter_present(void)
{
if (!ug_io_base)
return 0;
return ug_io_transaction(0x90000000) == 0x04700000;
}
static void *ug_grab_exi_io_base(void)
{
u32 v;
void *devp;
devp = find_node_by_compatible(NULL, "nintendo,flipper-exi");
if (devp == NULL)
goto err_out;
if (getprop(devp, "virtual-reg", &v, sizeof(v)) != sizeof(v))
goto err_out;
return (void *)v;
err_out:
return NULL;
}
void *ug_probe(void)
{
void *exi_io_base;
int i;
exi_io_base = ug_grab_exi_io_base();
if (!exi_io_base)
return NULL;
/* look for a usbgecko on memcard slots A and B */
for (i = 0; i < 2; i++) {
ug_io_base = exi_io_base + 0x14 * i;
if (ug_is_adapter_present())
break;
}
if (i == 2)
ug_io_base = NULL;
return ug_io_base;
}
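/*
* Editor's note, inferred from the transactions above (the command encodings
* are assumptions based on this file only, not on USB Gecko documentation):
* ug_io_transaction(0x90000000) - probe; a present adapter answers 0x04700000
* ug_io_transaction(0xc0000000) & 0x04000000 - TX FIFO ready flag
* ug_io_transaction(0xb0000000 | (ch << 20)) - transmit one character
* Each transaction selects EXI chip select 0 at 32 MHz, performs a 2-byte
* immediate read/write (EXI_CR_TLEN(2) | EXI_CR_READ_WRITE), busy-waits for
* EXI_CR_TSTART to clear, deselects, and returns the data register.
*/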
| gpl-2.0 |
wujiku/superstar-kernel-shooter-2.3.4gb | drivers/usb/gadget/f_acm.c | 213 | 23419 | /*
* f_acm.c -- USB CDC serial (ACM) function driver
*
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
* Copyright (C) 2009 by Samsung Electronics
* Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
*
* This software is distributed under the terms of the GNU General
* Public License ("GPL") as published by the Free Software Foundation,
* either version 2 of that License or (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/usb/android_composite.h>
#include "u_serial.h"
#include "gadget_chips.h"
/*
* This CDC ACM function support just wraps control functions and
* notifications around the generic serial-over-usb code.
*
* Because CDC ACM is standardized by the USB-IF, many host operating
* systems have drivers for it. Accordingly, ACM is the preferred
* interop solution for serial-port type connections. The control
* models are often not necessary, and in any case don't do much in
* this bare-bones implementation.
*
* Note that even MS-Windows has some support for ACM. However, that
* support is somewhat broken because when you use ACM in a composite
* device, having multiple interfaces confuses the poor OS. It doesn't
* seem to understand CDC Union descriptors. The new "association"
* descriptors (roughly equivalent to CDC Unions) may sometimes help.
*/
struct acm_ep_descs {
struct usb_endpoint_descriptor *in;
struct usb_endpoint_descriptor *out;
struct usb_endpoint_descriptor *notify;
};
struct f_acm {
struct gserial port;
u8 ctrl_id, data_id;
u8 port_num;
u8 pending;
/* lock is mostly for pending and notify_req ... they get accessed
* by callbacks both from tty (open/close/break) under its spinlock,
* and notify_req.complete() which can't use that lock.
*/
spinlock_t lock;
struct acm_ep_descs fs;
struct acm_ep_descs hs;
struct usb_ep *notify;
struct usb_endpoint_descriptor *notify_desc;
struct usb_request *notify_req;
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
/* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
u16 port_handshake_bits;
#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
/* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
u16 serial_state;
#define ACM_CTRL_OVERRUN (1 << 6)
#define ACM_CTRL_PARITY (1 << 5)
#define ACM_CTRL_FRAMING (1 << 4)
#define ACM_CTRL_RI (1 << 3)
#define ACM_CTRL_BRK (1 << 2)
#define ACM_CTRL_DSR (1 << 1)
#define ACM_CTRL_DCD (1 << 0)
};
static inline struct f_acm *func_to_acm(struct usb_function *f)
{
return container_of(f, struct f_acm, port.func);
}
static inline struct f_acm *port_to_acm(struct gserial *p)
{
return container_of(p, struct f_acm, port);
}
/*-------------------------------------------------------------------------*/
/* notification endpoint uses smallish and infrequent fixed-size messages */
#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
/* interface and class descriptors: */
static struct usb_interface_assoc_descriptor
acm_iad_descriptor = {
.bLength = sizeof acm_iad_descriptor,
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
/* .bFirstInterface = DYNAMIC, */
.bInterfaceCount = 2, // control + data
.bFunctionClass = USB_CLASS_COMM,
.bFunctionSubClass = USB_CDC_SUBCLASS_ACM,
.bFunctionProtocol = USB_CDC_PROTO_NONE,
/* .iFunction = DYNAMIC */
};
static struct usb_interface_descriptor acm_control_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
.bInterfaceProtocol = USB_CDC_ACM_PROTO_AT_V25TER,
/* .iInterface = DYNAMIC */
};
static struct usb_interface_descriptor acm_data_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_CDC_DATA,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
/* .iInterface = DYNAMIC */
};
static struct usb_cdc_header_desc acm_header_desc = {
.bLength = sizeof(acm_header_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
.bcdCDC = cpu_to_le16(0x0110),
};
static struct usb_cdc_call_mgmt_descriptor
acm_call_mgmt_descriptor = {
.bLength = sizeof(acm_call_mgmt_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
.bmCapabilities = 0,
/* .bDataInterface = DYNAMIC */
};
static struct usb_cdc_acm_descriptor acm_descriptor = {
.bLength = sizeof(acm_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ACM_TYPE,
.bmCapabilities = USB_CDC_CAP_LINE,
};
static struct usb_cdc_union_desc acm_union_desc = {
.bLength = sizeof(acm_union_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_UNION_TYPE,
/* .bMasterInterface0 = DYNAMIC */
/* .bSlaveInterface0 = DYNAMIC */
};
/* full speed support: */
static struct usb_endpoint_descriptor acm_fs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
.bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL,
};
static struct usb_endpoint_descriptor acm_fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor acm_fs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_descriptor_header *acm_fs_function[] = {
(struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
(struct usb_descriptor_header *) &acm_descriptor,
(struct usb_descriptor_header *) &acm_union_desc,
(struct usb_descriptor_header *) &acm_fs_notify_desc,
(struct usb_descriptor_header *) &acm_data_interface_desc,
(struct usb_descriptor_header *) &acm_fs_in_desc,
(struct usb_descriptor_header *) &acm_fs_out_desc,
NULL,
};
/* high speed support: */
static struct usb_endpoint_descriptor acm_hs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
.bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
};
static struct usb_endpoint_descriptor acm_hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor acm_hs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_descriptor_header *acm_hs_function[] = {
(struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
(struct usb_descriptor_header *) &acm_descriptor,
(struct usb_descriptor_header *) &acm_union_desc,
(struct usb_descriptor_header *) &acm_hs_notify_desc,
(struct usb_descriptor_header *) &acm_data_interface_desc,
(struct usb_descriptor_header *) &acm_hs_in_desc,
(struct usb_descriptor_header *) &acm_hs_out_desc,
NULL,
};
/* string descriptors: */
#define ACM_CTRL_IDX 0
#define ACM_DATA_IDX 1
#define ACM_IAD_IDX 2
/* static strings, in UTF-8 */
static struct usb_string acm_string_defs[] = {
[ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
[ACM_DATA_IDX].s = "CDC ACM Data",
[ACM_IAD_IDX ].s = "CDC Serial",
{ /* ZEROES END LIST */ },
};
static struct usb_gadget_strings acm_string_table = {
.language = 0x0409, /* en-us */
.strings = acm_string_defs,
};
static struct usb_gadget_strings *acm_strings[] = {
&acm_string_table,
NULL,
};
/*-------------------------------------------------------------------------*/
/* ACM control ... data handling is delegated to tty library code.
* The main task of this function is to activate and deactivate
* that code based on device state; track parameters like line
* speed, handshake state, and so on; and issue notifications.
*/
static void acm_complete_set_line_coding(struct usb_ep *ep,
struct usb_request *req)
{
struct f_acm *acm = ep->driver_data;
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
if (req->status != 0) {
DBG(cdev, "acm ttyGS%d completion, err %d\n",
acm->port_num, req->status);
return;
}
/* normal completion */
if (req->actual != sizeof(acm->port_line_coding)) {
DBG(cdev, "acm ttyGS%d short resp, len %d\n",
acm->port_num, req->actual);
usb_ep_set_halt(ep);
} else {
struct usb_cdc_line_coding *value = req->buf;
/* REVISIT: we currently just remember this data.
* If we change that, (a) validate it first, then
* (b) update whatever hardware needs updating,
* (c) worry about locking. This is information on
* the order of 9600-8-N-1 ... most of which means
* nothing unless we control a real RS232 line.
*/
acm->port_line_coding = *value;
}
}
static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
struct f_acm *acm = func_to_acm(f);
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_request *req = cdev->req;
int value = -EOPNOTSUPP;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
/* composite driver infrastructure handles everything except
* CDC class messages; interface activation uses set_alt().
*
* Note CDC spec table 4 lists the ACM request profile. It requires
* encapsulated command support ... we don't handle any, and respond
* to them by stalling. Options include get/set/clear comm features
* (not that useful) and SEND_BREAK.
*/
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
/* SET_LINE_CODING ... just read and save what the host sends */
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_SET_LINE_CODING:
if (w_length != sizeof(struct usb_cdc_line_coding)
|| w_index != acm->ctrl_id)
goto invalid;
value = w_length;
cdev->gadget->ep0->driver_data = acm;
req->complete = acm_complete_set_line_coding;
break;
/* GET_LINE_CODING ... return what host sent, or initial value */
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_GET_LINE_CODING:
if (w_index != acm->ctrl_id)
goto invalid;
value = min_t(unsigned, w_length,
sizeof(struct usb_cdc_line_coding));
memcpy(req->buf, &acm->port_line_coding, value);
break;
/* SET_CONTROL_LINE_STATE ... save what the host sent */
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
if (w_index != acm->ctrl_id)
goto invalid;
value = 0;
/* FIXME we should not allow data to flow until the
* host sets the ACM_CTRL_DTR bit; and when it clears
* that bit, we should return to that no-flow state.
*/
acm->port_handshake_bits = w_value;
break;
default:
invalid:
VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
}
/* respond with data transfer or status phase? */
if (value >= 0) {
DBG(cdev, "acm ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
acm->port_num, ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
req->zero = 0;
req->length = value;
value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (value < 0)
ERROR(cdev, "acm response on ttyGS%d, err %d\n",
acm->port_num, value);
}
/* device either stalls (value < 0) or reports success */
return value;
}
static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_acm *acm = func_to_acm(f);
struct usb_composite_dev *cdev = f->config->cdev;
/* we know alt == 0, so this is an activation or a reset */
if (intf == acm->ctrl_id) {
if (acm->notify->driver_data) {
VDBG(cdev, "reset acm control interface %d\n", intf);
usb_ep_disable(acm->notify);
} else {
VDBG(cdev, "init acm ctrl interface %d\n", intf);
}
acm->notify_desc = ep_choose(cdev->gadget,
acm->hs.notify,
acm->fs.notify);
usb_ep_enable(acm->notify, acm->notify_desc);
acm->notify->driver_data = acm;
} else if (intf == acm->data_id) {
if (acm->port.in->driver_data) {
DBG(cdev, "reset acm ttyGS%d\n", acm->port_num);
gserial_disconnect(&acm->port);
} else {
DBG(cdev, "activate acm ttyGS%d\n", acm->port_num);
}
acm->port.in_desc = ep_choose(cdev->gadget,
acm->hs.in, acm->fs.in);
acm->port.out_desc = ep_choose(cdev->gadget,
acm->hs.out, acm->fs.out);
gserial_connect(&acm->port, acm->port_num);
} else
return -EINVAL;
return 0;
}
static void acm_disable(struct usb_function *f)
{
struct f_acm *acm = func_to_acm(f);
struct usb_composite_dev *cdev = f->config->cdev;
DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num);
gserial_disconnect(&acm->port);
usb_ep_disable(acm->notify);
acm->notify->driver_data = NULL;
}
/*-------------------------------------------------------------------------*/
/**
* acm_cdc_notify - issue CDC notification to host
* @acm: wraps host to be notified
* @type: notification type
* @value: Refer to cdc specs, wValue field.
* @data: data to be sent
* @length: size of data
* Context: irqs blocked, acm->lock held, acm_notify_req non-null
*
* Returns zero on success or a negative errno.
*
* See section 6.3.5 of the CDC 1.1 specification for information
* about the only notification we issue: SerialState change.
*/
static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
void *data, unsigned length)
{
struct usb_ep *ep = acm->notify;
struct usb_request *req;
struct usb_cdc_notification *notify;
const unsigned len = sizeof(*notify) + length;
void *buf;
int status;
req = acm->notify_req;
acm->notify_req = NULL;
acm->pending = false;
req->length = len;
notify = req->buf;
buf = notify + 1;
notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
| USB_RECIP_INTERFACE;
notify->bNotificationType = type;
notify->wValue = cpu_to_le16(value);
notify->wIndex = cpu_to_le16(acm->ctrl_id);
notify->wLength = cpu_to_le16(length);
memcpy(buf, data, length);
/* ep_queue() can complete immediately if it fills the fifo... */
spin_unlock(&acm->lock);
status = usb_ep_queue(ep, req, GFP_ATOMIC);
spin_lock(&acm->lock);
if (status < 0) {
ERROR(acm->port.func.config->cdev,
"acm ttyGS%d can't notify serial state, %d\n",
acm->port_num, status);
acm->notify_req = req;
}
return status;
}
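/*
* Editor's illustration (not in the original source): for the one
* notification this driver sends, USB_CDC_NOTIFY_SERIAL_STATE, the
* interrupt-IN transfer built above is sizeof(struct usb_cdc_notification)
* + 2 == 10 bytes, i.e. exactly GS_NOTIFY_MAXPACKET:
* bmRequestType 0xa1 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)
* bNotificationType USB_CDC_NOTIFY_SERIAL_STATE
* wValue 0
* wIndex acm->ctrl_id
* wLength 2
* data the two-byte acm->serial_state bitmap (ACM_CTRL_* flags)
*/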
static int acm_notify_serial_state(struct f_acm *acm)
{
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
int status;
spin_lock(&acm->lock);
if (acm->notify_req) {
DBG(cdev, "acm ttyGS%d serial state %04x\n",
acm->port_num, acm->serial_state);
status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
0, &acm->serial_state, sizeof(acm->serial_state));
} else {
acm->pending = true;
status = 0;
}
spin_unlock(&acm->lock);
return status;
}
static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_acm *acm = req->context;
u8 doit = false;
/* on this call path we do NOT hold the port spinlock,
* which is why ACM needs its own spinlock
*/
spin_lock(&acm->lock);
if (req->status != -ESHUTDOWN)
doit = acm->pending;
acm->notify_req = req;
spin_unlock(&acm->lock);
if (doit)
acm_notify_serial_state(acm);
}
/* connect == the TTY link is open */
static void acm_connect(struct gserial *port)
{
struct f_acm *acm = port_to_acm(port);
acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
acm_notify_serial_state(acm);
}
static void acm_disconnect(struct gserial *port)
{
struct f_acm *acm = port_to_acm(port);
acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
acm_notify_serial_state(acm);
}
static int acm_send_break(struct gserial *port, int duration)
{
struct f_acm *acm = port_to_acm(port);
u16 state;
state = acm->serial_state;
state &= ~ACM_CTRL_BRK;
if (duration)
state |= ACM_CTRL_BRK;
acm->serial_state = state;
return acm_notify_serial_state(acm);
}
/*-------------------------------------------------------------------------*/
/* ACM function driver setup/binding */
static int
acm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_acm *acm = func_to_acm(f);
int status;
struct usb_ep *ep;
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
acm->ctrl_id = status;
acm_iad_descriptor.bFirstInterface = status;
acm_control_interface_desc.bInterfaceNumber = status;
acm_union_desc.bMasterInterface0 = status;
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
acm->data_id = status;
acm_data_interface_desc.bInterfaceNumber = status;
acm_union_desc.bSlaveInterface0 = status;
acm_call_mgmt_descriptor.bDataInterface = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc);
if (!ep)
goto fail;
acm->port.in = ep;
ep->driver_data = cdev; /* claim */
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc);
if (!ep)
goto fail;
acm->port.out = ep;
ep->driver_data = cdev; /* claim */
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc);
if (!ep)
goto fail;
acm->notify = ep;
ep->driver_data = cdev; /* claim */
/* allocate notification */
acm->notify_req = gs_alloc_req(ep,
sizeof(struct usb_cdc_notification) + 2,
GFP_KERNEL);
if (!acm->notify_req)
goto fail;
acm->notify_req->complete = acm_cdc_notify_complete;
acm->notify_req->context = acm;
/* copy descriptors, and track endpoint copies */
f->descriptors = usb_copy_descriptors(acm_fs_function);
if (!f->descriptors)
goto fail;
acm->fs.in = usb_find_endpoint(acm_fs_function,
f->descriptors, &acm_fs_in_desc);
acm->fs.out = usb_find_endpoint(acm_fs_function,
f->descriptors, &acm_fs_out_desc);
acm->fs.notify = usb_find_endpoint(acm_fs_function,
f->descriptors, &acm_fs_notify_desc);
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
*/
if (gadget_is_dualspeed(c->cdev->gadget)) {
acm_hs_in_desc.bEndpointAddress =
acm_fs_in_desc.bEndpointAddress;
acm_hs_out_desc.bEndpointAddress =
acm_fs_out_desc.bEndpointAddress;
acm_hs_notify_desc.bEndpointAddress =
acm_fs_notify_desc.bEndpointAddress;
/* copy descriptors, and track endpoint copies */
f->hs_descriptors = usb_copy_descriptors(acm_hs_function);
acm->hs.in = usb_find_endpoint(acm_hs_function,
f->hs_descriptors, &acm_hs_in_desc);
acm->hs.out = usb_find_endpoint(acm_hs_function,
f->hs_descriptors, &acm_hs_out_desc);
acm->hs.notify = usb_find_endpoint(acm_hs_function,
f->hs_descriptors, &acm_hs_notify_desc);
}
DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
acm->port_num,
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
acm->port.in->name, acm->port.out->name,
acm->notify->name);
return 0;
fail:
if (acm->notify_req)
gs_free_req(acm->notify, acm->notify_req);
/* we might as well release our claims on endpoints */
if (acm->notify)
acm->notify->driver_data = NULL;
if (acm->port.out)
acm->port.out->driver_data = NULL;
if (acm->port.in)
acm->port.in->driver_data = NULL;
ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
return status;
}
static void
acm_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_acm *acm = func_to_acm(f);
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
gs_free_req(acm->notify, acm->notify_req);
kfree(acm);
}
/* Some controllers can't support CDC ACM ... */
static inline bool can_support_cdc(struct usb_configuration *c)
{
/* everything else is *probably* fine ... */
return true;
}
/**
* acm_bind_config - add a CDC ACM function to a configuration
* @c: the configuration to support the CDC ACM instance
* @port_num: /dev/ttyGS* port this interface will use
* Context: single threaded during gadget setup
*
* Returns zero on success, else negative errno.
*
* Caller must have called @gserial_setup() with enough ports to
* handle all the ones it binds. Caller is also responsible
* for calling @gserial_cleanup() before module unload.
*/
int acm_bind_config(struct usb_configuration *c, u8 port_num)
{
struct f_acm *acm;
int status;
if (!can_support_cdc(c))
return -EINVAL;
/* REVISIT might want instance-specific strings to help
* distinguish instances ...
*/
/* maybe allocate device-global string IDs, and patch descriptors */
if (acm_string_defs[ACM_CTRL_IDX].id == 0) {
status = usb_string_id(c->cdev);
if (status < 0)
return status;
acm_string_defs[ACM_CTRL_IDX].id = status;
acm_control_interface_desc.iInterface = status;
status = usb_string_id(c->cdev);
if (status < 0)
return status;
acm_string_defs[ACM_DATA_IDX].id = status;
acm_data_interface_desc.iInterface = status;
status = usb_string_id(c->cdev);
if (status < 0)
return status;
acm_string_defs[ACM_IAD_IDX].id = status;
acm_iad_descriptor.iFunction = status;
}
/* allocate and initialize one new instance */
acm = kzalloc(sizeof *acm, GFP_KERNEL);
if (!acm)
return -ENOMEM;
spin_lock_init(&acm->lock);
acm->port_num = port_num;
acm->port.connect = acm_connect;
acm->port.disconnect = acm_disconnect;
acm->port.send_break = acm_send_break;
acm->port.func.name = "acm";
acm->port.func.strings = acm_strings;
/* descriptors are per-instance copies */
acm->port.func.bind = acm_bind;
acm->port.func.unbind = acm_unbind;
acm->port.func.set_alt = acm_set_alt;
acm->port.func.setup = acm_setup;
acm->port.func.disable = acm_disable;
status = usb_add_function(c, &acm->port.func);
if (status)
kfree(acm);
return status;
}
#ifdef CONFIG_USB_ANDROID_ACM
int acm_function_bind_config(struct usb_configuration *c)
{
int ret = acm_bind_config(c, 0);
if (ret == 0)
gserial_setup(c->cdev->gadget, 1);
return ret;
}
static struct android_usb_function acm_function = {
.name = "acm",
.bind_config = acm_function_bind_config,
};
static int __init init(void)
{
printk(KERN_INFO "f_acm init\n");
android_register_function(&acm_function);
return 0;
}
module_init(init);
#endif /* CONFIG_USB_ANDROID_ACM */
| gpl-2.0 |
stevelord/PR30 | linux-2.6.31/arch/arm/mach-sa1100/hackkit.c | 725 | 4794 | /*
* linux/arch/arm/mach-sa1100/hackkit.c
*
* Copyright (C) 2002 Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de>
*
* This file contains all HackKit tweaks. Based on original work from
* Nicolas Pitre's assabet fixes
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/serial_core.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/serial_sa1100.h>
#include "generic.h"
/**********************************************************************
* prototypes
*/
/* init funcs */
static void __init hackkit_map_io(void);
static u_int hackkit_get_mctrl(struct uart_port *port);
static void hackkit_set_mctrl(struct uart_port *port, u_int mctrl);
static void hackkit_uart_pm(struct uart_port *port, u_int state, u_int oldstate);
/**********************************************************************
* global data
*/
/**********************************************************************
* static data
*/
static struct map_desc hackkit_io_desc[] __initdata = {
{ /* Flash bank 0 */
.virtual = 0xe8000000,
.pfn = __phys_to_pfn(0x00000000),
.length = 0x01000000,
.type = MT_DEVICE
},
};
static struct sa1100_port_fns hackkit_port_fns __initdata = {
.set_mctrl = hackkit_set_mctrl,
.get_mctrl = hackkit_get_mctrl,
.pm = hackkit_uart_pm,
};
/**********************************************************************
* Static functions
*/
static void __init hackkit_map_io(void)
{
sa1100_map_io();
iotable_init(hackkit_io_desc, ARRAY_SIZE(hackkit_io_desc));
sa1100_register_uart_fns(&hackkit_port_fns);
sa1100_register_uart(0, 1); /* com port */
sa1100_register_uart(1, 2);
sa1100_register_uart(2, 3); /* radio module */
Ser1SDCR0 |= SDCR0_SUS;
}
/**
* hackkit_uart_pm - powermgmt callback function for system 3 UART
* @port: uart port structure
* @state: pm state
* @oldstate: old pm state
*
*/
static void hackkit_uart_pm(struct uart_port *port, u_int state, u_int oldstate)
{
/* TODO: switch on/off uart in powersave mode */
}
/*
* Note! this can be called from IRQ context.
* FIXME: No modem ctrl lines yet.
*/
static void hackkit_set_mctrl(struct uart_port *port, u_int mctrl)
{
#if 0
if (port->mapbase == _Ser1UTCR0) {
u_int set = 0, clear = 0;
if (mctrl & TIOCM_RTS)
set |= PT_CTRL2_RS1_RTS;
else
clear |= PT_CTRL2_RS1_RTS;
if (mctrl & TIOCM_DTR)
set |= PT_CTRL2_RS1_DTR;
else
clear |= PT_CTRL2_RS1_DTR;
PTCTRL2_clear(clear);
PTCTRL2_set(set);
}
#endif
}
static u_int hackkit_get_mctrl(struct uart_port *port)
{
u_int ret = 0;
#if 0
u_int irqsr = PT_IRQSR;
/* need 2 reads to read current value */
irqsr = PT_IRQSR;
/* TODO: check IRQ source register for modem/com
status lines and set them correctly. */
#endif
ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR;
return ret;
}
static struct mtd_partition hackkit_partitions[] = {
{
.name = "BLOB",
.size = 0x00040000,
.offset = 0x00000000,
.mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
.name = "config",
.size = 0x00040000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "kernel",
.size = 0x00100000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "initrd",
.size = 0x00180000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "rootfs",
.size = 0x700000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "data",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct flash_platform_data hackkit_flash_data = {
.map_name = "cfi_probe",
.parts = hackkit_partitions,
.nr_parts = ARRAY_SIZE(hackkit_partitions),
};
static struct resource hackkit_flash_resource = {
.start = SA1100_CS0_PHYS,
.end = SA1100_CS0_PHYS + SZ_32M,
.flags = IORESOURCE_MEM,
};
static void __init hackkit_init(void)
{
sa11x0_set_flash_data(&hackkit_flash_data, &hackkit_flash_resource, 1);
}
/**********************************************************************
* Exported Functions
*/
MACHINE_START(HACKKIT, "HackKit Cpu Board")
.phys_io = 0x80000000,
.io_pg_offst = ((0xf8000000) >> 18) & 0xfffc,
.boot_params = 0xc0000100,
.map_io = hackkit_map_io,
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = hackkit_init,
MACHINE_END
| gpl-2.0 |
is00hcw/fastsocket | kernel/arch/arm/mach-ixp4xx/nslu2-pci.c | 725 | 1561 | /*
* arch/arm/mach-ixp4xx/nslu2-pci.c
*
* NSLU2 board-level PCI initialization
*
* based on ixdp425-pci.c:
* Copyright (C) 2002 Intel Corporation.
* Copyright (C) 2003-2004 MontaVista Software, Inc.
*
* Maintainer: http://www.nslu2-linux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
void __init nslu2_pci_preinit(void)
{
set_irq_type(IRQ_NSLU2_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_NSLU2_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_NSLU2_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init nslu2_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
static int pci_irq_table[NSLU2_PCI_IRQ_LINES] = {
IRQ_NSLU2_PCI_INTA,
IRQ_NSLU2_PCI_INTB,
IRQ_NSLU2_PCI_INTC,
};
int irq = -1;
if (slot >= 1 && slot <= NSLU2_PCI_MAX_DEV &&
pin >= 1 && pin <= NSLU2_PCI_IRQ_LINES) {
irq = pci_irq_table[(slot + pin - 2) % NSLU2_PCI_IRQ_LINES];
}
return irq;
}
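/*
* Editor's worked example of the swizzle above (illustration only, assuming
* NSLU2_PCI_IRQ_LINES == 3 as the three-entry table suggests):
* irq = pci_irq_table[(slot + pin - 2) % 3], so the mapping rotates per slot:
* slot 1, pin A (1) -> (1 + 1 - 2) % 3 = 0 -> IRQ_NSLU2_PCI_INTA
* slot 1, pin B (2) -> (1 + 2 - 2) % 3 = 1 -> IRQ_NSLU2_PCI_INTB
* slot 2, pin A (1) -> (2 + 1 - 2) % 3 = 1 -> IRQ_NSLU2_PCI_INTB
* slot 3, pin A (1) -> (3 + 1 - 2) % 3 = 2 -> IRQ_NSLU2_PCI_INTC
*/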
struct hw_pci __initdata nslu2_pci = {
.nr_controllers = 1,
.preinit = nslu2_pci_preinit,
.swizzle = pci_std_swizzle,
.setup = ixp4xx_setup,
.scan = ixp4xx_scan_bus,
.map_irq = nslu2_map_irq,
};
int __init nslu2_pci_init(void) /* monkey see, monkey do */
{
if (machine_is_nslu2())
pci_common_init(&nslu2_pci);
return 0;
}
subsys_initcall(nslu2_pci_init);
| gpl-2.0 |
willcharlton/linux | arch/sparc/kernel/starfire.c | 1237 | 2600 | /*
* starfire.c: Starfire/E10000 support.
*
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
* Copyright (C) 2000 Anton Blanchard (anton@samba.org)
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/upa.h>
#include <asm/starfire.h>
/*
* A few places around the kernel check this to see if
* they need to call us to do things in a Starfire specific
* way.
*/
int this_is_starfire = 0;
void check_if_starfire(void)
{
phandle ssnode = prom_finddevice("/ssp-serial");
if (ssnode != 0 && (s32)ssnode != -1)
this_is_starfire = 1;
}
/*
* Each Starfire board has 32 registers which perform translation
* and delivery of traditional interrupt packets into the extended
* Starfire hardware format. Essentially UPAID's now have 2 more
* bits than in all previous Sun5 systems.
*/
struct starfire_irqinfo {
unsigned long imap_slots[32];
unsigned long tregs[32];
struct starfire_irqinfo *next;
int upaid, hwmid;
};
static struct starfire_irqinfo *sflist = NULL;
/* Beam me up Scott(McNeil)y... */
void starfire_hookup(int upaid)
{
struct starfire_irqinfo *p;
unsigned long treg_base, hwmid, i;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
prom_printf("starfire_hookup: No memory, this is insane.\n");
prom_halt();
}
treg_base = 0x100fc000000UL;
hwmid = ((upaid & 0x3c) << 1) |
((upaid & 0x40) >> 4) |
(upaid & 0x3);
p->hwmid = hwmid;
treg_base += (hwmid << 33UL);
treg_base += 0x200UL;
for (i = 0; i < 32; i++) {
p->imap_slots[i] = 0UL;
p->tregs[i] = treg_base + (i * 0x10UL);
/* Lets play it safe and not overwrite existing mappings */
if (upa_readl(p->tregs[i]) != 0)
p->imap_slots[i] = 0xdeadbeaf;
}
p->upaid = upaid;
p->next = sflist;
sflist = p;
}
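/*
* Editor's illustration of the upaid -> hwmid swizzle above (the example
* value is hypothetical, not from the original source):
* hwmid = ((upaid & 0x3c) << 1) | ((upaid & 0x40) >> 4) | (upaid & 0x3)
* i.e. bits 5:2 move up by one position, bit 6 drops into bit 2, and
* bits 1:0 stay put. For upaid = 0x4f (binary 1001111):
* (0x4f & 0x3c) << 1 = 0x0c << 1 = 0x18
* (0x4f & 0x40) >> 4 = 0x40 >> 4 = 0x04
* (0x4f & 0x03) = 0x03
* hwmid = 0x18 | 0x04 | 0x03 = 0x1f
*/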
unsigned int starfire_translate(unsigned long imap,
unsigned int upaid)
{
struct starfire_irqinfo *p;
unsigned int bus_hwmid;
unsigned int i;
bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f;
for (p = sflist; p != NULL; p = p->next)
if (p->hwmid == bus_hwmid)
break;
if (p == NULL) {
prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n",
((unsigned long)imap));
prom_halt();
}
for (i = 0; i < 32; i++) {
if (p->imap_slots[i] == imap ||
p->imap_slots[i] == 0UL)
break;
}
if (i == 32) {
printk("starfire_translate: Are you kidding me?\n");
panic("Lucy in the sky....");
}
p->imap_slots[i] = imap;
/* map to real upaid */
upaid = (((upaid & 0x3c) << 1) |
((upaid & 0x40) >> 4) |
(upaid & 0x3));
upa_writel(upaid, p->tregs[i]);
return i;
}
| gpl-2.0 |
lukego/linux | drivers/sbus/char/flash.c | 1237 | 4819 | /* flash.c: Allow mmap access to the OBP Flash, for OBP updates.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/upa.h>
static DEFINE_MUTEX(flash_mutex);
static DEFINE_SPINLOCK(flash_lock);
static struct {
unsigned long read_base; /* Physical read address */
unsigned long write_base; /* Physical write address */
unsigned long read_size; /* Size of read area */
unsigned long write_size; /* Size of write area */
unsigned long busy; /* In use? */
} flash;
#define FLASH_MINOR 152
static int
flash_mmap(struct file *file, struct vm_area_struct *vma)
{
unsigned long addr;
unsigned long size;
spin_lock(&flash_lock);
if (flash.read_base == flash.write_base) {
addr = flash.read_base;
size = flash.read_size;
} else {
if ((vma->vm_flags & VM_READ) &&
(vma->vm_flags & VM_WRITE)) {
spin_unlock(&flash_lock);
return -EINVAL;
}
if (vma->vm_flags & VM_READ) {
addr = flash.read_base;
size = flash.read_size;
} else if (vma->vm_flags & VM_WRITE) {
addr = flash.write_base;
size = flash.write_size;
} else {
spin_unlock(&flash_lock);
return -ENXIO;
}
}
spin_unlock(&flash_lock);
if ((vma->vm_pgoff << PAGE_SHIFT) > size)
return -ENXIO;
addr = vma->vm_pgoff + (addr >> PAGE_SHIFT);
if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size)
size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT));
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
static long long
flash_llseek(struct file *file, long long offset, int origin)
{
mutex_lock(&flash_mutex);
switch (origin) {
case 0: /* SEEK_SET */
file->f_pos = offset;
break;
case 1: /* SEEK_CUR */
file->f_pos += offset;
if (file->f_pos > flash.read_size)
file->f_pos = flash.read_size;
break;
case 2: /* SEEK_END */
file->f_pos = flash.read_size;
break;
default:
mutex_unlock(&flash_mutex);
return -EINVAL;
}
mutex_unlock(&flash_mutex);
return file->f_pos;
}
static ssize_t
flash_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
loff_t p = *ppos;
int i;
if (count > flash.read_size - p)
count = flash.read_size - p;
for (i = 0; i < count; i++) {
u8 data = upa_readb(flash.read_base + p + i);
if (put_user(data, buf))
return -EFAULT;
buf++;
}
*ppos += count;
return count;
}
static int
flash_open(struct inode *inode, struct file *file)
{
mutex_lock(&flash_mutex);
if (test_and_set_bit(0, (void *)&flash.busy) != 0) {
mutex_unlock(&flash_mutex);
return -EBUSY;
}
mutex_unlock(&flash_mutex);
return 0;
}
static int
flash_release(struct inode *inode, struct file *file)
{
spin_lock(&flash_lock);
flash.busy = 0;
spin_unlock(&flash_lock);
return 0;
}
static const struct file_operations flash_fops = {
/* no write to the Flash, use mmap
* and play flash dependent tricks.
*/
.owner = THIS_MODULE,
.llseek = flash_llseek,
.read = flash_read,
.mmap = flash_mmap,
.open = flash_open,
.release = flash_release,
};
static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops };
static int flash_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct device_node *parent;
parent = dp->parent;
if (strcmp(parent->name, "sbus") &&
strcmp(parent->name, "sbi") &&
strcmp(parent->name, "ebus"))
return -ENODEV;
flash.read_base = op->resource[0].start;
flash.read_size = resource_size(&op->resource[0]);
if (op->resource[1].flags) {
flash.write_base = op->resource[1].start;
flash.write_size = resource_size(&op->resource[1]);
} else {
flash.write_base = op->resource[0].start;
flash.write_size = resource_size(&op->resource[0]);
}
flash.busy = 0;
printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n",
op->dev.of_node->full_name,
flash.read_base, flash.read_size,
flash.write_base, flash.write_size);
return misc_register(&flash_dev);
}
static int flash_remove(struct platform_device *op)
{
misc_deregister(&flash_dev);
return 0;
}
static const struct of_device_id flash_match[] = {
{
.name = "flashprom",
},
{},
};
MODULE_DEVICE_TABLE(of, flash_match);
static struct platform_driver flash_driver = {
.driver = {
.name = "flash",
.of_match_table = flash_match,
},
.probe = flash_probe,
.remove = flash_remove,
};
module_platform_driver(flash_driver);
MODULE_LICENSE("GPL");
| gpl-2.0 |
8l/Bcachefs | arch/powerpc/sysdev/fsl_lbc.c | 1493 | 3956 | /*
* Freescale LBC and UPM routines.
*
* Copyright (c) 2007-2008 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/of.h>
#include <asm/prom.h>
#include <asm/fsl_lbc.h>
static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock);
static struct fsl_lbc_regs __iomem *fsl_lbc_regs;
static char __initdata *compat_lbc[] = {
"fsl,pq2-localbus",
"fsl,pq2pro-localbus",
"fsl,pq3-localbus",
"fsl,elbc",
};
static int __init fsl_lbc_init(void)
{
struct device_node *lbus;
int i;
for (i = 0; i < ARRAY_SIZE(compat_lbc); i++) {
lbus = of_find_compatible_node(NULL, NULL, compat_lbc[i]);
if (lbus)
goto found;
}
return -ENODEV;
found:
fsl_lbc_regs = of_iomap(lbus, 0);
of_node_put(lbus);
if (!fsl_lbc_regs)
return -ENOMEM;
return 0;
}
arch_initcall(fsl_lbc_init);
/**
* fsl_lbc_find - find Localbus bank
* @addr_base: base address of the memory bank
*
* This function walks LBC banks comparing "Base address" field of the BR
* registers with the supplied addr_base argument. When bases match this
* function returns bank number (starting with 0), otherwise it returns
* appropriate errno value.
*/
int fsl_lbc_find(phys_addr_t addr_base)
{
int i;
if (!fsl_lbc_regs)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(fsl_lbc_regs->bank); i++) {
__be32 br = in_be32(&fsl_lbc_regs->bank[i].br);
__be32 or = in_be32(&fsl_lbc_regs->bank[i].or);
if (br & BR_V && (br & or & BR_BA) == addr_base)
return i;
}
return -ENOENT;
}
EXPORT_SYMBOL(fsl_lbc_find);
/**
* fsl_upm_find - find pre-programmed UPM via base address
* @addr_base: base address of the memory bank controlled by the UPM
* @upm: pointer to the allocated fsl_upm structure
*
* This function fills fsl_upm structure so you can use it with the rest of
* UPM API. On success this function returns 0, otherwise it returns
* appropriate errno value.
*/
int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm)
{
int bank;
__be32 br;
bank = fsl_lbc_find(addr_base);
if (bank < 0)
return bank;
br = in_be32(&fsl_lbc_regs->bank[bank].br);
switch (br & BR_MSEL) {
case BR_MS_UPMA:
upm->mxmr = &fsl_lbc_regs->mamr;
break;
case BR_MS_UPMB:
upm->mxmr = &fsl_lbc_regs->mbmr;
break;
case BR_MS_UPMC:
upm->mxmr = &fsl_lbc_regs->mcmr;
break;
default:
return -EINVAL;
}
switch (br & BR_PS) {
case BR_PS_8:
upm->width = 8;
break;
case BR_PS_16:
upm->width = 16;
break;
case BR_PS_32:
upm->width = 32;
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(fsl_upm_find);
/**
* fsl_upm_run_pattern - actually run an UPM pattern
* @upm: pointer to the fsl_upm structure obtained via fsl_upm_find
* @io_base: remapped pointer to where memory access should happen
* @mar: MAR register content during pattern execution
*
* This function triggers dummy write to the memory specified by the io_base,
* thus UPM pattern actually executed. Note that mar usage depends on the
* pre-programmed AMX bits in the UPM RAM.
*/
int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
{
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&fsl_lbc_lock, flags);
out_be32(&fsl_lbc_regs->mar, mar);
switch (upm->width) {
case 8:
out_8(io_base, 0x0);
break;
case 16:
out_be16(io_base, 0x0);
break;
case 32:
out_be32(io_base, 0x0);
break;
default:
ret = -EINVAL;
break;
}
spin_unlock_irqrestore(&fsl_lbc_lock, flags);
return ret;
}
EXPORT_SYMBOL(fsl_upm_run_pattern);
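/*
* Editor's usage sketch (hypothetical driver code, not part of this file):
* the calling sequence the kernel-doc above describes for the exported UPM
* helpers. chip_base, chip_io and MY_MAR_CMD are placeholder names, and the
* sketch assumes the UPM has already been put into pattern-run mode.
*
* struct fsl_upm upm;
* int ret;
*
* ret = fsl_upm_find(chip_base, &upm); // locate the pre-programmed UPM
* if (ret)
* return ret;
*
* // MAR supplies the AMX-dependent address bits; the dummy write to
* // chip_io actually triggers the pattern
* ret = fsl_upm_run_pattern(&upm, chip_io, MY_MAR_CMD);
*/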
| gpl-2.0 |
h8rift/android_kernel_htc_holiday | net/ipv4/ip_sockglue.c | 1493 | 31241 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The IP to API glue.
*
* Authors: see ip.c
*
* Fixes:
* Many : Split from ip.c , see ip.c for history.
* Martin Mares : TOS setting fixed.
* Alan Cox : Fixed a couple of oopses in Martin's
* TOS tweaks.
* Mike McLagan : Routing by source
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/tcp_states.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/transp_v6.h>
#endif
#include <linux/errqueue.h>
#include <asm/uaccess.h>
#define IP_CMSG_PKTINFO 1
#define IP_CMSG_TTL 2
#define IP_CMSG_TOS 4
#define IP_CMSG_RECVOPTS 8
#define IP_CMSG_RETOPTS 16
#define IP_CMSG_PASSSEC 32
#define IP_CMSG_ORIGDSTADDR 64
/*
* SOL_IP control messages.
*/
static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
struct in_pktinfo info;
struct rtable *rt = skb_rtable(skb);
info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
if (rt) {
info.ipi_ifindex = rt->rt_iif;
info.ipi_spec_dst.s_addr = rt->rt_spec_dst;
} else {
info.ipi_ifindex = 0;
info.ipi_spec_dst.s_addr = 0;
}
put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
{
int ttl = ip_hdr(skb)->ttl;
put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
}
static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
{
put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
}
static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
if (IPCB(skb)->opt.optlen == 0)
return;
put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
ip_hdr(skb) + 1);
}
static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options * opt = (struct ip_options *)optbuf;
if (IPCB(skb)->opt.optlen == 0)
return;
if (ip_options_echo(opt, skb)) {
msg->msg_flags |= MSG_CTRUNC;
return;
}
ip_options_undo(opt);
put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}
static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
char *secdata;
u32 seclen, secid;
int err;
err = security_socket_getpeersec_dgram(NULL, skb, &secid);
if (err)
return;
err = security_secid_to_secctx(secid, &secdata, &seclen);
if (err)
return;
put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
security_release_secctx(secdata, seclen);
}
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
struct sockaddr_in sin;
const struct iphdr *iph = ip_hdr(skb);
__be16 *ports = (__be16 *)skb_transport_header(skb);
if (skb_transport_offset(skb) + 4 > skb->len)
return;
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = iph->daddr;
sin.sin_port = ports[1];
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}
void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(skb->sk);
unsigned flags = inet->cmsg_flags;
/* Ordered by supposed usage frequency */
if (flags & 1)
ip_cmsg_recv_pktinfo(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_ttl(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_tos(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_opts(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_retopts(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_security(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_dstaddr(msg, skb);
}
EXPORT_SYMBOL(ip_cmsg_recv);
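/*
* Editor's sketch of the receiving side (hypothetical userspace code, not
* part of this file): a socket with IP_PKTINFO enabled sees the ancillary
* data produced by ip_cmsg_recv_pktinfo() roughly like this:
*
* int on = 1;
* setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
* ...
* recvmsg(fd, &msg, 0); // msg.msg_control points at a cmsg buffer
* for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
* if (cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_PKTINFO) {
* struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(cmsg);
* // pi->ipi_ifindex, pi->ipi_addr, pi->ipi_spec_dst
* }
*/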
int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
{
int err;
struct cmsghdr *cmsg;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_IP)
continue;
switch (cmsg->cmsg_type) {
case IP_RETOPTS:
err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
err < 40 ? err : 40);
if (err)
return err;
break;
case IP_PKTINFO:
{
struct in_pktinfo *info;
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
return -EINVAL;
info = (struct in_pktinfo *)CMSG_DATA(cmsg);
ipc->oif = info->ipi_ifindex;
ipc->addr = info->ipi_spec_dst.s_addr;
break;
}
default:
return -EINVAL;
}
}
return 0;
}
/* Special input handler for packets caught by the router alert option.
They are selected only by protocol field, and then processed like
local packets; but only if someone wants them! Otherwise, a router
not running rsvpd will kill RSVP.
What user level does with them is a user-level problem. I have no idea
how it will masquerade or NAT them (it is a joke, joke :-)), but the
receiver should be clever enough, e.g., to forward mtrace requests
sent to a multicast group so they reach the destination's designated router.
*/
struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);
static void ip_ra_destroy_rcu(struct rcu_head *head)
{
struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
sock_put(ra->saved_sk);
kfree(ra);
}
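/*
 * ip_ra_control - add to or remove a raw socket from the router alert chain.
 * The chain is protected by ip_ra_lock for writers and by RCU for readers;
 * removal defers the final sock_put()/kfree() to an RCU callback so that
 * ip_call_ra_chain() never has to take references on the sockets it walks.
 */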
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *))
{
struct ip_ra_chain *ra, *new_ra;
struct ip_ra_chain __rcu **rap;
if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
return -EINVAL;
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
spin_lock_bh(&ip_ra_lock);
for (rap = &ip_ra_chain;
(ra = rcu_dereference_protected(*rap,
lockdep_is_held(&ip_ra_lock))) != NULL;
rap = &ra->next) {
if (ra->sk == sk) {
if (on) {
spin_unlock_bh(&ip_ra_lock);
kfree(new_ra);
return -EADDRINUSE;
}
/* don't let ip_call_ra_chain() use sk again */
ra->sk = NULL;
rcu_assign_pointer(*rap, ra->next);
spin_unlock_bh(&ip_ra_lock);
if (ra->destructor)
ra->destructor(sk);
/*
* Delay sock_put(sk) and kfree(ra) until after one RCU grace
* period. This guarantees that ip_call_ra_chain() does not need
* to mess with socket refcounts.
*/
ra->saved_sk = sk;
call_rcu(&ra->rcu, ip_ra_destroy_rcu);
return 0;
}
}
if (new_ra == NULL) {
spin_unlock_bh(&ip_ra_lock);
return -ENOBUFS;
}
new_ra->sk = sk;
new_ra->destructor = destructor;
new_ra->next = ra;
rcu_assign_pointer(*rap, new_ra);
sock_hold(sk);
spin_unlock_bh(&ip_ra_lock);
return 0;
}
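/*
 * ip_icmp_error - queue a clone of an ICMP error packet on the socket error
 * queue, tagged with SO_EE_ORIGIN_ICMP, so that a later
 * recvmsg(..., MSG_ERRQUEUE) handled by ip_recv_error() can report it to
 * applications that enabled IP_RECVERR.
 */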
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
struct sock_exterr_skb *serr;
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
return;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
serr->ee.ee_type = icmp_hdr(skb)->type;
serr->ee.ee_code = icmp_hdr(skb)->code;
serr->ee.ee_pad = 0;
serr->ee.ee_info = info;
serr->ee.ee_data = 0;
serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
skb_network_header(skb);
serr->port = port;
if (skb_pull(skb, payload - skb->data) != NULL) {
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb) == 0)
return;
}
kfree_skb(skb);
}
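/*
 * ip_local_error - report a locally detected error (e.g. a failed sendmsg)
 * through the same error queue mechanism, using a freshly allocated skb
 * that carries only an IP header with the destination address filled in.
 */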
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
struct inet_sock *inet = inet_sk(sk);
struct sock_exterr_skb *serr;
struct iphdr *iph;
struct sk_buff *skb;
if (!inet->recverr)
return;
skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
if (!skb)
return;
skb_put(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
iph->daddr = daddr;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
serr->ee.ee_type = 0;
serr->ee.ee_code = 0;
serr->ee.ee_pad = 0;
serr->ee.ee_info = info;
serr->ee.ee_data = 0;
serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
serr->port = port;
__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb))
kfree_skb(skb);
}
/*
* Handle MSG_ERRQUEUE
*/
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
struct sock_exterr_skb *serr;
struct sk_buff *skb, *skb2;
struct sockaddr_in *sin;
struct {
struct sock_extended_err ee;
struct sockaddr_in offender;
} errhdr;
int err;
int copied;
err = -EAGAIN;
skb = skb_dequeue(&sk->sk_error_queue);
if (skb == NULL)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto out_free_skb;
sock_recv_timestamp(msg, sk, skb);
serr = SKB_EXT_ERR(skb);
sin = (struct sockaddr_in *)msg->msg_name;
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
serr->addr_offset);
sin->sin_port = serr->port;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
sin->sin_family = AF_UNSPEC;
if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
struct inet_sock *inet = inet_sk(sk);
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
/* Now we could try to dump offended packet options */
msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
/* Reset and regenerate socket error */
spin_lock_bh(&sk->sk_error_queue.lock);
sk->sk_err = 0;
skb2 = skb_peek(&sk->sk_error_queue);
if (skb2 != NULL) {
sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
spin_unlock_bh(&sk->sk_error_queue.lock);
sk->sk_error_report(sk);
} else
spin_unlock_bh(&sk->sk_error_queue.lock);
out_free_skb:
kfree_skb(skb);
out:
return err;
}
static void opt_kfree_rcu(struct rcu_head *head)
{
kfree(container_of(head, struct ip_options_rcu, rcu));
}
/*
* Socket option code for IP. This is the end of the line after any
* TCP,UDP etc options on an IP socket.
*/
static int do_ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
struct inet_sock *inet = inet_sk(sk);
int val = 0, err;
if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
(1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
(1<<IP_RETOPTS) | (1<<IP_TOS) |
(1<<IP_TTL) | (1<<IP_HDRINCL) |
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
(1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
(1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
optname == IP_MULTICAST_TTL ||
optname == IP_MULTICAST_ALL ||
optname == IP_MULTICAST_LOOP ||
optname == IP_RECVORIGDSTADDR) {
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
} else if (optlen >= sizeof(char)) {
unsigned char ucval;
if (get_user(ucval, (unsigned char __user *) optval))
return -EFAULT;
val = (int) ucval;
}
}
/* If optlen==0, it is equivalent to val == 0 */
if (ip_mroute_opt(optname))
return ip_mroute_setsockopt(sk, optname, optval, optlen);
err = 0;
lock_sock(sk);
switch (optname) {
case IP_OPTIONS:
{
struct ip_options_rcu *old, *opt = NULL;
if (optlen > 40)
goto e_inval;
err = ip_options_get_from_user(sock_net(sk), &opt,
optval, optlen);
if (err)
break;
old = rcu_dereference_protected(inet->inet_opt,
sock_owned_by_user(sk));
if (inet->is_icsk) {
struct inet_connection_sock *icsk = inet_csk(sk);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->sk_family == PF_INET ||
(!((1 << sk->sk_state) &
(TCPF_LISTEN | TCPF_CLOSE)) &&
inet->inet_daddr != LOOPBACK4_IPV6)) {
#endif
if (old)
icsk->icsk_ext_hdr_len -= old->opt.optlen;
if (opt)
icsk->icsk_ext_hdr_len += opt->opt.optlen;
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
}
#endif
}
rcu_assign_pointer(inet->inet_opt, opt);
if (old)
call_rcu(&old->rcu, opt_kfree_rcu);
break;
}
case IP_PKTINFO:
if (val)
inet->cmsg_flags |= IP_CMSG_PKTINFO;
else
inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
break;
case IP_RECVTTL:
if (val)
inet->cmsg_flags |= IP_CMSG_TTL;
else
inet->cmsg_flags &= ~IP_CMSG_TTL;
break;
case IP_RECVTOS:
if (val)
inet->cmsg_flags |= IP_CMSG_TOS;
else
inet->cmsg_flags &= ~IP_CMSG_TOS;
break;
case IP_RECVOPTS:
if (val)
inet->cmsg_flags |= IP_CMSG_RECVOPTS;
else
inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
break;
case IP_RETOPTS:
if (val)
inet->cmsg_flags |= IP_CMSG_RETOPTS;
else
inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
break;
case IP_PASSSEC:
if (val)
inet->cmsg_flags |= IP_CMSG_PASSSEC;
else
inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
break;
case IP_RECVORIGDSTADDR:
if (val)
inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
else
inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
break;
case IP_TOS: /* This sets both TOS and Precedence */
if (sk->sk_type == SOCK_STREAM) {
val &= ~3;
val |= inet->tos & 3;
}
if (inet->tos != val) {
inet->tos = val;
sk->sk_priority = rt_tos2priority(val);
sk_dst_reset(sk);
}
break;
case IP_TTL:
if (optlen < 1)
goto e_inval;
if (val != -1 && (val < 0 || val > 255))
goto e_inval;
inet->uc_ttl = val;
break;
case IP_HDRINCL:
if (sk->sk_type != SOCK_RAW) {
err = -ENOPROTOOPT;
break;
}
inet->hdrincl = val ? 1 : 0;
break;
case IP_NODEFRAG:
if (sk->sk_type != SOCK_RAW) {
err = -ENOPROTOOPT;
break;
}
inet->nodefrag = val ? 1 : 0;
break;
case IP_MTU_DISCOVER:
if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
goto e_inval;
inet->pmtudisc = val;
break;
case IP_RECVERR:
inet->recverr = !!val;
if (!val)
skb_queue_purge(&sk->sk_error_queue);
break;
case IP_MULTICAST_TTL:
if (sk->sk_type == SOCK_STREAM)
goto e_inval;
if (optlen < 1)
goto e_inval;
if (val == -1)
val = 1;
if (val < 0 || val > 255)
goto e_inval;
inet->mc_ttl = val;
break;
case IP_MULTICAST_LOOP:
if (optlen < 1)
goto e_inval;
inet->mc_loop = !!val;
break;
case IP_MULTICAST_IF:
{
struct ip_mreqn mreq;
struct net_device *dev = NULL;
if (sk->sk_type == SOCK_STREAM)
goto e_inval;
/*
* Check the arguments are allowable
*/
if (optlen < sizeof(struct in_addr))
goto e_inval;
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
if (copy_from_user(&mreq, optval, sizeof(mreq)))
break;
} else {
memset(&mreq, 0, sizeof(mreq));
if (optlen >= sizeof(struct in_addr) &&
copy_from_user(&mreq.imr_address, optval,
sizeof(struct in_addr)))
break;
}
if (!mreq.imr_ifindex) {
if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
inet->mc_index = 0;
inet->mc_addr = 0;
err = 0;
break;
}
dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
if (dev)
mreq.imr_ifindex = dev->ifindex;
} else
dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
err = -EADDRNOTAVAIL;
if (!dev)
break;
dev_put(dev);
err = -EINVAL;
if (sk->sk_bound_dev_if &&
mreq.imr_ifindex != sk->sk_bound_dev_if)
break;
inet->mc_index = mreq.imr_ifindex;
inet->mc_addr = mreq.imr_address.s_addr;
err = 0;
break;
}
case IP_ADD_MEMBERSHIP:
case IP_DROP_MEMBERSHIP:
{
struct ip_mreqn mreq;
err = -EPROTO;
if (inet_sk(sk)->is_icsk)
break;
if (optlen < sizeof(struct ip_mreq))
goto e_inval;
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
if (copy_from_user(&mreq, optval, sizeof(mreq)))
break;
} else {
memset(&mreq, 0, sizeof(mreq));
if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
break;
}
if (optname == IP_ADD_MEMBERSHIP)
err = ip_mc_join_group(sk, &mreq);
else
err = ip_mc_leave_group(sk, &mreq);
break;
}
case IP_MSFILTER:
{
struct ip_msfilter *msf;
if (optlen < IP_MSFILTER_SIZE(0))
goto e_inval;
if (optlen > sysctl_optmem_max) {
err = -ENOBUFS;
break;
}
msf = kmalloc(optlen, GFP_KERNEL);
if (!msf) {
err = -ENOBUFS;
break;
}
err = -EFAULT;
if (copy_from_user(msf, optval, optlen)) {
kfree(msf);
break;
}
/* numsrc >= (1G-4) overflow in 32 bits */
if (msf->imsf_numsrc >= 0x3ffffffcU ||
msf->imsf_numsrc > sysctl_igmp_max_msf) {
kfree(msf);
err = -ENOBUFS;
break;
}
if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
kfree(msf);
err = -EINVAL;
break;
}
err = ip_mc_msfilter(sk, msf, 0);
kfree(msf);
break;
}
case IP_BLOCK_SOURCE:
case IP_UNBLOCK_SOURCE:
case IP_ADD_SOURCE_MEMBERSHIP:
case IP_DROP_SOURCE_MEMBERSHIP:
{
struct ip_mreq_source mreqs;
int omode, add;
if (optlen != sizeof(struct ip_mreq_source))
goto e_inval;
if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
err = -EFAULT;
break;
}
if (optname == IP_BLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 1;
} else if (optname == IP_UNBLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 0;
} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
struct ip_mreqn mreq;
mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
mreq.imr_address.s_addr = mreqs.imr_interface;
mreq.imr_ifindex = 0;
err = ip_mc_join_group(sk, &mreq);
if (err && err != -EADDRINUSE)
break;
omode = MCAST_INCLUDE;
add = 1;
} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
omode = MCAST_INCLUDE;
add = 0;
}
err = ip_mc_source(add, omode, sk, &mreqs, 0);
break;
}
case MCAST_JOIN_GROUP:
case MCAST_LEAVE_GROUP:
{
struct group_req greq;
struct sockaddr_in *psin;
struct ip_mreqn mreq;
if (optlen < sizeof(struct group_req))
goto e_inval;
err = -EFAULT;
if (copy_from_user(&greq, optval, sizeof(greq)))
break;
psin = (struct sockaddr_in *)&greq.gr_group;
if (psin->sin_family != AF_INET)
goto e_inval;
memset(&mreq, 0, sizeof(mreq));
mreq.imr_multiaddr = psin->sin_addr;
mreq.imr_ifindex = greq.gr_interface;
if (optname == MCAST_JOIN_GROUP)
err = ip_mc_join_group(sk, &mreq);
else
err = ip_mc_leave_group(sk, &mreq);
break;
}
case MCAST_JOIN_SOURCE_GROUP:
case MCAST_LEAVE_SOURCE_GROUP:
case MCAST_BLOCK_SOURCE:
case MCAST_UNBLOCK_SOURCE:
{
struct group_source_req greqs;
struct ip_mreq_source mreqs;
struct sockaddr_in *psin;
int omode, add;
if (optlen != sizeof(struct group_source_req))
goto e_inval;
if (copy_from_user(&greqs, optval, sizeof(greqs))) {
err = -EFAULT;
break;
}
if (greqs.gsr_group.ss_family != AF_INET ||
greqs.gsr_source.ss_family != AF_INET) {
err = -EADDRNOTAVAIL;
break;
}
psin = (struct sockaddr_in *)&greqs.gsr_group;
mreqs.imr_multiaddr = psin->sin_addr.s_addr;
psin = (struct sockaddr_in *)&greqs.gsr_source;
mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
mreqs.imr_interface = 0; /* use index for mc_source */
if (optname == MCAST_BLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 1;
} else if (optname == MCAST_UNBLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 0;
} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
struct ip_mreqn mreq;
psin = (struct sockaddr_in *)&greqs.gsr_group;
mreq.imr_multiaddr = psin->sin_addr;
mreq.imr_address.s_addr = 0;
mreq.imr_ifindex = greqs.gsr_interface;
err = ip_mc_join_group(sk, &mreq);
if (err && err != -EADDRINUSE)
break;
greqs.gsr_interface = mreq.imr_ifindex;
omode = MCAST_INCLUDE;
add = 1;
} else /* MCAST_LEAVE_SOURCE_GROUP */ {
omode = MCAST_INCLUDE;
add = 0;
}
err = ip_mc_source(add, omode, sk, &mreqs,
greqs.gsr_interface);
break;
}
case MCAST_MSFILTER:
{
struct sockaddr_in *psin;
struct ip_msfilter *msf = NULL;
struct group_filter *gsf = NULL;
int msize, i, ifindex;
if (optlen < GROUP_FILTER_SIZE(0))
goto e_inval;
if (optlen > sysctl_optmem_max) {
err = -ENOBUFS;
break;
}
gsf = kmalloc(optlen, GFP_KERNEL);
if (!gsf) {
err = -ENOBUFS;
break;
}
err = -EFAULT;
if (copy_from_user(gsf, optval, optlen))
goto mc_msf_out;
/* numsrc >= (4G-140)/128 overflow in 32 bits */
if (gsf->gf_numsrc >= 0x1ffffff ||
gsf->gf_numsrc > sysctl_igmp_max_msf) {
err = -ENOBUFS;
goto mc_msf_out;
}
if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
err = -EINVAL;
goto mc_msf_out;
}
msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
msf = kmalloc(msize, GFP_KERNEL);
if (!msf) {
err = -ENOBUFS;
goto mc_msf_out;
}
ifindex = gsf->gf_interface;
psin = (struct sockaddr_in *)&gsf->gf_group;
if (psin->sin_family != AF_INET) {
err = -EADDRNOTAVAIL;
goto mc_msf_out;
}
msf->imsf_multiaddr = psin->sin_addr.s_addr;
msf->imsf_interface = 0;
msf->imsf_fmode = gsf->gf_fmode;
msf->imsf_numsrc = gsf->gf_numsrc;
err = -EADDRNOTAVAIL;
for (i = 0; i < gsf->gf_numsrc; ++i) {
psin = (struct sockaddr_in *)&gsf->gf_slist[i];
if (psin->sin_family != AF_INET)
goto mc_msf_out;
msf->imsf_slist[i] = psin->sin_addr.s_addr;
}
kfree(gsf);
gsf = NULL;
err = ip_mc_msfilter(sk, msf, ifindex);
mc_msf_out:
kfree(msf);
kfree(gsf);
break;
}
case IP_MULTICAST_ALL:
if (optlen < 1)
goto e_inval;
if (val != 0 && val != 1)
goto e_inval;
inet->mc_all = val;
break;
case IP_ROUTER_ALERT:
err = ip_ra_control(sk, val ? 1 : 0, NULL);
break;
case IP_FREEBIND:
if (optlen < 1)
goto e_inval;
inet->freebind = !!val;
break;
case IP_IPSEC_POLICY:
case IP_XFRM_POLICY:
err = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
err = xfrm_user_policy(sk, optname, optval, optlen);
break;
case IP_TRANSPARENT:
if (!capable(CAP_NET_ADMIN)) {
err = -EPERM;
break;
}
if (optlen < 1)
goto e_inval;
inet->transparent = !!val;
break;
case IP_MINTTL:
if (optlen < 1)
goto e_inval;
if (val < 0 || val > 255)
goto e_inval;
inet->min_ttl = val;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
e_inval:
release_sock(sk);
return -EINVAL;
}
/**
* ip_queue_rcv_skb - Queue an skb into sock receive queue
* @sk: socket
* @skb: buffer
*
* Queues an skb into the socket receive queue. If the IP_CMSG_PKTINFO option
* is not set, we drop the skb dst entry now, while the dst cache line is hot.
*/
int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
skb_dst_drop(skb);
return sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(ip_queue_rcv_skb);
int ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
int err;
if (level != SOL_IP)
return -ENOPROTOOPT;
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
optname != IP_IPSEC_POLICY &&
optname != IP_XFRM_POLICY &&
!ip_mroute_opt(optname)) {
lock_sock(sk);
err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
release_sock(sk);
}
#endif
return err;
}
EXPORT_SYMBOL(ip_setsockopt);
#ifdef CONFIG_COMPAT
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
int err;
if (level != SOL_IP)
return -ENOPROTOOPT;
if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
return compat_mc_setsockopt(sk, level, optname, optval, optlen,
ip_setsockopt);
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
optname != IP_IPSEC_POLICY &&
optname != IP_XFRM_POLICY &&
!ip_mroute_opt(optname)) {
lock_sock(sk);
err = compat_nf_setsockopt(sk, PF_INET, optname,
optval, optlen);
release_sock(sk);
}
#endif
return err;
}
EXPORT_SYMBOL(compat_ip_setsockopt);
#endif
/*
* Get the options. Note for future reference. The GET of IP options gets
* the _received_ ones. The set sets the _sent_ ones.
*/
static int do_ip_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct inet_sock *inet = inet_sk(sk);
int val;
int len;
if (level != SOL_IP)
return -EOPNOTSUPP;
if (ip_mroute_opt(optname))
return ip_mroute_getsockopt(sk, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
lock_sock(sk);
switch (optname) {
case IP_OPTIONS:
{
unsigned char optbuf[sizeof(struct ip_options)+40];
struct ip_options *opt = (struct ip_options *)optbuf;
struct ip_options_rcu *inet_opt;
inet_opt = rcu_dereference_protected(inet->inet_opt,
sock_owned_by_user(sk));
opt->optlen = 0;
if (inet_opt)
memcpy(optbuf, &inet_opt->opt,
sizeof(struct ip_options) +
inet_opt->opt.optlen);
release_sock(sk);
if (opt->optlen == 0)
return put_user(0, optlen);
ip_options_undo(opt);
len = min_t(unsigned int, len, opt->optlen);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, opt->__data, len))
return -EFAULT;
return 0;
}
case IP_PKTINFO:
val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
break;
case IP_RECVTTL:
val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
break;
case IP_RECVTOS:
val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
break;
case IP_RECVOPTS:
val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
break;
case IP_RETOPTS:
val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
break;
case IP_PASSSEC:
val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
break;
case IP_RECVORIGDSTADDR:
val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
break;
case IP_TOS:
val = inet->tos;
break;
case IP_TTL:
val = (inet->uc_ttl == -1 ?
sysctl_ip_default_ttl :
inet->uc_ttl);
break;
case IP_HDRINCL:
val = inet->hdrincl;
break;
case IP_NODEFRAG:
val = inet->nodefrag;
break;
case IP_MTU_DISCOVER:
val = inet->pmtudisc;
break;
case IP_MTU:
{
struct dst_entry *dst;
val = 0;
dst = sk_dst_get(sk);
if (dst) {
val = dst_mtu(dst);
dst_release(dst);
}
if (!val) {
release_sock(sk);
return -ENOTCONN;
}
break;
}
case IP_RECVERR:
val = inet->recverr;
break;
case IP_MULTICAST_TTL:
val = inet->mc_ttl;
break;
case IP_MULTICAST_LOOP:
val = inet->mc_loop;
break;
case IP_MULTICAST_IF:
{
struct in_addr addr;
len = min_t(unsigned int, len, sizeof(struct in_addr));
addr.s_addr = inet->mc_addr;
release_sock(sk);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &addr, len))
return -EFAULT;
return 0;
}
case IP_MSFILTER:
{
struct ip_msfilter msf;
int err;
if (len < IP_MSFILTER_SIZE(0)) {
release_sock(sk);
return -EINVAL;
}
if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
release_sock(sk);
return -EFAULT;
}
err = ip_mc_msfget(sk, &msf,
(struct ip_msfilter __user *)optval, optlen);
release_sock(sk);
return err;
}
case MCAST_MSFILTER:
{
struct group_filter gsf;
int err;
if (len < GROUP_FILTER_SIZE(0)) {
release_sock(sk);
return -EINVAL;
}
if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
release_sock(sk);
return -EFAULT;
}
err = ip_mc_gsfget(sk, &gsf,
(struct group_filter __user *)optval,
optlen);
release_sock(sk);
return err;
}
case IP_MULTICAST_ALL:
val = inet->mc_all;
break;
case IP_PKTOPTIONS:
{
struct msghdr msg;
release_sock(sk);
if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
msg.msg_control = optval;
msg.msg_controllen = len;
msg.msg_flags = 0;
if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
struct in_pktinfo info;
info.ipi_addr.s_addr = inet->inet_rcv_saddr;
info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
info.ipi_ifindex = inet->mc_index;
put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
if (inet->cmsg_flags & IP_CMSG_TTL) {
int hlim = inet->mc_ttl;
put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
}
len -= msg.msg_controllen;
return put_user(len, optlen);
}
case IP_FREEBIND:
val = inet->freebind;
break;
case IP_TRANSPARENT:
val = inet->transparent;
break;
case IP_MINTTL:
val = inet->min_ttl;
break;
default:
release_sock(sk);
return -ENOPROTOOPT;
}
release_sock(sk);
if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
unsigned char ucval = (unsigned char)val;
len = 1;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &ucval, 1))
return -EFAULT;
} else {
len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
}
return 0;
}
int ip_getsockopt(struct sock *sk, int level,
int optname, char __user *optval, int __user *optlen)
{
int err;
err = do_ip_getsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
!ip_mroute_opt(optname)) {
int len;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
err = nf_getsockopt(sk, PF_INET, optname, optval,
&len);
release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
return err;
}
#endif
return err;
}
EXPORT_SYMBOL(ip_getsockopt);
#ifdef CONFIG_COMPAT
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int err;
if (optname == MCAST_MSFILTER)
return compat_mc_getsockopt(sk, level, optname, optval, optlen,
ip_getsockopt);
err = do_ip_getsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
!ip_mroute_opt(optname)) {
int len;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
return err;
}
#endif
return err;
}
EXPORT_SYMBOL(compat_ip_getsockopt);
#endif
| gpl-2.0 |
pritanshchandra/purex_kernel_xolo_black | arch/arm/mach-pxa/lpd270.c | 2261 | 12111 | /*
* linux/arch/arm/mach-pxa/lpd270.c
*
* Support for the LogicPD PXA270 Card Engine.
* Derived from the mainstone code, which carries these notices:
*
* Author: Nicolas Pitre
* Created: Nov 05, 2002
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/pwm_backlight.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/flash.h>
#include <mach/pxa27x.h>
#include <mach/lpd270.h>
#include <mach/audio.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/irda-pxaficp.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <mach/smemc.h>
#include "generic.h"
#include "devices.h"
static unsigned long lpd270_pin_config[] __initdata = {
/* Chip Selects */
GPIO15_nCS_1, /* Mainboard Flash */
GPIO78_nCS_2, /* CPLD + Ethernet */
/* LCD - 16bpp Active TFT */
GPIO58_LCD_LDD_0,
GPIO59_LCD_LDD_1,
GPIO60_LCD_LDD_2,
GPIO61_LCD_LDD_3,
GPIO62_LCD_LDD_4,
GPIO63_LCD_LDD_5,
GPIO64_LCD_LDD_6,
GPIO65_LCD_LDD_7,
GPIO66_LCD_LDD_8,
GPIO67_LCD_LDD_9,
GPIO68_LCD_LDD_10,
GPIO69_LCD_LDD_11,
GPIO70_LCD_LDD_12,
GPIO71_LCD_LDD_13,
GPIO72_LCD_LDD_14,
GPIO73_LCD_LDD_15,
GPIO74_LCD_FCLK,
GPIO75_LCD_LCLK,
GPIO76_LCD_PCLK,
GPIO77_LCD_BIAS,
GPIO16_PWM0_OUT, /* Backlight */
/* USB Host */
GPIO88_USBH1_PWR,
GPIO89_USBH1_PEN,
/* AC97 */
GPIO28_AC97_BITCLK,
GPIO29_AC97_SDATA_IN_0,
GPIO30_AC97_SDATA_OUT,
GPIO31_AC97_SYNC,
GPIO45_AC97_SYSCLK,
GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH,
};
static unsigned int lpd270_irq_enabled;
static void lpd270_mask_irq(struct irq_data *d)
{
int lpd270_irq = d->irq - LPD270_IRQ(0);
__raw_writew(~(1 << lpd270_irq), LPD270_INT_STATUS);
lpd270_irq_enabled &= ~(1 << lpd270_irq);
__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
}
static void lpd270_unmask_irq(struct irq_data *d)
{
int lpd270_irq = d->irq - LPD270_IRQ(0);
lpd270_irq_enabled |= 1 << lpd270_irq;
__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
}
static struct irq_chip lpd270_irq_chip = {
.name = "CPLD",
.irq_ack = lpd270_mask_irq,
.irq_mask = lpd270_mask_irq,
.irq_unmask = lpd270_unmask_irq,
};
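/*
 * Chained handler for the board CPLD: demultiplex the sources latched in
 * LPD270_INT_STATUS onto the LPD270_IRQ(x) virtual IRQs and keep going
 * until no enabled source remains pending.
 */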
static void lpd270_irq_handler(unsigned int irq, struct irq_desc *desc)
{
unsigned long pending;
pending = __raw_readw(LPD270_INT_STATUS) & lpd270_irq_enabled;
do {
/* clear useless edge notification */
desc->irq_data.chip->irq_ack(&desc->irq_data);
if (likely(pending)) {
irq = LPD270_IRQ(0) + __ffs(pending);
generic_handle_irq(irq);
pending = __raw_readw(LPD270_INT_STATUS) &
lpd270_irq_enabled;
}
} while (pending);
}
static void __init lpd270_init_irq(void)
{
int irq;
pxa27x_init_irq();
__raw_writew(0, LPD270_INT_MASK);
__raw_writew(0, LPD270_INT_STATUS);
/* setup extra LogicPD PXA270 irqs */
for (irq = LPD270_IRQ(2); irq <= LPD270_IRQ(4); irq++) {
irq_set_chip_and_handler(irq, &lpd270_irq_chip,
handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lpd270_irq_handler);
irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
}
#ifdef CONFIG_PM
static void lpd270_irq_resume(void)
{
__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
}
static struct syscore_ops lpd270_irq_syscore_ops = {
.resume = lpd270_irq_resume,
};
static int __init lpd270_irq_device_init(void)
{
if (machine_is_logicpd_pxa270()) {
register_syscore_ops(&lpd270_irq_syscore_ops);
return 0;
}
return -ENODEV;
}
device_initcall(lpd270_irq_device_init);
#endif
static struct resource smc91x_resources[] = {
[0] = {
.start = LPD270_ETH_PHYS,
.end = (LPD270_ETH_PHYS + 0xfffff),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = LPD270_ETHERNET_IRQ,
.end = LPD270_ETHERNET_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
};
static struct resource lpd270_flash_resources[] = {
[0] = {
.start = PXA_CS0_PHYS,
.end = PXA_CS0_PHYS + SZ_64M - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = PXA_CS1_PHYS,
.end = PXA_CS1_PHYS + SZ_64M - 1,
.flags = IORESOURCE_MEM,
},
};
static struct mtd_partition lpd270_flash0_partitions[] = {
{
.name = "Bootloader",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_WRITEABLE /* force read-only */
}, {
.name = "Kernel",
.size = 0x00400000,
.offset = 0x00040000,
}, {
.name = "Filesystem",
.size = MTDPART_SIZ_FULL,
.offset = 0x00440000
},
};
static struct flash_platform_data lpd270_flash_data[2] = {
{
.name = "processor-flash",
.map_name = "cfi_probe",
.parts = lpd270_flash0_partitions,
.nr_parts = ARRAY_SIZE(lpd270_flash0_partitions),
}, {
.name = "mainboard-flash",
.map_name = "cfi_probe",
.parts = NULL,
.nr_parts = 0,
}
};
static struct platform_device lpd270_flash_device[2] = {
{
.name = "pxa2xx-flash",
.id = 0,
.dev = {
.platform_data = &lpd270_flash_data[0],
},
.resource = &lpd270_flash_resources[0],
.num_resources = 1,
}, {
.name = "pxa2xx-flash",
.id = 1,
.dev = {
.platform_data = &lpd270_flash_data[1],
},
.resource = &lpd270_flash_resources[1],
.num_resources = 1,
},
};
static struct platform_pwm_backlight_data lpd270_backlight_data = {
.pwm_id = 0,
.max_brightness = 1,
.dft_brightness = 1,
.pwm_period_ns = 78770,
};
static struct platform_device lpd270_backlight_device = {
.name = "pwm-backlight",
.dev = {
.parent = &pxa27x_device_pwm0.dev,
.platform_data = &lpd270_backlight_data,
},
};
/* 5.7" TFT QVGA (LoLo display number 1) */
static struct pxafb_mode_info sharp_lq057q3dc02_mode = {
.pixclock = 150000,
.xres = 320,
.yres = 240,
.bpp = 16,
.hsync_len = 0x14,
.left_margin = 0x28,
.right_margin = 0x0a,
.vsync_len = 0x02,
.upper_margin = 0x08,
.lower_margin = 0x14,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct pxafb_mach_info sharp_lq057q3dc02 = {
.modes = &sharp_lq057q3dc02_mode,
.num_modes = 1,
.lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
LCD_ALTERNATE_MAPPING,
};
/* 12.1" TFT SVGA (LoLo display number 2) */
static struct pxafb_mode_info sharp_lq121s1dg31_mode = {
.pixclock = 50000,
.xres = 800,
.yres = 600,
.bpp = 16,
.hsync_len = 0x05,
.left_margin = 0x52,
.right_margin = 0x05,
.vsync_len = 0x04,
.upper_margin = 0x14,
.lower_margin = 0x0a,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct pxafb_mach_info sharp_lq121s1dg31 = {
.modes = &sharp_lq121s1dg31_mode,
.num_modes = 1,
.lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
LCD_ALTERNATE_MAPPING,
};
/* 3.6" TFT QVGA (LoLo display number 3) */
static struct pxafb_mode_info sharp_lq036q1da01_mode = {
.pixclock = 150000,
.xres = 320,
.yres = 240,
.bpp = 16,
.hsync_len = 0x0e,
.left_margin = 0x04,
.right_margin = 0x0a,
.vsync_len = 0x03,
.upper_margin = 0x03,
.lower_margin = 0x03,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct pxafb_mach_info sharp_lq036q1da01 = {
.modes = &sharp_lq036q1da01_mode,
.num_modes = 1,
.lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
LCD_ALTERNATE_MAPPING,
};
/* 6.4" TFT VGA (LoLo display number 5) */
static struct pxafb_mode_info sharp_lq64d343_mode = {
.pixclock = 25000,
.xres = 640,
.yres = 480,
.bpp = 16,
.hsync_len = 0x31,
.left_margin = 0x89,
.right_margin = 0x19,
.vsync_len = 0x12,
.upper_margin = 0x22,
.lower_margin = 0x00,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct pxafb_mach_info sharp_lq64d343 = {
.modes = &sharp_lq64d343_mode,
.num_modes = 1,
.lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
LCD_ALTERNATE_MAPPING,
};
/* 10.4" TFT VGA (LoLo display number 7) */
static struct pxafb_mode_info sharp_lq10d368_mode = {
.pixclock = 25000,
.xres = 640,
.yres = 480,
.bpp = 16,
.hsync_len = 0x31,
.left_margin = 0x89,
.right_margin = 0x19,
.vsync_len = 0x12,
.upper_margin = 0x22,
.lower_margin = 0x00,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct pxafb_mach_info sharp_lq10d368 = {
.modes = &sharp_lq10d368_mode,
.num_modes = 1,
.lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
LCD_ALTERNATE_MAPPING,
};
/* 3.5" TFT QVGA (LoLo display number 8) */
static struct pxafb_mode_info sharp_lq035q7db02_20_mode = {
.pixclock = 150000,
.xres = 240,
.yres = 320,
.bpp = 16,
.hsync_len = 0x0e,
.left_margin = 0x0a,
.right_margin = 0x0a,
.vsync_len = 0x03,
.upper_margin = 0x05,
.lower_margin = 0x14,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct pxafb_mach_info sharp_lq035q7db02_20 = {
.modes = &sharp_lq035q7db02_20_mode,
.num_modes = 1,
.lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |
LCD_ALTERNATE_MAPPING,
};
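/*
 * Panel selection: the "lcd=" kernel command line option (for example
 * "lcd=lq057q3dc02") picks one of the Sharp panel descriptions above; if
 * it is absent, no framebuffer is registered by lpd270_init().
 */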
static struct pxafb_mach_info *lpd270_lcd_to_use;
static int __init lpd270_set_lcd(char *str)
{
if (!strnicmp(str, "lq057q3dc02", 11)) {
lpd270_lcd_to_use = &sharp_lq057q3dc02;
} else if (!strnicmp(str, "lq121s1dg31", 11)) {
lpd270_lcd_to_use = &sharp_lq121s1dg31;
} else if (!strnicmp(str, "lq036q1da01", 11)) {
lpd270_lcd_to_use = &sharp_lq036q1da01;
} else if (!strnicmp(str, "lq64d343", 8)) {
lpd270_lcd_to_use = &sharp_lq64d343;
} else if (!strnicmp(str, "lq10d368", 8)) {
lpd270_lcd_to_use = &sharp_lq10d368;
} else if (!strnicmp(str, "lq035q7db02-20", 14)) {
lpd270_lcd_to_use = &sharp_lq035q7db02_20;
} else {
printk(KERN_INFO "lpd270: unknown lcd panel [%s]\n", str);
}
return 1;
}
__setup("lcd=", lpd270_set_lcd);
static struct platform_device *platform_devices[] __initdata = {
&smc91x_device,
&lpd270_backlight_device,
&lpd270_flash_device[0],
&lpd270_flash_device[1],
};
static struct pxaohci_platform_data lpd270_ohci_platform_data = {
.port_mode = PMM_PERPORT_MODE,
.flags = ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW,
};
static void __init lpd270_init(void)
{
pxa2xx_mfp_config(ARRAY_AND_SIZE(lpd270_pin_config));
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
lpd270_flash_data[0].width = (__raw_readl(BOOT_DEF) & 1) ? 2 : 4;
lpd270_flash_data[1].width = 4;
/*
* System bus arbiter setting:
* - Core_Park
* - LCD_wt:DMA_wt:CORE_Wt = 2:3:4
*/
ARB_CNTRL = ARB_CORE_PARK | 0x234;
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
pxa_set_ac97_info(NULL);
if (lpd270_lcd_to_use != NULL)
pxa_set_fb_info(NULL, lpd270_lcd_to_use);
pxa_set_ohci_info(&lpd270_ohci_platform_data);
}
static struct map_desc lpd270_io_desc[] __initdata = {
{
.virtual = (unsigned long)LPD270_CPLD_VIRT,
.pfn = __phys_to_pfn(LPD270_CPLD_PHYS),
.length = LPD270_CPLD_SIZE,
.type = MT_DEVICE,
},
};
static void __init lpd270_map_io(void)
{
pxa27x_map_io();
iotable_init(lpd270_io_desc, ARRAY_SIZE(lpd270_io_desc));
/* for using internal SRAM as framebuffer. */
PSLR |= 0x00000F04;
PCFR = 0x00000066;
}
MACHINE_START(LOGICPD_PXA270, "LogicPD PXA270 Card Engine")
/* Maintainer: Peter Barada */
.atag_offset = 0x100,
.map_io = lpd270_map_io,
.nr_irqs = LPD270_NR_IRQS,
.init_irq = lpd270_init_irq,
.handle_irq = pxa27x_handle_irq,
.init_time = pxa_timer_init,
.init_machine = lpd270_init,
.restart = pxa_restart,
MACHINE_END
| gpl-2.0 |
Razdroid/razdroid-kernel | drivers/staging/csr/mlme.c | 2261 | 14734 | /*
* ---------------------------------------------------------------------------
* FILE: mlme.c
*
* PURPOSE:
* This file provides functions to send MLME requests to the UniFi.
*
* Copyright (C) 2007-2008 by Cambridge Silicon Radio Ltd.
*
* Refer to LICENSE.txt included with this source code for details on
* the license terms.
*
* ---------------------------------------------------------------------------
*/
#include "csr_wifi_hip_unifi.h"
#include "unifi_priv.h"
/*
* ---------------------------------------------------------------------------
* unifi_mlme_wait_for_reply
*
* Wait for a reply after sending a signal.
*
* Arguments:
* priv Pointer to device private context struct
* ul_client Pointer to linux client
* sig_reply_id ID of the expected reply (defined in sigs.h).
* timeout timeout in ms
*
* Returns:
* 0 on success, -ve POSIX code on error.
*
* Notes:
* This function waits for a specific (sig_reply_id) signal from UniFi.
* It also matches the sequence number of the received (cfm) signal with
* the latest sequence number of the signal (req) we have sent.
* These two numbers must be equal.
* Should only be used for waiting for xxx.cfm signals, and only after
* we have sent the matching xxx.req signal to UniFi.
* If no response is received within the expected time (timeout), we assume
* that the UniFi is busy and return an error.
* If the wait is aborted by a kernel signal arriving, we stop waiting.
* If a response from UniFi is not what we expected, we discard it and
* wait again. This could be a response from an aborted request. If we
* see several bad responses we assume we have lost synchronisation with
* UniFi.
* ---------------------------------------------------------------------------
*/
static int
unifi_mlme_wait_for_reply(unifi_priv_t *priv, ul_client_t *pcli, int sig_reply_id, int timeout)
{
int retries = 0;
long r;
long t = timeout;
unsigned int sent_seq_no;
/* Convert t in ms to jiffies */
t = msecs_to_jiffies(t);
do {
/* Wait for the confirm or timeout. */
r = wait_event_interruptible_timeout(pcli->udi_wq,
(pcli->wake_up_wq_id) || (priv->io_aborted == 1),
t);
/* Check for general i/o error */
if (priv->io_aborted) {
unifi_error(priv, "MLME operation aborted\n");
return -EIO;
}
/*
* If r=0 the request has timed-out.
* If r>0 the request has completed successfully.
* If r=-ERESTARTSYS an event (kill signal) has interrupted the wait_event.
*/
if ((r == 0) && (pcli->wake_up_wq_id == 0)) {
unifi_error(priv, "mlme_wait: timed-out waiting for 0x%.4X, after %lu msec.\n",
sig_reply_id, jiffies_to_msecs(t));
pcli->wake_up_wq_id = 0;
return -ETIMEDOUT;
} else if (r == -ERESTARTSYS) {
unifi_error(priv, "mlme_wait: waiting for 0x%.4X was aborted.\n", sig_reply_id);
pcli->wake_up_wq_id = 0;
return -EINTR;
} else {
/* Get the sequence number of the signal that we previously set. */
if (pcli->seq_no != 0) {
sent_seq_no = pcli->seq_no - 1;
} else {
sent_seq_no = 0x0F;
}
unifi_trace(priv, UDBG5, "Received 0x%.4X, seq: (r:%d, s:%d)\n",
pcli->wake_up_wq_id,
pcli->wake_seq_no, sent_seq_no);
/* The two sequence ids must match. */
if (pcli->wake_seq_no == sent_seq_no) {
/* and the signal ids must match. */
if (sig_reply_id == pcli->wake_up_wq_id) {
/* Found the expected signal */
break;
} else {
/* This should never happen ... */
unifi_error(priv, "mlme_wait: mismatching signal id (0x%.4X - exp 0x%.4X) (seq %d)\n",
pcli->wake_up_wq_id,
sig_reply_id,
pcli->wake_seq_no);
pcli->wake_up_wq_id = 0;
return -EIO;
}
}
/* Wait for the next signal. */
pcli->wake_up_wq_id = 0;
retries ++;
if (retries >= 3) {
unifi_error(priv, "mlme_wait: confirm wait retries exhausted (0x%.4X - exp 0x%.4X)\n",
pcli->wake_up_wq_id,
sig_reply_id);
pcli->wake_up_wq_id = 0;
return -EIO;
}
}
} while (1);
pcli->wake_up_wq_id = 0;
return 0;
} /* unifi_mlme_wait_for_reply() */
/*
* ---------------------------------------------------------------------------
* unifi_mlme_blocking_request
*
* Send a MLME request signal to UniFi.
*
* Arguments:
* priv Pointer to device private context struct
* pcli Pointer to context of calling process
* sig Pointer to the signal to send
* data_ptrs Pointer to the bulk data of the signal
* timeout The request's timeout.
*
* Returns:
* 0 on success, 802.11 result code on error.
* ---------------------------------------------------------------------------
*/
int
unifi_mlme_blocking_request(unifi_priv_t *priv, ul_client_t *pcli,
CSR_SIGNAL *sig, bulk_data_param_t *data_ptrs,
int timeout)
{
int r;
if (sig->SignalPrimitiveHeader.SignalId == 0) {
unifi_error(priv, "unifi_mlme_blocking_request: Invalid Signal Id (0x%x)\n",
sig->SignalPrimitiveHeader.SignalId);
return -EINVAL;
}
down(&priv->mlme_blocking_mutex);
sig->SignalPrimitiveHeader.ReceiverProcessId = 0;
sig->SignalPrimitiveHeader.SenderProcessId = pcli->sender_id | pcli->seq_no;
unifi_trace(priv, UDBG2, "Send client=%d, S:0x%04X, sig 0x%.4X\n",
pcli->client_id,
sig->SignalPrimitiveHeader.SenderProcessId,
sig->SignalPrimitiveHeader.SignalId);
/* Send the signal to UniFi */
r = ul_send_signal_unpacked(priv, sig, data_ptrs);
if (r) {
up(&priv->mlme_blocking_mutex);
unifi_error(priv, "Error queueing MLME REQUEST signal\n");
return r;
}
unifi_trace(priv, UDBG5, "Send 0x%.4X, seq = %d\n",
sig->SignalPrimitiveHeader.SignalId, pcli->seq_no);
/*
* Advance the sequence number of the last sent signal, only
* if the signal has been successfully sent.
*/
pcli->seq_no++;
if (pcli->seq_no > 0x0F) {
pcli->seq_no = 0;
}
r = unifi_mlme_wait_for_reply(priv, pcli, (sig->SignalPrimitiveHeader.SignalId + 1), timeout);
up(&priv->mlme_blocking_mutex);
if (r) {
unifi_error(priv, "Error waiting for MLME CONFIRM signal\n");
return r;
}
return 0;
} /* unifi_mlme_blocking_request() */
/*
* ---------------------------------------------------------------------------
* unifi_mlme_copy_reply_and_wakeup_client
*
* Copy the reply signal from UniFi to the client's structure
* and wake up the waiting client.
*
* Arguments:
* None.
*
* Returns:
* None.
* ---------------------------------------------------------------------------
*/
void
unifi_mlme_copy_reply_and_wakeup_client(ul_client_t *pcli,
CSR_SIGNAL *signal, int signal_len,
const bulk_data_param_t *bulkdata)
{
int i;
/* Copy the signal to the reply */
memcpy(pcli->reply_signal, signal, signal_len);
/* Get the sequence number of the signal that woke us up. */
pcli->wake_seq_no = pcli->reply_signal->SignalPrimitiveHeader.ReceiverProcessId & 0x0F;
/* Append any bulk data */
for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++) {
if (bulkdata->d[i].data_length > 0) {
if (bulkdata->d[i].os_data_ptr) {
memcpy(pcli->reply_bulkdata[i]->ptr, bulkdata->d[i].os_data_ptr, bulkdata->d[i].data_length);
pcli->reply_bulkdata[i]->length = bulkdata->d[i].data_length;
} else {
pcli->reply_bulkdata[i]->length = 0;
}
}
}
/* Wake the requesting MLME function. */
pcli->wake_up_wq_id = pcli->reply_signal->SignalPrimitiveHeader.SignalId;
wake_up_interruptible(&pcli->udi_wq);
} /* unifi_mlme_copy_reply_and_wakeup_client() */
/*
* ---------------------------------------------------------------------------
* uf_abort_mlme
*
* Abort any MLME operation in progress.
* This is used in the error recovery mechanism.
*
* Arguments:
* priv Pointer to driver context.
*
* Returns:
* 0 on success.
* ---------------------------------------------------------------------------
*/
int
uf_abort_mlme(unifi_priv_t *priv)
{
ul_client_t *ul_cli;
/* Ensure no MLME functions are waiting on the mlme_event semaphore. */
priv->io_aborted = 1;
ul_cli = priv->netdev_client;
if (ul_cli) {
wake_up_interruptible(&ul_cli->udi_wq);
}
ul_cli = priv->wext_client;
if (ul_cli) {
wake_up_interruptible(&ul_cli->udi_wq);
}
return 0;
} /* uf_abort_mlme() */
/*
* ---------------------------------------------------------------------------
*
* Human-readable decoding of Reason and Result codes.
*
* ---------------------------------------------------------------------------
*/
struct mlme_code {
const char *name;
int id;
};
static const struct mlme_code Result_codes[] = {
{ "Success", 0x0000 },
{ "Unspecified Failure", 0x0001 },
/* (Reserved) 0x0002 - 0x0009 */
{ "Refused Capabilities Mismatch", 0x000A },
/* (Reserved) 0x000B */
{ "Refused External Reason", 0x000C },
/* (Reserved) 0x000D - 0x0010 */
{ "Refused AP Out Of Memory", 0x0011 },
{ "Refused Basic Rates Mismatch", 0x0012 },
/* (Reserved) 0x0013 - 0x001F */
{ "Failure", 0x0020 },
/* (Reserved) 0x0021 - 0x0024 */
{ "Refused Reason Unspecified", 0x0025 },
{ "Invalid Parameters", 0x0026 },
{ "Rejected With Suggested Changes", 0x0027 },
/* (Reserved) 0x0028 - 0x002E */
{ "Rejected For Delay Period", 0x002F },
{ "Not Allowed", 0x0030 },
{ "Not Present", 0x0031 },
{ "Not QSTA", 0x0032 },
/* (Reserved) 0x0033 - 0x7FFF */
{ "Timeout", 0x8000 },
{ "Too Many Simultaneous Requests", 0x8001 },
{ "BSS Already Started Or Joined", 0x8002 },
{ "Not Supported", 0x8003 },
{ "Transmission Failure", 0x8004 },
{ "Refused Not Authenticated", 0x8005 },
{ "Reset Required Before Start", 0x8006 },
{ "LM Info Unavailable", 0x8007 },
{ NULL, -1 }
};
static const struct mlme_code Reason_codes[] = {
/* (Reserved) 0x0000 */
{ "Unspecified Reason", 0x0001 },
{ "Authentication Not Valid", 0x0002 },
{ "Deauthenticated Leave BSS", 0x0003 },
{ "Disassociated Inactivity", 0x0004 },
{ "AP Overload", 0x0005 },
{ "Class2 Frame Error", 0x0006 },
{ "Class3 Frame Error", 0x0007 },
{ "Disassociated Leave BSS", 0x0008 },
{ "Association Not Authenticated", 0x0009 },
{ "Disassociated Power Capability", 0x000A },
{ "Disassociated Supported Channels", 0x000B },
/* (Reserved) 0x000C */
{ "Invalid Information Element", 0x000D },
{ "Michael MIC Failure", 0x000E },
{ "Fourway Handshake Timeout", 0x000F },
{ "Group Key Update Timeout", 0x0010 },
{ "Handshake Element Different", 0x0011 },
{ "Invalid Group Cipher", 0x0012 },
{ "Invalid Pairwise Cipher", 0x0013 },
{ "Invalid AKMP", 0x0014 },
{ "Unsupported RSN IE Version", 0x0015 },
{ "Invalid RSN IE Capabilities", 0x0016 },
{ "Dot1X Auth Failed", 0x0017 },
{ "Cipher Rejected By Policy", 0x0018 },
/* (Reserved) 0x0019 - 0x001F */
{ "QoS Unspecified Reason", 0x0020 },
{ "QoS Insufficient Bandwidth", 0x0021 },
{ "QoS Excessive Not Ack", 0x0022 },
{ "QoS TXOP Limit Exceeded", 0x0023 },
{ "QSTA Leaving", 0x0024 },
{ "End TS, End DLS, End BA", 0x0025 },
{ "Unknown TS, Unknown DLS, Unknown BA", 0x0026 },
{ "Timeout", 0x0027 },
/* (Reserved) 0x0028 - 0x002C */
{ "STAKey Mismatch", 0x002D },
{ NULL, -1 }
};
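/* Linear scan of a name table terminated by a NULL-name entry. */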
static const char *
lookup_something(const struct mlme_code *n, int id)
{
for (; n->name; n++) {
if (n->id == id) {
return n->name;
}
}
/* not found */
return NULL;
} /* lookup_something() */
const char *
lookup_result_code(int result)
{
static char fallback[16];
const char *str;
str = lookup_something(Result_codes, result);
if (str == NULL) {
snprintf(fallback, 16, "%d", result);
str = fallback;
}
return str;
} /* lookup_result_code() */
/*
* ---------------------------------------------------------------------------
* lookup_reason
*
* Return a description string for a WiFi MLME ReasonCode.
*
* Arguments:
* reason The ReasonCode to interpret.
*
* Returns:
* Pointer to description string.
* ---------------------------------------------------------------------------
*/
const char *
lookup_reason_code(int reason)
{
static char fallback[16];
const char *str;
str = lookup_something(Reason_codes, reason);
if (str == NULL) {
snprintf(fallback, 16, "%d", reason);
str = fallback;
}
return str;
} /* lookup_reason_code() */
| gpl-2.0 |
mrjaydee82/SinLessKernel-m8 | net/bluetooth/sco.c | 2517 | 23101 | /*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (c) 2011, The Linux Foundation. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth SCO sockets. */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <net/sock.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
static bool disable_esco;
static const struct proto_ops sco_sock_ops;
static struct bt_sock_list sco_sk_list = {
.lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock)
};
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
static int sco_conn_del(struct hci_conn *conn, int err, u8 is_process);
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
/* ---- SCO timers ---- */
static void sco_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *) arg;
BT_DBG("sock %p state %d", sk, sk->sk_state);
bh_lock_sock(sk);
sk->sk_err = ETIMEDOUT;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sco_sock_kill(sk);
sock_put(sk);
}
static void sco_sock_set_timer(struct sock *sk, long timeout)
{
BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
static void sco_sock_clear_timer(struct sock *sk)
{
BT_DBG("sock %p state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
}
/* ---- SCO connections ---- */
static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
if (conn || status)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
if (!conn)
return NULL;
spin_lock_init(&conn->lock);
hcon->sco_data = conn;
conn->hcon = hcon;
conn->src = &hdev->bdaddr;
conn->dst = &hcon->dst;
if (hdev->sco_mtu > 0)
conn->mtu = hdev->sco_mtu;
else
conn->mtu = 60;
BT_DBG("hcon %p conn %p", hcon, conn);
return conn;
}
static inline struct sock *sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
sk = conn->sk;
sco_conn_unlock(conn);
return sk;
}
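/*
 * Tear down the sco_conn attached to an hci_conn: detach and kill the
 * owning socket (locking it as appropriate for process or interrupt
 * context) and free the connection state.
 */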
static int sco_conn_del(struct hci_conn *hcon, int err, u8 is_process)
{
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
if (!conn)
return 0;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Kill socket */
sk = sco_chan_get(conn);
if (sk) {
if (is_process)
lock_sock(sk);
else
bh_lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
if (is_process)
release_sock(sk);
else
bh_unlock_sock(sk);
sco_sock_kill(sk);
}
hcon->sco_data = NULL;
kfree(conn);
return 0;
}
static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
int err = 0;
sco_conn_lock(conn);
if (conn->sk)
err = -EBUSY;
else
__sco_chan_add(conn, sk, parent);
sco_conn_unlock(conn);
return err;
}
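/*
 * sco_connect - establish the baseband link for a connecting SCO socket:
 * find a route from source to destination, pick SCO or eSCO link type
 * (wideband speech requires eSCO), create the hci_conn and attach the
 * socket to the resulting sco_conn.
 */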
static int sco_connect(struct sock *sk, __s8 is_wbs)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
__u16 pkt_type = sco_pi(sk)->pkt_type;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
int err, type;
BT_DBG("%s -> %s", batostr(src), batostr(dst));
hdev = hci_get_route(dst, src);
if (!hdev)
return -EHOSTUNREACH;
hci_dev_lock_bh(hdev);
hdev->is_wbs = is_wbs;
if (lmp_esco_capable(hdev) && !disable_esco) {
type = ESCO_LINK;
} else if (is_wbs) {
/* Wideband speech needs an eSCO link; bail out without leaking hdev */
err = -ENAVAIL;
goto done;
} else {
type = SCO_LINK;
pkt_type &= SCO_ESCO_MASK;
}
BT_DBG("type: %d, pkt_type: 0x%x", type, pkt_type);
hcon = hci_connect(hdev, type, pkt_type, dst,
BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
if (is_wbs && (hcon->type != ESCO_LINK)) {
BT_ERR("WBS [ hcon->type: 0x%x, hcon->pkt_type: 0x%x ]",
hcon->type, hcon->pkt_type);
err = -EREMOTEIO;
goto done;
}
conn = sco_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
err = -ENOMEM;
goto done;
}
/* Update source addr of the socket */
bacpy(src, conn->src);
err = sco_chan_add(conn, sk, NULL);
if (err)
goto done;
if (hcon->state == BT_CONNECTED) {
sco_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
} else {
sk->sk_state = BT_CONNECT;
sco_sock_set_timer(sk, sk->sk_sndtimeo);
}
done:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
}
static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
int err, count;
/* Check outgoing MTU */
if (len > conn->mtu)
return -EINVAL;
BT_DBG("sk %p len %d", sk, len);
count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_send_alloc(sk, count,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
return count;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
if (!sk)
goto drop;
BT_DBG("sk %p len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
goto drop;
if (!sock_queue_rcv_skb(sk, skb))
return;
drop:
kfree_skb(skb);
}
/* -------- Socket interface ---------- */
static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
{
struct sock *sk;
struct hlist_node *node;
sk_for_each(sk, node, &sco_sk_list.head)
if (!bacmp(&bt_sk(sk)->src, ba))
goto found;
sk = NULL;
found:
return sk;
}
/* Find socket listening on source bdaddr.
* Returns closest match.
*/
static struct sock *sco_get_sock_listen(bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;
read_lock(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
/* Exact match. */
if (!bacmp(&bt_sk(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
read_unlock(&sco_sk_list.lock);
return node ? sk : sk1;
}
static void sco_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
static void sco_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
sco_sock_close(sk);
sco_sock_kill(sk);
}
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void sco_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&sco_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
static void __sco_sock_close(struct sock *sk)
{
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
sco_sock_cleanup_listen(sk);
break;
case BT_CONNECTED:
case BT_CONFIG:
if (sco_pi(sk)->conn) {
sk->sk_state = BT_DISCONN;
sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
if (sco_pi(sk)->conn->hcon != NULL) {
hci_conn_put(sco_pi(sk)->conn->hcon);
sco_pi(sk)->conn->hcon = NULL;
}
} else
sco_chan_del(sk, ECONNRESET);
break;
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
break;
default:
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
}
/* Must be called on unlocked socket. */
static void sco_sock_close(struct sock *sk)
{
sco_sock_clear_timer(sk);
lock_sock(sk);
__sco_sock_close(sk);
release_sock(sk);
sco_sock_kill(sk);
}
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
if (parent)
sk->sk_type = parent->sk_type;
}
static struct proto sco_proto = {
.name = "SCO",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sco_pinfo)
};
static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = sco_sock_destruct;
sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
bt_sock_link(&sco_sk_list, sk);
return sk;
}
static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET)
return -ESOCKTNOSUPPORT;
sock->ops = &sco_sock_ops;
sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
sco_sock_init(sk, NULL);
return 0;
}
static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
struct sockaddr_sco sa;
struct sock *sk = sock->sk;
bdaddr_t *src = &sa.sco_bdaddr;
int len, err = 0;
BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr));
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
memset(&sa, 0, sizeof(sa));
len = min_t(unsigned int, sizeof(sa), alen);
memcpy(&sa, addr, len);
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
write_lock_bh(&sco_sk_list.lock);
if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
sco_pi(sk)->pkt_type = sa.sco_pkt_type;
sk->sk_state = BT_BOUND;
}
write_unlock_bh(&sco_sk_list.lock);
done:
release_sock(sk);
return err;
}
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_sco sa;
int len, err = 0;
BT_DBG("sk %p", sk);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
memset(&sa, 0, sizeof(sa));
len = min_t(unsigned int, sizeof(sa), alen);
memcpy(&sa, addr, len);
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EINVAL;
goto done;
}
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
/* Set destination address and packet type */
bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
sco_pi(sk)->pkt_type = sa.sco_pkt_type;
err = sco_connect(sk, sa.is_wbs);
if (err)
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
err = -EBADFD;
goto done;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
return err;
}
static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *ch;
long timeo;
int err = 0;
lock_sock(sk);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", ch);
done:
release_sock(sk);
return err;
}
static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_sco);
if (peer)
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
sa->sco_pkt_type = sco_pi(sk)->pkt_type;
return 0;
}
static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (err)
return err;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, msg, len);
else
err = -ENOTCONN;
release_sock(sk);
return err;
}
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case SCO_OPTIONS:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
opts.mtu = sco_pi(sk)->conn->mtu;
BT_DBG("mtu %d", opts.mtu);
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
break;
case SCO_CONNINFO:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
BT_DBG("sk %p", sk);
if (level == SOL_SCO)
return sco_sock_getsockopt_old(sock, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
sco_sock_clear_timer(sk);
__sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);
else
err = bt_sock_wait_state(sk, BT_CLOSED,
SCO_DISCONN_TIMEOUT);
}
release_sock(sk);
return err;
}
static int sco_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
release_sock(sk);
} else {
lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CLOSED,
SCO_DISCONN_TIMEOUT);
release_sock(sk);
}
sock_orphan(sk);
sco_sock_kill(sk);
return err;
}
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
BT_DBG("conn %p", conn);
sco_pi(sk)->conn = conn;
conn->sk = sk;
if (parent)
bt_accept_enqueue(parent, sk);
}
/* Delete channel.
* Must be called on the locked socket. */
static void sco_chan_del(struct sock *sk, int err)
{
struct sco_conn *conn;
conn = sco_pi(sk)->conn;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
sco_conn_lock(conn);
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
if (conn->hcon)
hci_conn_put(conn->hcon);
}
sk->sk_state = BT_CLOSED;
sk->sk_err = err;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_ZAPPED);
}
static void sco_conn_ready(struct sco_conn *conn)
{
struct sock *parent;
struct sock *sk = conn->sk;
BT_DBG("conn %p", conn);
sco_conn_lock(conn);
if (sk) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
} else {
parent = sco_get_sock_listen(conn->src);
if (!parent)
goto done;
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
goto done;
}
sco_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
sk->sk_state = BT_CONNECTED;
/* Wake up parent */
parent->sk_data_ready(parent, 1);
bh_unlock_sock(parent);
}
done:
sco_conn_unlock(conn);
}
/* ----- SCO interface with lower layer (HCI) ----- */
static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
struct sock *sk;
struct hlist_node *node;
int lm = 0;
if (type != SCO_LINK && type != ESCO_LINK)
return 0;
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets */
read_lock(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
break;
}
}
read_unlock(&sco_sk_list.lock);
return lm;
}
static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return -EINVAL;
if (!status) {
struct sco_conn *conn;
conn = sco_conn_add(hcon, status);
if (conn)
sco_conn_ready(conn);
} else
sco_conn_del(hcon, bt_err(status), 0);
return 0;
}
static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason, __u8 is_process)
{
BT_DBG("hcon %p reason %d", hcon, reason);
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return -EINVAL;
sco_conn_del(hcon, bt_err(reason), is_process);
return 0;
}
static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
if (!conn)
goto drop;
BT_DBG("conn %p len %d", conn, skb->len);
if (skb->len) {
sco_recv_frame(conn, skb);
return 0;
}
drop:
kfree_skb(skb);
return 0;
}
static int sco_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
struct hlist_node *node;
read_lock_bh(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst), sk->sk_state);
}
read_unlock_bh(&sco_sk_list.lock);
return 0;
}
static int sco_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sco_debugfs_show, inode->i_private);
}
static const struct file_operations sco_debugfs_fops = {
.open = sco_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *sco_debugfs;
static const struct proto_ops sco_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = sco_sock_release,
.bind = sco_sock_bind,
.connect = sco_sock_connect,
.listen = sco_sock_listen,
.accept = sco_sock_accept,
.getname = sco_sock_getname,
.sendmsg = sco_sock_sendmsg,
.recvmsg = bt_sock_recvmsg,
.poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.shutdown = sco_sock_shutdown,
.setsockopt = sco_sock_setsockopt,
.getsockopt = sco_sock_getsockopt
};
static const struct net_proto_family sco_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = sco_sock_create,
};
static struct hci_proto sco_hci_proto = {
.name = "SCO",
.id = HCI_PROTO_SCO,
.connect_ind = sco_connect_ind,
.connect_cfm = sco_connect_cfm,
.disconn_cfm = sco_disconn_cfm,
.recv_scodata = sco_recv_scodata
};
int __init sco_init(void)
{
int err;
err = proto_register(&sco_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops);
if (err < 0) {
BT_ERR("SCO socket registration failed");
goto error;
}
err = hci_register_proto(&sco_hci_proto);
if (err < 0) {
BT_ERR("SCO protocol registration failed");
bt_sock_unregister(BTPROTO_SCO);
goto error;
}
if (bt_debugfs) {
sco_debugfs = debugfs_create_file("sco", 0444,
bt_debugfs, NULL, &sco_debugfs_fops);
if (!sco_debugfs)
BT_ERR("Failed to create SCO debug file");
}
BT_INFO("SCO socket layer initialized");
return 0;
error:
proto_unregister(&sco_proto);
return err;
}
void __exit sco_exit(void)
{
debugfs_remove(sco_debugfs);
if (bt_sock_unregister(BTPROTO_SCO) < 0)
BT_ERR("SCO socket unregistration failed");
if (hci_unregister_proto(&sco_hci_proto) < 0)
BT_ERR("SCO protocol unregistration failed");
proto_unregister(&sco_proto);
}
module_param(disable_esco, bool, 0644);
MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
| gpl-2.0 |
playfulgod/android_kernel_lge_kk_zee | drivers/virtio/virtio_ring.c | 2517 | 19491 | /* Virtio ring implementation.
*
* Copyright 2007 Rusty Russell IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
/* virtio guest is communicating with a virtual "device" that actually runs on
* a host processor. Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
* barriers, because mandatory barriers control MMIO effects on accesses
* through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
#define virtio_rmb(vq) \
do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
#define virtio_wmb(vq) \
do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
#else
/* We must force memory ordering even if guest is UP since host could be
* running on another CPU, but SMP barriers are defined to barrier() in that
* configuration. So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \
do { \
dev_err(&(_vq)->vq.vdev->dev, \
"%s:"fmt, (_vq)->vq.name, ##args); \
BUG(); \
} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq) \
do { \
if ((_vq)->in_use) \
panic("%s:in_use = %i\n", \
(_vq)->vq.name, (_vq)->in_use); \
(_vq)->in_use = __LINE__; \
} while (0)
#define END_USE(_vq) \
do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...) \
do { \
dev_err(&_vq->vq.vdev->dev, \
"%s:"fmt, (_vq)->vq.name, ##args); \
(_vq)->broken = true; \
} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_virtqueue
{
struct virtqueue vq;
/* Actual memory layout for this queue */
struct vring vring;
/* Can we use weak barriers? */
bool weak_barriers;
/* Other side has made a mess, don't try any more. */
bool broken;
/* Host supports indirect buffers */
bool indirect;
/* Host publishes avail event idx */
bool event;
/* Number of free buffers */
unsigned int num_free;
/* Head of free buffer list. */
unsigned int free_head;
/* Number we've added since last sync. */
unsigned int num_added;
/* Last used index we've seen. */
u16 last_used_idx;
/* How to notify other side. FIXME: commonalize hcalls! */
void (*notify)(struct virtqueue *vq);
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
/* Figure out if their kicks are too delayed. */
bool last_add_time_valid;
ktime_t last_add_time;
#endif
/* Tokens for callbacks. */
void *data[];
};
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
struct scatterlist sg[],
unsigned int out,
unsigned int in,
gfp_t gfp)
{
struct vring_desc *desc;
unsigned head;
int i;
desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
if (!desc)
return -ENOMEM;
/* Transfer entries from the sg list into the indirect page */
for (i = 0; i < out; i++) {
desc[i].flags = VRING_DESC_F_NEXT;
desc[i].addr = sg_phys(sg);
desc[i].len = sg->length;
desc[i].next = i+1;
sg++;
}
for (; i < (out + in); i++) {
desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
desc[i].addr = sg_phys(sg);
desc[i].len = sg->length;
desc[i].next = i+1;
sg++;
}
/* Last one doesn't continue. */
desc[i-1].flags &= ~VRING_DESC_F_NEXT;
desc[i-1].next = 0;
/* We're about to use a buffer */
vq->num_free--;
/* Use a single buffer which doesn't continue */
head = vq->free_head;
vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
vq->vring.desc[head].addr = virt_to_phys(desc);
vq->vring.desc[head].len = i * sizeof(struct vring_desc);
/* Update free pointer */
vq->free_head = vq->vring.desc[head].next;
return head;
}
/**
* vring_add_buf - expose buffer to other end
* @vq: the struct virtqueue we're talking about.
* @sg: the description of the buffer(s).
* @out_num: the number of sg readable by other side
* @in_num: the number of sg which are writable (after readable ones)
* @data: the token identifying the buffer.
* @gfp: how to do memory allocations (if necessary).
*
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
* Returns remaining capacity of queue or a negative error
* (ie. ENOSPC). Note that it only really makes sense to treat all
* positive return values as "available": indirect buffers mean that
* we can put an entire sg[] array inside a single queue entry.
*/
static int vring_add_buf(struct virtqueue *_vq,
struct scatterlist sg[],
unsigned int out,
unsigned int in,
void *data,
gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i, avail, uninitialized_var(prev);
int head;
START_USE(vq);
BUG_ON(data == NULL);
#ifdef DEBUG
{
ktime_t now = ktime_get();
/* No kick or get, with .1 second between? Warn. */
if (vq->last_add_time_valid)
WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
> 100);
vq->last_add_time = now;
vq->last_add_time_valid = true;
}
#endif
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
if (vq->indirect && (out + in) > 1 && vq->num_free) {
head = vring_add_indirect(vq, sg, out, in, gfp);
if (likely(head >= 0))
goto add_head;
}
BUG_ON(out + in > vq->vring.num);
BUG_ON(out + in == 0);
if (vq->num_free < out + in) {
pr_debug("Can't add buf len %i - avail = %i\n",
out + in, vq->num_free);
/* FIXME: for historical reasons, we force a notify here if
* there are outgoing parts to the buffer. Presumably the
* host should service the ring ASAP. */
if (out)
vq->notify(&vq->vq);
END_USE(vq);
return -ENOSPC;
}
/* We're about to use some buffers from the free list. */
vq->num_free -= out + in;
head = vq->free_head;
for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
vq->vring.desc[i].addr = sg_phys(sg);
vq->vring.desc[i].len = sg->length;
prev = i;
sg++;
}
for (; in; i = vq->vring.desc[i].next, in--) {
vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
vq->vring.desc[i].addr = sg_phys(sg);
vq->vring.desc[i].len = sg->length;
prev = i;
sg++;
}
/* Last one doesn't continue. */
vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;
/* Update free pointer */
vq->free_head = i;
add_head:
/* Set token. */
vq->data[head] = data;
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
avail = (vq->vring.avail->idx & (vq->vring.num-1));
vq->vring.avail->ring[avail] = head;
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
virtio_wmb(vq);
vq->vring.avail->idx++;
vq->num_added++;
/* This is very unlikely, but theoretically possible. Kick
* just in case. */
if (unlikely(vq->num_added == (1 << 16) - 1))
virtqueue_kick(_vq);
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);
return vq->num_free;
}
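/*
 * Producer-side sketch (illustrative only: "hdr", "data", "len" and "token"
 * are assumed driver-local names, not part of this file). A driver owning
 * this virtqueue typically builds a scatterlist, exposes it with add_buf()
 * and then kicks the host through the ops table declared further below:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], hdr, sizeof(*hdr));	(readable by the host)
 *	sg_set_buf(&sg[1], data, len);		(readable by the host)
 *
 *	if (vq->vq_ops->add_buf(vq, sg, 2, 0, token, GFP_ATOMIC) < 0)
 *		return -ENOSPC;			(ring full, retry later)
 *	vq->vq_ops->kick(vq);
 */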
/**
* vring_kick_prepare - first half of split vring_kick call.
* @vq: the struct virtqueue
*
* Instead of vring_kick(), you can do:
* if (vring_kick_prepare(vq))
* vring_kick_notify(vq);
*
* This is sometimes useful because the vring_kick_prepare() needs
* to be serialized, but the actual vring_kick_notify() call does not.
*/
static bool vring_kick_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 new, old;
bool needs_kick;
START_USE(vq);
/* We need to expose available array entries before checking avail
* event. */
virtio_mb(vq);
old = vq->vring.avail->idx - vq->num_added;
new = vq->vring.avail->idx;
vq->num_added = 0;
#ifdef DEBUG
if (vq->last_add_time_valid) {
WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
vq->last_add_time)) > 100);
}
vq->last_add_time_valid = false;
#endif
if (vq->event) {
needs_kick = vring_need_event(vring_avail_event(&vq->vring),
new, old);
} else {
needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
}
END_USE(vq);
return needs_kick;
}
/**
* vring_kick_notify - second half of split vring_kick call.
* @vq: the struct virtqueue
*
* This does not need to be serialized.
*/
static void vring_kick_notify(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
/* Prod other side to tell it about changes. */
vq->notify(_vq);
}
/**
* vring_kick - update after add_buf
* @vq: the struct virtqueue
*
* After one or more vring_add_buf calls, invoke this to kick
* the other side.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
static void vring_kick(struct virtqueue *vq)
{
if (vring_kick_prepare(vq))
vring_kick_notify(vq);
}
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
unsigned int i;
/* Clear data ptr. */
vq->data[head] = NULL;
/* Put back on free list: find end */
i = head;
/* Free the indirect table */
if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
kfree(phys_to_virt(vq->vring.desc[i].addr));
while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
i = vq->vring.desc[i].next;
vq->num_free++;
}
vq->vring.desc[i].next = vq->free_head;
vq->free_head = head;
/* Plus final descriptor */
vq->num_free++;
}
static inline bool more_used(const struct vring_virtqueue *vq)
{
return vq->last_used_idx != vq->vring.used->idx;
}
/**
* vring_get_buf - get the next used buffer
* @vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
*
* If the host wrote data into the buffer, @len will be set to the
* amount written. This means you don't need to clear the buffer
* beforehand to ensure there's no data leakage in the case of short
* writes.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*
* Returns NULL if there are no used buffers, or the "data" token
* handed to vring_add_buf().
*/
static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
unsigned int i;
u16 last_used;
START_USE(vq);
if (unlikely(vq->broken)) {
END_USE(vq);
return NULL;
}
if (!more_used(vq)) {
pr_debug("No more buffers in queue\n");
END_USE(vq);
return NULL;
}
/* Only get used array entries after they have been exposed by host. */
virtio_rmb(vq);
last_used = (vq->last_used_idx & (vq->vring.num - 1));
i = vq->vring.used->ring[last_used].id;
*len = vq->vring.used->ring[last_used].len;
if (unlikely(i >= vq->vring.num)) {
BAD_RING(vq, "id %u out of range\n", i);
return NULL;
}
if (unlikely(!vq->data[i])) {
BAD_RING(vq, "id %u is not a head!\n", i);
return NULL;
}
/* detach_buf clears data, so grab it now. */
ret = vq->data[i];
detach_buf(vq, i);
vq->last_used_idx++;
/* If we expect an interrupt for the next entry, tell host
* by writing event index and flush out the write before
* the read in the next get_buf call. */
if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
vring_used_event(&vq->vring) = vq->last_used_idx;
virtio_mb(vq);
}
#ifdef DEBUG
vq->last_add_time_valid = false;
#endif
END_USE(vq);
return ret;
}
/**
* vring_disable_cb - disable callbacks
* @vq: the struct virtqueue we're talking about.
*
* Note that this is not necessarily synchronous, hence unreliable and only
* useful as an optimization.
*
* Unlike other operations, this need not be serialized.
*/
static void vring_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
/**
* vring_enable_cb - restart callbacks after disable_cb.
* @vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
* checking for more work, and enabling callbacks.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
static bool vring_enable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
START_USE(vq);
/* We optimistically turn back on interrupts, then check if there was
* more to do. */
/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
* entry. Always do both to keep code simple. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
vring_used_event(&vq->vring) = vq->last_used_idx;
virtio_mb(vq);
if (unlikely(more_used(vq))) {
END_USE(vq);
return false;
}
END_USE(vq);
return true;
}
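/*
 * Consumer-side sketch of the race described above (illustrative only:
 * "token", "len" and process() are assumed driver-local names). A driver
 * callback normally drains the ring and only stops once enable_cb()
 * confirms that nothing new slipped in behind the last get_buf():
 *
 *	vq->vq_ops->disable_cb(vq);
 *	do {
 *		while ((token = vq->vq_ops->get_buf(vq, &len)) != NULL)
 *			process(token, len);
 *	} while (!vq->vq_ops->enable_cb(vq));
 *
 * enable_cb() returning false means more buffers became used after the
 * final get_buf(), so the drain loop runs again instead of missing them.
 */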
/**
* vring_enable_cb_delayed - restart callbacks after disable_cb.
* @vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks but hints to the other side to delay
* interrupts until most of the available buffers have been processed;
* it returns "false" if there are many pending buffers in the queue,
* to detect a possible race between the driver checking for more work,
* and enabling callbacks.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
static bool vring_enable_cb_delayed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 bufs;
START_USE(vq);
/* We optimistically turn back on interrupts, then check if there was
* more to do. */
/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
* entry. Always do both to keep code simple. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
/* TODO: tune this threshold */
bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
virtio_mb(vq);
if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
END_USE(vq);
return false;
}
END_USE(vq);
return true;
}
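/*
 * Worked example of the 3/4 heuristic above (numbers assumed): with 16
 * buffers still outstanding, bufs = 16 * 3 / 4 = 12, so the used event
 * index is parked 12 entries past last_used_idx and the host is asked to
 * hold its interrupt back until roughly three quarters of those buffers
 * have been consumed.
 */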
/**
* vring_detach_unused_buf - detach first unused buffer
* @vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to vring_add_buf().
* This is not valid on an active queue; it is useful only for device
* shutdown.
*/
static void *vring_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i;
void *buf;
START_USE(vq);
for (i = 0; i < vq->vring.num; i++) {
if (!vq->data[i])
continue;
/* detach_buf clears data, so grab it now. */
buf = vq->data[i];
detach_buf(vq, i);
vq->vring.avail->idx--;
END_USE(vq);
return buf;
}
/* That should have freed everything. */
BUG_ON(vq->num_free != vq->vring.num);
END_USE(vq);
return NULL;
}
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
if (!more_used(vq)) {
pr_debug("virtqueue interrupt with no work for %p\n", vq);
return IRQ_NONE;
}
if (unlikely(vq->broken))
return IRQ_HANDLED;
pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
if (vq->vq.callback)
vq->vq.callback(&vq->vq);
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
/**
* get_vring_size - return the size of the virtqueue's vring
* @vq: the struct virtqueue containing the vring of interest.
*
* Returns the size of the vring. This is mainly used for boasting to
* userspace. Unlike other operations, this need not be serialized.
*/
static unsigned int get_vring_size(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->vring.num;
}
static struct virtqueue_ops vring_vq_ops = {
.add_buf = vring_add_buf,
.get_buf = vring_get_buf,
.kick = vring_kick,
.kick_prepare = vring_kick_prepare,
.kick_notify = vring_kick_notify,
.disable_cb = vring_disable_cb,
.enable_cb = vring_enable_cb,
.enable_cb_delayed = vring_enable_cb_delayed,
.detach_unused_buf = vring_detach_unused_buf,
.get_impl_size = get_vring_size,
};
struct virtqueue *vring_new_virtqueue(unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
void *pages,
void (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{
struct vring_virtqueue *vq;
unsigned int i;
/* We assume num is a power of 2. */
if (num & (num - 1)) {
dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
return NULL;
}
vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
if (!vq)
return NULL;
vring_init(&vq->vring, num, pages, vring_align);
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.vq_ops = &vring_vq_ops;
vq->vq.name = name;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
vq->last_add_time_valid = false;
#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
/* No callback? Tell other side not to bother us. */
if (!callback)
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
/* Put everything in free lists. */
vq->num_free = num;
vq->free_head = 0;
for (i = 0; i < num-1; i++) {
vq->vring.desc[i].next = i+1;
vq->data[i] = NULL;
}
vq->data[i] = NULL;
return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *vq)
{
list_del(&vq->list);
kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
unsigned int i;
for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
switch (i) {
case VIRTIO_RING_F_INDIRECT_DESC:
break;
case VIRTIO_RING_F_EVENT_IDX:
break;
default:
/* We don't understand this bit. */
clear_bit(i, vdev->features);
}
}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
MODULE_LICENSE("GPL");
| gpl-2.0 |
zombi-x/that1_kernel_asus_tf700t | drivers/hwmon/lm70.c | 3029 | 6042 | /*
* lm70.c
*
* The LM70 is a temperature sensor chip from National Semiconductor (NS).
* Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
*
* The LM70 communicates with a host processor via an SPI/Microwire Bus
* interface. The complete datasheet is available at National's website
* here:
* http://www.national.com/pf/LM/LM70.html
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/hwmon.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#define DRVNAME "lm70"
#define LM70_CHIP_LM70 0 /* original NS LM70 */
#define LM70_CHIP_TMP121 1 /* TI TMP121/TMP123 */
struct lm70 {
struct device *hwmon_dev;
struct mutex lock;
unsigned int chip;
};
/* sysfs hook function */
static ssize_t lm70_sense_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct spi_device *spi = to_spi_device(dev);
int status, val = 0;
u8 rxbuf[2];
s16 raw = 0;
struct lm70 *p_lm70 = spi_get_drvdata(spi);
if (mutex_lock_interruptible(&p_lm70->lock))
return -ERESTARTSYS;
/*
* spi_read() requires a DMA-safe buffer; so we use
* spi_write_then_read(), transmitting 0 bytes.
*/
status = spi_write_then_read(spi, NULL, 0, &rxbuf[0], 2);
if (status < 0) {
pr_warn("spi_write_then_read failed with status %d\n", status);
goto out;
}
raw = (rxbuf[0] << 8) + rxbuf[1];
dev_dbg(dev, "rxbuf[0] : 0x%02x rxbuf[1] : 0x%02x raw=0x%04x\n",
rxbuf[0], rxbuf[1], raw);
/*
* LM70:
* The "raw" temperature read into rxbuf[] is a 16-bit signed 2's
* complement value. Only the MSB 11 bits (1 sign + 10 temperature
* bits) are meaningful; the LSB 5 bits are to be discarded.
* See the datasheet.
*
* Further, each bit represents 0.25 degrees Celsius; so, multiply
* by 0.25. Also multiply by 1000 to represent in millidegrees
* Celsius.
* So it's equivalent to multiplying by 0.25 * 1000 = 250.
*
* TMP121/TMP123:
* 13 bits of 2's complement data, discard LSB 3 bits,
* resolution 0.0625 degrees Celsius.
*/
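/*
 * Worked example (illustrative value): assume raw = 0x0C80 = 3200.
 * LM70:   3200 / 32 = 100 counts of 0.25 degC   -> 100 * 250      = 25000
 * TMP121: 3200 / 8  = 400 counts of 0.0625 degC -> 400 * 625 / 10 = 25000
 * i.e. both decode to 25.0 degrees Celsius (25000 millidegrees).
 */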
switch (p_lm70->chip) {
case LM70_CHIP_LM70:
val = ((int)raw / 32) * 250;
break;
case LM70_CHIP_TMP121:
val = ((int)raw / 8) * 625 / 10;
break;
}
status = sprintf(buf, "%d\n", val); /* millidegrees Celsius */
out:
mutex_unlock(&p_lm70->lock);
return status;
}
static DEVICE_ATTR(temp1_input, S_IRUGO, lm70_sense_temp, NULL);
static ssize_t lm70_show_name(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct lm70 *p_lm70 = dev_get_drvdata(dev);
int ret;
switch (p_lm70->chip) {
case LM70_CHIP_LM70:
ret = sprintf(buf, "lm70\n");
break;
case LM70_CHIP_TMP121:
ret = sprintf(buf, "tmp121\n");
break;
default:
ret = -EINVAL;
}
return ret;
}
static DEVICE_ATTR(name, S_IRUGO, lm70_show_name, NULL);
/*----------------------------------------------------------------------*/
static int __devinit lm70_probe(struct spi_device *spi)
{
int chip = spi_get_device_id(spi)->driver_data;
struct lm70 *p_lm70;
int status;
/* signaling is SPI_MODE_0 for both LM70 and TMP121 */
if (spi->mode & (SPI_CPOL | SPI_CPHA))
return -EINVAL;
/* 3-wire link (shared SI/SO) for LM70 */
if (chip == LM70_CHIP_LM70 && !(spi->mode & SPI_3WIRE))
return -EINVAL;
/* NOTE: we assume 8-bit words, and convert to 16 bits manually */
p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL);
if (!p_lm70)
return -ENOMEM;
mutex_init(&p_lm70->lock);
p_lm70->chip = chip;
/* sysfs hook */
p_lm70->hwmon_dev = hwmon_device_register(&spi->dev);
if (IS_ERR(p_lm70->hwmon_dev)) {
dev_dbg(&spi->dev, "hwmon_device_register failed.\n");
status = PTR_ERR(p_lm70->hwmon_dev);
goto out_dev_reg_failed;
}
spi_set_drvdata(spi, p_lm70);
if ((status = device_create_file(&spi->dev, &dev_attr_temp1_input))
|| (status = device_create_file(&spi->dev, &dev_attr_name))) {
dev_dbg(&spi->dev, "device_create_file failure.\n");
goto out_dev_create_file_failed;
}
return 0;
out_dev_create_file_failed:
device_remove_file(&spi->dev, &dev_attr_temp1_input);
hwmon_device_unregister(p_lm70->hwmon_dev);
out_dev_reg_failed:
spi_set_drvdata(spi, NULL);
kfree(p_lm70);
return status;
}
static int __devexit lm70_remove(struct spi_device *spi)
{
struct lm70 *p_lm70 = spi_get_drvdata(spi);
device_remove_file(&spi->dev, &dev_attr_temp1_input);
device_remove_file(&spi->dev, &dev_attr_name);
hwmon_device_unregister(p_lm70->hwmon_dev);
spi_set_drvdata(spi, NULL);
kfree(p_lm70);
return 0;
}
static const struct spi_device_id lm70_ids[] = {
{ "lm70", LM70_CHIP_LM70 },
{ "tmp121", LM70_CHIP_TMP121 },
{ },
};
MODULE_DEVICE_TABLE(spi, lm70_ids);
static struct spi_driver lm70_driver = {
.driver = {
.name = "lm70",
.owner = THIS_MODULE,
},
.id_table = lm70_ids,
.probe = lm70_probe,
.remove = __devexit_p(lm70_remove),
};
static int __init init_lm70(void)
{
return spi_register_driver(&lm70_driver);
}
static void __exit cleanup_lm70(void)
{
spi_unregister_driver(&lm70_driver);
}
module_init(init_lm70);
module_exit(cleanup_lm70);
MODULE_AUTHOR("Kaiwan N Billimoria");
MODULE_DESCRIPTION("NS LM70 / TI TMP121/TMP123 Linux driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Loller79/Solid_Kernel-GEEHRC-Bricked | drivers/firewire/core-device.c | 3285 | 33827 | /*
* Device probing and sysfs code.
*
* Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "core.h"
void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
{
ci->p = p + 1;
ci->end = ci->p + (p[0] >> 16);
}
EXPORT_SYMBOL(fw_csr_iterator_init);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
{
*key = *ci->p >> 24;
*value = *ci->p & 0xffffff;
return ci->p++ < ci->end;
}
EXPORT_SYMBOL(fw_csr_iterator_next);
static const u32 *search_leaf(const u32 *directory, int search_key)
{
struct fw_csr_iterator ci;
int last_key = 0, key, value;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (last_key == search_key &&
key == (CSR_DESCRIPTOR | CSR_LEAF))
return ci.p - 1 + value;
last_key = key;
}
return NULL;
}
static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
{
unsigned int quadlets, i;
char c;
if (!size || !buf)
return -EINVAL;
quadlets = min(block[0] >> 16, 256U);
if (quadlets < 2)
return -ENODATA;
if (block[1] != 0 || block[2] != 0)
/* unknown language/character set */
return -ENODATA;
block += 3;
quadlets -= 2;
for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
c = block[i / 4] >> (24 - 8 * (i % 4));
if (c == '\0')
break;
buf[i] = c;
}
buf[i] = '\0';
return i;
}
/**
* fw_csr_string() - reads a string from the configuration ROM
* @directory: e.g. root directory or unit directory
* @key: the key of the preceding directory entry
* @buf: where to put the string
* @size: size of @buf, in bytes
*
* The string is taken from a minimal ASCII text descriptor leaf after
* the immediate entry with @key. The string is zero-terminated.
* Returns strlen(buf) or a negative error code.
*/
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
{
const u32 *leaf = search_leaf(directory, key);
if (!leaf)
return -ENOENT;
return textual_leaf_to_string(leaf, buf, size);
}
EXPORT_SYMBOL(fw_csr_string);
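/*
 * Illustrative call from a hypothetical unit driver (buffer name and
 * fallback string are assumptions):
 *
 *	char model[32];
 *
 *	if (fw_csr_string(unit->directory, CSR_MODEL, model, sizeof(model)) < 0)
 *		strcpy(model, "unknown");
 *
 * On success, "model" holds the zero-terminated text leaf that follows the
 * CSR_MODEL immediate entry in the unit directory.
 */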
static void get_ids(const u32 *directory, int *id)
{
struct fw_csr_iterator ci;
int key, value;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
switch (key) {
case CSR_VENDOR: id[0] = value; break;
case CSR_MODEL: id[1] = value; break;
case CSR_SPECIFIER_ID: id[2] = value; break;
case CSR_VERSION: id[3] = value; break;
}
}
}
static void get_modalias_ids(struct fw_unit *unit, int *id)
{
get_ids(&fw_parent_device(unit)->config_rom[5], id);
get_ids(unit->directory, id);
}
static bool match_ids(const struct ieee1394_device_id *id_table, int *id)
{
int match = 0;
if (id[0] == id_table->vendor_id)
match |= IEEE1394_MATCH_VENDOR_ID;
if (id[1] == id_table->model_id)
match |= IEEE1394_MATCH_MODEL_ID;
if (id[2] == id_table->specifier_id)
match |= IEEE1394_MATCH_SPECIFIER_ID;
if (id[3] == id_table->version)
match |= IEEE1394_MATCH_VERSION;
return (match & id_table->match_flags) == id_table->match_flags;
}
static bool is_fw_unit(struct device *dev);
static int fw_unit_match(struct device *dev, struct device_driver *drv)
{
const struct ieee1394_device_id *id_table =
container_of(drv, struct fw_driver, driver)->id_table;
int id[] = {0, 0, 0, 0};
/* We only allow binding to fw_units. */
if (!is_fw_unit(dev))
return 0;
get_modalias_ids(fw_unit(dev), id);
for (; id_table->match_flags != 0; id_table++)
if (match_ids(id_table, id))
return 1;
return 0;
}
static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
{
int id[] = {0, 0, 0, 0};
get_modalias_ids(unit, id);
return snprintf(buffer, buffer_size,
"ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
id[0], id[1], id[2], id[3]);
}
static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct fw_unit *unit = fw_unit(dev);
char modalias[64];
get_modalias(unit, modalias, sizeof(modalias));
if (add_uevent_var(env, "MODALIAS=%s", modalias))
return -ENOMEM;
return 0;
}
struct bus_type fw_bus_type = {
.name = "firewire",
.match = fw_unit_match,
};
EXPORT_SYMBOL(fw_bus_type);
int fw_device_enable_phys_dma(struct fw_device *device)
{
int generation = device->generation;
/* device->node_id, accessed below, must not be older than generation */
smp_rmb();
return device->card->driver->enable_phys_dma(device->card,
device->node_id,
generation);
}
EXPORT_SYMBOL(fw_device_enable_phys_dma);
struct config_rom_attribute {
struct device_attribute attr;
u32 key;
};
static ssize_t show_immediate(struct device *dev,
struct device_attribute *dattr, char *buf)
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
const u32 *dir;
int key, value, ret = -ENOENT;
down_read(&fw_device_rwsem);
if (is_fw_unit(dev))
dir = fw_unit(dev)->directory;
else
dir = fw_device(dev)->config_rom + 5;
fw_csr_iterator_init(&ci, dir);
while (fw_csr_iterator_next(&ci, &key, &value))
if (attr->key == key) {
ret = snprintf(buf, buf ? PAGE_SIZE : 0,
"0x%06x\n", value);
break;
}
up_read(&fw_device_rwsem);
return ret;
}
#define IMMEDIATE_ATTR(name, key) \
{ __ATTR(name, S_IRUGO, show_immediate, NULL), key }
static ssize_t show_text_leaf(struct device *dev,
struct device_attribute *dattr, char *buf)
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
const u32 *dir;
size_t bufsize;
char dummy_buf[2];
int ret;
down_read(&fw_device_rwsem);
if (is_fw_unit(dev))
dir = fw_unit(dev)->directory;
else
dir = fw_device(dev)->config_rom + 5;
if (buf) {
bufsize = PAGE_SIZE - 1;
} else {
buf = dummy_buf;
bufsize = 1;
}
ret = fw_csr_string(dir, attr->key, buf, bufsize);
if (ret >= 0) {
/* Strip trailing whitespace and add newline. */
while (ret > 0 && isspace(buf[ret - 1]))
ret--;
strcpy(buf + ret, "\n");
ret++;
}
up_read(&fw_device_rwsem);
return ret;
}
#define TEXT_LEAF_ATTR(name, key) \
{ __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
static struct config_rom_attribute config_rom_attributes[] = {
IMMEDIATE_ATTR(vendor, CSR_VENDOR),
IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
IMMEDIATE_ATTR(version, CSR_VERSION),
IMMEDIATE_ATTR(model, CSR_MODEL),
TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
TEXT_LEAF_ATTR(model_name, CSR_MODEL),
TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
};
static void init_fw_attribute_group(struct device *dev,
struct device_attribute *attrs,
struct fw_attribute_group *group)
{
struct device_attribute *attr;
int i, j;
for (j = 0; attrs[j].attr.name != NULL; j++)
group->attrs[j] = &attrs[j].attr;
for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
attr = &config_rom_attributes[i].attr;
if (attr->show(dev, attr, NULL) < 0)
continue;
group->attrs[j++] = &attr->attr;
}
group->attrs[j] = NULL;
group->groups[0] = &group->group;
group->groups[1] = NULL;
group->group.attrs = group->attrs;
dev->groups = (const struct attribute_group **) group->groups;
}
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_unit *unit = fw_unit(dev);
int length;
length = get_modalias(unit, buf, PAGE_SIZE);
strcpy(buf + length, "\n");
return length + 1;
}
static ssize_t rom_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev->parent);
struct fw_unit *unit = fw_unit(dev);
return snprintf(buf, PAGE_SIZE, "%d\n",
(int)(unit->directory - device->config_rom));
}
static struct device_attribute fw_unit_attributes[] = {
__ATTR_RO(modalias),
__ATTR_RO(rom_index),
__ATTR_NULL,
};
static ssize_t config_rom_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
size_t length;
down_read(&fw_device_rwsem);
length = device->config_rom_length * 4;
memcpy(buf, device->config_rom, length);
up_read(&fw_device_rwsem);
return length;
}
static ssize_t guid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
int ret;
down_read(&fw_device_rwsem);
ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
device->config_rom[3], device->config_rom[4]);
up_read(&fw_device_rwsem);
return ret;
}
static int units_sprintf(char *buf, const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
int specifier_id = 0;
int version = 0;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
switch (key) {
case CSR_SPECIFIER_ID:
specifier_id = value;
break;
case CSR_VERSION:
version = value;
break;
}
}
return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
}
static ssize_t units_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
struct fw_csr_iterator ci;
int key, value, i = 0;
down_read(&fw_device_rwsem);
fw_csr_iterator_init(&ci, &device->config_rom[5]);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (key != (CSR_UNIT | CSR_DIRECTORY))
continue;
i += units_sprintf(&buf[i], ci.p + value - 1);
if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
break;
}
up_read(&fw_device_rwsem);
if (i)
buf[i - 1] = '\n';
return i;
}
static struct device_attribute fw_device_attributes[] = {
__ATTR_RO(config_rom),
__ATTR_RO(guid),
__ATTR_RO(units),
__ATTR_NULL,
};
static int read_rom(struct fw_device *device,
int generation, int index, u32 *data)
{
u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
int i, rcode;
/* device->node_id, accessed below, must not be older than generation */
smp_rmb();
for (i = 10; i < 100; i += 10) {
rcode = fw_run_transaction(device->card,
TCODE_READ_QUADLET_REQUEST, device->node_id,
generation, device->max_speed, offset, data, 4);
if (rcode != RCODE_BUSY)
break;
msleep(i);
}
be32_to_cpus(data);
return rcode;
}
#define MAX_CONFIG_ROM_SIZE 256
/*
* Read the bus info block, perform a speed probe, and read all of the rest of
* the config ROM. We do all this with a cached bus generation. If the bus
* generation changes under us, read_config_rom will fail and get retried.
* It's better to start all over in this case because the node from which we
* are reading the ROM may have changed the ROM during the reset.
*/
static int read_config_rom(struct fw_device *device, int generation)
{
struct fw_card *card = device->card;
const u32 *old_rom, *new_rom;
u32 *rom, *stack;
u32 sp, key;
int i, end, length, ret = -1;
rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
if (rom == NULL)
return -ENOMEM;
stack = &rom[MAX_CONFIG_ROM_SIZE];
memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
device->max_speed = SCODE_100;
/* First read the bus info block. */
for (i = 0; i < 5; i++) {
if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
goto out;
/*
* As per IEEE1212 7.2, during power-up, devices can
* reply with a 0 for the first quadlet of the config
* rom to indicate that they are booting (for example,
* if the firmware is on the disk of an external
* hard disk). In that case we just fail, and the
* retry mechanism will try again later.
*/
if (i == 0 && rom[i] == 0)
goto out;
}
device->max_speed = device->node->max_speed;
/*
* Determine the speed of
* - devices with link speed less than PHY speed,
* - devices with 1394b PHY (unless only connected to 1394a PHYs),
* - all devices if there are 1394b repeaters.
* Note, we cannot use the bus info block's link_spd as starting point
* because some buggy firmwares set it lower than necessary and because
* 1394-1995 nodes do not have the field.
*/
if ((rom[2] & 0x7) < device->max_speed ||
device->max_speed == SCODE_BETA ||
card->beta_repeaters_present) {
u32 dummy;
/* for S1600 and S3200 */
if (device->max_speed == SCODE_BETA)
device->max_speed = card->link_speed;
while (device->max_speed > SCODE_100) {
if (read_rom(device, generation, 0, &dummy) ==
RCODE_COMPLETE)
break;
device->max_speed--;
}
}
/*
* Now parse the config rom. The config rom is a recursive
* directory structure so we parse it using a stack of
* references to the blocks that make up the structure. We
* push a reference to the root directory on the stack to
* start things off.
*/
length = i;
sp = 0;
stack[sp++] = 0xc0000005;
while (sp > 0) {
/*
* Pop the next block reference of the stack. The
* lower 24 bits is the offset into the config rom,
* the upper 8 bits are the type of the reference the
* block.
*/
key = stack[--sp];
i = key & 0xffffff;
if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
goto out;
/* Read header quadlet for the block to get the length. */
if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
goto out;
end = i + (rom[i] >> 16) + 1;
if (end > MAX_CONFIG_ROM_SIZE) {
/*
* This block extends outside the config ROM which is
* a firmware bug. Ignore this whole block, i.e.
* simply set a fake block length of 0.
*/
fw_err(card, "skipped invalid ROM block %x at %llx\n",
rom[i],
i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
rom[i] = 0;
end = i;
}
i++;
/*
* Now read in the block. If this is a directory
* block, check the entries as we read them to see if
* it references another block, and push it in that case.
*/
for (; i < end; i++) {
if (read_rom(device, generation, i, &rom[i]) !=
RCODE_COMPLETE)
goto out;
if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
continue;
/*
* Offset points outside the ROM. May be a firmware
* bug or an Extended ROM entry (IEEE 1212-2001 clause
* 7.7.18). Simply overwrite this pointer here by a
* fake immediate entry so that later iterators over
* the ROM don't have to check offsets all the time.
*/
if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
fw_err(card,
"skipped unsupported ROM entry %x at %llx\n",
rom[i],
i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
rom[i] = 0;
continue;
}
stack[sp++] = i + rom[i];
}
if (length < i)
length = i;
}
old_rom = device->config_rom;
new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
if (new_rom == NULL)
goto out;
down_write(&fw_device_rwsem);
device->config_rom = new_rom;
device->config_rom_length = length;
up_write(&fw_device_rwsem);
kfree(old_rom);
ret = 0;
device->max_rec = rom[2] >> 12 & 0xf;
device->cmc = rom[2] >> 30 & 1;
device->irmc = rom[2] >> 31 & 1;
out:
kfree(rom);
return ret;
}
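/*
 * Worked example of the block references handled above: the initial
 * stack[sp++] = 0xc0000005 push encodes "directory" in the topmost bits
 * (0xc0000005 >> 30 == 3) and offset 5 in the lower 24 bits, i.e. the root
 * directory that starts right after the five bus info block quadlets.
 */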
static void fw_unit_release(struct device *dev)
{
struct fw_unit *unit = fw_unit(dev);
fw_device_put(fw_parent_device(unit));
kfree(unit);
}
static struct device_type fw_unit_type = {
.uevent = fw_unit_uevent,
.release = fw_unit_release,
};
static bool is_fw_unit(struct device *dev)
{
return dev->type == &fw_unit_type;
}
static void create_units(struct fw_device *device)
{
struct fw_csr_iterator ci;
struct fw_unit *unit;
int key, value, i;
i = 0;
fw_csr_iterator_init(&ci, &device->config_rom[5]);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (key != (CSR_UNIT | CSR_DIRECTORY))
continue;
/*
* Get the address of the unit directory and try to
* match the drivers id_tables against it.
*/
unit = kzalloc(sizeof(*unit), GFP_KERNEL);
if (unit == NULL) {
fw_err(device->card, "out of memory for unit\n");
continue;
}
unit->directory = ci.p + value - 1;
unit->device.bus = &fw_bus_type;
unit->device.type = &fw_unit_type;
unit->device.parent = &device->device;
dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
ARRAY_SIZE(fw_unit_attributes) +
ARRAY_SIZE(config_rom_attributes));
init_fw_attribute_group(&unit->device,
fw_unit_attributes,
&unit->attribute_group);
if (device_register(&unit->device) < 0)
goto skip_unit;
fw_device_get(device);
continue;
skip_unit:
kfree(unit);
}
}
static int shutdown_unit(struct device *device, void *data)
{
device_unregister(device);
return 0;
}
/*
* fw_device_rwsem acts as dual purpose mutex:
* - serializes accesses to fw_device_idr,
* - serializes accesses to fw_device.config_rom/.config_rom_length and
* fw_unit.directory, unless those accesses happen at safe occasions
*/
DECLARE_RWSEM(fw_device_rwsem);
DEFINE_IDR(fw_device_idr);
int fw_cdev_major;
struct fw_device *fw_device_get_by_devt(dev_t devt)
{
struct fw_device *device;
down_read(&fw_device_rwsem);
device = idr_find(&fw_device_idr, MINOR(devt));
if (device)
fw_device_get(device);
up_read(&fw_device_rwsem);
return device;
}
struct workqueue_struct *fw_workqueue;
EXPORT_SYMBOL(fw_workqueue);
static void fw_schedule_device_work(struct fw_device *device,
unsigned long delay)
{
queue_delayed_work(fw_workqueue, &device->work, delay);
}
/*
* These defines control the retry behavior for reading the config
* rom. It shouldn't be necessary to tweak these; if the device
* doesn't respond to a config rom read within 10 seconds, it's not
* going to respond at all. As for the initial delay, a lot of
* devices will be able to respond within half a second after bus
* reset. On the other hand, it's not really worth being more
* aggressive than that, since it scales pretty well; if 10 devices
* are plugged in, they're all getting read within one second.
*/
#define MAX_RETRIES 10
#define RETRY_DELAY (3 * HZ)
#define INITIAL_DELAY (HZ / 2)
#define SHUTDOWN_DELAY (2 * HZ)
static void fw_device_shutdown(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
int minor = MINOR(device->device.devt);
if (time_before64(get_jiffies_64(),
device->card->reset_jiffies + SHUTDOWN_DELAY)
&& !list_empty(&device->card->link)) {
fw_schedule_device_work(device, SHUTDOWN_DELAY);
return;
}
if (atomic_cmpxchg(&device->state,
FW_DEVICE_GONE,
FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
return;
fw_device_cdev_remove(device);
device_for_each_child(&device->device, NULL, shutdown_unit);
device_unregister(&device->device);
down_write(&fw_device_rwsem);
idr_remove(&fw_device_idr, minor);
up_write(&fw_device_rwsem);
fw_device_put(device);
}
static void fw_device_release(struct device *dev)
{
struct fw_device *device = fw_device(dev);
struct fw_card *card = device->card;
unsigned long flags;
/*
* Take the card lock so we don't set this to NULL while a
* FW_NODE_UPDATED callback is being handled or while the
* bus manager work looks at this node.
*/
spin_lock_irqsave(&card->lock, flags);
device->node->data = NULL;
spin_unlock_irqrestore(&card->lock, flags);
fw_node_put(device->node);
kfree(device->config_rom);
kfree(device);
fw_card_put(card);
}
static struct device_type fw_device_type = {
.release = fw_device_release,
};
static bool is_fw_device(struct device *dev)
{
return dev->type == &fw_device_type;
}
static int update_unit(struct device *dev, void *data)
{
struct fw_unit *unit = fw_unit(dev);
struct fw_driver *driver = (struct fw_driver *)dev->driver;
if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
device_lock(dev);
driver->update(unit);
device_unlock(dev);
}
return 0;
}
static void fw_device_update(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
fw_device_cdev_update(device);
device_for_each_child(&device->device, NULL, update_unit);
}
/*
* If a device was pending for deletion because its node went away but its
* bus info block and root directory header match those of a newly discovered
* device, revive the existing fw_device.
* The newly allocated fw_device becomes obsolete instead.
*/
static int lookup_existing_device(struct device *dev, void *data)
{
struct fw_device *old = fw_device(dev);
struct fw_device *new = data;
struct fw_card *card = new->card;
int match = 0;
if (!is_fw_device(dev))
return 0;
down_read(&fw_device_rwsem); /* serialize config_rom access */
spin_lock_irq(&card->lock); /* serialize node access */
if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
atomic_cmpxchg(&old->state,
FW_DEVICE_GONE,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
struct fw_node *current_node = new->node;
struct fw_node *obsolete_node = old->node;
new->node = obsolete_node;
new->node->data = new;
old->node = current_node;
old->node->data = old;
old->max_speed = new->max_speed;
old->node_id = current_node->node_id;
smp_wmb(); /* update node_id before generation */
old->generation = card->generation;
old->config_rom_retries = 0;
fw_notice(card, "rediscovered device %s\n", dev_name(dev));
PREPARE_DELAYED_WORK(&old->work, fw_device_update);
fw_schedule_device_work(old, 0);
if (current_node == card->root_node)
fw_schedule_bm_work(card, 0);
match = 1;
}
spin_unlock_irq(&card->lock);
up_read(&fw_device_rwsem);
return match;
}
enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
static void set_broadcast_channel(struct fw_device *device, int generation)
{
struct fw_card *card = device->card;
__be32 data;
int rcode;
if (!card->broadcast_channel_allocated)
return;
/*
* The Broadcast_Channel Valid bit is required by nodes which want to
* transmit on this channel. Such transmissions are practically
* exclusive to IP over 1394 (RFC 2734). IP capable nodes are required
* to be IRM capable and have a max_rec of 8 or more. We use this fact
* to narrow down to which nodes we send Broadcast_Channel updates.
*/
if (!device->irmc || device->max_rec < 8)
return;
/*
* Some 1394-1995 nodes crash if this 1394a-2000 register is written.
* Perform a read test first.
*/
if (device->bc_implemented == BC_UNKNOWN) {
rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
&data, 4);
switch (rcode) {
case RCODE_COMPLETE:
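/* A quadlet with the top bit set means the register is implemented. */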
if (data & cpu_to_be32(1 << 31)) {
device->bc_implemented = BC_IMPLEMENTED;
break;
}
/* else fall through to case address error */
case RCODE_ADDRESS_ERROR:
device->bc_implemented = BC_UNIMPLEMENTED;
}
}
if (device->bc_implemented == BC_IMPLEMENTED) {
data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
BROADCAST_CHANNEL_VALID);
fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
&data, 4);
}
}
int fw_device_set_broadcast_channel(struct device *dev, void *gen)
{
if (is_fw_device(dev))
set_broadcast_channel(fw_device(dev), (long)gen);
return 0;
}
static void fw_device_init(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct fw_card *card = device->card;
struct device *revived_dev;
int minor, ret;
/*
* All failure paths here set node->data to NULL, so that we
* don't try to do device_for_each_child() on a kfree()'d
* device.
*/
if (read_config_rom(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
fw_schedule_device_work(device, RETRY_DELAY);
} else {
if (device->node->link_on)
fw_notice(card, "giving up on Config ROM for node id %x\n",
device->node_id);
if (device->node == card->root_node)
fw_schedule_bm_work(card, 0);
fw_device_release(&device->device);
}
return;
}
revived_dev = device_find_child(card->device,
device, lookup_existing_device);
if (revived_dev) {
put_device(revived_dev);
fw_device_release(&device->device);
return;
}
device_initialize(&device->device);
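/*
 * Take the reference that the fw_device_idr entry will hold and
 * allocate a minor number for the character device node.
 */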
fw_device_get(device);
down_write(&fw_device_rwsem);
ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
idr_get_new(&fw_device_idr, device, &minor) :
-ENOMEM;
up_write(&fw_device_rwsem);
if (ret < 0)
goto error;
device->device.bus = &fw_bus_type;
device->device.type = &fw_device_type;
device->device.parent = card->device;
device->device.devt = MKDEV(fw_cdev_major, minor);
dev_set_name(&device->device, "fw%d", minor);
BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
ARRAY_SIZE(fw_device_attributes) +
ARRAY_SIZE(config_rom_attributes));
init_fw_attribute_group(&device->device,
fw_device_attributes,
&device->attribute_group);
if (device_add(&device->device)) {
fw_err(card, "failed to add device\n");
goto error_with_cdev;
}
create_units(device);
/*
* Transition the device to running state. If it got pulled
* out from under us while we did the initialization work, we
* have to shut down the device again here. Normally, though,
* fw_node_event will be responsible for shutting it down when
* necessary. We have to use the atomic cmpxchg here to avoid
* racing with the FW_NODE_DESTROYED case in
* fw_node_event().
*/
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
fw_schedule_device_work(device, SHUTDOWN_DELAY);
} else {
fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
dev_name(&device->device),
device->config_rom[3], device->config_rom[4],
1 << device->max_speed);
device->config_rom_retries = 0;
set_broadcast_channel(device, device->generation);
}
/*
* Reschedule the IRM work if we just finished reading the
* root node config rom. If this races with a bus reset we
* just end up running the IRM work a couple of extra times -
* pretty harmless.
*/
if (device->node == card->root_node)
fw_schedule_bm_work(card, 0);
return;
error_with_cdev:
down_write(&fw_device_rwsem);
idr_remove(&fw_device_idr, minor);
up_write(&fw_device_rwsem);
error:
fw_device_put(device); /* fw_device_idr's reference */
put_device(&device->device); /* our reference */
}
enum {
REREAD_BIB_ERROR,
REREAD_BIB_GONE,
REREAD_BIB_UNCHANGED,
REREAD_BIB_CHANGED,
};
/* Reread and compare bus info block and header of root directory */
static int reread_config_rom(struct fw_device *device, int generation)
{
u32 q;
int i;
for (i = 0; i < 6; i++) {
if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
return REREAD_BIB_ERROR;
if (i == 0 && q == 0)
return REREAD_BIB_GONE;
if (q != device->config_rom[i])
return REREAD_BIB_CHANGED;
}
return REREAD_BIB_UNCHANGED;
}
static void fw_device_refresh(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct fw_card *card = device->card;
int node_id = device->node_id;
switch (reread_config_rom(device, device->generation)) {
case REREAD_BIB_ERROR:
if (device->config_rom_retries < MAX_RETRIES / 2 &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
fw_schedule_device_work(device, RETRY_DELAY / 2);
return;
}
goto give_up;
case REREAD_BIB_GONE:
goto gone;
case REREAD_BIB_UNCHANGED:
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
goto gone;
fw_device_update(work);
device->config_rom_retries = 0;
goto out;
case REREAD_BIB_CHANGED:
break;
}
/*
* Something changed. We keep things simple and don't investigate
* further. We just destroy all previous units and create new ones.
*/
device_for_each_child(&device->device, NULL, shutdown_unit);
if (read_config_rom(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
fw_schedule_device_work(device, RETRY_DELAY);
return;
}
goto give_up;
}
fw_device_cdev_update(device);
create_units(device);
/* Userspace may want to re-read attributes. */
kobject_uevent(&device->device.kobj, KOBJ_CHANGE);
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
goto gone;
fw_notice(card, "refreshed device %s\n", dev_name(&device->device));
device->config_rom_retries = 0;
goto out;
give_up:
fw_notice(card, "giving up on refresh of device %s\n",
dev_name(&device->device));
gone:
atomic_set(&device->state, FW_DEVICE_GONE);
PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
fw_schedule_device_work(device, SHUTDOWN_DELAY);
out:
if (node_id == card->root_node->node_id)
fw_schedule_bm_work(card, 0);
}
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
struct fw_device *device;
switch (event) {
case FW_NODE_CREATED:
/*
* Attempt to scan the node, regardless of whether its self ID has
* the L (link active) flag set or not. Some broken devices
* send L=0 but have an up-and-running link; others send L=1
* without actually having a link.
*/
create:
device = kzalloc(sizeof(*device), GFP_ATOMIC);
if (device == NULL)
break;
/*
* Do minimal initialization of the device here; the
* rest will happen in fw_device_init().
*
* Attention: A lot of things, even fw_device_get(),
* cannot be done before fw_device_init() finished!
* You can basically just check device->state and
* schedule work until then, but only while holding
* card->lock.
*/
atomic_set(&device->state, FW_DEVICE_INITIALIZING);
device->card = fw_card_get(card);
device->node = fw_node_get(node);
device->node_id = node->node_id;
device->generation = card->generation;
device->is_local = node == card->local_node;
mutex_init(&device->client_list_mutex);
INIT_LIST_HEAD(&device->client_list);
/*
* Set the node data to point back to this device so
* FW_NODE_UPDATED callbacks can update the node_id
* and generation for the device.
*/
node->data = device;
/*
* Many devices are slow to respond after bus resets,
* especially if they are bus powered and go through
* power-up after getting plugged in. We schedule the
* first config rom scan half a second after bus reset.
*/
INIT_DELAYED_WORK(&device->work, fw_device_init);
fw_schedule_device_work(device, INITIAL_DELAY);
break;
case FW_NODE_INITIATED_RESET:
case FW_NODE_LINK_ON:
device = node->data;
if (device == NULL)
goto create;
device->node_id = node->node_id;
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
if (atomic_cmpxchg(&device->state,
FW_DEVICE_RUNNING,
FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
fw_schedule_device_work(device,
device->is_local ? 0 : INITIAL_DELAY);
}
break;
case FW_NODE_UPDATED:
device = node->data;
if (device == NULL)
break;
device->node_id = node->node_id;
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
PREPARE_DELAYED_WORK(&device->work, fw_device_update);
fw_schedule_device_work(device, 0);
}
break;
case FW_NODE_DESTROYED:
case FW_NODE_LINK_OFF:
if (!node->data)
break;
/*
* Destroy the device associated with the node. There
* are two cases here: either the device is fully
* initialized (FW_DEVICE_RUNNING) or we're in the
* process of reading its config rom
* (FW_DEVICE_INITIALIZING). If it is fully
* initialized we can reuse device->work to schedule a
* full fw_device_shutdown(). If not, there's work
* scheduled to read its config rom, and we just put
* the device in shutdown state to have that code fail
* to create the device.
*/
device = node->data;
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
fw_schedule_device_work(device,
list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
}
break;
}
}
| gpl-2.0 |
eugene373/ApexqJB | fs/nfs/nfs3xdr.c | 3285 | 54173 | /*
* linux/fs/nfs/nfs3xdr.c
*
* XDR functions to encode/decode NFSv3 RPC arguments and results.
*
* Copyright (C) 1996, 1997 Olaf Kirch
*/
#include <linux/param.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/kdev_t.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/nfsacl.h>
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
/* Mapping from NFS error code to "errno" error code. */
#define errno_NFSERR_IO EIO
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32-bit words
*/
#define NFS3_fhandle_sz (1+16)
#define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */
#define NFS3_sattr_sz (15)
#define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
#define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))
#define NFS3_fattr_sz (21)
#define NFS3_cookieverf_sz (NFS3_COOKIEVERFSIZE>>2)
#define NFS3_wcc_attr_sz (6)
#define NFS3_pre_op_attr_sz (1+NFS3_wcc_attr_sz)
#define NFS3_post_op_attr_sz (1+NFS3_fattr_sz)
#define NFS3_wcc_data_sz (NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
#define NFS3_diropargs_sz (NFS3_fh_sz+NFS3_filename_sz)
#define NFS3_getattrargs_sz (NFS3_fh_sz)
#define NFS3_setattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3)
#define NFS3_lookupargs_sz (NFS3_fh_sz+NFS3_filename_sz)
#define NFS3_accessargs_sz (NFS3_fh_sz+1)
#define NFS3_readlinkargs_sz (NFS3_fh_sz)
#define NFS3_readargs_sz (NFS3_fh_sz+3)
#define NFS3_writeargs_sz (NFS3_fh_sz+5)
#define NFS3_createargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz)
#define NFS3_mkdirargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz)
#define NFS3_symlinkargs_sz (NFS3_diropargs_sz+1+NFS3_sattr_sz)
#define NFS3_mknodargs_sz (NFS3_diropargs_sz+2+NFS3_sattr_sz)
#define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz)
#define NFS3_renameargs_sz (NFS3_diropargs_sz+NFS3_diropargs_sz)
#define NFS3_linkargs_sz (NFS3_fh_sz+NFS3_diropargs_sz)
#define NFS3_readdirargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+3)
#define NFS3_readdirplusargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+4)
#define NFS3_commitargs_sz (NFS3_fh_sz+3)
#define NFS3_getattrres_sz (1+NFS3_fattr_sz)
#define NFS3_setattrres_sz (1+NFS3_wcc_data_sz)
#define NFS3_removeres_sz (NFS3_setattrres_sz)
#define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
#define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1)
#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1)
#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3)
#define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
#define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2)
#define NFS3_fsstatres_sz (1+NFS3_post_op_attr_sz+13)
#define NFS3_fsinfores_sz (1+NFS3_post_op_attr_sz+12)
#define NFS3_pathconfres_sz (1+NFS3_post_op_attr_sz+6)
#define NFS3_commitres_sz (1+NFS3_wcc_data_sz+2)
#define ACL3_getaclargs_sz (NFS3_fh_sz+1)
#define ACL3_setaclargs_sz (NFS3_fh_sz+1+ \
XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
#define ACL3_getaclres_sz (1+NFS3_post_op_attr_sz+1+ \
XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
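/*
 * For illustration: NFS3_getattrres_sz is 1 (status) + NFS3_fattr_sz (21)
 * = 22 words, i.e. at most 88 bytes of reply data.
 */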
/*
* Map file type to S_IFMT bits
*/
static const umode_t nfs_type2fmt[] = {
[NF3BAD] = 0,
[NF3REG] = S_IFREG,
[NF3DIR] = S_IFDIR,
[NF3BLK] = S_IFBLK,
[NF3CHR] = S_IFCHR,
[NF3LNK] = S_IFLNK,
[NF3SOCK] = S_IFSOCK,
[NF3FIFO] = S_IFIFO,
};
/*
* While encoding arguments, set up the reply buffer in advance to
* receive reply data directly into the page cache.
*/
static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
unsigned int base, unsigned int len,
unsigned int bufsize)
{
struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
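/* replen is in 32-bit words; xdr_inline_pages() takes bytes, hence the << 2. */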
xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
}
/*
* Handle decode buffer overflows out-of-line.
*/
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
dprintk("NFS: %s prematurely hit the end of our receive buffer. "
"Remaining buffer length is %tu words.\n",
func, xdr->end - xdr->p);
}
/*
* Encode/decode NFSv3 basic data types
*
* Basic NFSv3 data types are defined in section 2.5 of RFC 1813:
* "NFS Version 3 Protocol Specification".
*
* Not all basic data types have their own encoding and decoding
* functions. For run-time efficiency, some data types are encoded
* or decoded inline.
*/
static void encode_uint32(struct xdr_stream *xdr, u32 value)
{
__be32 *p = xdr_reserve_space(xdr, 4);
*p = cpu_to_be32(value);
}
static int decode_uint32(struct xdr_stream *xdr, u32 *value)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
*value = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_uint64(struct xdr_stream *xdr, u64 *value)
{
__be32 *p;
p = xdr_inline_decode(xdr, 8);
if (unlikely(p == NULL))
goto out_overflow;
xdr_decode_hyper(p, value);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* fileid3
*
* typedef uint64 fileid3;
*/
static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid)
{
return xdr_decode_hyper(p, fileid);
}
static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid)
{
return decode_uint64(xdr, fileid);
}
/*
* filename3
*
* typedef string filename3<>;
*/
static void encode_filename3(struct xdr_stream *xdr,
const char *name, u32 length)
{
__be32 *p;
BUG_ON(length > NFS3_MAXNAMLEN);
p = xdr_reserve_space(xdr, 4 + length);
xdr_encode_opaque(p, name, length);
}
static int decode_inline_filename3(struct xdr_stream *xdr,
const char **name, u32 *length)
{
__be32 *p;
u32 count;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
count = be32_to_cpup(p);
if (count > NFS3_MAXNAMLEN)
goto out_nametoolong;
p = xdr_inline_decode(xdr, count);
if (unlikely(p == NULL))
goto out_overflow;
*name = (const char *)p;
*length = count;
return 0;
out_nametoolong:
dprintk("NFS: returned filename too long: %u\n", count);
return -ENAMETOOLONG;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* nfspath3
*
* typedef string nfspath3<>;
*/
static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
const u32 length)
{
BUG_ON(length > NFS3_MAXPATHLEN);
encode_uint32(xdr, length);
xdr_write_pages(xdr, pages, 0, length);
}
static int decode_nfspath3(struct xdr_stream *xdr)
{
u32 recvd, count;
size_t hdrlen;
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
count = be32_to_cpup(p);
if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
goto out_nametoolong;
hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
recvd = xdr->buf->len - hdrlen;
if (unlikely(count > recvd))
goto out_cheating;
xdr_read_pages(xdr, count);
xdr_terminate_string(xdr->buf, count);
return 0;
out_nametoolong:
dprintk("NFS: returned pathname too long: %u\n", count);
return -ENAMETOOLONG;
out_cheating:
dprintk("NFS: server cheating in pathname result: "
"count %u > recvd %u\n", count, recvd);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* cookie3
*
* typedef uint64 cookie3;
*/
static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie)
{
return xdr_encode_hyper(p, cookie);
}
static int decode_cookie3(struct xdr_stream *xdr, u64 *cookie)
{
return decode_uint64(xdr, cookie);
}
/*
* cookieverf3
*
* typedef opaque cookieverf3[NFS3_COOKIEVERFSIZE];
*/
static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier)
{
memcpy(p, verifier, NFS3_COOKIEVERFSIZE);
return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE);
}
static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier)
{
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
if (unlikely(p == NULL))
goto out_overflow;
memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* createverf3
*
* typedef opaque createverf3[NFS3_CREATEVERFSIZE];
*/
static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier)
{
__be32 *p;
p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE);
memcpy(p, verifier, NFS3_CREATEVERFSIZE);
}
static int decode_writeverf3(struct xdr_stream *xdr, __be32 *verifier)
{
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
if (unlikely(p == NULL))
goto out_overflow;
memcpy(verifier, p, NFS3_WRITEVERFSIZE);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* size3
*
* typedef uint64 size3;
*/
static __be32 *xdr_decode_size3(__be32 *p, u64 *size)
{
return xdr_decode_hyper(p, size);
}
/*
* nfsstat3
*
* enum nfsstat3 {
* NFS3_OK = 0,
* ...
* }
*/
#define NFS3_OK NFS_OK
static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
*status = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* ftype3
*
* enum ftype3 {
* NF3REG = 1,
* NF3DIR = 2,
* NF3BLK = 3,
* NF3CHR = 4,
* NF3LNK = 5,
* NF3SOCK = 6,
* NF3FIFO = 7
* };
*/
static void encode_ftype3(struct xdr_stream *xdr, const u32 type)
{
BUG_ON(type > NF3FIFO);
encode_uint32(xdr, type);
}
static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode)
{
u32 type;
type = be32_to_cpup(p++);
if (type > NF3FIFO)
type = NF3NON;
*mode = nfs_type2fmt[type];
return p;
}
/*
* specdata3
*
* struct specdata3 {
* uint32 specdata1;
* uint32 specdata2;
* };
*/
static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev)
{
__be32 *p;
p = xdr_reserve_space(xdr, 8);
*p++ = cpu_to_be32(MAJOR(rdev));
*p = cpu_to_be32(MINOR(rdev));
}
static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev)
{
unsigned int major, minor;
major = be32_to_cpup(p++);
minor = be32_to_cpup(p++);
*rdev = MKDEV(major, minor);
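/* If major/minor don't survive the MKDEV() round trip, they don't fit in a dev_t; report no device instead. */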
if (MAJOR(*rdev) != major || MINOR(*rdev) != minor)
*rdev = 0;
return p;
}
/*
* nfs_fh3
*
* struct nfs_fh3 {
* opaque data<NFS3_FHSIZE>;
* };
*/
static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh)
{
__be32 *p;
BUG_ON(fh->size > NFS3_FHSIZE);
p = xdr_reserve_space(xdr, 4 + fh->size);
xdr_encode_opaque(p, fh->data, fh->size);
}
static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
{
u32 length;
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
length = be32_to_cpup(p++);
if (unlikely(length > NFS3_FHSIZE))
goto out_toobig;
p = xdr_inline_decode(xdr, length);
if (unlikely(p == NULL))
goto out_overflow;
fh->size = length;
memcpy(fh->data, p, length);
return 0;
out_toobig:
dprintk("NFS: file handle size (%u) too big\n", length);
return -E2BIG;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static void zero_nfs_fh3(struct nfs_fh *fh)
{
memset(fh, 0, sizeof(*fh));
}
/*
* nfstime3
*
* struct nfstime3 {
* uint32 seconds;
* uint32 nseconds;
* };
*/
static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep)
{
*p++ = cpu_to_be32(timep->tv_sec);
*p++ = cpu_to_be32(timep->tv_nsec);
return p;
}
static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep)
{
timep->tv_sec = be32_to_cpup(p++);
timep->tv_nsec = be32_to_cpup(p++);
return p;
}
/*
* sattr3
*
* enum time_how {
* DONT_CHANGE = 0,
* SET_TO_SERVER_TIME = 1,
* SET_TO_CLIENT_TIME = 2
* };
*
* union set_mode3 switch (bool set_it) {
* case TRUE:
* mode3 mode;
* default:
* void;
* };
*
* union set_uid3 switch (bool set_it) {
* case TRUE:
* uid3 uid;
* default:
* void;
* };
*
* union set_gid3 switch (bool set_it) {
* case TRUE:
* gid3 gid;
* default:
* void;
* };
*
* union set_size3 switch (bool set_it) {
* case TRUE:
* size3 size;
* default:
* void;
* };
*
* union set_atime switch (time_how set_it) {
* case SET_TO_CLIENT_TIME:
* nfstime3 atime;
* default:
* void;
* };
*
* union set_mtime switch (time_how set_it) {
* case SET_TO_CLIENT_TIME:
* nfstime3 mtime;
* default:
* void;
* };
*
* struct sattr3 {
* set_mode3 mode;
* set_uid3 uid;
* set_gid3 gid;
* set_size3 size;
* set_atime atime;
* set_mtime mtime;
* };
*/
static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
{
u32 nbytes;
__be32 *p;
/*
* In order to make only a single xdr_reserve_space() call,
* pre-compute the total number of bytes to be reserved.
* Six boolean values, one for each set_foo field, are always
* present in the encoded result, so start there.
*/
nbytes = 6 * 4;
if (attr->ia_valid & ATTR_MODE)
nbytes += 4;
if (attr->ia_valid & ATTR_UID)
nbytes += 4;
if (attr->ia_valid & ATTR_GID)
nbytes += 4;
if (attr->ia_valid & ATTR_SIZE)
nbytes += 8;
if (attr->ia_valid & ATTR_ATIME_SET)
nbytes += 8;
if (attr->ia_valid & ATTR_MTIME_SET)
nbytes += 8;
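/* e.g. changing only mode and size reserves 6 * 4 + 4 + 8 = 36 bytes */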
p = xdr_reserve_space(xdr, nbytes);
if (attr->ia_valid & ATTR_MODE) {
*p++ = xdr_one;
*p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO);
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_UID) {
*p++ = xdr_one;
*p++ = cpu_to_be32(attr->ia_uid);
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_GID) {
*p++ = xdr_one;
*p++ = cpu_to_be32(attr->ia_gid);
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_SIZE) {
*p++ = xdr_one;
p = xdr_encode_hyper(p, (u64)attr->ia_size);
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_ATIME_SET) {
*p++ = xdr_two;
p = xdr_encode_nfstime3(p, &attr->ia_atime);
} else if (attr->ia_valid & ATTR_ATIME) {
*p++ = xdr_one;
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_MTIME_SET) {
*p++ = xdr_two;
xdr_encode_nfstime3(p, &attr->ia_mtime);
} else if (attr->ia_valid & ATTR_MTIME) {
*p = xdr_one;
} else
*p = xdr_zero;
}
/*
* fattr3
*
* struct fattr3 {
* ftype3 type;
* mode3 mode;
* uint32 nlink;
* uid3 uid;
* gid3 gid;
* size3 size;
* size3 used;
* specdata3 rdev;
* uint64 fsid;
* fileid3 fileid;
* nfstime3 atime;
* nfstime3 mtime;
* nfstime3 ctime;
* };
*/
static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
umode_t fmode;
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
if (unlikely(p == NULL))
goto out_overflow;
p = xdr_decode_ftype3(p, &fmode);
fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
fattr->nlink = be32_to_cpup(p++);
fattr->uid = be32_to_cpup(p++);
fattr->gid = be32_to_cpup(p++);
p = xdr_decode_size3(p, &fattr->size);
p = xdr_decode_size3(p, &fattr->du.nfs3.used);
p = xdr_decode_specdata3(p, &fattr->rdev);
p = xdr_decode_hyper(p, &fattr->fsid.major);
fattr->fsid.minor = 0;
p = xdr_decode_fileid3(p, &fattr->fileid);
p = xdr_decode_nfstime3(p, &fattr->atime);
p = xdr_decode_nfstime3(p, &fattr->mtime);
xdr_decode_nfstime3(p, &fattr->ctime);
fattr->valid |= NFS_ATTR_FATTR_V3;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* post_op_attr
*
* union post_op_attr switch (bool attributes_follow) {
* case TRUE:
* fattr3 attributes;
* case FALSE:
* void;
* };
*/
static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p != xdr_zero)
return decode_fattr3(xdr, fattr);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* wcc_attr
* struct wcc_attr {
* size3 size;
* nfstime3 mtime;
* nfstime3 ctime;
* };
*/
static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
if (unlikely(p == NULL))
goto out_overflow;
fattr->valid |= NFS_ATTR_FATTR_PRESIZE
| NFS_ATTR_FATTR_PREMTIME
| NFS_ATTR_FATTR_PRECTIME;
p = xdr_decode_size3(p, &fattr->pre_size);
p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
xdr_decode_nfstime3(p, &fattr->pre_ctime);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* pre_op_attr
* union pre_op_attr switch (bool attributes_follow) {
* case TRUE:
* wcc_attr attributes;
* case FALSE:
* void;
* };
*
* wcc_data
*
* struct wcc_data {
* pre_op_attr before;
* post_op_attr after;
* };
*/
static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p != xdr_zero)
return decode_wcc_attr(xdr, fattr);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
int error;
error = decode_pre_op_attr(xdr, fattr);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, fattr);
out:
return error;
}
/*
* post_op_fh3
*
* union post_op_fh3 switch (bool handle_follows) {
* case TRUE:
* nfs_fh3 handle;
* case FALSE:
* void;
* };
*/
static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
{
__be32 *p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p != xdr_zero)
return decode_nfs_fh3(xdr, fh);
zero_nfs_fh3(fh);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* diropargs3
*
* struct diropargs3 {
* nfs_fh3 dir;
* filename3 name;
* };
*/
static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
const char *name, u32 length)
{
encode_nfs_fh3(xdr, fh);
encode_filename3(xdr, name, length);
}
/*
* NFSv3 XDR encode functions
*
* NFSv3 argument types are defined in section 3.3 of RFC 1813:
* "NFS Version 3 Protocol Specification".
*/
/*
* 3.3.1 GETATTR3args
*
* struct GETATTR3args {
* nfs_fh3 object;
* };
*/
static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_fh *fh)
{
encode_nfs_fh3(xdr, fh);
}
/*
* 3.3.2 SETATTR3args
*
* union sattrguard3 switch (bool check) {
* case TRUE:
* nfstime3 obj_ctime;
* case FALSE:
* void;
* };
*
* struct SETATTR3args {
* nfs_fh3 object;
* sattr3 new_attributes;
* sattrguard3 guard;
* };
*/
static void encode_sattrguard3(struct xdr_stream *xdr,
const struct nfs3_sattrargs *args)
{
__be32 *p;
if (args->guard) {
p = xdr_reserve_space(xdr, 4 + 8);
*p++ = xdr_one;
xdr_encode_nfstime3(p, &args->guardtime);
} else {
p = xdr_reserve_space(xdr, 4);
*p = xdr_zero;
}
}
static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_sattrargs *args)
{
encode_nfs_fh3(xdr, args->fh);
encode_sattr3(xdr, args->sattr);
encode_sattrguard3(xdr, args);
}
/*
* 3.3.3 LOOKUP3args
*
* struct LOOKUP3args {
* diropargs3 what;
* };
*/
static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_diropargs *args)
{
encode_diropargs3(xdr, args->fh, args->name, args->len);
}
/*
* 3.3.4 ACCESS3args
*
* struct ACCESS3args {
* nfs_fh3 object;
* uint32 access;
* };
*/
static void encode_access3args(struct xdr_stream *xdr,
const struct nfs3_accessargs *args)
{
encode_nfs_fh3(xdr, args->fh);
encode_uint32(xdr, args->access);
}
static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_accessargs *args)
{
encode_access3args(xdr, args);
}
/*
* 3.3.5 READLINK3args
*
* struct READLINK3args {
* nfs_fh3 symlink;
* };
*/
static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_readlinkargs *args)
{
encode_nfs_fh3(xdr, args->fh);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->pglen, NFS3_readlinkres_sz);
}
/*
* 3.3.6 READ3args
*
* struct READ3args {
* nfs_fh3 file;
* offset3 offset;
* count3 count;
* };
*/
static void encode_read3args(struct xdr_stream *xdr,
const struct nfs_readargs *args)
{
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
p = xdr_reserve_space(xdr, 8 + 4);
p = xdr_encode_hyper(p, args->offset);
*p = cpu_to_be32(args->count);
}
static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_readargs *args)
{
encode_read3args(xdr, args);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->count, NFS3_readres_sz);
req->rq_rcv_buf.flags |= XDRBUF_READ;
}
/*
* 3.3.7 WRITE3args
*
* enum stable_how {
* UNSTABLE = 0,
* DATA_SYNC = 1,
* FILE_SYNC = 2
* };
*
* struct WRITE3args {
* nfs_fh3 file;
* offset3 offset;
* count3 count;
* stable_how stable;
* opaque data<>;
* };
*/
static void encode_write3args(struct xdr_stream *xdr,
const struct nfs_writeargs *args)
{
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4);
p = xdr_encode_hyper(p, args->offset);
*p++ = cpu_to_be32(args->count);
*p++ = cpu_to_be32(args->stable);
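/* count appears twice: once as the count3 argument, once as the length of the opaque data array */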
*p = cpu_to_be32(args->count);
xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
}
static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_writeargs *args)
{
encode_write3args(xdr, args);
xdr->buf->flags |= XDRBUF_WRITE;
}
/*
* 3.3.8 CREATE3args
*
* enum createmode3 {
* UNCHECKED = 0,
* GUARDED = 1,
* EXCLUSIVE = 2
* };
*
* union createhow3 switch (createmode3 mode) {
* case UNCHECKED:
* case GUARDED:
* sattr3 obj_attributes;
* case EXCLUSIVE:
* createverf3 verf;
* };
*
* struct CREATE3args {
* diropargs3 where;
* createhow3 how;
* };
*/
static void encode_createhow3(struct xdr_stream *xdr,
const struct nfs3_createargs *args)
{
encode_uint32(xdr, args->createmode);
switch (args->createmode) {
case NFS3_CREATE_UNCHECKED:
case NFS3_CREATE_GUARDED:
encode_sattr3(xdr, args->sattr);
break;
case NFS3_CREATE_EXCLUSIVE:
encode_createverf3(xdr, args->verifier);
break;
default:
BUG();
}
}
static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_createargs *args)
{
encode_diropargs3(xdr, args->fh, args->name, args->len);
encode_createhow3(xdr, args);
}
/*
* 3.3.9 MKDIR3args
*
* struct MKDIR3args {
* diropargs3 where;
* sattr3 attributes;
* };
*/
static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_mkdirargs *args)
{
encode_diropargs3(xdr, args->fh, args->name, args->len);
encode_sattr3(xdr, args->sattr);
}
/*
* 3.3.10 SYMLINK3args
*
* struct symlinkdata3 {
* sattr3 symlink_attributes;
* nfspath3 symlink_data;
* };
*
* struct SYMLINK3args {
* diropargs3 where;
* symlinkdata3 symlink;
* };
*/
static void encode_symlinkdata3(struct xdr_stream *xdr,
const struct nfs3_symlinkargs *args)
{
encode_sattr3(xdr, args->sattr);
encode_nfspath3(xdr, args->pages, args->pathlen);
}
static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_symlinkargs *args)
{
encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
encode_symlinkdata3(xdr, args);
}
/*
* 3.3.11 MKNOD3args
*
* struct devicedata3 {
* sattr3 dev_attributes;
* specdata3 spec;
* };
*
* union mknoddata3 switch (ftype3 type) {
* case NF3CHR:
* case NF3BLK:
* devicedata3 device;
* case NF3SOCK:
* case NF3FIFO:
* sattr3 pipe_attributes;
* default:
* void;
* };
*
* struct MKNOD3args {
* diropargs3 where;
* mknoddata3 what;
* };
*/
static void encode_devicedata3(struct xdr_stream *xdr,
const struct nfs3_mknodargs *args)
{
encode_sattr3(xdr, args->sattr);
encode_specdata3(xdr, args->rdev);
}
static void encode_mknoddata3(struct xdr_stream *xdr,
const struct nfs3_mknodargs *args)
{
encode_ftype3(xdr, args->type);
switch (args->type) {
case NF3CHR:
case NF3BLK:
encode_devicedata3(xdr, args);
break;
case NF3SOCK:
case NF3FIFO:
encode_sattr3(xdr, args->sattr);
break;
case NF3REG:
case NF3DIR:
break;
default:
BUG();
}
}
static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_mknodargs *args)
{
encode_diropargs3(xdr, args->fh, args->name, args->len);
encode_mknoddata3(xdr, args);
}
/*
* 3.3.12 REMOVE3args
*
* struct REMOVE3args {
* diropargs3 object;
* };
*/
static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_removeargs *args)
{
encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
}
/*
* 3.3.14 RENAME3args
*
* struct RENAME3args {
* diropargs3 from;
* diropargs3 to;
* };
*/
static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_renameargs *args)
{
const struct qstr *old = args->old_name;
const struct qstr *new = args->new_name;
encode_diropargs3(xdr, args->old_dir, old->name, old->len);
encode_diropargs3(xdr, args->new_dir, new->name, new->len);
}
/*
* 3.3.15 LINK3args
*
* struct LINK3args {
* nfs_fh3 file;
* diropargs3 link;
* };
*/
static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_linkargs *args)
{
encode_nfs_fh3(xdr, args->fromfh);
encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
}
/*
* 3.3.16 READDIR3args
*
* struct READDIR3args {
* nfs_fh3 dir;
* cookie3 cookie;
* cookieverf3 cookieverf;
* count3 count;
* };
*/
static void encode_readdir3args(struct xdr_stream *xdr,
const struct nfs3_readdirargs *args)
{
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4);
p = xdr_encode_cookie3(p, args->cookie);
p = xdr_encode_cookieverf3(p, args->verf);
*p = cpu_to_be32(args->count);
}
static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_readdirargs *args)
{
encode_readdir3args(xdr, args);
prepare_reply_buffer(req, args->pages, 0,
args->count, NFS3_readdirres_sz);
}
/*
* 3.3.17 READDIRPLUS3args
*
* struct READDIRPLUS3args {
* nfs_fh3 dir;
* cookie3 cookie;
* cookieverf3 cookieverf;
* count3 dircount;
* count3 maxcount;
* };
*/
static void encode_readdirplus3args(struct xdr_stream *xdr,
const struct nfs3_readdirargs *args)
{
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4);
p = xdr_encode_cookie3(p, args->cookie);
p = xdr_encode_cookieverf3(p, args->verf);
/*
* readdirplus needs both a dircount (entry data, excluding attributes
* and file handles) and a maxcount (total reply size); we just make
* sure the dircount we derive from the buffer size is big enough.
*/
*p++ = cpu_to_be32(args->count >> 3);
*p = cpu_to_be32(args->count);
}
static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_readdirargs *args)
{
encode_readdirplus3args(xdr, args);
prepare_reply_buffer(req, args->pages, 0,
args->count, NFS3_readdirres_sz);
}
/*
* 3.3.21 COMMIT3args
*
* struct COMMIT3args {
* nfs_fh3 file;
* offset3 offset;
* count3 count;
* };
*/
static void encode_commit3args(struct xdr_stream *xdr,
const struct nfs_writeargs *args)
{
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
p = xdr_reserve_space(xdr, 8 + 4);
p = xdr_encode_hyper(p, args->offset);
*p = cpu_to_be32(args->count);
}
static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_writeargs *args)
{
encode_commit3args(xdr, args);
}
#ifdef CONFIG_NFS_V3_ACL
static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_getaclargs *args)
{
encode_nfs_fh3(xdr, args->fh);
encode_uint32(xdr, args->mask);
if (args->mask & (NFS_ACL | NFS_DFACL))
prepare_reply_buffer(req, args->pages, 0,
NFSACL_MAXPAGES << PAGE_SHIFT,
ACL3_getaclres_sz);
}
static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_setaclargs *args)
{
unsigned int base;
int error;
encode_nfs_fh3(xdr, NFS_FH(args->inode));
encode_uint32(xdr, args->mask);
base = req->rq_slen;
if (args->npages != 0)
xdr_write_pages(xdr, args->pages, 0, args->len);
else
xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
error = nfsacl_encode(xdr->buf, base, args->inode,
(args->mask & NFS_ACL) ?
args->acl_access : NULL, 1, 0);
BUG_ON(error < 0);
error = nfsacl_encode(xdr->buf, base + error, args->inode,
(args->mask & NFS_DFACL) ?
args->acl_default : NULL, 1,
NFS_ACL_DEFAULT);
BUG_ON(error < 0);
}
#endif /* CONFIG_NFS_V3_ACL */
/*
* NFSv3 XDR decode functions
*
* NFSv3 result types are defined in section 3.3 of RFC 1813:
* "NFS Version 3 Protocol Specification".
*/
/*
* 3.3.1 GETATTR3res
*
* struct GETATTR3resok {
* fattr3 obj_attributes;
* };
*
* union GETATTR3res switch (nfsstat3 status) {
* case NFS3_OK:
* GETATTR3resok resok;
* default:
* void;
* };
*/
static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fattr *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_fattr3(xdr, result);
out:
return error;
out_default:
return nfs_stat_to_errno(status);
}
/*
* 3.3.2 SETATTR3res
*
* struct SETATTR3resok {
* wcc_data obj_wcc;
* };
*
* struct SETATTR3resfail {
* wcc_data obj_wcc;
* };
*
* union SETATTR3res switch (nfsstat3 status) {
* case NFS3_OK:
* SETATTR3resok resok;
* default:
* SETATTR3resfail resfail;
* };
*/
static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fattr *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.3 LOOKUP3res
*
* struct LOOKUP3resok {
* nfs_fh3 object;
* post_op_attr obj_attributes;
* post_op_attr dir_attributes;
* };
*
* struct LOOKUP3resfail {
* post_op_attr dir_attributes;
* };
*
* union LOOKUP3res switch (nfsstat3 status) {
* case NFS3_OK:
* LOOKUP3resok resok;
* default:
* LOOKUP3resfail resfail;
* };
*/
static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs3_diropres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_nfs_fh3(xdr, result->fh);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->dir_attr);
out:
return error;
out_default:
error = decode_post_op_attr(xdr, result->dir_attr);
if (unlikely(error))
goto out;
return nfs_stat_to_errno(status);
}
/*
* 3.3.4 ACCESS3res
*
* struct ACCESS3resok {
* post_op_attr obj_attributes;
* uint32 access;
* };
*
* struct ACCESS3resfail {
* post_op_attr obj_attributes;
* };
*
* union ACCESS3res switch (nfsstat3 status) {
* case NFS3_OK:
* ACCESS3resok resok;
* default:
* ACCESS3resfail resfail;
* };
*/
static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs3_accessres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_uint32(xdr, &result->access);
out:
return error;
out_default:
return nfs_stat_to_errno(status);
}
/*
* 3.3.5 READLINK3res
*
* struct READLINK3resok {
* post_op_attr symlink_attributes;
* nfspath3 data;
* };
*
* struct READLINK3resfail {
* post_op_attr symlink_attributes;
* };
*
* union READLINK3res switch (nfsstat3 status) {
* case NFS3_OK:
* READLINK3resok resok;
* default:
* READLINK3resfail resfail;
* };
*/
static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fattr *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_nfspath3(xdr);
out:
return error;
out_default:
return nfs_stat_to_errno(status);
}
/*
* 3.3.6 READ3res
*
* struct READ3resok {
* post_op_attr file_attributes;
* count3 count;
* bool eof;
* opaque data<>;
* };
*
* struct READ3resfail {
* post_op_attr file_attributes;
* };
*
* union READ3res switch (nfsstat3 status) {
* case NFS3_OK:
* READ3resok resok;
* default:
* READ3resfail resfail;
* };
*/
static int decode_read3resok(struct xdr_stream *xdr,
struct nfs_readres *result)
{
u32 eof, count, ocount, recvd;
size_t hdrlen;
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4 + 4);
if (unlikely(p == NULL))
goto out_overflow;
count = be32_to_cpup(p++);
eof = be32_to_cpup(p++);
ocount = be32_to_cpup(p++);
if (unlikely(ocount != count))
goto out_mismatch;
hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
recvd = xdr->buf->len - hdrlen;
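/* recvd is how much opaque data actually arrived; never trust a larger count from the server */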
if (unlikely(count > recvd))
goto out_cheating;
out:
xdr_read_pages(xdr, count);
result->eof = eof;
result->count = count;
return count;
out_mismatch:
dprintk("NFS: READ count doesn't match length of opaque: "
"count %u != ocount %u\n", count, ocount);
return -EIO;
out_cheating:
dprintk("NFS: server cheating in read result: "
"count %u > recvd %u\n", count, recvd);
count = recvd;
eof = 0;
goto out;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_readres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_read3resok(xdr, result);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.7 WRITE3res
*
* enum stable_how {
* UNSTABLE = 0,
* DATA_SYNC = 1,
* FILE_SYNC = 2
* };
*
* struct WRITE3resok {
* wcc_data file_wcc;
* count3 count;
* stable_how committed;
* writeverf3 verf;
* };
*
* struct WRITE3resfail {
* wcc_data file_wcc;
* };
*
* union WRITE3res switch (nfsstat3 status) {
* case NFS3_OK:
* WRITE3resok resok;
* default:
* WRITE3resfail resfail;
* };
*/
static int decode_write3resok(struct xdr_stream *xdr,
struct nfs_writeres *result)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4 + NFS3_WRITEVERFSIZE);
if (unlikely(p == NULL))
goto out_overflow;
result->count = be32_to_cpup(p++);
result->verf->committed = be32_to_cpup(p++);
if (unlikely(result->verf->committed > NFS_FILE_SYNC))
goto out_badvalue;
memcpy(result->verf->verifier, p, NFS3_WRITEVERFSIZE);
return result->count;
out_badvalue:
dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_writeres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_write3resok(xdr, result);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.8 CREATE3res
*
* struct CREATE3resok {
* post_op_fh3 obj;
* post_op_attr obj_attributes;
* wcc_data dir_wcc;
* };
*
* struct CREATE3resfail {
* wcc_data dir_wcc;
* };
*
* union CREATE3res switch (nfsstat3 status) {
* case NFS3_OK:
* CREATE3resok resok;
* default:
* CREATE3resfail resfail;
* };
*/
static int decode_create3resok(struct xdr_stream *xdr,
struct nfs3_diropres *result)
{
int error;
error = decode_post_op_fh3(xdr, result->fh);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
/* The server isn't required to return a file handle.
* If it didn't, force the client to perform a LOOKUP
* to determine the correct file handle and attribute
* values for the new object. */
if (result->fh->size == 0)
result->fattr->valid = 0;
error = decode_wcc_data(xdr, result->dir_attr);
out:
return error;
}
static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs3_diropres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_create3resok(xdr, result);
out:
return error;
out_default:
error = decode_wcc_data(xdr, result->dir_attr);
if (unlikely(error))
goto out;
return nfs_stat_to_errno(status);
}
/*
* 3.3.12 REMOVE3res
*
* struct REMOVE3resok {
* wcc_data dir_wcc;
* };
*
* struct REMOVE3resfail {
* wcc_data dir_wcc;
* };
*
* union REMOVE3res switch (nfsstat3 status) {
* case NFS3_OK:
* REMOVE3resok resok;
* default:
* REMOVE3resfail resfail;
* };
*/
static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_removeres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->dir_attr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.14 RENAME3res
*
* struct RENAME3resok {
* wcc_data fromdir_wcc;
* wcc_data todir_wcc;
* };
*
* struct RENAME3resfail {
* wcc_data fromdir_wcc;
* wcc_data todir_wcc;
* };
*
* union RENAME3res switch (nfsstat3 status) {
* case NFS3_OK:
* RENAME3resok resok;
* default:
* RENAME3resfail resfail;
* };
*/
static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_renameres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->old_fattr);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->new_fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.15 LINK3res
*
* struct LINK3resok {
* post_op_attr file_attributes;
* wcc_data linkdir_wcc;
* };
*
* struct LINK3resfail {
* post_op_attr file_attributes;
* wcc_data linkdir_wcc;
* };
*
* union LINK3res switch (nfsstat3 status) {
* case NFS3_OK:
* LINK3resok resok;
* default:
* LINK3resfail resfail;
* };
*/
static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs3_linkres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->dir_attr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/**
* nfs3_decode_dirent - Decode a single NFSv3 directory entry stored in
* the local page cache
* @xdr: XDR stream where entry resides
* @entry: buffer to fill in with entry data
* @plus: boolean indicating whether this should be a readdirplus entry
*
* Returns zero if successful, otherwise a negative errno value is
* returned.
*
* This function is not invoked during READDIR reply decoding, but
* rather whenever an application invokes the getdents(2) system call
* on a directory already in our cache.
*
* 3.3.16 entry3
*
* struct entry3 {
* fileid3 fileid;
* filename3 name;
* cookie3 cookie;
* fhandle3 filehandle;
* post_op_attr3 attributes;
* entry3 *nextentry;
* };
*
* 3.3.17 entryplus3
* struct entryplus3 {
* fileid3 fileid;
* filename3 name;
* cookie3 cookie;
* post_op_attr name_attributes;
* post_op_fh3 name_handle;
* entryplus3 *nextentry;
* };
*/
int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
int plus)
{
struct nfs_entry old = *entry;
__be32 *p;
int error;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
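/* A zero value-follows discriminant ends the entry list; the next word is the dirlist3 eof flag. */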
if (*p == xdr_zero) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p == xdr_zero)
return -EAGAIN;
entry->eof = 1;
return -EBADCOOKIE;
}
error = decode_fileid3(xdr, &entry->ino);
if (unlikely(error))
return error;
error = decode_inline_filename3(xdr, &entry->name, &entry->len);
if (unlikely(error))
return error;
entry->prev_cookie = entry->cookie;
error = decode_cookie3(xdr, &entry->cookie);
if (unlikely(error))
return error;
entry->d_type = DT_UNKNOWN;
if (plus) {
entry->fattr->valid = 0;
error = decode_post_op_attr(xdr, entry->fattr);
if (unlikely(error))
return error;
if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
/* In fact, a post_op_fh3: */
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p != xdr_zero) {
error = decode_nfs_fh3(xdr, entry->fh);
if (unlikely(error)) {
if (error == -E2BIG)
goto out_truncated;
return error;
}
} else
zero_nfs_fh3(entry->fh);
}
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EAGAIN;
out_truncated:
dprintk("NFS: directory entry contains invalid file handle\n");
*entry = old;
return -EAGAIN;
}
/*
* 3.3.16 READDIR3res
*
* struct dirlist3 {
* entry3 *entries;
* bool eof;
* };
*
* struct READDIR3resok {
* post_op_attr dir_attributes;
* cookieverf3 cookieverf;
* dirlist3 reply;
* };
*
* struct READDIR3resfail {
* post_op_attr dir_attributes;
* };
*
* union READDIR3res switch (nfsstat3 status) {
* case NFS3_OK:
* READDIR3resok resok;
* default:
* READDIR3resfail resfail;
* };
*
* Read the directory contents into the page cache, but otherwise
* don't touch them. The actual decoding is done by nfs3_decode_dirent()
* during subsequent nfs_readdir() calls.
*/
static int decode_dirlist3(struct xdr_stream *xdr)
{
u32 recvd, pglen;
size_t hdrlen;
pglen = xdr->buf->page_len;
hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
recvd = xdr->buf->len - hdrlen;
if (unlikely(pglen > recvd))
goto out_cheating;
out:
xdr_read_pages(xdr, pglen);
return pglen;
out_cheating:
dprintk("NFS: server cheating in readdir result: "
"pglen %u > recvd %u\n", pglen, recvd);
pglen = recvd;
goto out;
}
static int decode_readdir3resok(struct xdr_stream *xdr,
struct nfs3_readdirres *result)
{
int error;
error = decode_post_op_attr(xdr, result->dir_attr);
if (unlikely(error))
goto out;
/* XXX: do we need to check if result->verf != NULL ? */
error = decode_cookieverf3(xdr, result->verf);
if (unlikely(error))
goto out;
error = decode_dirlist3(xdr);
out:
return error;
}
static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs3_readdirres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_readdir3resok(xdr, result);
out:
return error;
out_default:
error = decode_post_op_attr(xdr, result->dir_attr);
if (unlikely(error))
goto out;
return nfs_stat_to_errno(status);
}
/*
* 3.3.18 FSSTAT3res
*
* struct FSSTAT3resok {
* post_op_attr obj_attributes;
* size3 tbytes;
* size3 fbytes;
* size3 abytes;
* size3 tfiles;
* size3 ffiles;
* size3 afiles;
* uint32 invarsec;
* };
*
* struct FSSTAT3resfail {
* post_op_attr obj_attributes;
* };
*
* union FSSTAT3res switch (nfsstat3 status) {
* case NFS3_OK:
* FSSTAT3resok resok;
* default:
* FSSTAT3resfail resfail;
* };
*/
static int decode_fsstat3resok(struct xdr_stream *xdr,
struct nfs_fsstat *result)
{
__be32 *p;
p = xdr_inline_decode(xdr, 8 * 6 + 4);
if (unlikely(p == NULL))
goto out_overflow;
p = xdr_decode_size3(p, &result->tbytes);
p = xdr_decode_size3(p, &result->fbytes);
p = xdr_decode_size3(p, &result->abytes);
p = xdr_decode_size3(p, &result->tfiles);
p = xdr_decode_size3(p, &result->ffiles);
xdr_decode_size3(p, &result->afiles);
/* ignore invarsec */
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fsstat *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_fsstat3resok(xdr, result);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.19 FSINFO3res
*
* struct FSINFO3resok {
* post_op_attr obj_attributes;
* uint32 rtmax;
* uint32 rtpref;
* uint32 rtmult;
* uint32 wtmax;
* uint32 wtpref;
* uint32 wtmult;
* uint32 dtpref;
* size3 maxfilesize;
* nfstime3 time_delta;
* uint32 properties;
* };
*
* struct FSINFO3resfail {
* post_op_attr obj_attributes;
* };
*
* union FSINFO3res switch (nfsstat3 status) {
* case NFS3_OK:
* FSINFO3resok resok;
* default:
* FSINFO3resfail resfail;
* };
*/
static int decode_fsinfo3resok(struct xdr_stream *xdr,
struct nfs_fsinfo *result)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
if (unlikely(p == NULL))
goto out_overflow;
result->rtmax = be32_to_cpup(p++);
result->rtpref = be32_to_cpup(p++);
result->rtmult = be32_to_cpup(p++);
result->wtmax = be32_to_cpup(p++);
result->wtpref = be32_to_cpup(p++);
result->wtmult = be32_to_cpup(p++);
result->dtpref = be32_to_cpup(p++);
p = xdr_decode_size3(p, &result->maxfilesize);
xdr_decode_nfstime3(p, &result->time_delta);
/* ignore properties */
result->lease_time = 0;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fsinfo *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_fsinfo3resok(xdr, result);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.20 PATHCONF3res
*
* struct PATHCONF3resok {
* post_op_attr obj_attributes;
* uint32 linkmax;
* uint32 name_max;
* bool no_trunc;
* bool chown_restricted;
* bool case_insensitive;
* bool case_preserving;
* };
*
* struct PATHCONF3resfail {
* post_op_attr obj_attributes;
* };
*
* union PATHCONF3res switch (nfsstat3 status) {
* case NFS3_OK:
* PATHCONF3resok resok;
* default:
* PATHCONF3resfail resfail;
* };
*/
static int decode_pathconf3resok(struct xdr_stream *xdr,
struct nfs_pathconf *result)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4 * 6);
if (unlikely(p == NULL))
goto out_overflow;
result->max_link = be32_to_cpup(p++);
result->max_namelen = be32_to_cpup(p);
/* ignore remaining fields */
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_pathconf *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_pathconf3resok(xdr, result);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.21 COMMIT3res
*
* struct COMMIT3resok {
* wcc_data file_wcc;
* writeverf3 verf;
* };
*
* struct COMMIT3resfail {
* wcc_data file_wcc;
* };
*
* union COMMIT3res switch (nfsstat3 status) {
* case NFS3_OK:
* COMMIT3resok resok;
* default:
* COMMIT3resfail resfail;
* };
*/
static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_writeres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_writeverf3(xdr, result->verf->verifier);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
#ifdef CONFIG_NFS_V3_ACL
static inline int decode_getacl3resok(struct xdr_stream *xdr,
struct nfs3_getaclres *result)
{
struct posix_acl **acl;
unsigned int *aclcnt;
size_t hdrlen;
int error;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
error = decode_uint32(xdr, &result->mask);
if (unlikely(error))
goto out;
error = -EINVAL;
if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
goto out;
hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
acl = NULL;
if (result->mask & NFS_ACL)
acl = &result->acl_access;
aclcnt = NULL;
if (result->mask & NFS_ACLCNT)
aclcnt = &result->acl_access_count;
error = nfsacl_decode(xdr->buf, hdrlen, aclcnt, acl);
if (unlikely(error <= 0))
goto out;
acl = NULL;
if (result->mask & NFS_DFACL)
acl = &result->acl_default;
aclcnt = NULL;
if (result->mask & NFS_DFACLCNT)
aclcnt = &result->acl_default_count;
error = nfsacl_decode(xdr->buf, hdrlen + error, aclcnt, acl);
if (unlikely(error <= 0))
return error;
error = 0;
out:
return error;
}
static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs3_getaclres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_getacl3resok(xdr, result);
out:
return error;
out_default:
return nfs_stat_to_errno(status);
}
static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fattr *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_post_op_attr(xdr, result);
out:
return error;
out_default:
return nfs_stat_to_errno(status);
}
#endif /* CONFIG_NFS_V3_ACL */
#define PROC(proc, argtype, restype, timer) \
[NFS3PROC_##proc] = { \
.p_proc = NFS3PROC_##proc, \
.p_encode = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args, \
.p_decode = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res, \
.p_arglen = NFS3_##argtype##args_sz, \
.p_replen = NFS3_##restype##res_sz, \
.p_timer = timer, \
.p_statidx = NFS3PROC_##proc, \
.p_name = #proc, \
}
struct rpc_procinfo nfs3_procedures[] = {
PROC(GETATTR, getattr, getattr, 1),
PROC(SETATTR, setattr, setattr, 0),
PROC(LOOKUP, lookup, lookup, 2),
PROC(ACCESS, access, access, 1),
PROC(READLINK, readlink, readlink, 3),
PROC(READ, read, read, 3),
PROC(WRITE, write, write, 4),
PROC(CREATE, create, create, 0),
PROC(MKDIR, mkdir, create, 0),
PROC(SYMLINK, symlink, create, 0),
PROC(MKNOD, mknod, create, 0),
PROC(REMOVE, remove, remove, 0),
PROC(RMDIR, lookup, setattr, 0),
PROC(RENAME, rename, rename, 0),
PROC(LINK, link, link, 0),
PROC(READDIR, readdir, readdir, 3),
PROC(READDIRPLUS, readdirplus, readdir, 3),
PROC(FSSTAT, getattr, fsstat, 0),
PROC(FSINFO, getattr, fsinfo, 0),
PROC(PATHCONF, getattr, pathconf, 0),
PROC(COMMIT, commit, commit, 5),
};
struct rpc_version nfs_version3 = {
.number = 3,
.nrprocs = ARRAY_SIZE(nfs3_procedures),
.procs = nfs3_procedures
};
#ifdef CONFIG_NFS_V3_ACL
static struct rpc_procinfo nfs3_acl_procedures[] = {
[ACLPROC3_GETACL] = {
.p_proc = ACLPROC3_GETACL,
.p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args,
.p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res,
.p_arglen = ACL3_getaclargs_sz,
.p_replen = ACL3_getaclres_sz,
.p_timer = 1,
.p_name = "GETACL",
},
[ACLPROC3_SETACL] = {
.p_proc = ACLPROC3_SETACL,
.p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args,
.p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res,
.p_arglen = ACL3_setaclargs_sz,
.p_replen = ACL3_setaclres_sz,
.p_timer = 0,
.p_name = "SETACL",
},
};
struct rpc_version nfsacl_version3 = {
.number = 3,
.nrprocs = sizeof(nfs3_acl_procedures)/
sizeof(nfs3_acl_procedures[0]),
.procs = nfs3_acl_procedures,
};
#endif /* CONFIG_NFS_V3_ACL */
| gpl-2.0 |
htc-mirror/ville-ics-crc-3.0.8-ca24d1e | fs/nfs/nfs3xdr.c | 3285 | 54173 | /*
* linux/fs/nfs/nfs3xdr.c
*
* XDR functions to encode/decode NFSv3 RPC arguments and results.
*
* Copyright (C) 1996, 1997 Olaf Kirch
*/
#include <linux/param.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/kdev_t.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/nfsacl.h>
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
/* Mapping from NFS error code to "errno" error code. */
#define errno_NFSERR_IO EIO
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32bit-words
*/
#define NFS3_fhandle_sz (1+16)
#define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */
#define NFS3_sattr_sz (15)
#define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
#define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))
#define NFS3_fattr_sz (21)
#define NFS3_cookieverf_sz (NFS3_COOKIEVERFSIZE>>2)
#define NFS3_wcc_attr_sz (6)
#define NFS3_pre_op_attr_sz (1+NFS3_wcc_attr_sz)
#define NFS3_post_op_attr_sz (1+NFS3_fattr_sz)
#define NFS3_wcc_data_sz (NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
#define NFS3_diropargs_sz (NFS3_fh_sz+NFS3_filename_sz)
#define NFS3_getattrargs_sz (NFS3_fh_sz)
#define NFS3_setattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3)
#define NFS3_lookupargs_sz (NFS3_fh_sz+NFS3_filename_sz)
#define NFS3_accessargs_sz (NFS3_fh_sz+1)
#define NFS3_readlinkargs_sz (NFS3_fh_sz)
#define NFS3_readargs_sz (NFS3_fh_sz+3)
#define NFS3_writeargs_sz (NFS3_fh_sz+5)
#define NFS3_createargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz)
#define NFS3_mkdirargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz)
#define NFS3_symlinkargs_sz (NFS3_diropargs_sz+1+NFS3_sattr_sz)
#define NFS3_mknodargs_sz (NFS3_diropargs_sz+2+NFS3_sattr_sz)
#define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz)
#define NFS3_renameargs_sz (NFS3_diropargs_sz+NFS3_diropargs_sz)
#define NFS3_linkargs_sz (NFS3_fh_sz+NFS3_diropargs_sz)
#define NFS3_readdirargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+3)
#define NFS3_readdirplusargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+4)
#define NFS3_commitargs_sz (NFS3_fh_sz+3)
#define NFS3_getattrres_sz (1+NFS3_fattr_sz)
#define NFS3_setattrres_sz (1+NFS3_wcc_data_sz)
#define NFS3_removeres_sz (NFS3_setattrres_sz)
#define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
#define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1)
#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1)
#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3)
#define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
#define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2)
#define NFS3_fsstatres_sz (1+NFS3_post_op_attr_sz+13)
#define NFS3_fsinfores_sz (1+NFS3_post_op_attr_sz+12)
#define NFS3_pathconfres_sz (1+NFS3_post_op_attr_sz+6)
#define NFS3_commitres_sz (1+NFS3_wcc_data_sz+2)
#define ACL3_getaclargs_sz (NFS3_fh_sz+1)
#define ACL3_setaclargs_sz (NFS3_fh_sz+1+ \
XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
#define ACL3_getaclres_sz (1+NFS3_post_op_attr_sz+1+ \
XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
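/*
 * Worked example: NFS3_fsinfores_sz = 1 (status) + NFS3_post_op_attr_sz
 * (1 + 21 = 22) + 12 (seven uint32s, a size3, an nfstime3 and the
 * properties word) = 35 XDR words, i.e. 140 bytes of reply space.
 */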
/*
* Map file type to S_IFMT bits
*/
static const umode_t nfs_type2fmt[] = {
[NF3BAD] = 0,
[NF3REG] = S_IFREG,
[NF3DIR] = S_IFDIR,
[NF3BLK] = S_IFBLK,
[NF3CHR] = S_IFCHR,
[NF3LNK] = S_IFLNK,
[NF3SOCK] = S_IFSOCK,
[NF3FIFO] = S_IFIFO,
};
/*
* While encoding arguments, set up the reply buffer in advance to
* receive reply data directly into the page cache.
*/
static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
unsigned int base, unsigned int len,
unsigned int bufsize)
{
struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
}
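/*
 * replen above is computed in 32-bit XDR words: the RPC reply header,
 * the credential's reply slack, and the procedure's maximum reply size.
 * The "replen << 2" converts that to the byte offset at which the page
 * data is expected to begin in the receive buffer.
 */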
/*
* Handle decode buffer overflows out-of-line.
*/
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
dprintk("NFS: %s prematurely hit the end of our receive buffer. "
"Remaining buffer length is %tu words.\n",
func, xdr->end - xdr->p);
}
/*
* Encode/decode NFSv3 basic data types
*
* Basic NFSv3 data types are defined in section 2.5 of RFC 1813:
* "NFS Version 3 Protocol Specification".
*
* Not all basic data types have their own encoding and decoding
* functions. For run-time efficiency, some data types are encoded
* or decoded inline.
*/
static void encode_uint32(struct xdr_stream *xdr, u32 value)
{
__be32 *p = xdr_reserve_space(xdr, 4);
*p = cpu_to_be32(value);
}
static int decode_uint32(struct xdr_stream *xdr, u32 *value)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
*value = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_uint64(struct xdr_stream *xdr, u64 *value)
{
__be32 *p;
p = xdr_inline_decode(xdr, 8);
if (unlikely(p == NULL))
goto out_overflow;
xdr_decode_hyper(p, value);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* fileid3
*
* typedef uint64 fileid3;
*/
static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid)
{
return xdr_decode_hyper(p, fileid);
}
static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid)
{
return decode_uint64(xdr, fileid);
}
/*
* filename3
*
* typedef string filename3<>;
*/
static void encode_filename3(struct xdr_stream *xdr,
const char *name, u32 length)
{
__be32 *p;
BUG_ON(length > NFS3_MAXNAMLEN);
p = xdr_reserve_space(xdr, 4 + length);
xdr_encode_opaque(p, name, length);
}
static int decode_inline_filename3(struct xdr_stream *xdr,
const char **name, u32 *length)
{
__be32 *p;
u32 count;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
count = be32_to_cpup(p);
if (count > NFS3_MAXNAMLEN)
goto out_nametoolong;
p = xdr_inline_decode(xdr, count);
if (unlikely(p == NULL))
goto out_overflow;
*name = (const char *)p;
*length = count;
return 0;
out_nametoolong:
dprintk("NFS: returned filename too long: %u\n", count);
return -ENAMETOOLONG;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* nfspath3
*
* typedef string nfspath3<>;
*/
static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
const u32 length)
{
BUG_ON(length > NFS3_MAXPATHLEN);
encode_uint32(xdr, length);
xdr_write_pages(xdr, pages, 0, length);
}
static int decode_nfspath3(struct xdr_stream *xdr)
{
u32 recvd, count;
size_t hdrlen;
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
count = be32_to_cpup(p);
if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
goto out_nametoolong;
hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
recvd = xdr->buf->len - hdrlen;
if (unlikely(count > recvd))
goto out_cheating;
xdr_read_pages(xdr, count);
xdr_terminate_string(xdr->buf, count);
return 0;
out_nametoolong:
dprintk("NFS: returned pathname too long: %u\n", count);
return -ENAMETOOLONG;
out_cheating:
dprintk("NFS: server cheating in pathname result: "
"count %u > recvd %u\n", count, recvd);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* cookie3
*
* typedef uint64 cookie3
*/
static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie)
{
return xdr_encode_hyper(p, cookie);
}
static int decode_cookie3(struct xdr_stream *xdr, u64 *cookie)
{
return decode_uint64(xdr, cookie);
}
/*
* cookieverf3
*
* typedef opaque cookieverf3[NFS3_COOKIEVERFSIZE];
*/
static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier)
{
memcpy(p, verifier, NFS3_COOKIEVERFSIZE);
return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE);
}
static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier)
{
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
if (unlikely(p == NULL))
goto out_overflow;
memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* createverf3
*
* typedef opaque createverf3[NFS3_CREATEVERFSIZE];
*/
static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier)
{
__be32 *p;
p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE);
memcpy(p, verifier, NFS3_CREATEVERFSIZE);
}
static int decode_writeverf3(struct xdr_stream *xdr, __be32 *verifier)
{
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
if (unlikely(p == NULL))
goto out_overflow;
memcpy(verifier, p, NFS3_WRITEVERFSIZE);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* size3
*
* typedef uint64 size3;
*/
static __be32 *xdr_decode_size3(__be32 *p, u64 *size)
{
return xdr_decode_hyper(p, size);
}
/*
* nfsstat3
*
* enum nfsstat3 {
* NFS3_OK = 0,
* ...
* }
*/
#define NFS3_OK NFS_OK
static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
*status = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* ftype3
*
* enum ftype3 {
* NF3REG = 1,
* NF3DIR = 2,
* NF3BLK = 3,
* NF3CHR = 4,
* NF3LNK = 5,
* NF3SOCK = 6,
* NF3FIFO = 7
* };
*/
static void encode_ftype3(struct xdr_stream *xdr, const u32 type)
{
BUG_ON(type > NF3FIFO);
encode_uint32(xdr, type);
}
static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode)
{
u32 type;
type = be32_to_cpup(p++);
if (type > NF3FIFO)
type = NF3NON;
*mode = nfs_type2fmt[type];
return p;
}
/*
* specdata3
*
* struct specdata3 {
* uint32 specdata1;
* uint32 specdata2;
* };
*/
static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev)
{
__be32 *p;
p = xdr_reserve_space(xdr, 8);
*p++ = cpu_to_be32(MAJOR(rdev));
*p = cpu_to_be32(MINOR(rdev));
}
static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev)
{
unsigned int major, minor;
major = be32_to_cpup(p++);
minor = be32_to_cpup(p++);
*rdev = MKDEV(major, minor);
if (MAJOR(*rdev) != major || MINOR(*rdev) != minor)
*rdev = 0;
return p;
}
/*
* nfs_fh3
*
* struct nfs_fh3 {
* opaque data<NFS3_FHSIZE>;
* };
*/
static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh)
{
__be32 *p;
BUG_ON(fh->size > NFS3_FHSIZE);
p = xdr_reserve_space(xdr, 4 + fh->size);
xdr_encode_opaque(p, fh->data, fh->size);
}
static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
{
u32 length;
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
length = be32_to_cpup(p++);
if (unlikely(length > NFS3_FHSIZE))
goto out_toobig;
p = xdr_inline_decode(xdr, length);
if (unlikely(p == NULL))
goto out_overflow;
fh->size = length;
memcpy(fh->data, p, length);
return 0;
out_toobig:
dprintk("NFS: file handle size (%u) too big\n", length);
return -E2BIG;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static void zero_nfs_fh3(struct nfs_fh *fh)
{
memset(fh, 0, sizeof(*fh));
}
/*
* nfstime3
*
* struct nfstime3 {
* uint32 seconds;
* uint32 nseconds;
* };
*/
static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep)
{
*p++ = cpu_to_be32(timep->tv_sec);
*p++ = cpu_to_be32(timep->tv_nsec);
return p;
}
static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep)
{
timep->tv_sec = be32_to_cpup(p++);
timep->tv_nsec = be32_to_cpup(p++);
return p;
}
/*
* sattr3
*
* enum time_how {
* DONT_CHANGE = 0,
* SET_TO_SERVER_TIME = 1,
* SET_TO_CLIENT_TIME = 2
* };
*
* union set_mode3 switch (bool set_it) {
* case TRUE:
* mode3 mode;
* default:
* void;
* };
*
* union set_uid3 switch (bool set_it) {
* case TRUE:
* uid3 uid;
* default:
* void;
* };
*
* union set_gid3 switch (bool set_it) {
* case TRUE:
* gid3 gid;
* default:
* void;
* };
*
* union set_size3 switch (bool set_it) {
* case TRUE:
* size3 size;
* default:
* void;
* };
*
* union set_atime switch (time_how set_it) {
* case SET_TO_CLIENT_TIME:
* nfstime3 atime;
* default:
* void;
* };
*
* union set_mtime switch (time_how set_it) {
* case SET_TO_CLIENT_TIME:
* nfstime3 mtime;
* default:
* void;
* };
*
* struct sattr3 {
* set_mode3 mode;
* set_uid3 uid;
* set_gid3 gid;
* set_size3 size;
* set_atime atime;
* set_mtime mtime;
* };
*/
static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
{
u32 nbytes;
__be32 *p;
/*
* In order to make only a single xdr_reserve_space() call,
* pre-compute the total number of bytes to be reserved.
* Six boolean values, one for each set_foo field, are always
* present in the encoded result, so start there.
*/
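/*
 * For example, a SETATTR changing only the mode and the size reserves
 * 6 * 4 (discriminants) + 4 (mode3) + 8 (size3) = 36 bytes in one call.
 */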
nbytes = 6 * 4;
if (attr->ia_valid & ATTR_MODE)
nbytes += 4;
if (attr->ia_valid & ATTR_UID)
nbytes += 4;
if (attr->ia_valid & ATTR_GID)
nbytes += 4;
if (attr->ia_valid & ATTR_SIZE)
nbytes += 8;
if (attr->ia_valid & ATTR_ATIME_SET)
nbytes += 8;
if (attr->ia_valid & ATTR_MTIME_SET)
nbytes += 8;
p = xdr_reserve_space(xdr, nbytes);
if (attr->ia_valid & ATTR_MODE) {
*p++ = xdr_one;
*p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO);
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_UID) {
*p++ = xdr_one;
*p++ = cpu_to_be32(attr->ia_uid);
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_GID) {
*p++ = xdr_one;
*p++ = cpu_to_be32(attr->ia_gid);
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_SIZE) {
*p++ = xdr_one;
p = xdr_encode_hyper(p, (u64)attr->ia_size);
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_ATIME_SET) {
*p++ = xdr_two;
p = xdr_encode_nfstime3(p, &attr->ia_atime);
} else if (attr->ia_valid & ATTR_ATIME) {
*p++ = xdr_one;
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_MTIME_SET) {
*p++ = xdr_two;
xdr_encode_nfstime3(p, &attr->ia_mtime);
} else if (attr->ia_valid & ATTR_MTIME) {
*p = xdr_one;
} else
*p = xdr_zero;
}
/*
* fattr3
*
* struct fattr3 {
* ftype3 type;
* mode3 mode;
* uint32 nlink;
* uid3 uid;
* gid3 gid;
* size3 size;
* size3 used;
* specdata3 rdev;
* uint64 fsid;
* fileid3 fileid;
* nfstime3 atime;
* nfstime3 mtime;
* nfstime3 ctime;
* };
*/
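/*
 * The fields above add up to NFS3_fattr_sz (21) XDR words: five uint32s
 * (type, mode, nlink, uid, gid) plus eight 64-bit quantities (size, used,
 * rdev, fsid, fileid, atime, mtime, ctime) at two words each.
 */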
static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
umode_t fmode;
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
if (unlikely(p == NULL))
goto out_overflow;
p = xdr_decode_ftype3(p, &fmode);
fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
fattr->nlink = be32_to_cpup(p++);
fattr->uid = be32_to_cpup(p++);
fattr->gid = be32_to_cpup(p++);
p = xdr_decode_size3(p, &fattr->size);
p = xdr_decode_size3(p, &fattr->du.nfs3.used);
p = xdr_decode_specdata3(p, &fattr->rdev);
p = xdr_decode_hyper(p, &fattr->fsid.major);
fattr->fsid.minor = 0;
p = xdr_decode_fileid3(p, &fattr->fileid);
p = xdr_decode_nfstime3(p, &fattr->atime);
p = xdr_decode_nfstime3(p, &fattr->mtime);
xdr_decode_nfstime3(p, &fattr->ctime);
fattr->valid |= NFS_ATTR_FATTR_V3;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* post_op_attr
*
* union post_op_attr switch (bool attributes_follow) {
* case TRUE:
* fattr3 attributes;
* case FALSE:
* void;
* };
*/
static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p != xdr_zero)
return decode_fattr3(xdr, fattr);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* wcc_attr
* struct wcc_attr {
* size3 size;
* nfstime3 mtime;
* nfstime3 ctime;
* };
*/
static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
if (unlikely(p == NULL))
goto out_overflow;
fattr->valid |= NFS_ATTR_FATTR_PRESIZE
| NFS_ATTR_FATTR_PREMTIME
| NFS_ATTR_FATTR_PRECTIME;
p = xdr_decode_size3(p, &fattr->pre_size);
p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
xdr_decode_nfstime3(p, &fattr->pre_ctime);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* pre_op_attr
* union pre_op_attr switch (bool attributes_follow) {
* case TRUE:
* wcc_attr attributes;
* case FALSE:
* void;
* };
*
* wcc_data
*
* struct wcc_data {
* pre_op_attr before;
* post_op_attr after;
* };
*/
static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p != xdr_zero)
return decode_wcc_attr(xdr, fattr);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
int error;
error = decode_pre_op_attr(xdr, fattr);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, fattr);
out:
return error;
}
/*
* post_op_fh3
*
* union post_op_fh3 switch (bool handle_follows) {
* case TRUE:
* nfs_fh3 handle;
* case FALSE:
* void;
* };
*/
static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
{
__be32 *p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p != xdr_zero)
return decode_nfs_fh3(xdr, fh);
zero_nfs_fh3(fh);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* diropargs3
*
* struct diropargs3 {
* nfs_fh3 dir;
* filename3 name;
* };
*/
static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
const char *name, u32 length)
{
encode_nfs_fh3(xdr, fh);
encode_filename3(xdr, name, length);
}
/*
* NFSv3 XDR encode functions
*
* NFSv3 argument types are defined in section 3.3 of RFC 1813:
* "NFS Version 3 Protocol Specification".
*/
/*
* 3.3.1 GETATTR3args
*
* struct GETATTR3args {
* nfs_fh3 object;
* };
*/
static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_fh *fh)
{
encode_nfs_fh3(xdr, fh);
}
/*
* 3.3.2 SETATTR3args
*
* union sattrguard3 switch (bool check) {
* case TRUE:
* nfstime3 obj_ctime;
* case FALSE:
* void;
* };
*
* struct SETATTR3args {
* nfs_fh3 object;
* sattr3 new_attributes;
* sattrguard3 guard;
* };
*/
static void encode_sattrguard3(struct xdr_stream *xdr,
const struct nfs3_sattrargs *args)
{
__be32 *p;
if (args->guard) {
p = xdr_reserve_space(xdr, 4 + 8);
*p++ = xdr_one;
xdr_encode_nfstime3(p, &args->guardtime);
} else {
p = xdr_reserve_space(xdr, 4);
*p = xdr_zero;
}
}
static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_sattrargs *args)
{
encode_nfs_fh3(xdr, args->fh);
encode_sattr3(xdr, args->sattr);
encode_sattrguard3(xdr, args);
}
/*
* 3.3.3 LOOKUP3args
*
* struct LOOKUP3args {
* diropargs3 what;
* };
*/
static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_diropargs *args)
{
encode_diropargs3(xdr, args->fh, args->name, args->len);
}
/*
* 3.3.4 ACCESS3args
*
* struct ACCESS3args {
* nfs_fh3 object;
* uint32 access;
* };
*/
static void encode_access3args(struct xdr_stream *xdr,
const struct nfs3_accessargs *args)
{
encode_nfs_fh3(xdr, args->fh);
encode_uint32(xdr, args->access);
}
static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_accessargs *args)
{
encode_access3args(xdr, args);
}
/*
* 3.3.5 READLINK3args
*
* struct READLINK3args {
* nfs_fh3 symlink;
* };
*/
static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_readlinkargs *args)
{
encode_nfs_fh3(xdr, args->fh);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->pglen, NFS3_readlinkres_sz);
}
/*
* 3.3.6 READ3args
*
* struct READ3args {
* nfs_fh3 file;
* offset3 offset;
* count3 count;
* };
*/
static void encode_read3args(struct xdr_stream *xdr,
const struct nfs_readargs *args)
{
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
p = xdr_reserve_space(xdr, 8 + 4);
p = xdr_encode_hyper(p, args->offset);
*p = cpu_to_be32(args->count);
}
static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_readargs *args)
{
encode_read3args(xdr, args);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->count, NFS3_readres_sz);
req->rq_rcv_buf.flags |= XDRBUF_READ;
}
/*
* 3.3.7 WRITE3args
*
* enum stable_how {
* UNSTABLE = 0,
* DATA_SYNC = 1,
* FILE_SYNC = 2
* };
*
* struct WRITE3args {
* nfs_fh3 file;
* offset3 offset;
* count3 count;
* stable_how stable;
* opaque data<>;
* };
*/
static void encode_write3args(struct xdr_stream *xdr,
const struct nfs_writeargs *args)
{
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4);
p = xdr_encode_hyper(p, args->offset);
*p++ = cpu_to_be32(args->count);
*p++ = cpu_to_be32(args->stable);
*p = cpu_to_be32(args->count);
xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
}
static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_writeargs *args)
{
encode_write3args(xdr, args);
xdr->buf->flags |= XDRBUF_WRITE;
}
/*
* 3.3.8 CREATE3args
*
* enum createmode3 {
* UNCHECKED = 0,
* GUARDED = 1,
* EXCLUSIVE = 2
* };
*
* union createhow3 switch (createmode3 mode) {
* case UNCHECKED:
* case GUARDED:
* sattr3 obj_attributes;
* case EXCLUSIVE:
* createverf3 verf;
* };
*
* struct CREATE3args {
* diropargs3 where;
* createhow3 how;
* };
*/
static void encode_createhow3(struct xdr_stream *xdr,
const struct nfs3_createargs *args)
{
encode_uint32(xdr, args->createmode);
switch (args->createmode) {
case NFS3_CREATE_UNCHECKED:
case NFS3_CREATE_GUARDED:
encode_sattr3(xdr, args->sattr);
break;
case NFS3_CREATE_EXCLUSIVE:
encode_createverf3(xdr, args->verifier);
break;
default:
BUG();
}
}
static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_createargs *args)
{
encode_diropargs3(xdr, args->fh, args->name, args->len);
encode_createhow3(xdr, args);
}
/*
* 3.3.9 MKDIR3args
*
* struct MKDIR3args {
* diropargs3 where;
* sattr3 attributes;
* };
*/
static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_mkdirargs *args)
{
encode_diropargs3(xdr, args->fh, args->name, args->len);
encode_sattr3(xdr, args->sattr);
}
/*
* 3.3.10 SYMLINK3args
*
* struct symlinkdata3 {
* sattr3 symlink_attributes;
* nfspath3 symlink_data;
* };
*
* struct SYMLINK3args {
* diropargs3 where;
* symlinkdata3 symlink;
* };
*/
static void encode_symlinkdata3(struct xdr_stream *xdr,
const struct nfs3_symlinkargs *args)
{
encode_sattr3(xdr, args->sattr);
encode_nfspath3(xdr, args->pages, args->pathlen);
}
static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_symlinkargs *args)
{
encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
encode_symlinkdata3(xdr, args);
}
/*
* 3.3.11 MKNOD3args
*
* struct devicedata3 {
* sattr3 dev_attributes;
* specdata3 spec;
* };
*
* union mknoddata3 switch (ftype3 type) {
* case NF3CHR:
* case NF3BLK:
* devicedata3 device;
* case NF3SOCK:
* case NF3FIFO:
* sattr3 pipe_attributes;
* default:
* void;
* };
*
* struct MKNOD3args {
* diropargs3 where;
* mknoddata3 what;
* };
*/
static void encode_devicedata3(struct xdr_stream *xdr,
const struct nfs3_mknodargs *args)
{
encode_sattr3(xdr, args->sattr);
encode_specdata3(xdr, args->rdev);
}
static void encode_mknoddata3(struct xdr_stream *xdr,
const struct nfs3_mknodargs *args)
{
encode_ftype3(xdr, args->type);
switch (args->type) {
case NF3CHR:
case NF3BLK:
encode_devicedata3(xdr, args);
break;
case NF3SOCK:
case NF3FIFO:
encode_sattr3(xdr, args->sattr);
break;
case NF3REG:
case NF3DIR:
break;
default:
BUG();
}
}
static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_mknodargs *args)
{
encode_diropargs3(xdr, args->fh, args->name, args->len);
encode_mknoddata3(xdr, args);
}
/*
* 3.3.12 REMOVE3args
*
* struct REMOVE3args {
* diropargs3 object;
* };
*/
static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_removeargs *args)
{
encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
}
/*
* 3.3.14 RENAME3args
*
* struct RENAME3args {
* diropargs3 from;
* diropargs3 to;
* };
*/
static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_renameargs *args)
{
const struct qstr *old = args->old_name;
const struct qstr *new = args->new_name;
encode_diropargs3(xdr, args->old_dir, old->name, old->len);
encode_diropargs3(xdr, args->new_dir, new->name, new->len);
}
/*
* 3.3.15 LINK3args
*
* struct LINK3args {
* nfs_fh3 file;
* diropargs3 link;
* };
*/
static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_linkargs *args)
{
encode_nfs_fh3(xdr, args->fromfh);
encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
}
/*
* 3.3.16 READDIR3args
*
* struct READDIR3args {
* nfs_fh3 dir;
* cookie3 cookie;
* cookieverf3 cookieverf;
* count3 count;
* };
*/
static void encode_readdir3args(struct xdr_stream *xdr,
const struct nfs3_readdirargs *args)
{
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4);
p = xdr_encode_cookie3(p, args->cookie);
p = xdr_encode_cookieverf3(p, args->verf);
*p = cpu_to_be32(args->count);
}
static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_readdirargs *args)
{
encode_readdir3args(xdr, args);
prepare_reply_buffer(req, args->pages, 0,
args->count, NFS3_readdirres_sz);
}
/*
* 3.3.17 READDIRPLUS3args
*
* struct READDIRPLUS3args {
* nfs_fh3 dir;
* cookie3 cookie;
* cookieverf3 cookieverf;
* count3 dircount;
* count3 maxcount;
* };
*/
static void encode_readdirplus3args(struct xdr_stream *xdr,
const struct nfs3_readdirargs *args)
{
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4);
p = xdr_encode_cookie3(p, args->cookie);
p = xdr_encode_cookieverf3(p, args->verf);
/*
* readdirplus: need dircount + buffer size.
* We just make sure dircount is big enough
*/
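/*
 * For example, with args->count == 32768 the encoded dircount is 4096
 * and maxcount is 32768.
 */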
*p++ = cpu_to_be32(args->count >> 3);
*p = cpu_to_be32(args->count);
}
static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_readdirargs *args)
{
encode_readdirplus3args(xdr, args);
prepare_reply_buffer(req, args->pages, 0,
args->count, NFS3_readdirres_sz);
}
/*
* 3.3.21 COMMIT3args
*
* struct COMMIT3args {
* nfs_fh3 file;
* offset3 offset;
* count3 count;
* };
*/
static void encode_commit3args(struct xdr_stream *xdr,
const struct nfs_writeargs *args)
{
__be32 *p;
encode_nfs_fh3(xdr, args->fh);
p = xdr_reserve_space(xdr, 8 + 4);
p = xdr_encode_hyper(p, args->offset);
*p = cpu_to_be32(args->count);
}
static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs_writeargs *args)
{
encode_commit3args(xdr, args);
}
#ifdef CONFIG_NFS_V3_ACL
static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_getaclargs *args)
{
encode_nfs_fh3(xdr, args->fh);
encode_uint32(xdr, args->mask);
if (args->mask & (NFS_ACL | NFS_DFACL))
prepare_reply_buffer(req, args->pages, 0,
NFSACL_MAXPAGES << PAGE_SHIFT,
ACL3_getaclres_sz);
}
static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs3_setaclargs *args)
{
unsigned int base;
int error;
encode_nfs_fh3(xdr, NFS_FH(args->inode));
encode_uint32(xdr, args->mask);
base = req->rq_slen;
if (args->npages != 0)
xdr_write_pages(xdr, args->pages, 0, args->len);
else
xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
error = nfsacl_encode(xdr->buf, base, args->inode,
(args->mask & NFS_ACL) ?
args->acl_access : NULL, 1, 0);
BUG_ON(error < 0);
error = nfsacl_encode(xdr->buf, base + error, args->inode,
(args->mask & NFS_DFACL) ?
args->acl_default : NULL, 1,
NFS_ACL_DEFAULT);
BUG_ON(error < 0);
}
#endif /* CONFIG_NFS_V3_ACL */
/*
* NFSv3 XDR decode functions
*
* NFSv3 result types are defined in section 3.3 of RFC 1813:
* "NFS Version 3 Protocol Specification".
*/
/*
* 3.3.1 GETATTR3res
*
* struct GETATTR3resok {
* fattr3 obj_attributes;
* };
*
* union GETATTR3res switch (nfsstat3 status) {
* case NFS3_OK:
* GETATTR3resok resok;
* default:
* void;
* };
*/
static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fattr *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_fattr3(xdr, result);
out:
return error;
out_default:
return nfs_stat_to_errno(status);
}
/*
* 3.3.2 SETATTR3res
*
* struct SETATTR3resok {
* wcc_data obj_wcc;
* };
*
* struct SETATTR3resfail {
* wcc_data obj_wcc;
* };
*
* union SETATTR3res switch (nfsstat3 status) {
* case NFS3_OK:
* SETATTR3resok resok;
* default:
* SETATTR3resfail resfail;
* };
*/
static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fattr *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.3 LOOKUP3res
*
* struct LOOKUP3resok {
* nfs_fh3 object;
* post_op_attr obj_attributes;
* post_op_attr dir_attributes;
* };
*
* struct LOOKUP3resfail {
* post_op_attr dir_attributes;
* };
*
* union LOOKUP3res switch (nfsstat3 status) {
* case NFS3_OK:
* LOOKUP3resok resok;
* default:
* LOOKUP3resfail resfail;
* };
*/
static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs3_diropres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_nfs_fh3(xdr, result->fh);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->dir_attr);
out:
return error;
out_default:
error = decode_post_op_attr(xdr, result->dir_attr);
if (unlikely(error))
goto out;
return nfs_stat_to_errno(status);
}
/*
* 3.3.4 ACCESS3res
*
* struct ACCESS3resok {
* post_op_attr obj_attributes;
* uint32 access;
* };
*
* struct ACCESS3resfail {
* post_op_attr obj_attributes;
* };
*
* union ACCESS3res switch (nfsstat3 status) {
* case NFS3_OK:
* ACCESS3resok resok;
* default:
* ACCESS3resfail resfail;
* };
*/
static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs3_accessres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_uint32(xdr, &result->access);
out:
return error;
out_default:
return nfs_stat_to_errno(status);
}
/*
* 3.3.5 READLINK3res
*
* struct READLINK3resok {
* post_op_attr symlink_attributes;
* nfspath3 data;
* };
*
* struct READLINK3resfail {
* post_op_attr symlink_attributes;
* };
*
* union READLINK3res switch (nfsstat3 status) {
* case NFS3_OK:
* READLINK3resok resok;
* default:
* READLINK3resfail resfail;
* };
*/
static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fattr *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_nfspath3(xdr);
out:
return error;
out_default:
return nfs_stat_to_errno(status);
}
/*
* 3.3.6 READ3res
*
* struct READ3resok {
* post_op_attr file_attributes;
* count3 count;
* bool eof;
* opaque data<>;
* };
*
* struct READ3resfail {
* post_op_attr file_attributes;
* };
*
* union READ3res switch (nfsstat3 status) {
* case NFS3_OK:
* READ3resok resok;
* default:
* READ3resfail resfail;
* };
*/
static int decode_read3resok(struct xdr_stream *xdr,
struct nfs_readres *result)
{
u32 eof, count, ocount, recvd;
size_t hdrlen;
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4 + 4);
if (unlikely(p == NULL))
goto out_overflow;
count = be32_to_cpup(p++);
eof = be32_to_cpup(p++);
ocount = be32_to_cpup(p++);
if (unlikely(ocount != count))
goto out_mismatch;
hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
recvd = xdr->buf->len - hdrlen;
if (unlikely(count > recvd))
goto out_cheating;
out:
xdr_read_pages(xdr, count);
result->eof = eof;
result->count = count;
return count;
out_mismatch:
dprintk("NFS: READ count doesn't match length of opaque: "
"count %u != ocount %u\n", count, ocount);
return -EIO;
out_cheating:
dprintk("NFS: server cheating in read result: "
"count %u > recvd %u\n", count, recvd);
count = recvd;
eof = 0;
goto out;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_readres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_read3resok(xdr, result);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.7 WRITE3res
*
* enum stable_how {
* UNSTABLE = 0,
* DATA_SYNC = 1,
* FILE_SYNC = 2
* };
*
* struct WRITE3resok {
* wcc_data file_wcc;
* count3 count;
* stable_how committed;
* writeverf3 verf;
* };
*
* struct WRITE3resfail {
* wcc_data file_wcc;
* };
*
* union WRITE3res switch (nfsstat3 status) {
* case NFS3_OK:
* WRITE3resok resok;
* default:
* WRITE3resfail resfail;
* };
*/
static int decode_write3resok(struct xdr_stream *xdr,
struct nfs_writeres *result)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4 + NFS3_WRITEVERFSIZE);
if (unlikely(p == NULL))
goto out_overflow;
result->count = be32_to_cpup(p++);
result->verf->committed = be32_to_cpup(p++);
if (unlikely(result->verf->committed > NFS_FILE_SYNC))
goto out_badvalue;
memcpy(result->verf->verifier, p, NFS3_WRITEVERFSIZE);
return result->count;
out_badvalue:
dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_writeres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_write3resok(xdr, result);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.8 CREATE3res
*
* struct CREATE3resok {
* post_op_fh3 obj;
* post_op_attr obj_attributes;
* wcc_data dir_wcc;
* };
*
* struct CREATE3resfail {
* wcc_data dir_wcc;
* };
*
* union CREATE3res switch (nfsstat3 status) {
* case NFS3_OK:
* CREATE3resok resok;
* default:
* CREATE3resfail resfail;
* };
*/
static int decode_create3resok(struct xdr_stream *xdr,
struct nfs3_diropres *result)
{
int error;
error = decode_post_op_fh3(xdr, result->fh);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
/* The server isn't required to return a file handle.
* If it didn't, force the client to perform a LOOKUP
* to determine the correct file handle and attribute
* values for the new object. */
if (result->fh->size == 0)
result->fattr->valid = 0;
error = decode_wcc_data(xdr, result->dir_attr);
out:
return error;
}
static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs3_diropres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_create3resok(xdr, result);
out:
return error;
out_default:
error = decode_wcc_data(xdr, result->dir_attr);
if (unlikely(error))
goto out;
return nfs_stat_to_errno(status);
}
/*
* 3.3.12 REMOVE3res
*
* struct REMOVE3resok {
* wcc_data dir_wcc;
* };
*
* struct REMOVE3resfail {
* wcc_data dir_wcc;
* };
*
* union REMOVE3res switch (nfsstat3 status) {
* case NFS3_OK:
* REMOVE3resok resok;
* default:
* REMOVE3resfail resfail;
* };
*/
static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_removeres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->dir_attr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.14 RENAME3res
*
* struct RENAME3resok {
* wcc_data fromdir_wcc;
* wcc_data todir_wcc;
* };
*
* struct RENAME3resfail {
* wcc_data fromdir_wcc;
* wcc_data todir_wcc;
* };
*
* union RENAME3res switch (nfsstat3 status) {
* case NFS3_OK:
* RENAME3resok resok;
* default:
* RENAME3resfail resfail;
* };
*/
static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_renameres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->old_fattr);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->new_fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.15 LINK3res
*
* struct LINK3resok {
* post_op_attr file_attributes;
* wcc_data linkdir_wcc;
* };
*
* struct LINK3resfail {
* post_op_attr file_attributes;
* wcc_data linkdir_wcc;
* };
*
* union LINK3res switch (nfsstat3 status) {
* case NFS3_OK:
* LINK3resok resok;
* default:
* LINK3resfail resfail;
* };
*/
static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs3_linkres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->dir_attr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/**
* nfs3_decode_dirent - Decode a single NFSv3 directory entry stored in
* the local page cache
* @xdr: XDR stream where entry resides
* @entry: buffer to fill in with entry data
* @plus: boolean indicating whether this should be a readdirplus entry
*
* Returns zero if successful, otherwise a negative errno value is
* returned.
*
* This function is not invoked during READDIR reply decoding, but
* rather whenever an application invokes the getdents(2) system call
* on a directory already in our cache.
*
* 3.3.16 entry3
*
* struct entry3 {
* fileid3 fileid;
* filename3 name;
* cookie3 cookie;
* fhandle3 filehandle;
* post_op_attr3 attributes;
* entry3 *nextentry;
* };
*
* 3.3.17 entryplus3
* struct entryplus3 {
* fileid3 fileid;
* filename3 name;
* cookie3 cookie;
* post_op_attr name_attributes;
* post_op_fh3 name_handle;
* entryplus3 *nextentry;
* };
*/
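/*
 * List termination, as decoded below: a zero "value follows" word ends
 * the entry list; the next word is the dirlist3 eof flag.  If that flag
 * is zero the server has more entries (-EAGAIN requests another READDIR),
 * otherwise entry->eof is set and -EBADCOOKIE is returned.
 */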
int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
int plus)
{
struct nfs_entry old = *entry;
__be32 *p;
int error;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p == xdr_zero) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p == xdr_zero)
return -EAGAIN;
entry->eof = 1;
return -EBADCOOKIE;
}
error = decode_fileid3(xdr, &entry->ino);
if (unlikely(error))
return error;
error = decode_inline_filename3(xdr, &entry->name, &entry->len);
if (unlikely(error))
return error;
entry->prev_cookie = entry->cookie;
error = decode_cookie3(xdr, &entry->cookie);
if (unlikely(error))
return error;
entry->d_type = DT_UNKNOWN;
if (plus) {
entry->fattr->valid = 0;
error = decode_post_op_attr(xdr, entry->fattr);
if (unlikely(error))
return error;
if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
/* In fact, a post_op_fh3: */
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (*p != xdr_zero) {
error = decode_nfs_fh3(xdr, entry->fh);
if (unlikely(error)) {
if (error == -E2BIG)
goto out_truncated;
return error;
}
} else
zero_nfs_fh3(entry->fh);
}
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EAGAIN;
out_truncated:
dprintk("NFS: directory entry contains invalid file handle\n");
*entry = old;
return -EAGAIN;
}
/*
* 3.3.16 READDIR3res
*
* struct dirlist3 {
* entry3 *entries;
* bool eof;
* };
*
* struct READDIR3resok {
* post_op_attr dir_attributes;
* cookieverf3 cookieverf;
* dirlist3 reply;
* };
*
* struct READDIR3resfail {
* post_op_attr dir_attributes;
* };
*
* union READDIR3res switch (nfsstat3 status) {
* case NFS3_OK:
* READDIR3resok resok;
* default:
* READDIR3resfail resfail;
* };
*
* Read the directory contents into the page cache, but otherwise
* don't touch them. The actual decoding is done by nfs3_decode_dirent()
* during subsequent nfs_readdir() calls.
*/
static int decode_dirlist3(struct xdr_stream *xdr)
{
u32 recvd, pglen;
size_t hdrlen;
pglen = xdr->buf->page_len;
hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
recvd = xdr->buf->len - hdrlen;
if (unlikely(pglen > recvd))
goto out_cheating;
out:
xdr_read_pages(xdr, pglen);
return pglen;
out_cheating:
dprintk("NFS: server cheating in readdir result: "
"pglen %u > recvd %u\n", pglen, recvd);
pglen = recvd;
goto out;
}
static int decode_readdir3resok(struct xdr_stream *xdr,
struct nfs3_readdirres *result)
{
int error;
error = decode_post_op_attr(xdr, result->dir_attr);
if (unlikely(error))
goto out;
/* XXX: do we need to check if result->verf != NULL ? */
error = decode_cookieverf3(xdr, result->verf);
if (unlikely(error))
goto out;
error = decode_dirlist3(xdr);
out:
return error;
}
static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs3_readdirres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_readdir3resok(xdr, result);
out:
return error;
out_default:
error = decode_post_op_attr(xdr, result->dir_attr);
if (unlikely(error))
goto out;
return nfs_stat_to_errno(status);
}
/*
* 3.3.18 FSSTAT3res
*
* struct FSSTAT3resok {
* post_op_attr obj_attributes;
* size3 tbytes;
* size3 fbytes;
* size3 abytes;
* size3 tfiles;
* size3 ffiles;
* size3 afiles;
* uint32 invarsec;
* };
*
* struct FSSTAT3resfail {
* post_op_attr obj_attributes;
* };
*
* union FSSTAT3res switch (nfsstat3 status) {
* case NFS3_OK:
* FSSTAT3resok resok;
* default:
* FSSTAT3resfail resfail;
* };
*/
static int decode_fsstat3resok(struct xdr_stream *xdr,
struct nfs_fsstat *result)
{
__be32 *p;
p = xdr_inline_decode(xdr, 8 * 6 + 4);
if (unlikely(p == NULL))
goto out_overflow;
p = xdr_decode_size3(p, &result->tbytes);
p = xdr_decode_size3(p, &result->fbytes);
p = xdr_decode_size3(p, &result->abytes);
p = xdr_decode_size3(p, &result->tfiles);
p = xdr_decode_size3(p, &result->ffiles);
xdr_decode_size3(p, &result->afiles);
/* ignore invarsec */
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fsstat *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_fsstat3resok(xdr, result);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.19 FSINFO3res
*
* struct FSINFO3resok {
* post_op_attr obj_attributes;
* uint32 rtmax;
* uint32 rtpref;
* uint32 rtmult;
* uint32 wtmax;
* uint32 wtpref;
* uint32 wtmult;
* uint32 dtpref;
* size3 maxfilesize;
* nfstime3 time_delta;
* uint32 properties;
* };
*
* struct FSINFO3resfail {
* post_op_attr obj_attributes;
* };
*
* union FSINFO3res switch (nfsstat3 status) {
* case NFS3_OK:
* FSINFO3resok resok;
* default:
* FSINFO3resfail resfail;
* };
*/
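/*
 * The FSINFO3resok body after the post_op_attr is decoded in one shot:
 * 4 * 7 (rtmax through dtpref) + 8 (maxfilesize) + 8 (time_delta) + 4
 * (properties, consumed but ignored) = 48 bytes.
 */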
static int decode_fsinfo3resok(struct xdr_stream *xdr,
struct nfs_fsinfo *result)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
if (unlikely(p == NULL))
goto out_overflow;
result->rtmax = be32_to_cpup(p++);
result->rtpref = be32_to_cpup(p++);
result->rtmult = be32_to_cpup(p++);
result->wtmax = be32_to_cpup(p++);
result->wtpref = be32_to_cpup(p++);
result->wtmult = be32_to_cpup(p++);
result->dtpref = be32_to_cpup(p++);
p = xdr_decode_size3(p, &result->maxfilesize);
xdr_decode_nfstime3(p, &result->time_delta);
/* ignore properties */
result->lease_time = 0;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fsinfo *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_fsinfo3resok(xdr, result);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.20 PATHCONF3res
*
* struct PATHCONF3resok {
* post_op_attr obj_attributes;
* uint32 linkmax;
* uint32 name_max;
* bool no_trunc;
* bool chown_restricted;
* bool case_insensitive;
* bool case_preserving;
* };
*
* struct PATHCONF3resfail {
* post_op_attr obj_attributes;
* };
*
* union PATHCONF3res switch (nfsstat3 status) {
* case NFS3_OK:
* PATHCONF3resok resok;
* default:
* PATHCONF3resfail resfail;
* };
*/
static int decode_pathconf3resok(struct xdr_stream *xdr,
struct nfs_pathconf *result)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4 * 6);
if (unlikely(p == NULL))
goto out_overflow;
result->max_link = be32_to_cpup(p++);
result->max_namelen = be32_to_cpup(p);
/* ignore remaining fields */
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_pathconf *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_pathconf3resok(xdr, result);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
/*
* 3.3.21 COMMIT3res
*
* struct COMMIT3resok {
* wcc_data file_wcc;
* writeverf3 verf;
* };
*
* struct COMMIT3resfail {
* wcc_data file_wcc;
* };
*
* union COMMIT3res switch (nfsstat3 status) {
* case NFS3_OK:
* COMMIT3resok resok;
* default:
* COMMIT3resfail resfail;
* };
*/
static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_writeres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
error = decode_wcc_data(xdr, result->fattr);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_status;
error = decode_writeverf3(xdr, result->verf->verifier);
out:
return error;
out_status:
return nfs_stat_to_errno(status);
}
#ifdef CONFIG_NFS_V3_ACL
static inline int decode_getacl3resok(struct xdr_stream *xdr,
struct nfs3_getaclres *result)
{
struct posix_acl **acl;
unsigned int *aclcnt;
size_t hdrlen;
int error;
error = decode_post_op_attr(xdr, result->fattr);
if (unlikely(error))
goto out;
error = decode_uint32(xdr, &result->mask);
if (unlikely(error))
goto out;
error = -EINVAL;
if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
goto out;
hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
acl = NULL;
if (result->mask & NFS_ACL)
acl = &result->acl_access;
aclcnt = NULL;
if (result->mask & NFS_ACLCNT)
aclcnt = &result->acl_access_count;
error = nfsacl_decode(xdr->buf, hdrlen, aclcnt, acl);
if (unlikely(error <= 0))
goto out;
acl = NULL;
if (result->mask & NFS_DFACL)
acl = &result->acl_default;
aclcnt = NULL;
if (result->mask & NFS_DFACLCNT)
aclcnt = &result->acl_default_count;
error = nfsacl_decode(xdr->buf, hdrlen + error, aclcnt, acl);
if (unlikely(error <= 0))
return error;
error = 0;
out:
return error;
}
static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs3_getaclres *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_getacl3resok(xdr, result);
out:
return error;
out_default:
return nfs_stat_to_errno(status);
}
static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fattr *result)
{
enum nfs_stat status;
int error;
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
if (status != NFS3_OK)
goto out_default;
error = decode_post_op_attr(xdr, result);
out:
return error;
out_default:
return nfs_stat_to_errno(status);
}
#endif /* CONFIG_NFS_V3_ACL */
#define PROC(proc, argtype, restype, timer) \
[NFS3PROC_##proc] = { \
.p_proc = NFS3PROC_##proc, \
.p_encode = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args, \
.p_decode = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res, \
.p_arglen = NFS3_##argtype##args_sz, \
.p_replen = NFS3_##restype##res_sz, \
.p_timer = timer, \
.p_statidx = NFS3PROC_##proc, \
.p_name = #proc, \
}
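/*
 * For example, PROC(GETATTR, getattr, getattr, 1) expands to an entry
 * for NFS3PROC_GETATTR using nfs3_xdr_enc_getattr3args and
 * nfs3_xdr_dec_getattr3res, with NFS3_getattrargs_sz/NFS3_getattrres_sz
 * as the argument/reply sizes and timer class 1.
 */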
struct rpc_procinfo nfs3_procedures[] = {
PROC(GETATTR, getattr, getattr, 1),
PROC(SETATTR, setattr, setattr, 0),
PROC(LOOKUP, lookup, lookup, 2),
PROC(ACCESS, access, access, 1),
PROC(READLINK, readlink, readlink, 3),
PROC(READ, read, read, 3),
PROC(WRITE, write, write, 4),
PROC(CREATE, create, create, 0),
PROC(MKDIR, mkdir, create, 0),
PROC(SYMLINK, symlink, create, 0),
PROC(MKNOD, mknod, create, 0),
PROC(REMOVE, remove, remove, 0),
PROC(RMDIR, lookup, setattr, 0),
PROC(RENAME, rename, rename, 0),
PROC(LINK, link, link, 0),
PROC(READDIR, readdir, readdir, 3),
PROC(READDIRPLUS, readdirplus, readdir, 3),
PROC(FSSTAT, getattr, fsstat, 0),
PROC(FSINFO, getattr, fsinfo, 0),
PROC(PATHCONF, getattr, pathconf, 0),
PROC(COMMIT, commit, commit, 5),
};
struct rpc_version nfs_version3 = {
.number = 3,
.nrprocs = ARRAY_SIZE(nfs3_procedures),
.procs = nfs3_procedures
};
#ifdef CONFIG_NFS_V3_ACL
static struct rpc_procinfo nfs3_acl_procedures[] = {
[ACLPROC3_GETACL] = {
.p_proc = ACLPROC3_GETACL,
.p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args,
.p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res,
.p_arglen = ACL3_getaclargs_sz,
.p_replen = ACL3_getaclres_sz,
.p_timer = 1,
.p_name = "GETACL",
},
[ACLPROC3_SETACL] = {
.p_proc = ACLPROC3_SETACL,
.p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args,
.p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res,
.p_arglen = ACL3_setaclargs_sz,
.p_replen = ACL3_setaclres_sz,
.p_timer = 0,
.p_name = "SETACL",
},
};
struct rpc_version nfsacl_version3 = {
.number = 3,
.nrprocs = sizeof(nfs3_acl_procedures)/
sizeof(nfs3_acl_procedures[0]),
.procs = nfs3_acl_procedures,
};
#endif /* CONFIG_NFS_V3_ACL */
| gpl-2.0 |