repo_name
string
path
string
copies
string
size
string
content
string
license
string
El-Nath/biji-find5-kernel
arch/arm/mach-mv78xx0/db78x00-bp-setup.c
5100
2555
/* * arch/arm/mach-mv78xx0/db78x00-bp-setup.c * * Marvell DB-78x00-BP Development Board Setup * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <linux/ethtool.h> #include <linux/i2c.h> #include <mach/mv78xx0.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "common.h" static struct mv643xx_eth_platform_data db78x00_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; static struct mv643xx_eth_platform_data db78x00_ge01_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(9), }; static struct mv643xx_eth_platform_data db78x00_ge10_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(10), }; static struct mv643xx_eth_platform_data db78x00_ge11_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(11), }; static struct mv_sata_platform_data db78x00_sata_data = { .n_ports = 2, }; static struct i2c_board_info __initdata db78x00_i2c_rtc = { I2C_BOARD_INFO("ds1338", 0x68), }; static void __init db78x00_init(void) { /* * Basic MV78xx0 setup. Needs to be called early. */ mv78xx0_init(); /* * Partition on-chip peripherals between the two CPU cores. */ if (mv78xx0_core_index() == 0) { mv78xx0_ehci0_init(); mv78xx0_ehci1_init(); mv78xx0_ehci2_init(); mv78xx0_ge00_init(&db78x00_ge00_data); mv78xx0_ge01_init(&db78x00_ge01_data); mv78xx0_ge10_init(&db78x00_ge10_data); mv78xx0_ge11_init(&db78x00_ge11_data); mv78xx0_sata_init(&db78x00_sata_data); mv78xx0_uart0_init(); mv78xx0_uart2_init(); mv78xx0_i2c_init(); i2c_register_board_info(0, &db78x00_i2c_rtc, 1); } else { mv78xx0_uart1_init(); mv78xx0_uart3_init(); } } static int __init db78x00_pci_init(void) { if (machine_is_db78x00_bp()) { /* * Assign the x16 PCIe slot on the board to CPU core * #0, and let CPU core #1 have the four x1 slots. 
*/ if (mv78xx0_core_index() == 0) mv78xx0_pcie_init(0, 1); else mv78xx0_pcie_init(1, 0); } return 0; } subsys_initcall(db78x00_pci_init); MACHINE_START(DB78X00_BP, "Marvell DB-78x00-BP Development Board") /* Maintainer: Lennert Buytenhek <buytenh@marvell.com> */ .atag_offset = 0x100, .init_machine = db78x00_init, .map_io = mv78xx0_map_io, .init_early = mv78xx0_init_early, .init_irq = mv78xx0_init_irq, .timer = &mv78xx0_timer, .restart = mv78xx0_restart, MACHINE_END
gpl-2.0
DC07/spirit_msm8226
drivers/input/serio/libps2.c
7660
8642
/* * PS/2 driver library * * Copyright (c) 1999-2002 Vojtech Pavlik * Copyright (c) 2004 Dmitry Torokhov */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/delay.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/i8042.h> #include <linux/init.h> #include <linux/libps2.h> #define DRIVER_DESC "PS/2 driver library" MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>"); MODULE_DESCRIPTION("PS/2 driver library"); MODULE_LICENSE("GPL"); /* * ps2_sendbyte() sends a byte to the device and waits for acknowledge. * It doesn't handle retransmission, though it could - because if there * is a need for retransmissions device has to be replaced anyway. * * ps2_sendbyte() can only be called from a process context. */ int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout) { serio_pause_rx(ps2dev->serio); ps2dev->nak = 1; ps2dev->flags |= PS2_FLAG_ACK; serio_continue_rx(ps2dev->serio); if (serio_write(ps2dev->serio, byte) == 0) wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_ACK), msecs_to_jiffies(timeout)); serio_pause_rx(ps2dev->serio); ps2dev->flags &= ~PS2_FLAG_ACK; serio_continue_rx(ps2dev->serio); return -ps2dev->nak; } EXPORT_SYMBOL(ps2_sendbyte); void ps2_begin_command(struct ps2dev *ps2dev) { mutex_lock(&ps2dev->cmd_mutex); if (i8042_check_port_owner(ps2dev->serio)) i8042_lock_chip(); } EXPORT_SYMBOL(ps2_begin_command); void ps2_end_command(struct ps2dev *ps2dev) { if (i8042_check_port_owner(ps2dev->serio)) i8042_unlock_chip(); mutex_unlock(&ps2dev->cmd_mutex); } EXPORT_SYMBOL(ps2_end_command); /* * ps2_drain() waits for device to transmit requested number of bytes * and discards them. 
*/ void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout) { if (maxbytes > sizeof(ps2dev->cmdbuf)) { WARN_ON(1); maxbytes = sizeof(ps2dev->cmdbuf); } ps2_begin_command(ps2dev); serio_pause_rx(ps2dev->serio); ps2dev->flags = PS2_FLAG_CMD; ps2dev->cmdcnt = maxbytes; serio_continue_rx(ps2dev->serio); wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_CMD), msecs_to_jiffies(timeout)); ps2_end_command(ps2dev); } EXPORT_SYMBOL(ps2_drain); /* * ps2_is_keyboard_id() checks received ID byte against the list of * known keyboard IDs. */ int ps2_is_keyboard_id(char id_byte) { static const char keyboard_ids[] = { 0xab, /* Regular keyboards */ 0xac, /* NCD Sun keyboard */ 0x2b, /* Trust keyboard, translated */ 0x5d, /* Trust keyboard */ 0x60, /* NMB SGI keyboard, translated */ 0x47, /* NMB SGI keyboard */ }; return memchr(keyboard_ids, id_byte, sizeof(keyboard_ids)) != NULL; } EXPORT_SYMBOL(ps2_is_keyboard_id); /* * ps2_adjust_timeout() is called after receiving 1st byte of command * response and tries to reduce remaining timeout to speed up command * completion. */ static int ps2_adjust_timeout(struct ps2dev *ps2dev, int command, int timeout) { switch (command) { case PS2_CMD_RESET_BAT: /* * Device has sent the first response byte after * reset command, reset is thus done, so we can * shorten the timeout. * The next byte will come soon (keyboard) or not * at all (mouse). */ if (timeout > msecs_to_jiffies(100)) timeout = msecs_to_jiffies(100); break; case PS2_CMD_GETID: /* * Microsoft Natural Elite keyboard responds to * the GET ID command as it were a mouse, with * a single byte. Fail the command so atkbd will * use alternative probe to detect it. */ if (ps2dev->cmdbuf[1] == 0xaa) { serio_pause_rx(ps2dev->serio); ps2dev->flags = 0; serio_continue_rx(ps2dev->serio); timeout = 0; } /* * If device behind the port is not a keyboard there * won't be 2nd byte of ID response. 
*/ if (!ps2_is_keyboard_id(ps2dev->cmdbuf[1])) { serio_pause_rx(ps2dev->serio); ps2dev->flags = ps2dev->cmdcnt = 0; serio_continue_rx(ps2dev->serio); timeout = 0; } break; default: break; } return timeout; } /* * ps2_command() sends a command and its parameters to the mouse, * then waits for the response and puts it in the param array. * * ps2_command() can only be called from a process context */ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) { int timeout; int send = (command >> 12) & 0xf; int receive = (command >> 8) & 0xf; int rc = -1; int i; if (receive > sizeof(ps2dev->cmdbuf)) { WARN_ON(1); return -1; } if (send && !param) { WARN_ON(1); return -1; } serio_pause_rx(ps2dev->serio); ps2dev->flags = command == PS2_CMD_GETID ? PS2_FLAG_WAITID : 0; ps2dev->cmdcnt = receive; if (receive && param) for (i = 0; i < receive; i++) ps2dev->cmdbuf[(receive - 1) - i] = param[i]; serio_continue_rx(ps2dev->serio); /* * Some devices (Synaptics) peform the reset before * ACKing the reset command, and so it can take a long * time before the ACK arrives. */ if (ps2_sendbyte(ps2dev, command & 0xff, command == PS2_CMD_RESET_BAT ? 1000 : 200)) goto out; for (i = 0; i < send; i++) if (ps2_sendbyte(ps2dev, param[i], 200)) goto out; /* * The reset command takes a long time to execute. */ timeout = msecs_to_jiffies(command == PS2_CMD_RESET_BAT ? 
4000 : 500); timeout = wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_CMD1), timeout); if (ps2dev->cmdcnt && !(ps2dev->flags & PS2_FLAG_CMD1)) { timeout = ps2_adjust_timeout(ps2dev, command, timeout); wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_CMD), timeout); } if (param) for (i = 0; i < receive; i++) param[i] = ps2dev->cmdbuf[(receive - 1) - i]; if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) goto out; rc = 0; out: serio_pause_rx(ps2dev->serio); ps2dev->flags = 0; serio_continue_rx(ps2dev->serio); return rc; } EXPORT_SYMBOL(__ps2_command); int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) { int rc; ps2_begin_command(ps2dev); rc = __ps2_command(ps2dev, param, command); ps2_end_command(ps2dev); return rc; } EXPORT_SYMBOL(ps2_command); /* * ps2_init() initializes ps2dev structure */ void ps2_init(struct ps2dev *ps2dev, struct serio *serio) { mutex_init(&ps2dev->cmd_mutex); lockdep_set_subclass(&ps2dev->cmd_mutex, serio->depth); init_waitqueue_head(&ps2dev->wait); ps2dev->serio = serio; } EXPORT_SYMBOL(ps2_init); /* * ps2_handle_ack() is supposed to be used in interrupt handler * to properly process ACK/NAK of a command from a PS/2 device. */ int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data) { switch (data) { case PS2_RET_ACK: ps2dev->nak = 0; break; case PS2_RET_NAK: ps2dev->flags |= PS2_FLAG_NAK; ps2dev->nak = PS2_RET_NAK; break; case PS2_RET_ERR: if (ps2dev->flags & PS2_FLAG_NAK) { ps2dev->flags &= ~PS2_FLAG_NAK; ps2dev->nak = PS2_RET_ERR; break; } /* * Workaround for mice which don't ACK the Get ID command. * These are valid mouse IDs that we recognize. 
*/ case 0x00: case 0x03: case 0x04: if (ps2dev->flags & PS2_FLAG_WAITID) { ps2dev->nak = 0; break; } /* Fall through */ default: return 0; } if (!ps2dev->nak) { ps2dev->flags &= ~PS2_FLAG_NAK; if (ps2dev->cmdcnt) ps2dev->flags |= PS2_FLAG_CMD | PS2_FLAG_CMD1; } ps2dev->flags &= ~PS2_FLAG_ACK; wake_up(&ps2dev->wait); if (data != PS2_RET_ACK) ps2_handle_response(ps2dev, data); return 1; } EXPORT_SYMBOL(ps2_handle_ack); /* * ps2_handle_response() is supposed to be used in interrupt handler * to properly store device's response to a command and notify process * waiting for completion of the command. */ int ps2_handle_response(struct ps2dev *ps2dev, unsigned char data) { if (ps2dev->cmdcnt) ps2dev->cmdbuf[--ps2dev->cmdcnt] = data; if (ps2dev->flags & PS2_FLAG_CMD1) { ps2dev->flags &= ~PS2_FLAG_CMD1; if (ps2dev->cmdcnt) wake_up(&ps2dev->wait); } if (!ps2dev->cmdcnt) { ps2dev->flags &= ~PS2_FLAG_CMD; wake_up(&ps2dev->wait); } return 1; } EXPORT_SYMBOL(ps2_handle_response); void ps2_cmd_aborted(struct ps2dev *ps2dev) { if (ps2dev->flags & PS2_FLAG_ACK) ps2dev->nak = 1; if (ps2dev->flags & (PS2_FLAG_ACK | PS2_FLAG_CMD)) wake_up(&ps2dev->wait); /* reset all flags except last nack */ ps2dev->flags &= PS2_FLAG_NAK; } EXPORT_SYMBOL(ps2_cmd_aborted);
gpl-2.0
linino/linux
tools/firewire/decode-fcp.c
13036
5617
#include <linux/firewire-constants.h> #include <stdio.h> #include <stdlib.h> #include "list.h" #include "nosy-dump.h" #define CSR_FCP_COMMAND 0xfffff0000b00ull #define CSR_FCP_RESPONSE 0xfffff0000d00ull static const char * const ctype_names[] = { [0x0] = "control", [0x8] = "not implemented", [0x1] = "status", [0x9] = "accepted", [0x2] = "specific inquiry", [0xa] = "rejected", [0x3] = "notify", [0xb] = "in transition", [0x4] = "general inquiry", [0xc] = "stable", [0x5] = "(reserved 0x05)", [0xd] = "changed", [0x6] = "(reserved 0x06)", [0xe] = "(reserved 0x0e)", [0x7] = "(reserved 0x07)", [0xf] = "interim", }; static const char * const subunit_type_names[] = { [0x00] = "monitor", [0x10] = "(reserved 0x10)", [0x01] = "audio", [0x11] = "(reserved 0x11)", [0x02] = "printer", [0x12] = "(reserved 0x12)", [0x03] = "disc", [0x13] = "(reserved 0x13)", [0x04] = "tape recorder/player",[0x14] = "(reserved 0x14)", [0x05] = "tuner", [0x15] = "(reserved 0x15)", [0x06] = "ca", [0x16] = "(reserved 0x16)", [0x07] = "camera", [0x17] = "(reserved 0x17)", [0x08] = "(reserved 0x08)", [0x18] = "(reserved 0x18)", [0x09] = "panel", [0x19] = "(reserved 0x19)", [0x0a] = "bulletin board", [0x1a] = "(reserved 0x1a)", [0x0b] = "camera storage", [0x1b] = "(reserved 0x1b)", [0x0c] = "(reserved 0x0c)", [0x1c] = "vendor unique", [0x0d] = "(reserved 0x0d)", [0x1d] = "all subunit types", [0x0e] = "(reserved 0x0e)", [0x1e] = "subunit_type extended to next byte", [0x0f] = "(reserved 0x0f)", [0x1f] = "unit", }; struct avc_enum { int value; const char *name; }; struct avc_field { const char *name; /* Short name for field. */ int offset; /* Location of field, specified in bits; */ /* negative means from end of packet. */ int width; /* Width of field, 0 means use data_length. 
*/ struct avc_enum *names; }; struct avc_opcode_info { const char *name; struct avc_field fields[8]; }; struct avc_enum power_field_names[] = { { 0x70, "on" }, { 0x60, "off" }, { } }; static const struct avc_opcode_info opcode_info[256] = { /* TA Document 1999026 */ /* AV/C Digital Interface Command Set General Specification 4.0 */ [0xb2] = { "power", { { "state", 0, 8, power_field_names } } }, [0x30] = { "unit info", { { "foo", 0, 8 }, { "unit_type", 8, 5 }, { "unit", 13, 3 }, { "company id", 16, 24 }, } }, [0x31] = { "subunit info" }, [0x01] = { "reserve" }, [0xb0] = { "version" }, [0x00] = { "vendor dependent" }, [0x02] = { "plug info" }, [0x12] = { "channel usage" }, [0x24] = { "connect" }, [0x20] = { "connect av" }, [0x22] = { "connections" }, [0x11] = { "digital input" }, [0x10] = { "digital output" }, [0x25] = { "disconnect" }, [0x21] = { "disconnect av" }, [0x19] = { "input plug signal format" }, [0x18] = { "output plug signal format" }, [0x1f] = { "general bus setup" }, /* TA Document 1999025 */ /* AV/C Descriptor Mechanism Specification Version 1.0 */ [0x0c] = { "create descriptor" }, [0x08] = { "open descriptor" }, [0x09] = { "read descriptor" }, [0x0a] = { "write descriptor" }, [0x05] = { "open info block" }, [0x06] = { "read info block" }, [0x07] = { "write info block" }, [0x0b] = { "search descriptor" }, [0x0d] = { "object number select" }, /* TA Document 1999015 */ /* AV/C Command Set for Rate Control of Isochronous Data Flow 1.0 */ [0xb3] = { "rate", { { "subfunction", 0, 8 }, { "result", 8, 8 }, { "plug_type", 16, 8 }, { "plug_id", 16, 8 }, } }, /* TA Document 1999008 */ /* AV/C Audio Subunit Specification 1.0 */ [0xb8] = { "function block" }, /* TA Document 2001001 */ /* AV/C Panel Subunit Specification 1.1 */ [0x7d] = { "gui update" }, [0x7e] = { "push gui data" }, [0x7f] = { "user action" }, [0x7c] = { "pass through" }, /* */ [0x26] = { "asynchronous connection" }, }; struct avc_frame { uint32_t operand0:8; uint32_t opcode:8; uint32_t 
subunit_id:3; uint32_t subunit_type:5; uint32_t ctype:4; uint32_t cts:4; }; static void decode_avc(struct link_transaction *t) { struct avc_frame *frame = (struct avc_frame *) t->request->packet.write_block.data; const struct avc_opcode_info *info; const char *name; char buffer[32]; int i; info = &opcode_info[frame->opcode]; if (info->name == NULL) { snprintf(buffer, sizeof(buffer), "(unknown opcode 0x%02x)", frame->opcode); name = buffer; } else { name = info->name; } printf("av/c %s, subunit_type=%s, subunit_id=%d, opcode=%s", ctype_names[frame->ctype], subunit_type_names[frame->subunit_type], frame->subunit_id, name); for (i = 0; info->fields[i].name != NULL; i++) printf(", %s", info->fields[i].name); printf("\n"); } int decode_fcp(struct link_transaction *t) { struct avc_frame *frame = (struct avc_frame *) t->request->packet.write_block.data; unsigned long long offset = ((unsigned long long) t->request->packet.common.offset_high << 32) | t->request->packet.common.offset_low; if (t->request->packet.common.tcode != TCODE_WRITE_BLOCK_REQUEST) return 0; if (offset == CSR_FCP_COMMAND || offset == CSR_FCP_RESPONSE) { switch (frame->cts) { case 0x00: decode_avc(t); break; case 0x01: printf("cal fcp frame (cts=0x01)\n"); break; case 0x02: printf("ehs fcp frame (cts=0x02)\n"); break; case 0x03: printf("havi fcp frame (cts=0x03)\n"); break; case 0x0e: printf("vendor specific fcp frame (cts=0x0e)\n"); break; case 0x0f: printf("extended cts\n"); break; default: printf("reserved fcp frame (ctx=0x%02x)\n", frame->cts); break; } return 1; } return 0; }
gpl-2.0
usb-bullhead-ubuntu-touch/kernel_msm
drivers/media/dvb-frontends/a8293.c
2541
3687
/* * Allegro A8293 SEC driver * * Copyright (C) 2011 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "dvb_frontend.h" #include "a8293.h" struct a8293_priv { struct i2c_adapter *i2c; const struct a8293_config *cfg; u8 reg[2]; }; static int a8293_i2c(struct a8293_priv *priv, u8 *val, int len, bool rd) { int ret; struct i2c_msg msg[1] = { { .addr = priv->cfg->i2c_addr, .len = len, .buf = val, } }; if (rd) msg[0].flags = I2C_M_RD; else msg[0].flags = 0; ret = i2c_transfer(priv->i2c, msg, 1); if (ret == 1) { ret = 0; } else { dev_warn(&priv->i2c->dev, "%s: i2c failed=%d rd=%d\n", KBUILD_MODNAME, ret, rd); ret = -EREMOTEIO; } return ret; } static int a8293_wr(struct a8293_priv *priv, u8 *val, int len) { return a8293_i2c(priv, val, len, 0); } static int a8293_rd(struct a8293_priv *priv, u8 *val, int len) { return a8293_i2c(priv, val, len, 1); } static int a8293_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t fe_sec_voltage) { struct a8293_priv *priv = fe->sec_priv; int ret; dev_dbg(&priv->i2c->dev, "%s: fe_sec_voltage=%d\n", __func__, fe_sec_voltage); switch (fe_sec_voltage) { case SEC_VOLTAGE_OFF: /* ENB=0 */ priv->reg[0] = 0x10; break; case SEC_VOLTAGE_13: /* VSEL0=1, VSEL1=0, VSEL2=0, VSEL3=0, ENB=1*/ priv->reg[0] = 0x31; break; case SEC_VOLTAGE_18: /* VSEL0=0, VSEL1=0, 
VSEL2=0, VSEL3=1, ENB=1*/ priv->reg[0] = 0x38; break; default: ret = -EINVAL; goto err; } ret = a8293_wr(priv, &priv->reg[0], 1); if (ret) goto err; return ret; err: dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } static void a8293_release_sec(struct dvb_frontend *fe) { a8293_set_voltage(fe, SEC_VOLTAGE_OFF); kfree(fe->sec_priv); fe->sec_priv = NULL; } struct dvb_frontend *a8293_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct a8293_config *cfg) { int ret; struct a8293_priv *priv = NULL; u8 buf[2]; /* allocate memory for the internal priv */ priv = kzalloc(sizeof(struct a8293_priv), GFP_KERNEL); if (priv == NULL) { ret = -ENOMEM; goto err; } /* setup the priv */ priv->i2c = i2c; priv->cfg = cfg; fe->sec_priv = priv; /* check if the SEC is there */ ret = a8293_rd(priv, buf, 2); if (ret) goto err; /* ENB=0 */ priv->reg[0] = 0x10; ret = a8293_wr(priv, &priv->reg[0], 1); if (ret) goto err; /* TMODE=0, TGATE=1 */ priv->reg[1] = 0x82; ret = a8293_wr(priv, &priv->reg[1], 1); if (ret) goto err; fe->ops.release_sec = a8293_release_sec; /* override frontend ops */ fe->ops.set_voltage = a8293_set_voltage; dev_info(&priv->i2c->dev, "%s: Allegro A8293 SEC attached\n", KBUILD_MODNAME); return fe; err: dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret); kfree(priv); return NULL; } EXPORT_SYMBOL(a8293_attach); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Allegro A8293 SEC driver"); MODULE_LICENSE("GPL");
gpl-2.0
Razdroid/razdroid-kernel
sound/usb/proc.c
3821
6138
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/usb.h> #include <sound/core.h> #include <sound/info.h> #include <sound/pcm.h> #include "usbaudio.h" #include "helper.h" #include "card.h" #include "endpoint.h" #include "proc.h" /* convert our full speed USB rate into sampling rate in Hz */ static inline unsigned get_full_speed_hz(unsigned int usb_rate) { return (usb_rate * 125 + (1 << 12)) >> 13; } /* convert our high speed USB rate into sampling rate in Hz */ static inline unsigned get_high_speed_hz(unsigned int usb_rate) { return (usb_rate * 125 + (1 << 9)) >> 10; } /* * common proc files to show the usb device info */ static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_audio *chip = entry->private_data; if (!chip->shutdown) snd_iprintf(buffer, "%03d/%03d\n", chip->dev->bus->busnum, chip->dev->devnum); } static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_audio *chip = entry->private_data; if (!chip->shutdown) snd_iprintf(buffer, "%04x:%04x\n", USB_ID_VENDOR(chip->usb_id), USB_ID_PRODUCT(chip->usb_id)); } void snd_usb_audio_create_proc(struct snd_usb_audio *chip) { struct snd_info_entry *entry; if (!snd_card_proc_new(chip->card, "usbbus", &entry)) 
snd_info_set_text_ops(entry, chip, proc_audio_usbbus_read); if (!snd_card_proc_new(chip->card, "usbid", &entry)) snd_info_set_text_ops(entry, chip, proc_audio_usbid_read); } /* * proc interface for list the supported pcm formats */ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct snd_info_buffer *buffer) { struct audioformat *fp; static char *sync_types[4] = { "NONE", "ASYNC", "ADAPTIVE", "SYNC" }; list_for_each_entry(fp, &subs->fmt_list, list) { snd_pcm_format_t fmt; snd_iprintf(buffer, " Interface %d\n", fp->iface); snd_iprintf(buffer, " Altset %d\n", fp->altsetting); snd_iprintf(buffer, " Format:"); for (fmt = 0; fmt <= SNDRV_PCM_FORMAT_LAST; ++fmt) if (fp->formats & pcm_format_to_bits(fmt)) snd_iprintf(buffer, " %s", snd_pcm_format_name(fmt)); snd_iprintf(buffer, "\n"); snd_iprintf(buffer, " Channels: %d\n", fp->channels); snd_iprintf(buffer, " Endpoint: %d %s (%s)\n", fp->endpoint & USB_ENDPOINT_NUMBER_MASK, fp->endpoint & USB_DIR_IN ? "IN" : "OUT", sync_types[(fp->ep_attr & USB_ENDPOINT_SYNCTYPE) >> 2]); if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) { snd_iprintf(buffer, " Rates: %d - %d (continuous)\n", fp->rate_min, fp->rate_max); } else { unsigned int i; snd_iprintf(buffer, " Rates: "); for (i = 0; i < fp->nr_rates; i++) { if (i > 0) snd_iprintf(buffer, ", "); snd_iprintf(buffer, "%d", fp->rate_table[i]); } snd_iprintf(buffer, "\n"); } if (subs->speed != USB_SPEED_FULL) snd_iprintf(buffer, " Data packet interval: %d us\n", 125 * (1 << fp->datainterval)); // snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize); // snd_iprintf(buffer, " EP Attribute = %#x\n", fp->attributes); } } static void proc_dump_ep_status(struct snd_usb_substream *subs, struct snd_usb_endpoint *data_ep, struct snd_usb_endpoint *sync_ep, struct snd_info_buffer *buffer) { if (!data_ep) return; snd_iprintf(buffer, " Packet Size = %d\n", data_ep->curpacksize); snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n", subs->speed == USB_SPEED_FULL ? 
get_full_speed_hz(data_ep->freqm) : get_high_speed_hz(data_ep->freqm), data_ep->freqm >> 16, data_ep->freqm & 0xffff); if (sync_ep && data_ep->freqshift != INT_MIN) { int res = 16 - data_ep->freqshift; snd_iprintf(buffer, " Feedback Format = %d.%d\n", (sync_ep->syncmaxsize > 3 ? 32 : 24) - res, res); } } static void proc_dump_substream_status(struct snd_usb_substream *subs, struct snd_info_buffer *buffer) { if (subs->running) { snd_iprintf(buffer, " Status: Running\n"); snd_iprintf(buffer, " Interface = %d\n", subs->interface); snd_iprintf(buffer, " Altset = %d\n", subs->altset_idx); proc_dump_ep_status(subs, subs->data_endpoint, subs->sync_endpoint, buffer); } else { snd_iprintf(buffer, " Status: Stop\n"); } } static void proc_pcm_format_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_stream *stream = entry->private_data; snd_iprintf(buffer, "%s : %s\n", stream->chip->card->longname, stream->pcm->name); if (stream->substream[SNDRV_PCM_STREAM_PLAYBACK].num_formats) { snd_iprintf(buffer, "\nPlayback:\n"); proc_dump_substream_status(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer); proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer); } if (stream->substream[SNDRV_PCM_STREAM_CAPTURE].num_formats) { snd_iprintf(buffer, "\nCapture:\n"); proc_dump_substream_status(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer); proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer); } } void snd_usb_proc_pcm_format_add(struct snd_usb_stream *stream) { struct snd_info_entry *entry; char name[32]; struct snd_card *card = stream->chip->card; sprintf(name, "stream%d", stream->pcm_index); if (!snd_card_proc_new(card, name, &entry)) snd_info_set_text_ops(entry, stream, proc_pcm_format_read); }
gpl-2.0
systemdaemon/systemd
src/linux/drivers/net/ethernet/dec/tulip/interrupt.c
4333
25596
/* drivers/net/ethernet/dec/tulip/interrupt.c Copyright 2000,2001 The Linux Kernel Team Written/copyright 1994-2001 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Please submit bugs to http://bugzilla.kernel.org/ . */ #include <linux/pci.h> #include "tulip.h" #include <linux/etherdevice.h> int tulip_rx_copybreak; unsigned int tulip_max_interrupt_work; #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION #define MIT_SIZE 15 #define MIT_TABLE 15 /* We use 0 or max */ static unsigned int mit_table[MIT_SIZE+1] = { /* CRS11 21143 hardware Mitigation Control Interrupt We use only RX mitigation we other techniques for TX intr. mitigation. 31 Cycle Size (timer control) 30:27 TX timer in 16 * Cycle size 26:24 TX No pkts before Int. 23:20 RX timer in Cycle size 19:17 RX No pkts before Int. 16 Continues Mode (CM) */ 0x0, /* IM disabled */ 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */ 0x80150000, 0x80270000, 0x80370000, 0x80490000, 0x80590000, 0x80690000, 0x807B0000, 0x808B0000, 0x809D0000, 0x80AD0000, 0x80BD0000, 0x80CF0000, 0x80DF0000, // 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */ 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */ }; #endif int tulip_refill_rx(struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); int entry; int refilled = 0; /* Refill the Rx ring buffers. 
*/ for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) { entry = tp->dirty_rx % RX_RING_SIZE; if (tp->rx_buffers[entry].skb == NULL) { struct sk_buff *skb; dma_addr_t mapping; skb = tp->rx_buffers[entry].skb = netdev_alloc_skb(dev, PKT_BUF_SZ); if (skb == NULL) break; mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); if (dma_mapping_error(&tp->pdev->dev, mapping)) { dev_kfree_skb(skb); tp->rx_buffers[entry].skb = NULL; break; } tp->rx_buffers[entry].mapping = mapping; tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); refilled++; } tp->rx_ring[entry].status = cpu_to_le32(DescOwned); } if(tp->chip_id == LC82C168) { if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) { /* Rx stopped due to out of buffers, * restart it */ iowrite32(0x01, tp->base_addr + CSR2); } } return refilled; } #ifdef CONFIG_TULIP_NAPI void oom_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct tulip_private *tp = netdev_priv(dev); napi_schedule(&tp->napi); } int tulip_poll(struct napi_struct *napi, int budget) { struct tulip_private *tp = container_of(napi, struct tulip_private, napi); struct net_device *dev = tp->dev; int entry = tp->cur_rx % RX_RING_SIZE; int work_done = 0; #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION int received = 0; #endif #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION /* that one buffer is needed for mit activation; or might be a bug in the ring buffer code; check later -- JHS*/ if (budget >=RX_RING_SIZE) budget--; #endif if (tulip_debug > 4) netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n", entry, tp->rx_ring[entry].status); do { if (ioread32(tp->base_addr + CSR5) == 0xffffffff) { netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n"); break; } /* Acknowledge current RX interrupt sources. */ iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5); /* If we own the next entry, it is a new packet. Send it up. */ while ( ! 
(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { s32 status = le32_to_cpu(tp->rx_ring[entry].status); short pkt_len; if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) break; if (tulip_debug > 5) netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", entry, status); if (++work_done >= budget) goto not_done; /* * Omit the four octet CRC from the length. * (May not be considered valid until we have * checked status for RxLengthOver2047 bits) */ pkt_len = ((status >> 16) & 0x7ff) - 4; /* * Maximum pkt_len is 1518 (1514 + vlan header) * Anything higher than this is always invalid * regardless of RxLengthOver2047 bits */ if ((status & (RxLengthOver2047 | RxDescCRCError | RxDescCollisionSeen | RxDescRunt | RxDescDescErr | RxWholePkt)) != RxWholePkt || pkt_len > 1518) { if ((status & (RxLengthOver2047 | RxWholePkt)) != RxWholePkt) { /* Ingore earlier buffers. */ if ((status & 0xffff) != 0x7fff) { if (tulip_debug > 1) dev_warn(&dev->dev, "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", status); dev->stats.rx_length_errors++; } } else { /* There was a fatal error. */ if (tulip_debug > 2) netdev_dbg(dev, "Receive error, Rx status %08x\n", status); dev->stats.rx_errors++; /* end of a packet.*/ if (pkt_len > 1518 || (status & RxDescRunt)) dev->stats.rx_length_errors++; if (status & 0x0004) dev->stats.rx_frame_errors++; if (status & 0x0002) dev->stats.rx_crc_errors++; if (status & 0x0001) dev->stats.rx_fifo_errors++; } } else { struct sk_buff *skb; /* Check if the packet is long enough to accept without copying to a minimally-sized skbuff. */ if (pkt_len < tulip_rx_copybreak && (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { skb_reserve(skb, 2); /* 16 byte align the IP header */ pci_dma_sync_single_for_cpu(tp->pdev, tp->rx_buffers[entry].mapping, pkt_len, PCI_DMA_FROMDEVICE); #if ! 
defined(__alpha__) skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, pkt_len); skb_put(skb, pkt_len); #else memcpy(skb_put(skb, pkt_len), tp->rx_buffers[entry].skb->data, pkt_len); #endif pci_dma_sync_single_for_device(tp->pdev, tp->rx_buffers[entry].mapping, pkt_len, PCI_DMA_FROMDEVICE); } else { /* Pass up the skb already on the Rx ring. */ char *temp = skb_put(skb = tp->rx_buffers[entry].skb, pkt_len); #ifndef final_version if (tp->rx_buffers[entry].mapping != le32_to_cpu(tp->rx_ring[entry].buffer1)) { dev_err(&dev->dev, "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n", le32_to_cpu(tp->rx_ring[entry].buffer1), (unsigned long long)tp->rx_buffers[entry].mapping, skb->head, temp); } #endif pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); tp->rx_buffers[entry].skb = NULL; tp->rx_buffers[entry].mapping = 0; } skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION received++; #endif entry = (++tp->cur_rx) % RX_RING_SIZE; if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) tulip_refill_rx(dev); } /* New ack strategy... irq does not ack Rx any longer hopefully this helps */ /* Really bad things can happen here... If new packet arrives * and an irq arrives (tx or just due to occasionally unset * mask), it will be acked by irq handler, but new thread * is not scheduled. It is major hole in design. * No idea how to fix this if "playing with fire" will fail * tomorrow (night 011029). If it will not fail, we won * finally: amount of IO did not increase at all. */ } while ((ioread32(tp->base_addr + CSR5) & RxIntr)); #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION /* We use this simplistic scheme for IM. It's proven by real life installations. We can have IM enabled continuesly but this would cause unnecessary latency. Unfortunely we can't use all the NET_RX_* feedback here. 
This would turn on IM for devices that is not contributing to backlog congestion with unnecessary latency. We monitor the device RX-ring and have: HW Interrupt Mitigation either ON or OFF. ON: More then 1 pkt received (per intr.) OR we are dropping OFF: Only 1 pkt received Note. We only use min and max (0, 15) settings from mit_table */ if( tp->flags & HAS_INTR_MITIGATION) { if( received > 1 ) { if( ! tp->mit_on ) { tp->mit_on = 1; iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11); } } else { if( tp->mit_on ) { tp->mit_on = 0; iowrite32(0, tp->base_addr + CSR11); } } } #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ tulip_refill_rx(dev); /* If RX ring is not full we are out of memory. */ if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; /* Remove us from polling list and enable RX intr. */ napi_complete(napi); iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); /* The last op happens after poll completion. Which means the following: * 1. it can race with disabling irqs in irq handler * 2. it can race with dise/enabling irqs in other poll threads * 3. if an irq raised after beginning loop, it will be immediately * triggered here. * * Summarizing: the logic results in some redundant irqs both * due to races in masking and due to too late acking of already * processed irqs. But it must not result in losing events. */ return work_done; not_done: if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) tulip_refill_rx(dev); if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; return work_done; oom: /* Executed with RX ints disabled */ /* Start timer, stop polling, but do not enable rx interrupts. */ mod_timer(&tp->oom_timer, jiffies+1); /* Think: timer_pending() was an explicit signature of bug. * Timer can be pending now but fired and completed * before we did napi_complete(). See? We would lose it. 
*/ /* remove ourselves from the polling list */ napi_complete(napi); return work_done; } #else /* CONFIG_TULIP_NAPI */ static int tulip_rx(struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); int entry = tp->cur_rx % RX_RING_SIZE; int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx; int received = 0; if (tulip_debug > 4) netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", entry, tp->rx_ring[entry].status); /* If we own the next entry, it is a new packet. Send it up. */ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { s32 status = le32_to_cpu(tp->rx_ring[entry].status); short pkt_len; if (tulip_debug > 5) netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", entry, status); if (--rx_work_limit < 0) break; /* Omit the four octet CRC from the length. (May not be considered valid until we have checked status for RxLengthOver2047 bits) */ pkt_len = ((status >> 16) & 0x7ff) - 4; /* Maximum pkt_len is 1518 (1514 + vlan header) Anything higher than this is always invalid regardless of RxLengthOver2047 bits */ if ((status & (RxLengthOver2047 | RxDescCRCError | RxDescCollisionSeen | RxDescRunt | RxDescDescErr | RxWholePkt)) != RxWholePkt || pkt_len > 1518) { if ((status & (RxLengthOver2047 | RxWholePkt)) != RxWholePkt) { /* Ingore earlier buffers. */ if ((status & 0xffff) != 0x7fff) { if (tulip_debug > 1) netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", status); dev->stats.rx_length_errors++; } } else { /* There was a fatal error. 
*/ if (tulip_debug > 2) netdev_dbg(dev, "Receive error, Rx status %08x\n", status); dev->stats.rx_errors++; /* end of a packet.*/ if (pkt_len > 1518 || (status & RxDescRunt)) dev->stats.rx_length_errors++; if (status & 0x0004) dev->stats.rx_frame_errors++; if (status & 0x0002) dev->stats.rx_crc_errors++; if (status & 0x0001) dev->stats.rx_fifo_errors++; } } else { struct sk_buff *skb; /* Check if the packet is long enough to accept without copying to a minimally-sized skbuff. */ if (pkt_len < tulip_rx_copybreak && (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { skb_reserve(skb, 2); /* 16 byte align the IP header */ pci_dma_sync_single_for_cpu(tp->pdev, tp->rx_buffers[entry].mapping, pkt_len, PCI_DMA_FROMDEVICE); #if ! defined(__alpha__) skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, pkt_len); skb_put(skb, pkt_len); #else memcpy(skb_put(skb, pkt_len), tp->rx_buffers[entry].skb->data, pkt_len); #endif pci_dma_sync_single_for_device(tp->pdev, tp->rx_buffers[entry].mapping, pkt_len, PCI_DMA_FROMDEVICE); } else { /* Pass up the skb already on the Rx ring. */ char *temp = skb_put(skb = tp->rx_buffers[entry].skb, pkt_len); #ifndef final_version if (tp->rx_buffers[entry].mapping != le32_to_cpu(tp->rx_ring[entry].buffer1)) { dev_err(&dev->dev, "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. 
%Lx %p / %p\n", le32_to_cpu(tp->rx_ring[entry].buffer1), (long long)tp->rx_buffers[entry].mapping, skb->head, temp); } #endif pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); tp->rx_buffers[entry].skb = NULL; tp->rx_buffers[entry].mapping = 0; } skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } received++; entry = (++tp->cur_rx) % RX_RING_SIZE; } return received; } #endif /* CONFIG_TULIP_NAPI */ static inline unsigned int phy_interrupt (struct net_device *dev) { #ifdef __hppa__ struct tulip_private *tp = netdev_priv(dev); int csr12 = ioread32(tp->base_addr + CSR12) & 0xff; if (csr12 != tp->csr12_shadow) { /* ack interrupt */ iowrite32(csr12 | 0x02, tp->base_addr + CSR12); tp->csr12_shadow = csr12; /* do link change stuff */ spin_lock(&tp->lock); tulip_check_duplex(dev); spin_unlock(&tp->lock); /* clear irq ack bit */ iowrite32(csr12 & ~0x02, tp->base_addr + CSR12); return 1; } #endif return 0; } /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */ irqreturn_t tulip_interrupt(int irq, void *dev_instance) { struct net_device *dev = (struct net_device *)dev_instance; struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; int csr5; int missed; int rx = 0; int tx = 0; int oi = 0; int maxrx = RX_RING_SIZE; int maxtx = TX_RING_SIZE; int maxoi = TX_RING_SIZE; #ifdef CONFIG_TULIP_NAPI int rxd = 0; #else int entry; #endif unsigned int work_count = tulip_max_interrupt_work; unsigned int handled = 0; /* Let's see whether the interrupt really is for us */ csr5 = ioread32(ioaddr + CSR5); if (tp->flags & HAS_PHY_IRQ) handled = phy_interrupt (dev); if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) return IRQ_RETVAL(handled); tp->nir++; do { #ifdef CONFIG_TULIP_NAPI if (!rxd && (csr5 & (RxIntr | RxNoBuf))) { rxd++; /* Mask RX intrs and add the device to poll list. 
*/ iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); napi_schedule(&tp->napi); if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) break; } /* Acknowledge the interrupt sources we handle here ASAP the poll function does Rx and RxNoBuf acking */ iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5); #else /* Acknowledge all of the current interrupt sources ASAP. */ iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5); if (csr5 & (RxIntr | RxNoBuf)) { rx += tulip_rx(dev); tulip_refill_rx(dev); } #endif /* CONFIG_TULIP_NAPI */ if (tulip_debug > 4) netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n", csr5, ioread32(ioaddr + CSR5)); if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) { unsigned int dirty_tx; spin_lock(&tp->lock); for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; dirty_tx++) { int entry = dirty_tx % TX_RING_SIZE; int status = le32_to_cpu(tp->tx_ring[entry].status); if (status < 0) break; /* It still has not been Txed */ /* Check for Rx filter setup frames. */ if (tp->tx_buffers[entry].skb == NULL) { /* test because dummy frames not mapped */ if (tp->tx_buffers[entry].mapping) pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, sizeof(tp->setup_frame), PCI_DMA_TODEVICE); continue; } if (status & 0x8000) { /* There was an major error, log it. */ #ifndef final_version if (tulip_debug > 1) netdev_dbg(dev, "Transmit error, Tx status %08x\n", status); #endif dev->stats.tx_errors++; if (status & 0x4104) dev->stats.tx_aborted_errors++; if (status & 0x0C00) dev->stats.tx_carrier_errors++; if (status & 0x0200) dev->stats.tx_window_errors++; if (status & 0x0002) dev->stats.tx_fifo_errors++; if ((status & 0x0080) && tp->full_duplex == 0) dev->stats.tx_heartbeat_errors++; } else { dev->stats.tx_bytes += tp->tx_buffers[entry].skb->len; dev->stats.collisions += (status >> 3) & 15; dev->stats.tx_packets++; } pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, tp->tx_buffers[entry].skb->len, PCI_DMA_TODEVICE); /* Free the original skb. 
*/ dev_kfree_skb_irq(tp->tx_buffers[entry].skb); tp->tx_buffers[entry].skb = NULL; tp->tx_buffers[entry].mapping = 0; tx++; } #ifndef final_version if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { dev_err(&dev->dev, "Out-of-sync dirty pointer, %d vs. %d\n", dirty_tx, tp->cur_tx); dirty_tx += TX_RING_SIZE; } #endif if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) netif_wake_queue(dev); tp->dirty_tx = dirty_tx; if (csr5 & TxDied) { if (tulip_debug > 2) dev_warn(&dev->dev, "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n", csr5, ioread32(ioaddr + CSR6), tp->csr6); tulip_restart_rxtx(tp); } spin_unlock(&tp->lock); } /* Log errors. */ if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */ if (csr5 == 0xffffffff) break; if (csr5 & TxJabber) dev->stats.tx_errors++; if (csr5 & TxFIFOUnderflow) { if ((tp->csr6 & 0xC000) != 0xC000) tp->csr6 += 0x4000; /* Bump up the Tx threshold */ else tp->csr6 |= 0x00200000; /* Store-n-forward. */ /* Restart the transmit process. */ tulip_restart_rxtx(tp); iowrite32(0, ioaddr + CSR1); } if (csr5 & (RxDied | RxNoBuf)) { if (tp->flags & COMET_MAC_ADDR) { iowrite32(tp->mc_filter[0], ioaddr + 0xAC); iowrite32(tp->mc_filter[1], ioaddr + 0xB0); } } if (csr5 & RxDied) { /* Missed a Rx frame. */ dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; dev->stats.rx_errors++; tulip_start_rxtx(tp); } /* * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this * call is ever done under the spinlock */ if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) { if (tp->link_change) (tp->link_change)(dev, csr5); } if (csr5 & SystemError) { int error = (csr5 >> 23) & 7; /* oops, we hit a PCI error. The code produced corresponds * to the reason: * 0 - parity error * 1 - master abort * 2 - target abort * Note that on parity error, we should do a software reset * of the chip to get it back into a sane state (according * to the 21142/3 docs that is). 
* -- rmk */ dev_err(&dev->dev, "(%lu) System Error occurred (%d)\n", tp->nir, error); } /* Clear all error sources, included undocumented ones! */ iowrite32(0x0800f7ba, ioaddr + CSR5); oi++; } if (csr5 & TimerInt) { if (tulip_debug > 2) dev_err(&dev->dev, "Re-enabling interrupts, %08x\n", csr5); iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); tp->ttimer = 0; oi++; } if (tx > maxtx || rx > maxrx || oi > maxoi) { if (tulip_debug > 1) dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n", csr5, tp->nir, tx, rx, oi); /* Acknowledge all interrupt sources. */ iowrite32(0x8001ffff, ioaddr + CSR5); if (tp->flags & HAS_INTR_MITIGATION) { /* Josip Loncaric at ICASE did extensive experimentation to develop a good interrupt mitigation setting.*/ iowrite32(0x8b240000, ioaddr + CSR11); } else if (tp->chip_id == LC82C168) { /* the LC82C168 doesn't have a hw timer.*/ iowrite32(0x00, ioaddr + CSR7); mod_timer(&tp->timer, RUN_AT(HZ/50)); } else { /* Mask all interrupting sources, set timer to re-enable. */ iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7); iowrite32(0x0012, ioaddr + CSR11); } break; } work_count--; if (work_count == 0) break; csr5 = ioread32(ioaddr + CSR5); #ifdef CONFIG_TULIP_NAPI if (rxd) csr5 &= ~RxPollInt; } while ((csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt | /* Abnormal intr. 
*/ RxDied | TxFIFOUnderflow | TxJabber | TPLnkFail | SystemError )) != 0); #else } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0); tulip_refill_rx(dev); /* check if the card is in suspend mode */ entry = tp->dirty_rx % RX_RING_SIZE; if (tp->rx_buffers[entry].skb == NULL) { if (tulip_debug > 1) dev_warn(&dev->dev, "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", tp->nir, tp->cur_rx, tp->ttimer, rx); if (tp->chip_id == LC82C168) { iowrite32(0x00, ioaddr + CSR7); mod_timer(&tp->timer, RUN_AT(HZ/50)); } else { if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) { if (tulip_debug > 1) dev_warn(&dev->dev, "in rx suspend mode: (%lu) set timer\n", tp->nir); iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt, ioaddr + CSR7); iowrite32(TimerInt, ioaddr + CSR5); iowrite32(12, ioaddr + CSR11); tp->ttimer = 1; } } } #endif /* CONFIG_TULIP_NAPI */ if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) { dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed; } if (tulip_debug > 4) netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n", ioread32(ioaddr + CSR5)); return IRQ_HANDLED; }
gpl-2.0
buggerman/android_kernel_htc_m8_ace
arch/arm/mach-omap2/powerdomain44xx.c
4845
5775
/* * OMAP4 powerdomain control * * Copyright (C) 2009-2010 Texas Instruments, Inc. * Copyright (C) 2007-2009 Nokia Corporation * * Derived from mach-omap2/powerdomain.c written by Paul Walmsley * Rajendra Nayak <rnayak@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/io.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/bug.h> #include "powerdomain.h" #include <plat/prcm.h> #include "prm2xxx_3xxx.h" #include "prm44xx.h" #include "prminst44xx.h" #include "prm-regbits-44xx.h" static int omap4_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) { omap4_prminst_rmw_inst_reg_bits(OMAP_POWERSTATE_MASK, (pwrst << OMAP_POWERSTATE_SHIFT), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_read_next_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= OMAP_POWERSTATE_MASK; v >>= OMAP_POWERSTATE_SHIFT; return v; } static int omap4_pwrdm_read_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP_POWERSTATEST_MASK; v >>= OMAP_POWERSTATEST_SHIFT; return v; } static int omap4_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP4430_LASTPOWERSTATEENTERED_MASK; v >>= OMAP4430_LASTPOWERSTATEENTERED_SHIFT; return v; } static int omap4_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm) { omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOWPOWERSTATECHANGE_MASK, (1 << OMAP4430_LOWPOWERSTATECHANGE_SHIFT), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm) { 
omap4_prminst_rmw_inst_reg_bits(OMAP4430_LASTPOWERSTATEENTERED_MASK, OMAP4430_LASTPOWERSTATEENTERED_MASK, pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); return 0; } static int omap4_pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst) { u32 v; v = pwrst << __ffs(OMAP4430_LOGICRETSTATE_MASK); omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOGICRETSTATE_MASK, v, pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank, u8 pwrst) { u32 m; m = omap2_pwrdm_get_mem_bank_onstate_mask(bank); omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank, u8 pwrst) { u32 m; m = omap2_pwrdm_get_mem_bank_retst_mask(bank); omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP4430_LOGICSTATEST_MASK; v >>= OMAP4430_LOGICSTATEST_SHIFT; return v; } static int omap4_pwrdm_read_logic_retst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= OMAP4430_LOGICRETSTATE_MASK; v >>= OMAP4430_LOGICRETSTATE_SHIFT; return v; } static int omap4_pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank) { u32 m, v; m = omap2_pwrdm_get_mem_bank_stst_mask(bank); v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= m; v >>= __ffs(m); return v; } static int omap4_pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank) { u32 m, v; m = omap2_pwrdm_get_mem_bank_retst_mask(bank); v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= m; v >>= __ffs(m); return 
v; } static int omap4_pwrdm_wait_transition(struct powerdomain *pwrdm) { u32 c = 0; /* * REVISIT: pwrdm_wait_transition() may be better implemented * via a callback and a periodic timer check -- how long do we expect * powerdomain transitions to take? */ /* XXX Is this udelay() value meaningful? */ while ((omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST) & OMAP_INTRANSITION_MASK) && (c++ < PWRDM_TRANSITION_BAILOUT)) udelay(1); if (c > PWRDM_TRANSITION_BAILOUT) { printk(KERN_ERR "powerdomain: waited too long for " "powerdomain %s to complete transition\n", pwrdm->name); return -EAGAIN; } pr_debug("powerdomain: completed transition in %d loops\n", c); return 0; } struct pwrdm_ops omap4_pwrdm_operations = { .pwrdm_set_next_pwrst = omap4_pwrdm_set_next_pwrst, .pwrdm_read_next_pwrst = omap4_pwrdm_read_next_pwrst, .pwrdm_read_pwrst = omap4_pwrdm_read_pwrst, .pwrdm_read_prev_pwrst = omap4_pwrdm_read_prev_pwrst, .pwrdm_set_lowpwrstchange = omap4_pwrdm_set_lowpwrstchange, .pwrdm_clear_all_prev_pwrst = omap4_pwrdm_clear_all_prev_pwrst, .pwrdm_set_logic_retst = omap4_pwrdm_set_logic_retst, .pwrdm_read_logic_pwrst = omap4_pwrdm_read_logic_pwrst, .pwrdm_read_logic_retst = omap4_pwrdm_read_logic_retst, .pwrdm_read_mem_pwrst = omap4_pwrdm_read_mem_pwrst, .pwrdm_read_mem_retst = omap4_pwrdm_read_mem_retst, .pwrdm_set_mem_onst = omap4_pwrdm_set_mem_onst, .pwrdm_set_mem_retst = omap4_pwrdm_set_mem_retst, .pwrdm_wait_transition = omap4_pwrdm_wait_transition, };
gpl-2.0
kostoulhs/android_kernel_samsung_expressltexx
drivers/scsi/bfa/bfa_fcbuild.c
5613
38090
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * fcbuild.c - FC link service frame building and parsing routines */ #include "bfad_drv.h" #include "bfa_fcbuild.h" /* * static build functions */ static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id); static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id); static struct fchs_s fc_els_req_tmpl; static struct fchs_s fc_els_rsp_tmpl; static struct fchs_s fc_bls_req_tmpl; static struct fchs_s fc_bls_rsp_tmpl; static struct fc_ba_acc_s ba_acc_tmpl; static struct fc_logi_s plogi_tmpl; static struct fc_prli_s prli_tmpl; static struct fc_rrq_s rrq_tmpl; static struct fchs_s fcp_fchs_tmpl; void fcbuild_init(void) { /* * fc_els_req_tmpl */ fc_els_req_tmpl.routing = FC_RTG_EXT_LINK; fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST; fc_els_req_tmpl.type = FC_TYPE_ELS; fc_els_req_tmpl.f_ctl = bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fc_els_req_tmpl.rx_id = FC_RXID_ANY; /* * fc_els_rsp_tmpl */ fc_els_rsp_tmpl.routing = FC_RTG_EXT_LINK; fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY; fc_els_rsp_tmpl.type = FC_TYPE_ELS; fc_els_rsp_tmpl.f_ctl = bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fc_els_rsp_tmpl.rx_id = FC_RXID_ANY; /* * fc_bls_req_tmpl */ fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK; fc_bls_req_tmpl.type = FC_TYPE_BLS; fc_bls_req_tmpl.f_ctl = 
bfa_hton3b(FCTL_END_SEQ | FCTL_SI_XFER); fc_bls_req_tmpl.rx_id = FC_RXID_ANY; /* * fc_bls_rsp_tmpl */ fc_bls_rsp_tmpl.routing = FC_RTG_BASIC_LINK; fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC; fc_bls_rsp_tmpl.type = FC_TYPE_BLS; fc_bls_rsp_tmpl.f_ctl = bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY; /* * ba_acc_tmpl */ ba_acc_tmpl.seq_id_valid = 0; ba_acc_tmpl.low_seq_cnt = 0; ba_acc_tmpl.high_seq_cnt = 0xFFFF; /* * plogi_tmpl */ plogi_tmpl.csp.verhi = FC_PH_VER_PH_3; plogi_tmpl.csp.verlo = FC_PH_VER_4_3; plogi_tmpl.csp.ciro = 0x1; plogi_tmpl.csp.cisc = 0x0; plogi_tmpl.csp.altbbcred = 0x0; plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF); plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002); plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000); plogi_tmpl.class3.class_valid = 1; plogi_tmpl.class3.sequential = 1; plogi_tmpl.class3.conseq = 0xFF; plogi_tmpl.class3.ospx = 1; /* * prli_tmpl */ prli_tmpl.command = FC_ELS_PRLI; prli_tmpl.pglen = 0x10; prli_tmpl.pagebytes = cpu_to_be16(0x0014); prli_tmpl.parampage.type = FC_TYPE_FCP; prli_tmpl.parampage.imagepair = 1; prli_tmpl.parampage.servparams.rxrdisab = 1; /* * rrq_tmpl */ rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ; /* * fcp_struct fchs_s mpl */ fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA; fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD; fcp_fchs_tmpl.type = FC_TYPE_FCP; fcp_fchs_tmpl.f_ctl = bfa_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fcp_fchs_tmpl.seq_id = 1; fcp_fchs_tmpl.rx_id = FC_RXID_ANY; } static void fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) { memset(fchs, 0, sizeof(struct fchs_s)); fchs->routing = FC_RTG_FC4_DEV_DATA; fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; fchs->type = FC_TYPE_SERVICES; fchs->f_ctl = bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fchs->rx_id = FC_RXID_ANY; fchs->d_id = (d_id); fchs->s_id = (s_id); fchs->ox_id = cpu_to_be16(ox_id); /* * @todo no need to set ox_id for request * no 
need to set rx_id for response */ } static void fc_gsresp_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) { memset(fchs, 0, sizeof(struct fchs_s)); fchs->routing = FC_RTG_FC4_DEV_DATA; fchs->cat_info = FC_CAT_SOLICIT_CTRL; fchs->type = FC_TYPE_SERVICES; fchs->f_ctl = bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fchs->d_id = d_id; fchs->s_id = s_id; fchs->ox_id = ox_id; } void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) { memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); fchs->d_id = (d_id); fchs->s_id = (s_id); fchs->ox_id = cpu_to_be16(ox_id); } static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) { memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); fchs->d_id = d_id; fchs->s_id = s_id; fchs->ox_id = ox_id; } enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len) { struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd; len = len; switch (els_cmd->els_code) { case FC_ELS_LS_RJT: if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) return FC_PARSE_BUSY; else return FC_PARSE_FAILURE; case FC_ELS_ACC: return FC_PARSE_OK; } return FC_PARSE_OK; } static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) { memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); fchs->d_id = d_id; fchs->s_id = s_id; fchs->ox_id = ox_id; } static u16 fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u16 bb_cr, u8 els_code) { struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); plogi->els_cmd.els_code = els_code; if (els_code == FC_ELS_PLOGI) fc_els_req_build(fchs, d_id, s_id, ox_id); else fc_els_rsp_build(fchs, d_id, s_id, ox_id); plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size); plogi->csp.bbcred = 
cpu_to_be16(bb_cr); memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); return sizeof(struct fc_logi_s); } u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u8 set_npiv, u8 set_auth, u16 local_bb_credits) { u32 d_id = bfa_hton3b(FC_FABRIC_PORT); __be32 *vvl_info; memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); flogi->els_cmd.els_code = FC_ELS_FLOGI; fc_els_req_build(fchs, d_id, s_id, ox_id); flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); flogi->port_name = port_name; flogi->node_name = node_name; /* * Set the NPIV Capability Bit ( word 1, bit 31) of Common * Service Parameters. */ flogi->csp.ciro = set_npiv; /* set AUTH capability */ flogi->csp.security = set_auth; flogi->csp.bbcred = cpu_to_be16(local_bb_credits); /* Set brcd token in VVL */ vvl_info = (u32 *)&flogi->vvl[0]; /* set the flag to indicate the presence of VVL */ flogi->csp.npiv_supp = 1; /* @todo. 
field name is not correct */ vvl_info[0] = cpu_to_be32(FLOGI_VVL_BRCD); return sizeof(struct fc_logi_s); } u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u16 local_bb_credits, u8 bb_scn) { u32 d_id = 0; u16 bbscn_rxsz = (bb_scn << 12) | pdu_size; memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); fc_els_rsp_build(fchs, d_id, s_id, ox_id); flogi->els_cmd.els_code = FC_ELS_ACC; flogi->class3.rxsz = cpu_to_be16(pdu_size); flogi->csp.rxsz = cpu_to_be16(bbscn_rxsz); /* bb_scn/rxsz */ flogi->port_name = port_name; flogi->node_name = node_name; flogi->csp.bbcred = cpu_to_be16(local_bb_credits); return sizeof(struct fc_logi_s); } u16 fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size) { u32 d_id = bfa_hton3b(FC_FABRIC_PORT); memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); flogi->els_cmd.els_code = FC_ELS_FDISC; fc_els_req_build(fchs, d_id, s_id, ox_id); flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); flogi->port_name = port_name; flogi->node_name = node_name; return sizeof(struct fc_logi_s); } u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u16 bb_cr) { return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, node_name, pdu_size, bb_cr, FC_ELS_PLOGI); } u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u16 bb_cr) { return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, node_name, pdu_size, bb_cr, FC_ELS_ACC); } enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) { struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); struct fc_logi_s *plogi; struct fc_ls_rjt_s *ls_rjt; switch (els_cmd->els_code) { case FC_ELS_LS_RJT: ls_rjt = (struct fc_ls_rjt_s *) (fchs 
+ 1); if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) return FC_PARSE_BUSY; else return FC_PARSE_FAILURE; case FC_ELS_ACC: plogi = (struct fc_logi_s *) (fchs + 1); if (len < sizeof(struct fc_logi_s)) return FC_PARSE_FAILURE; if (!wwn_is_equal(plogi->port_name, port_name)) return FC_PARSE_FAILURE; if (!plogi->class3.class_valid) return FC_PARSE_FAILURE; if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) return FC_PARSE_FAILURE; return FC_PARSE_OK; default: return FC_PARSE_FAILURE; } } enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs) { struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1); if (plogi->class3.class_valid != 1) return FC_PARSE_FAILURE; if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ) || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ) || (plogi->class3.rxsz == 0)) return FC_PARSE_FAILURE; return FC_PARSE_OK; } u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id) { struct fc_prli_s *prli = (struct fc_prli_s *) (pld); fc_els_req_build(fchs, d_id, s_id, ox_id); memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); prli->command = FC_ELS_PRLI; prli->parampage.servparams.initiator = 1; prli->parampage.servparams.retry = 1; prli->parampage.servparams.rec_support = 1; prli->parampage.servparams.task_retry_id = 0; prli->parampage.servparams.confirm = 1; return sizeof(struct fc_prli_s); } u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, __be16 ox_id, enum bfa_lport_role role) { struct fc_prli_s *prli = (struct fc_prli_s *) (pld); fc_els_rsp_build(fchs, d_id, s_id, ox_id); memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); prli->command = FC_ELS_ACC; prli->parampage.servparams.initiator = 1; prli->parampage.rspcode = FC_PRLI_ACC_XQTD; return sizeof(struct fc_prli_s); } enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len) { if (len < sizeof(struct fc_prli_s)) return FC_PARSE_FAILURE; if (prli->command != FC_ELS_ACC) return FC_PARSE_FAILURE; if ((prli->parampage.rspcode 
!= FC_PRLI_ACC_XQTD) && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG)) return FC_PARSE_FAILURE; if (prli->parampage.servparams.target != 1) return FC_PARSE_FAILURE; return FC_PARSE_OK; } enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli) { if (prli->parampage.type != FC_TYPE_FCP) return FC_PARSE_FAILURE; if (!prli->parampage.imagepair) return FC_PARSE_FAILURE; if (!prli->parampage.servparams.initiator) return FC_PARSE_FAILURE; return FC_PARSE_OK; } u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name) { fc_els_req_build(fchs, d_id, s_id, ox_id); memset(logo, '\0', sizeof(struct fc_logo_s)); logo->els_cmd.els_code = FC_ELS_LOGO; logo->nport_id = (s_id); logo->orig_port_name = port_name; return sizeof(struct fc_logo_s); } static u16 fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, u8 els_code) { memset(adisc, '\0', sizeof(struct fc_adisc_s)); adisc->els_cmd.els_code = els_code; if (els_code == FC_ELS_ADISC) fc_els_req_build(fchs, d_id, s_id, ox_id); else fc_els_rsp_build(fchs, d_id, s_id, ox_id); adisc->orig_HA = 0; adisc->orig_port_name = port_name; adisc->orig_node_name = node_name; adisc->nport_id = (s_id); return sizeof(struct fc_adisc_s); } u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name) { return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, node_name, FC_ELS_ADISC); } u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name) { return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, node_name, FC_ELS_ACC); } enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name, wwn_t node_name) { if (len < sizeof(struct fc_adisc_s)) return FC_PARSE_FAILURE; if (adisc->els_cmd.els_code != FC_ELS_ACC) 
return FC_PARSE_FAILURE; if (!wwn_is_equal(adisc->orig_port_name, port_name)) return FC_PARSE_FAILURE; return FC_PARSE_OK; } enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name, wwn_t port_name) { struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; if (adisc->els_cmd.els_code != FC_ELS_ACC) return FC_PARSE_FAILURE; if ((adisc->nport_id == (host_dap)) && wwn_is_equal(adisc->orig_port_name, port_name) && wwn_is_equal(adisc->orig_node_name, node_name)) return FC_PARSE_OK; return FC_PARSE_FAILURE; } enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) { struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); if (pdisc->class3.class_valid != 1) return FC_PARSE_FAILURE; if ((be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ - sizeof(struct fchs_s))) || (pdisc->class3.rxsz == 0)) return FC_PARSE_FAILURE; if (!wwn_is_equal(pdisc->port_name, port_name)) return FC_PARSE_FAILURE; if (!wwn_is_equal(pdisc->node_name, node_name)) return FC_PARSE_FAILURE; return FC_PARSE_OK; } u16 fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) { memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); fchs->cat_info = FC_CAT_ABTS; fchs->d_id = (d_id); fchs->s_id = (s_id); fchs->ox_id = cpu_to_be16(ox_id); return sizeof(struct fchs_s); } enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *fchs, int len) { if ((fchs->cat_info == FC_CAT_BA_ACC) || (fchs->cat_info == FC_CAT_BA_RJT)) return FC_PARSE_OK; return FC_PARSE_FAILURE; } u16 fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id, u16 ox_id, u16 rrq_oxid) { fc_els_req_build(fchs, d_id, s_id, ox_id); /* * build rrq payload */ memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); rrq->s_id = (s_id); rrq->ox_id = cpu_to_be16(rrq_oxid); rrq->rx_id = FC_RXID_ANY; return sizeof(struct fc_rrq_s); } u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, __be16 ox_id) { struct fc_els_cmd_s *acc = pld; 
fc_els_rsp_build(fchs, d_id, s_id, ox_id); memset(acc, 0, sizeof(struct fc_els_cmd_s)); acc->els_code = FC_ELS_ACC; return sizeof(struct fc_els_cmd_s); } u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, u32 s_id, __be16 ox_id, u8 reason_code, u8 reason_code_expl) { fc_els_rsp_build(fchs, d_id, s_id, ox_id); memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT; ls_rjt->reason_code = reason_code; ls_rjt->reason_code_expl = reason_code_expl; ls_rjt->vendor_unique = 0x00; return sizeof(struct fc_ls_rjt_s); } u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, u32 s_id, __be16 ox_id, u16 rx_id) { fc_bls_rsp_build(fchs, d_id, s_id, ox_id); memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s)); fchs->rx_id = rx_id; ba_acc->ox_id = fchs->ox_id; ba_acc->rx_id = fchs->rx_id; return sizeof(struct fc_ba_acc_s); } u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id, u32 s_id, __be16 ox_id) { fc_els_rsp_build(fchs, d_id, s_id, ox_id); memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); els_cmd->els_code = FC_ELS_ACC; return sizeof(struct fc_els_cmd_s); } int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code) { int num_pages = 0; struct fc_prlo_s *prlo; struct fc_tprlo_s *tprlo; if (els_code == FC_ELS_PRLO) { prlo = (struct fc_prlo_s *) (fc_frame + 1); num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16; } else { tprlo = (struct fc_tprlo_s *) (fc_frame + 1); num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16; } return num_pages; } u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, u32 d_id, u32 s_id, __be16 ox_id, int num_pages) { int page; fc_els_rsp_build(fchs, d_id, s_id, ox_id); memset(tprlo_acc, 0, (num_pages * 16) + 4); tprlo_acc->command = FC_ELS_ACC; tprlo_acc->page_len = 0x10; tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { 
tprlo_acc->tprlo_acc_params[page].opa_valid = 0; tprlo_acc->tprlo_acc_params[page].rpa_valid = 0; tprlo_acc->tprlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; } return be16_to_cpu(tprlo_acc->payload_len); } u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, u32 s_id, __be16 ox_id, int num_pages) { int page; fc_els_rsp_build(fchs, d_id, s_id, ox_id); memset(prlo_acc, 0, (num_pages * 16) + 4); prlo_acc->command = FC_ELS_ACC; prlo_acc->page_len = 0x10; prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { prlo_acc->prlo_acc_params[page].opa_valid = 0; prlo_acc->prlo_acc_params[page].rpa_valid = 0; prlo_acc->prlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; prlo_acc->prlo_acc_params[page].orig_process_assc = 0; prlo_acc->prlo_acc_params[page].resp_process_assc = 0; } return be16_to_cpu(prlo_acc->payload_len); } u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, u32 s_id, u16 ox_id, u32 data_format) { fc_els_req_build(fchs, d_id, s_id, ox_id); memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); rnid->els_cmd.els_code = FC_ELS_RNID; rnid->node_id_data_format = data_format; return sizeof(struct fc_rnid_cmd_s); } u16 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id, __be16 ox_id, u32 data_format, struct fc_rnid_common_id_data_s *common_id_data, struct fc_rnid_general_topology_data_s *gen_topo_data) { memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); fc_els_rsp_build(fchs, d_id, s_id, ox_id); rnid_acc->els_cmd.els_code = FC_ELS_ACC; rnid_acc->node_id_data_format = data_format; rnid_acc->common_id_data_length = sizeof(struct fc_rnid_common_id_data_s); rnid_acc->common_id_data = *common_id_data; if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) { rnid_acc->specific_id_data_length = sizeof(struct 
fc_rnid_general_topology_data_s); rnid_acc->gen_topology_data = *gen_topo_data; return sizeof(struct fc_rnid_acc_s); } else { return sizeof(struct fc_rnid_acc_s) - sizeof(struct fc_rnid_general_topology_data_s); } } u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, u32 s_id, u16 ox_id) { fc_els_req_build(fchs, d_id, s_id, ox_id); memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); rpsc->els_cmd.els_code = FC_ELS_RPSC; return sizeof(struct fc_rpsc_cmd_s); } u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id, u32 s_id, u32 *pid_list, u16 npids) { u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_hton3b(d_id)); int i = 0; fc_els_req_build(fchs, bfa_hton3b(dctlr_id), s_id, 0); memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); rpsc2->els_cmd.els_code = FC_ELS_RPSC; rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN); rpsc2->num_pids = cpu_to_be16(npids); for (i = 0; i < npids; i++) rpsc2->pid_list[i].pid = pid_list[i]; return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * (sizeof(u32))); } u16 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id, __be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed) { memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); fc_els_rsp_build(fchs, d_id, s_id, ox_id); rpsc_acc->command = FC_ELS_ACC; rpsc_acc->num_entries = cpu_to_be16(1); rpsc_acc->speed_info[0].port_speed_cap = cpu_to_be16(oper_speed->port_speed_cap); rpsc_acc->speed_info[0].port_op_speed = cpu_to_be16(oper_speed->port_op_speed); return sizeof(struct fc_rpsc_acc_s); } u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len) { struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); len = len; if (els_cmd->els_code != FC_ELS_ACC) return FC_PARSE_FAILURE; return FC_PARSE_OK; } u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size) { struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); 
pdisc->els_cmd.els_code = FC_ELS_PDISC; fc_els_req_build(fchs, d_id, s_id, ox_id); pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size); pdisc->port_name = port_name; pdisc->node_name = node_name; return sizeof(struct fc_logi_s); } u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) { struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); if (len < sizeof(struct fc_logi_s)) return FC_PARSE_LEN_INVAL; if (pdisc->els_cmd.els_code != FC_ELS_ACC) return FC_PARSE_ACC_INVAL; if (!wwn_is_equal(pdisc->port_name, port_name)) return FC_PARSE_PWWN_NOT_EQUAL; if (!pdisc->class3.class_valid) return FC_PARSE_NWWN_NOT_EQUAL; if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) return FC_PARSE_RXSZ_INVAL; return FC_PARSE_OK; } u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, int num_pages) { struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1); int page; fc_els_req_build(fchs, d_id, s_id, ox_id); memset(prlo, 0, (num_pages * 16) + 4); prlo->command = FC_ELS_PRLO; prlo->page_len = 0x10; prlo->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { prlo->prlo_params[page].type = FC_TYPE_FCP; prlo->prlo_params[page].opa_valid = 0; prlo->prlo_params[page].rpa_valid = 0; prlo->prlo_params[page].orig_process_assc = 0; prlo->prlo_params[page].resp_process_assc = 0; } return be16_to_cpu(prlo->payload_len); } u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len) { struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1); int num_pages = 0; int page = 0; len = len; if (prlo->command != FC_ELS_ACC) return FC_PARSE_FAILURE; num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16; for (page = 0; page < num_pages; page++) { if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP) return FC_PARSE_FAILURE; if (prlo->prlo_acc_params[page].opa_valid != 0) return FC_PARSE_FAILURE; if (prlo->prlo_acc_params[page].rpa_valid != 0) return FC_PARSE_FAILURE; if (prlo->prlo_acc_params[page].orig_process_assc != 0) 
return FC_PARSE_FAILURE; if (prlo->prlo_acc_params[page].resp_process_assc != 0) return FC_PARSE_FAILURE; } return FC_PARSE_OK; } u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id) { struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1); int page; fc_els_req_build(fchs, d_id, s_id, ox_id); memset(tprlo, 0, (num_pages * 16) + 4); tprlo->command = FC_ELS_TPRLO; tprlo->page_len = 0x10; tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { tprlo->tprlo_params[page].type = FC_TYPE_FCP; tprlo->tprlo_params[page].opa_valid = 0; tprlo->tprlo_params[page].rpa_valid = 0; tprlo->tprlo_params[page].orig_process_assc = 0; tprlo->tprlo_params[page].resp_process_assc = 0; if (tprlo_type == FC_GLOBAL_LOGO) { tprlo->tprlo_params[page].global_process_logout = 1; } else if (tprlo_type == FC_TPR_LOGO) { tprlo->tprlo_params[page].tpo_nport_valid = 1; tprlo->tprlo_params[page].tpo_nport_id = (tpr_id); } } return be16_to_cpu(tprlo->payload_len); } u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len) { struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1); int num_pages = 0; int page = 0; len = len; if (tprlo->command != FC_ELS_ACC) return FC_PARSE_ACC_INVAL; num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16; for (page = 0; page < num_pages; page++) { if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP) return FC_PARSE_NOT_FCP; if (tprlo->tprlo_acc_params[page].opa_valid != 0) return FC_PARSE_OPAFLAG_INVAL; if (tprlo->tprlo_acc_params[page].rpa_valid != 0) return FC_PARSE_RPAFLAG_INVAL; if (tprlo->tprlo_acc_params[page].orig_process_assc != 0) return FC_PARSE_OPA_INVAL; if (tprlo->tprlo_acc_params[page].resp_process_assc != 0) return FC_PARSE_RPA_INVAL; } return FC_PARSE_OK; } enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *fchs, int len) { struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); len = len; if 
(els_cmd->els_code != FC_ELS_ACC) return FC_PARSE_FAILURE; return FC_PARSE_OK; } u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id, u32 reason_code, u32 reason_expl) { struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); fc_bls_rsp_build(fchs, d_id, s_id, ox_id); fchs->cat_info = FC_CAT_BA_RJT; ba_rjt->reason_code = reason_code; ba_rjt->reason_expl = reason_expl; return sizeof(struct fc_ba_rjt_s); } static void fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) { memset(cthdr, 0, sizeof(struct ct_hdr_s)); cthdr->rev_id = CT_GS3_REVISION; cthdr->gs_type = CT_GSTYPE_DIRSERVICE; cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER; cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); } static void fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) { memset(cthdr, 0, sizeof(struct ct_hdr_s)); cthdr->rev_id = CT_GS3_REVISION; cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER; cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); } static void fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code, u8 sub_type) { memset(cthdr, 0, sizeof(struct ct_hdr_s)); cthdr->rev_id = CT_GS3_REVISION; cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; cthdr->gs_sub_type = sub_type; cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); } u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, wwn_t port_name) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); gidpn->port_name = port_name; return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s); } u16 fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u32 port_id) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 
1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); gpnid->dap = port_id; return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s); } u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u32 port_id) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); gnnid->dap = port_id; return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); } u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr) { if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY) return FC_PARSE_BUSY; else return FC_PARSE_FAILURE; } return FC_PARSE_OK; } u16 fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr, u32 d_id, u32 s_id, u16 ox_id, u8 reason_code, u8 reason_code_expl) { fc_gsresp_fchdr_build(fchs, d_id, s_id, ox_id); cthdr->cmd_rsp_code = cpu_to_be16(CT_RSP_REJECT); cthdr->rev_id = CT_GS3_REVISION; cthdr->reason_code = reason_code; cthdr->exp_code = reason_code_expl; return sizeof(struct ct_hdr_s); } u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg, u32 s_id, u16 ox_id) { u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER); fc_els_req_build(fchs, d_id, s_id, ox_id); memset(scr, 0, sizeof(struct fc_scr_s)); scr->command = FC_ELS_SCR; scr->reg_func = FC_SCR_REG_FUNC_FULL; if (set_br_reg) scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE; return sizeof(struct fc_scr_s); } u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id, u16 ox_id) { u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER); u16 payldlen; fc_els_req_build(fchs, d_id, s_id, ox_id); rscn->command = FC_ELS_RSCN; rscn->pagelen = sizeof(rscn->event[0]); payldlen = sizeof(u32) + 
rscn->pagelen; rscn->payldlen = cpu_to_be16(payldlen); rscn->event[0].format = FC_RSCN_FORMAT_PORTID; rscn->event[0].portid = s_id; return sizeof(struct fc_rscn_pl_s); } u16 fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, enum bfa_lport_role roles) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); u32 type_value, d_id = bfa_hton3b(FC_NAME_SERVER); u8 index; fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); rftid->dap = s_id; /* By default, FCP FC4 Type is registered */ index = FC_TYPE_FCP >> 5; type_value = 1 << (FC_TYPE_FCP % 32); rftid->fc4_type[index] = cpu_to_be32(type_value); return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u8 *fc4_bitmap, u32 bitmap_size) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); rftid->dap = s_id; memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, (bitmap_size < 32 ? 
bitmap_size : 32)); return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u8 fc4_type, u8 fc4_ftrs) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); rffid->dap = s_id; rffid->fc4ftr_bits = fc4_ftrs; rffid->fc4_type = fc4_type; return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u8 *name) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rspnid_req_s *rspnid = (struct fcgs_rspnid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); rspnid->dap = s_id; rspnid->spn_len = (u8) strlen((char *)name); strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len); return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT); memset(gidft, 0, sizeof(struct fcgs_gidft_req_s)); gidft->fc4_type = fc4_type; gidft->domain_id = 0; gidft->area_id = 0; return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, wwn_t port_name) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); 
fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); rpnid->port_id = port_id; rpnid->port_name = port_name; return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, wwn_t node_name) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s)); rnnid->port_id = port_id; rnnid->node_name = node_name; return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, u32 cos) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rcsid_req_s *rcsid = (struct fcgs_rcsid_req_s *) (cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); rcsid->port_id = port_id; rcsid->cos = cos; return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, u8 port_type) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); rptid->port_id = port_id; rptid->port_type = port_type; return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1); u32 d_id = 
bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); ganxt->port_id = port_id; return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); } /* * Builds fc hdr and ct hdr for FDMI requests. */ u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 cmd_code) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; u32 d_id = bfa_hton3b(FC_MGMT_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); return sizeof(struct ct_hdr_s); } /* * Given a FC4 Type, this function returns a fc4 type bitmask */ void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask) { u8 index; __be32 *ptr = (__be32 *) bit_mask; u32 type_value; /* * @todo : Check for bitmask size */ index = fc4_type >> 5; type_value = 1 << (fc4_type % 32); ptr[index] = cpu_to_be32(type_value); } /* * GMAL Request */ u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1); u32 d_id = bfa_hton3b(FC_MGMT_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, CT_GSSUBTYPE_CFGSERVER); memset(gmal, 0, sizeof(fcgs_gmal_req_t)); gmal->wwn = wwn; return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t); } /* * GFN (Get Fabric Name) Request */ u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); u32 d_id = bfa_hton3b(FC_MGMT_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, CT_GSSUBTYPE_CFGSERVER); memset(gfn, 0, sizeof(fcgs_gfn_req_t)); gfn->wwn = wwn; return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t); }
gpl-2.0
MattCrystal/oneXL
net/netlabel/netlabel_user.c
5613
3221
/* * NetLabel NETLINK Interface * * This file defines the NETLINK interface for the NetLabel system. The * NetLabel system manages static and dynamic label mappings for network * protocols such as CIPSO and RIPSO. * * Author: Paul Moore <paul@paul-moore.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/list.h> #include <linux/socket.h> #include <linux/audit.h> #include <linux/tty.h> #include <linux/security.h> #include <linux/gfp.h> #include <net/sock.h> #include <net/netlink.h> #include <net/genetlink.h> #include <net/netlabel.h> #include <asm/bug.h> #include "netlabel_mgmt.h" #include "netlabel_unlabeled.h" #include "netlabel_cipso_v4.h" #include "netlabel_user.h" /* * NetLabel NETLINK Setup Functions */ /** * netlbl_netlink_init - Initialize the NETLINK communication channel * * Description: * Call out to the NetLabel components so they can register their families and * commands with the Generic NETLINK mechanism. Returns zero on success and * non-zero on failure. 
* */ int __init netlbl_netlink_init(void) { int ret_val; ret_val = netlbl_mgmt_genl_init(); if (ret_val != 0) return ret_val; ret_val = netlbl_cipsov4_genl_init(); if (ret_val != 0) return ret_val; ret_val = netlbl_unlabel_genl_init(); if (ret_val != 0) return ret_val; return 0; } /* * NetLabel Audit Functions */ /** * netlbl_audit_start_common - Start an audit message * @type: audit message type * @audit_info: NetLabel audit information * * Description: * Start an audit message using the type specified in @type and fill the audit * message with some fields common to all NetLabel audit messages. Returns * a pointer to the audit buffer on success, NULL on failure. * */ struct audit_buffer *netlbl_audit_start_common(int type, struct netlbl_audit *audit_info) { struct audit_buffer *audit_buf; char *secctx; u32 secctx_len; if (audit_enabled == 0) return NULL; audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type); if (audit_buf == NULL) return NULL; audit_log_format(audit_buf, "netlabel: auid=%u ses=%u", audit_info->loginuid, audit_info->sessionid); if (audit_info->secid != 0 && security_secid_to_secctx(audit_info->secid, &secctx, &secctx_len) == 0) { audit_log_format(audit_buf, " subj=%s", secctx); security_release_secctx(secctx, secctx_len); } return audit_buf; }
gpl-2.0
cmtrebon/android_kernel_samsung_msm7x27a
arch/mips/loongson/common/cs5536/cs5536_isa.c
8685
7788
/* * the ISA Virtual Support Module of AMD CS5536 * * Copyright (C) 2007 Lemote, Inc. * Author : jlliu, liujl@lemote.com * * Copyright (C) 2009 Lemote, Inc. * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <cs5536/cs5536.h> #include <cs5536/cs5536_pci.h> /* common variables for PCI_ISA_READ/WRITE_BAR */ static const u32 divil_msr_reg[6] = { DIVIL_MSR_REG(DIVIL_LBAR_SMB), DIVIL_MSR_REG(DIVIL_LBAR_GPIO), DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), DIVIL_MSR_REG(DIVIL_LBAR_IRQ), DIVIL_MSR_REG(DIVIL_LBAR_PMS), DIVIL_MSR_REG(DIVIL_LBAR_ACPI), }; static const u32 soft_bar_flag[6] = { SOFT_BAR_SMB_FLAG, SOFT_BAR_GPIO_FLAG, SOFT_BAR_MFGPT_FLAG, SOFT_BAR_IRQ_FLAG, SOFT_BAR_PMS_FLAG, SOFT_BAR_ACPI_FLAG, }; static const u32 sb_msr_reg[6] = { SB_MSR_REG(SB_R0), SB_MSR_REG(SB_R1), SB_MSR_REG(SB_R2), SB_MSR_REG(SB_R3), SB_MSR_REG(SB_R4), SB_MSR_REG(SB_R5), }; static const u32 bar_space_range[6] = { CS5536_SMB_RANGE, CS5536_GPIO_RANGE, CS5536_MFGPT_RANGE, CS5536_IRQ_RANGE, CS5536_PMS_RANGE, CS5536_ACPI_RANGE, }; static const int bar_space_len[6] = { CS5536_SMB_LENGTH, CS5536_GPIO_LENGTH, CS5536_MFGPT_LENGTH, CS5536_IRQ_LENGTH, CS5536_PMS_LENGTH, CS5536_ACPI_LENGTH, }; /* * enable the divil module bar space. * * For all the DIVIL module LBAR, you should control the DIVIL LBAR reg * and the RCONFx(0~5) reg to use the modules. */ static void divil_lbar_enable(void) { u32 hi, lo; int offset; /* * The DIVIL IRQ is not used yet. and make the RCONF0 reserved. */ for (offset = DIVIL_LBAR_SMB; offset <= DIVIL_LBAR_PMS; offset++) { _rdmsr(DIVIL_MSR_REG(offset), &hi, &lo); hi |= 0x01; _wrmsr(DIVIL_MSR_REG(offset), hi, lo); } } /* * disable the divil module bar space. 
*/ static void divil_lbar_disable(void) { u32 hi, lo; int offset; for (offset = DIVIL_LBAR_SMB; offset <= DIVIL_LBAR_PMS; offset++) { _rdmsr(DIVIL_MSR_REG(offset), &hi, &lo); hi &= ~0x01; _wrmsr(DIVIL_MSR_REG(offset), hi, lo); } } /* * BAR write: write value to the n BAR */ void pci_isa_write_bar(int n, u32 value) { u32 hi = 0, lo = value; if (value == PCI_BAR_RANGE_MASK) { _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo); lo |= soft_bar_flag[n]; _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo); } else if (value & 0x01) { /* NATIVE reg */ hi = 0x0000f001; lo &= bar_space_range[n]; _wrmsr(divil_msr_reg[n], hi, lo); /* RCONFx is 4bytes in units for I/O space */ hi = ((value & 0x000ffffc) << 12) | ((bar_space_len[n] - 4) << 12) | 0x01; lo = ((value & 0x000ffffc) << 12) | 0x01; _wrmsr(sb_msr_reg[n], hi, lo); } } /* * BAR read: read the n BAR */ u32 pci_isa_read_bar(int n) { u32 conf_data = 0; u32 hi, lo; _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo); if (lo & soft_bar_flag[n]) { conf_data = bar_space_range[n] | PCI_BASE_ADDRESS_SPACE_IO; lo &= ~soft_bar_flag[n]; _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo); } else { _rdmsr(divil_msr_reg[n], &hi, &lo); conf_data = lo & bar_space_range[n]; conf_data |= 0x01; conf_data &= ~0x02; } return conf_data; } /* * isa_write: ISA write transfer * * We assume that this is not a bus master transfer. 
*/ void pci_isa_write_reg(int reg, u32 value) { u32 hi = 0, lo = value; u32 temp; switch (reg) { case PCI_COMMAND: if (value & PCI_COMMAND_IO) divil_lbar_enable(); else divil_lbar_disable(); break; case PCI_STATUS: _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo); temp = lo & 0x0000ffff; if ((value & PCI_STATUS_SIG_TARGET_ABORT) && (lo & SB_TAS_ERR_EN)) temp |= SB_TAS_ERR_FLAG; if ((value & PCI_STATUS_REC_TARGET_ABORT) && (lo & SB_TAR_ERR_EN)) temp |= SB_TAR_ERR_FLAG; if ((value & PCI_STATUS_REC_MASTER_ABORT) && (lo & SB_MAR_ERR_EN)) temp |= SB_MAR_ERR_FLAG; if ((value & PCI_STATUS_DETECTED_PARITY) && (lo & SB_PARE_ERR_EN)) temp |= SB_PARE_ERR_FLAG; lo = temp; _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo); break; case PCI_CACHE_LINE_SIZE: value &= 0x0000ff00; _rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo); hi &= 0xffffff00; hi |= (value >> 8); _wrmsr(SB_MSR_REG(SB_CTRL), hi, lo); break; case PCI_BAR0_REG: pci_isa_write_bar(0, value); break; case PCI_BAR1_REG: pci_isa_write_bar(1, value); break; case PCI_BAR2_REG: pci_isa_write_bar(2, value); break; case PCI_BAR3_REG: pci_isa_write_bar(3, value); break; case PCI_BAR4_REG: pci_isa_write_bar(4, value); break; case PCI_BAR5_REG: pci_isa_write_bar(5, value); break; case PCI_UART1_INT_REG: _rdmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), &hi, &lo); /* disable uart1 interrupt in PIC */ lo &= ~(0xf << 24); if (value) /* enable uart1 interrupt in PIC */ lo |= (CS5536_UART1_INTR << 24); _wrmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), hi, lo); break; case PCI_UART2_INT_REG: _rdmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), &hi, &lo); /* disable uart2 interrupt in PIC */ lo &= ~(0xf << 28); if (value) /* enable uart2 interrupt in PIC */ lo |= (CS5536_UART2_INTR << 28); _wrmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), hi, lo); break; case PCI_ISA_FIXUP_REG: if (value) { /* enable the TARGET ABORT/MASTER ABORT etc. */ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo); lo |= 0x00000063; _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo); } default: /* ALL OTHER PCI CONFIG SPACE HEADER IS NOT IMPLEMENTED. 
*/ break; } } /* * isa_read: ISA read transfers * * We assume that this is not a bus master transfer. */ u32 pci_isa_read_reg(int reg) { u32 conf_data = 0; u32 hi, lo; switch (reg) { case PCI_VENDOR_ID: conf_data = CFG_PCI_VENDOR_ID(CS5536_ISA_DEVICE_ID, CS5536_VENDOR_ID); break; case PCI_COMMAND: /* we just check the first LBAR for the IO enable bit, */ /* maybe we should changed later. */ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_SMB), &hi, &lo); if (hi & 0x01) conf_data |= PCI_COMMAND_IO; break; case PCI_STATUS: conf_data |= PCI_STATUS_66MHZ; conf_data |= PCI_STATUS_DEVSEL_MEDIUM; conf_data |= PCI_STATUS_FAST_BACK; _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo); if (lo & SB_TAS_ERR_FLAG) conf_data |= PCI_STATUS_SIG_TARGET_ABORT; if (lo & SB_TAR_ERR_FLAG) conf_data |= PCI_STATUS_REC_TARGET_ABORT; if (lo & SB_MAR_ERR_FLAG) conf_data |= PCI_STATUS_REC_MASTER_ABORT; if (lo & SB_PARE_ERR_FLAG) conf_data |= PCI_STATUS_DETECTED_PARITY; break; case PCI_CLASS_REVISION: _rdmsr(GLCP_MSR_REG(GLCP_CHIP_REV_ID), &hi, &lo); conf_data = lo & 0x000000ff; conf_data |= (CS5536_ISA_CLASS_CODE << 8); break; case PCI_CACHE_LINE_SIZE: _rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo); hi &= 0x000000f8; conf_data = CFG_PCI_CACHE_LINE_SIZE(PCI_BRIDGE_HEADER_TYPE, hi); break; /* * we only use the LBAR of DIVIL, no RCONF used. * all of them are IO space. 
*/ case PCI_BAR0_REG: return pci_isa_read_bar(0); break; case PCI_BAR1_REG: return pci_isa_read_bar(1); break; case PCI_BAR2_REG: return pci_isa_read_bar(2); break; case PCI_BAR3_REG: break; case PCI_BAR4_REG: return pci_isa_read_bar(4); break; case PCI_BAR5_REG: return pci_isa_read_bar(5); break; case PCI_CARDBUS_CIS: conf_data = PCI_CARDBUS_CIS_POINTER; break; case PCI_SUBSYSTEM_VENDOR_ID: conf_data = CFG_PCI_VENDOR_ID(CS5536_ISA_SUB_ID, CS5536_SUB_VENDOR_ID); break; case PCI_ROM_ADDRESS: conf_data = PCI_EXPANSION_ROM_BAR; break; case PCI_CAPABILITY_LIST: conf_data = PCI_CAPLIST_POINTER; break; case PCI_INTERRUPT_LINE: /* no interrupt used here */ conf_data = CFG_PCI_INTERRUPT_LINE(0x00, 0x00); break; default: break; } return conf_data; }
gpl-2.0
estiko/kernel_smartfren_d5c
drivers/char/hangcheck-timer.c
11245
6345
/* * hangcheck-timer.c * * Driver for a little io fencing timer. * * Copyright (C) 2002, 2003 Oracle. All rights reserved. * * Author: Joel Becker <joel.becker@oracle.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ /* * The hangcheck-timer driver uses the TSC to catch delays that * jiffies does not notice. A timer is set. When the timer fires, it * checks whether it was delayed and if that delay exceeds a given * margin of error. The hangcheck_tick module parameter takes the timer * duration in seconds. The hangcheck_margin parameter defines the * margin of error, in seconds. The defaults are 60 seconds for the * timer and 180 seconds for the margin of error. IOW, a timer is set * for 60 seconds. When the timer fires, the callback checks the * actual duration that the timer waited. If the duration exceeds the * alloted time and margin (here 60 + 180, or 240 seconds), the machine * is restarted. A healthy machine will have the duration match the * expected timeout very closely. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/delay.h> #include <asm/uaccess.h> #include <linux/sysrq.h> #include <linux/timer.h> #include <linux/time.h> #define VERSION_STR "0.9.1" #define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */ #define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */ static int hangcheck_tick = DEFAULT_IOFENCE_TICK; static int hangcheck_margin = DEFAULT_IOFENCE_MARGIN; static int hangcheck_reboot; /* Defaults to not reboot */ static int hangcheck_dump_tasks; /* Defaults to not dumping SysRQ T */ /* options - modular */ module_param(hangcheck_tick, int, 0); MODULE_PARM_DESC(hangcheck_tick, "Timer delay."); module_param(hangcheck_margin, int, 0); MODULE_PARM_DESC(hangcheck_margin, "If the hangcheck timer has been delayed more than hangcheck_margin seconds, the driver will fire."); module_param(hangcheck_reboot, int, 0); MODULE_PARM_DESC(hangcheck_reboot, "If nonzero, the machine will reboot when the timer margin is exceeded."); module_param(hangcheck_dump_tasks, int, 0); MODULE_PARM_DESC(hangcheck_dump_tasks, "If nonzero, the machine will dump the system task state when the timer margin is exceeded."); MODULE_AUTHOR("Oracle"); MODULE_DESCRIPTION("Hangcheck-timer detects when the system has gone out to lunch past a certain margin."); MODULE_LICENSE("GPL"); MODULE_VERSION(VERSION_STR); /* options - nonmodular */ #ifndef MODULE static int __init hangcheck_parse_tick(char *str) { int par; if (get_option(&str,&par)) hangcheck_tick = par; return 1; } static int __init hangcheck_parse_margin(char *str) { int par; if (get_option(&str,&par)) hangcheck_margin = par; return 1; } static int __init hangcheck_parse_reboot(char *str) { int par; if (get_option(&str,&par)) hangcheck_reboot = par; return 1; } static int __init 
hangcheck_parse_dump_tasks(char *str) { int par; if (get_option(&str,&par)) hangcheck_dump_tasks = par; return 1; } __setup("hcheck_tick", hangcheck_parse_tick); __setup("hcheck_margin", hangcheck_parse_margin); __setup("hcheck_reboot", hangcheck_parse_reboot); __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); #endif /* not MODULE */ #if defined(CONFIG_S390) # define HAVE_MONOTONIC # define TIMER_FREQ 1000000000ULL #else # define TIMER_FREQ 1000000000ULL #endif #ifdef HAVE_MONOTONIC extern unsigned long long monotonic_clock(void); #else static inline unsigned long long monotonic_clock(void) { struct timespec ts; getrawmonotonic(&ts); return timespec_to_ns(&ts); } #endif /* HAVE_MONOTONIC */ /* Last time scheduled */ static unsigned long long hangcheck_tsc, hangcheck_tsc_margin; static void hangcheck_fire(unsigned long); static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0); static void hangcheck_fire(unsigned long data) { unsigned long long cur_tsc, tsc_diff; cur_tsc = monotonic_clock(); if (cur_tsc > hangcheck_tsc) tsc_diff = cur_tsc - hangcheck_tsc; else tsc_diff = (cur_tsc + (~0ULL - hangcheck_tsc)); /* or something */ if (tsc_diff > hangcheck_tsc_margin) { if (hangcheck_dump_tasks) { printk(KERN_CRIT "Hangcheck: Task state:\n"); #ifdef CONFIG_MAGIC_SYSRQ handle_sysrq('t'); #endif /* CONFIG_MAGIC_SYSRQ */ } if (hangcheck_reboot) { printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n"); emergency_restart(); } else { printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n"); } } #if 0 /* * Enable to investigate delays in detail */ printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n", tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ); #endif mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); hangcheck_tsc = monotonic_clock(); } static int __init hangcheck_init(void) { printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n", VERSION_STR, hangcheck_tick, 
hangcheck_margin); #if defined (HAVE_MONOTONIC) printk("Hangcheck: Using monotonic_clock().\n"); #else printk("Hangcheck: Using getrawmonotonic().\n"); #endif /* HAVE_MONOTONIC */ hangcheck_tsc_margin = (unsigned long long)(hangcheck_margin + hangcheck_tick); hangcheck_tsc_margin *= (unsigned long long)TIMER_FREQ; hangcheck_tsc = monotonic_clock(); mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); return 0; } static void __exit hangcheck_exit(void) { del_timer_sync(&hangcheck_ticktock); printk("Hangcheck: Stopped hangcheck timer.\n"); } module_init(hangcheck_init); module_exit(hangcheck_exit);
gpl-2.0
faust93/kernel_f93_a5f
net/netfilter/xt_cluster.c
11501
5080
/* * (C) 2008-2009 Pablo Neira Ayuso <pablo@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/jhash.h> #include <linux/ip.h> #include <net/ipv6.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_conntrack.h> #include <linux/netfilter/xt_cluster.h> static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct) { return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; } static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct) { return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6; } static inline u_int32_t xt_cluster_hash_ipv4(u_int32_t ip, const struct xt_cluster_match_info *info) { return jhash_1word(ip, info->hash_seed); } static inline u_int32_t xt_cluster_hash_ipv6(const void *ip, const struct xt_cluster_match_info *info) { return jhash2(ip, NF_CT_TUPLE_L3SIZE / sizeof(__u32), info->hash_seed); } static inline u_int32_t xt_cluster_hash(const struct nf_conn *ct, const struct xt_cluster_match_info *info) { u_int32_t hash = 0; switch(nf_ct_l3num(ct)) { case AF_INET: hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info); break; case AF_INET6: hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info); break; default: WARN_ON(1); break; } return (((u64)hash * info->total_nodes) >> 32); } static inline bool xt_cluster_ipv6_is_multicast(const struct in6_addr *addr) { __be32 st = addr->s6_addr32[0]; return ((st & htonl(0xFF000000)) == htonl(0xFF000000)); } static inline bool xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family) { bool is_multicast = false; switch(family) { case NFPROTO_IPV4: is_multicast = ipv4_is_multicast(ip_hdr(skb)->daddr); break; case NFPROTO_IPV6: is_multicast = 
xt_cluster_ipv6_is_multicast(&ipv6_hdr(skb)->daddr); break; default: WARN_ON(1); break; } return is_multicast; } static bool xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par) { struct sk_buff *pskb = (struct sk_buff *)skb; const struct xt_cluster_match_info *info = par->matchinfo; const struct nf_conn *ct; enum ip_conntrack_info ctinfo; unsigned long hash; /* This match assumes that all nodes see the same packets. This can be * achieved if the switch that connects the cluster nodes support some * sort of 'port mirroring'. However, if your switch does not support * this, your cluster nodes can reply ARP request using a multicast MAC * address. Thus, your switch will flood the same packets to the * cluster nodes with the same multicast MAC address. Using a multicast * link address is a RFC 1812 (section 3.3.2) violation, but this works * fine in practise. * * Unfortunately, if you use the multicast MAC address, the link layer * sets skbuff's pkt_type to PACKET_MULTICAST, which is not accepted * by TCP and others for packets coming to this node. For that reason, * this match mangles skbuff's pkt_type if it detects a packet * addressed to a unicast address but using PACKET_MULTICAST. Yes, I * know, matches should not alter packets, but we are doing this here * because we would need to add a PKTTYPE target for this sole purpose. 
*/ if (!xt_cluster_is_multicast_addr(skb, par->family) && skb->pkt_type == PACKET_MULTICAST) { pskb->pkt_type = PACKET_HOST; } ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) return false; if (nf_ct_is_untracked(ct)) return false; if (ct->master) hash = xt_cluster_hash(ct->master, info); else hash = xt_cluster_hash(ct, info); return !!((1 << hash) & info->node_mask) ^ !!(info->flags & XT_CLUSTER_F_INV); } static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) { struct xt_cluster_match_info *info = par->matchinfo; if (info->total_nodes > XT_CLUSTER_NODES_MAX) { pr_info("you have exceeded the maximum " "number of cluster nodes (%u > %u)\n", info->total_nodes, XT_CLUSTER_NODES_MAX); return -EINVAL; } if (info->node_mask >= (1ULL << info->total_nodes)) { pr_info("this node mask cannot be " "higher than the total number of nodes\n"); return -EDOM; } return 0; } static struct xt_match xt_cluster_match __read_mostly = { .name = "cluster", .family = NFPROTO_UNSPEC, .match = xt_cluster_mt, .checkentry = xt_cluster_mt_checkentry, .matchsize = sizeof(struct xt_cluster_match_info), .me = THIS_MODULE, }; static int __init xt_cluster_mt_init(void) { return xt_register_match(&xt_cluster_match); } static void __exit xt_cluster_mt_fini(void) { xt_unregister_match(&xt_cluster_match); } MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Xtables: hash-based cluster match"); MODULE_ALIAS("ipt_cluster"); MODULE_ALIAS("ip6t_cluster"); module_init(xt_cluster_mt_init); module_exit(xt_cluster_mt_fini);
gpl-2.0
maxnet/linux-allwinner-aufs34
net/netfilter/xt_cluster.c
11501
5080
/* * (C) 2008-2009 Pablo Neira Ayuso <pablo@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/jhash.h> #include <linux/ip.h> #include <net/ipv6.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_conntrack.h> #include <linux/netfilter/xt_cluster.h> static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct) { return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; } static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct) { return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6; } static inline u_int32_t xt_cluster_hash_ipv4(u_int32_t ip, const struct xt_cluster_match_info *info) { return jhash_1word(ip, info->hash_seed); } static inline u_int32_t xt_cluster_hash_ipv6(const void *ip, const struct xt_cluster_match_info *info) { return jhash2(ip, NF_CT_TUPLE_L3SIZE / sizeof(__u32), info->hash_seed); } static inline u_int32_t xt_cluster_hash(const struct nf_conn *ct, const struct xt_cluster_match_info *info) { u_int32_t hash = 0; switch(nf_ct_l3num(ct)) { case AF_INET: hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info); break; case AF_INET6: hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info); break; default: WARN_ON(1); break; } return (((u64)hash * info->total_nodes) >> 32); } static inline bool xt_cluster_ipv6_is_multicast(const struct in6_addr *addr) { __be32 st = addr->s6_addr32[0]; return ((st & htonl(0xFF000000)) == htonl(0xFF000000)); } static inline bool xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family) { bool is_multicast = false; switch(family) { case NFPROTO_IPV4: is_multicast = ipv4_is_multicast(ip_hdr(skb)->daddr); break; case NFPROTO_IPV6: is_multicast = 
xt_cluster_ipv6_is_multicast(&ipv6_hdr(skb)->daddr); break; default: WARN_ON(1); break; } return is_multicast; } static bool xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par) { struct sk_buff *pskb = (struct sk_buff *)skb; const struct xt_cluster_match_info *info = par->matchinfo; const struct nf_conn *ct; enum ip_conntrack_info ctinfo; unsigned long hash; /* This match assumes that all nodes see the same packets. This can be * achieved if the switch that connects the cluster nodes support some * sort of 'port mirroring'. However, if your switch does not support * this, your cluster nodes can reply ARP request using a multicast MAC * address. Thus, your switch will flood the same packets to the * cluster nodes with the same multicast MAC address. Using a multicast * link address is a RFC 1812 (section 3.3.2) violation, but this works * fine in practise. * * Unfortunately, if you use the multicast MAC address, the link layer * sets skbuff's pkt_type to PACKET_MULTICAST, which is not accepted * by TCP and others for packets coming to this node. For that reason, * this match mangles skbuff's pkt_type if it detects a packet * addressed to a unicast address but using PACKET_MULTICAST. Yes, I * know, matches should not alter packets, but we are doing this here * because we would need to add a PKTTYPE target for this sole purpose. 
*/ if (!xt_cluster_is_multicast_addr(skb, par->family) && skb->pkt_type == PACKET_MULTICAST) { pskb->pkt_type = PACKET_HOST; } ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) return false; if (nf_ct_is_untracked(ct)) return false; if (ct->master) hash = xt_cluster_hash(ct->master, info); else hash = xt_cluster_hash(ct, info); return !!((1 << hash) & info->node_mask) ^ !!(info->flags & XT_CLUSTER_F_INV); } static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) { struct xt_cluster_match_info *info = par->matchinfo; if (info->total_nodes > XT_CLUSTER_NODES_MAX) { pr_info("you have exceeded the maximum " "number of cluster nodes (%u > %u)\n", info->total_nodes, XT_CLUSTER_NODES_MAX); return -EINVAL; } if (info->node_mask >= (1ULL << info->total_nodes)) { pr_info("this node mask cannot be " "higher than the total number of nodes\n"); return -EDOM; } return 0; } static struct xt_match xt_cluster_match __read_mostly = { .name = "cluster", .family = NFPROTO_UNSPEC, .match = xt_cluster_mt, .checkentry = xt_cluster_mt_checkentry, .matchsize = sizeof(struct xt_cluster_match_info), .me = THIS_MODULE, }; static int __init xt_cluster_mt_init(void) { return xt_register_match(&xt_cluster_match); } static void __exit xt_cluster_mt_fini(void) { xt_unregister_match(&xt_cluster_match); } MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Xtables: hash-based cluster match"); MODULE_ALIAS("ipt_cluster"); MODULE_ALIAS("ip6t_cluster"); module_init(xt_cluster_mt_init); module_exit(xt_cluster_mt_fini);
gpl-2.0
hunter3k/aosp_kernel_lge_d315
sound/pci/echoaudio/layla20_dsp.c
12525
7501
/**************************************************************************** Copyright Echo Digital Audio Corporation (c) 1998 - 2004 All rights reserved www.echoaudio.com This file is part of Echo Digital Audio's generic driver library. Echo Digital Audio's generic driver library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ************************************************************************* Translation from C++ and adaptation for use in ALSA-Driver were made by Giuliano Pochini <pochini@shiny.it> ****************************************************************************/ static int read_dsp(struct echoaudio *chip, u32 *data); static int set_professional_spdif(struct echoaudio *chip, char prof); static int load_asic_generic(struct echoaudio *chip, u32 cmd, short asic); static int check_asic_status(struct echoaudio *chip); static int update_flags(struct echoaudio *chip); static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id) { int err; DE_INIT(("init_hw() - Layla20\n")); if (snd_BUG_ON((subdevice_id & 0xfff0) != LAYLA20)) return -ENODEV; if ((err = init_dsp_comm_page(chip))) { DE_INIT(("init_hw - could not initialize DSP comm page\n")); return err; } chip->device_id = device_id; chip->subdevice_id = subdevice_id; chip->bad_board = TRUE; chip->has_midi = TRUE; chip->dsp_code_to_load = FW_LAYLA20_DSP; chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF | ECHO_CLOCK_BIT_WORD | 
ECHO_CLOCK_BIT_SUPER; chip->output_clock_types = ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_SUPER; if ((err = load_firmware(chip)) < 0) return err; chip->bad_board = FALSE; DE_INIT(("init_hw done\n")); return err; } static int set_mixer_defaults(struct echoaudio *chip) { chip->professional_spdif = FALSE; return init_line_levels(chip); } static u32 detect_input_clocks(const struct echoaudio *chip) { u32 clocks_from_dsp, clock_bits; /* Map the DSP clock detect bits to the generic driver clock detect bits */ clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks); clock_bits = ECHO_CLOCK_BIT_INTERNAL; if (clocks_from_dsp & GLDM_CLOCK_DETECT_BIT_SPDIF) clock_bits |= ECHO_CLOCK_BIT_SPDIF; if (clocks_from_dsp & GLDM_CLOCK_DETECT_BIT_WORD) { if (clocks_from_dsp & GLDM_CLOCK_DETECT_BIT_SUPER) clock_bits |= ECHO_CLOCK_BIT_SUPER; else clock_bits |= ECHO_CLOCK_BIT_WORD; } return clock_bits; } /* ASIC status check - some cards have one or two ASICs that need to be loaded. Once that load is complete, this function is called to see if the load was successful. If this load fails, it does not necessarily mean that the hardware is defective - the external box may be disconnected or turned off. This routine sometimes fails for Layla20; for Layla20, the loop runs 5 times and succeeds if it wins on three of the loops. 
*/ static int check_asic_status(struct echoaudio *chip) { u32 asic_status; int goodcnt, i; chip->asic_loaded = FALSE; for (i = goodcnt = 0; i < 5; i++) { send_vector(chip, DSP_VC_TEST_ASIC); /* The DSP will return a value to indicate whether or not the ASIC is currently loaded */ if (read_dsp(chip, &asic_status) < 0) { DE_ACT(("check_asic_status: failed on read_dsp\n")); return -EIO; } if (asic_status == ASIC_ALREADY_LOADED) { if (++goodcnt == 3) { chip->asic_loaded = TRUE; return 0; } } } return -EIO; } /* Layla20 has an ASIC in the external box */ static int load_asic(struct echoaudio *chip) { int err; if (chip->asic_loaded) return 0; err = load_asic_generic(chip, DSP_FNC_LOAD_LAYLA_ASIC, FW_LAYLA20_ASIC); if (err < 0) return err; /* Check if ASIC is alive and well. */ return check_asic_status(chip); } static int set_sample_rate(struct echoaudio *chip, u32 rate) { if (snd_BUG_ON(rate < 8000 || rate > 50000)) return -EINVAL; /* Only set the clock for internal mode. Do not return failure, simply treat it as a non-event. 
*/ if (chip->input_clock != ECHO_CLOCK_INTERNAL) { DE_ACT(("set_sample_rate: Cannot set sample rate - " "clock not set to CLK_CLOCKININTERNAL\n")); chip->comm_page->sample_rate = cpu_to_le32(rate); chip->sample_rate = rate; return 0; } if (wait_handshake(chip)) return -EIO; DE_ACT(("set_sample_rate(%d)\n", rate)); chip->sample_rate = rate; chip->comm_page->sample_rate = cpu_to_le32(rate); clear_handshake(chip); return send_vector(chip, DSP_VC_SET_LAYLA_SAMPLE_RATE); } static int set_input_clock(struct echoaudio *chip, u16 clock_source) { u16 clock; u32 rate; DE_ACT(("set_input_clock:\n")); rate = 0; switch (clock_source) { case ECHO_CLOCK_INTERNAL: DE_ACT(("Set Layla20 clock to INTERNAL\n")); rate = chip->sample_rate; clock = LAYLA20_CLOCK_INTERNAL; break; case ECHO_CLOCK_SPDIF: DE_ACT(("Set Layla20 clock to SPDIF\n")); clock = LAYLA20_CLOCK_SPDIF; break; case ECHO_CLOCK_WORD: DE_ACT(("Set Layla20 clock to WORD\n")); clock = LAYLA20_CLOCK_WORD; break; case ECHO_CLOCK_SUPER: DE_ACT(("Set Layla20 clock to SUPER\n")); clock = LAYLA20_CLOCK_SUPER; break; default: DE_ACT(("Input clock 0x%x not supported for Layla24\n", clock_source)); return -EINVAL; } chip->input_clock = clock_source; chip->comm_page->input_clock = cpu_to_le16(clock); clear_handshake(chip); send_vector(chip, DSP_VC_UPDATE_CLOCKS); if (rate) set_sample_rate(chip, rate); return 0; } static int set_output_clock(struct echoaudio *chip, u16 clock) { DE_ACT(("set_output_clock: %d\n", clock)); switch (clock) { case ECHO_CLOCK_SUPER: clock = LAYLA20_OUTPUT_CLOCK_SUPER; break; case ECHO_CLOCK_WORD: clock = LAYLA20_OUTPUT_CLOCK_WORD; break; default: DE_ACT(("set_output_clock wrong clock\n")); return -EINVAL; } if (wait_handshake(chip)) return -EIO; chip->comm_page->output_clock = cpu_to_le16(clock); chip->output_clock = clock; clear_handshake(chip); return send_vector(chip, DSP_VC_UPDATE_CLOCKS); } /* Set input bus gain (one unit is 0.5dB !) 
*/ static int set_input_gain(struct echoaudio *chip, u16 input, int gain) { if (snd_BUG_ON(input >= num_busses_in(chip))) return -EINVAL; if (wait_handshake(chip)) return -EIO; chip->input_gain[input] = gain; gain += GL20_INPUT_GAIN_MAGIC_NUMBER; chip->comm_page->line_in_level[input] = gain; return 0; } /* Tell the DSP to reread the flags from the comm page */ static int update_flags(struct echoaudio *chip) { if (wait_handshake(chip)) return -EIO; clear_handshake(chip); return send_vector(chip, DSP_VC_UPDATE_FLAGS); } static int set_professional_spdif(struct echoaudio *chip, char prof) { DE_ACT(("set_professional_spdif %d\n", prof)); if (prof) chip->comm_page->flags |= cpu_to_le32(DSP_FLAG_PROFESSIONAL_SPDIF); else chip->comm_page->flags &= ~cpu_to_le32(DSP_FLAG_PROFESSIONAL_SPDIF); chip->professional_spdif = prof; return update_flags(chip); }
gpl-2.0
01org/XenGT-Preview-kernel
arch/sh/drivers/pci/ops-dreamcast.c
13805
2641
/* * PCI operations for the Sega Dreamcast * * Copyright (C) 2001, 2002 M. R. Brown * Copyright (C) 2002, 2003 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/io.h> #include <mach/pci.h> /* * The !gapspci_config_access case really shouldn't happen, ever, unless * someone implicitly messes around with the last devfn value.. otherwise we * only support a single device anyways, and if we didn't have a BBA, we * wouldn't make it terribly far through the PCI setup anyways. * * Also, we could very easily support both Type 0 and Type 1 configurations * here, but since it doesn't seem that there is any such implementation in * existence, we don't bother. * * I suppose if someone actually gets around to ripping the chip out of * the BBA and hanging some more devices off of it, then this might be * something to take into consideration. However, due to the cost of the BBA, * and the general lack of activity by DC hardware hackers, this doesn't seem * likely to happen anytime soon. */ static int gapspci_config_access(unsigned char bus, unsigned int devfn) { return (bus == 0) && (devfn == 0); } /* * We can also actually read and write in b/w/l sizes! Thankfully this part * was at least done right, and we don't have to do the stupid masking and * shifting that we do on the 7751! Small wonders never cease to amaze. 
*/ static int gapspci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { *val = 0xffffffff; if (!gapspci_config_access(bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *val = inb(GAPSPCI_BBA_CONFIG+where); break; case 2: *val = inw(GAPSPCI_BBA_CONFIG+where); break; case 4: *val = inl(GAPSPCI_BBA_CONFIG+where); break; } return PCIBIOS_SUCCESSFUL; } static int gapspci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { if (!gapspci_config_access(bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break; case 2: outw((u16)val, GAPSPCI_BBA_CONFIG+where); break; case 4: outl((u32)val, GAPSPCI_BBA_CONFIG+where); break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops gapspci_pci_ops = { .read = gapspci_read, .write = gapspci_write, };
gpl-2.0
AndroidAddict92/android_kernel_lge_w5
drivers/spi/spi_qsd.c
238
87567
/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * SPI driver for Qualcomm MSM platforms * */ #include <linux/version.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/io.h> #include <linux/debugfs.h> #include <linux/gpio.h> #include <linux/remote_spinlock.h> #include <linux/pm_qos.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/dma-mapping.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/atomic.h> #include <linux/pm_runtime.h> #include <mach/msm_spi.h> #include <mach/sps.h> #include <mach/dma.h> #include <mach/msm_bus.h> #include <mach/msm_bus_board.h> #include "spi_qsd.h" static int msm_spi_pm_resume_runtime(struct device *device); static int msm_spi_pm_suspend_runtime(struct device *device); static inline int msm_spi_configure_gsbi(struct msm_spi *dd, struct platform_device *pdev) { struct resource *resource; unsigned long gsbi_mem_phys_addr; size_t gsbi_mem_size; void __iomem *gsbi_base; resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!resource) return 0; gsbi_mem_phys_addr = resource->start; gsbi_mem_size = resource_size(resource); if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr, gsbi_mem_size, SPI_DRV_NAME)) return 
-ENXIO; gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr, gsbi_mem_size); if (!gsbi_base) return -ENXIO; /* Set GSBI to SPI mode */ writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG); return 0; } static inline void msm_spi_register_init(struct msm_spi *dd) { writel_relaxed(0x00000001, dd->base + SPI_SW_RESET); msm_spi_set_state(dd, SPI_OP_STATE_RESET); writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL); writel_relaxed(0x00000000, dd->base + SPI_CONFIG); writel_relaxed(0x00000000, dd->base + SPI_IO_MODES); if (dd->qup_ver) writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK); } static inline int msm_spi_request_gpios(struct msm_spi *dd) { int i; int result = 0; for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) { if (dd->spi_gpios[i] >= 0) { result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]); if (result) { dev_err(dd->dev, "%s: gpio_request for pin %d " "failed with error %d\n", __func__, dd->spi_gpios[i], result); goto error; } } } return 0; error: for (; --i >= 0;) { if (dd->spi_gpios[i] >= 0) gpio_free(dd->spi_gpios[i]); } return result; } static inline void msm_spi_free_gpios(struct msm_spi *dd) { int i; for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) { if (dd->spi_gpios[i] >= 0) gpio_free(dd->spi_gpios[i]); } for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) { if (dd->cs_gpios[i].valid) { gpio_free(dd->cs_gpios[i].gpio_num); dd->cs_gpios[i].valid = 0; } } } /** * msm_spi_clk_max_rate: finds the nearest lower rate for a clk * @clk the clock for which to find nearest lower rate * @rate clock frequency in Hz * @return nearest lower rate or negative error value * * Public clock API extends clk_round_rate which is a ceiling function. This * function is a floor function implemented as a binary search using the * ceiling function. 
*/ static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate) { long lowest_available, nearest_low, step_size, cur; long step_direction = -1; long guess = rate; int max_steps = 10; cur = clk_round_rate(clk, rate); if (cur == rate) return rate; /* if we got here then: cur > rate */ lowest_available = clk_round_rate(clk, 0); if (lowest_available > rate) return -EINVAL; step_size = (rate - lowest_available) >> 1; nearest_low = lowest_available; while (max_steps-- && step_size) { guess += step_size * step_direction; cur = clk_round_rate(clk, guess); if ((cur < rate) && (cur > nearest_low)) nearest_low = cur; /* * if we stepped too far, then start stepping in the other * direction with half the step size */ if (((cur > rate) && (step_direction > 0)) || ((cur < rate) && (step_direction < 0))) { step_direction = -step_direction; step_size >>= 1; } } return nearest_low; } static void msm_spi_clock_set(struct msm_spi *dd, int speed) { long rate; int rc; rate = msm_spi_clk_max_rate(dd->clk, speed); if (rate < 0) { dev_err(dd->dev, "%s: no match found for requested clock frequency:%d", __func__, speed); return; } rc = clk_set_rate(dd->clk, rate); if (!rc) dd->clock_speed = rate; } static void msm_spi_clk_path_vote(struct msm_spi *dd) { if (dd->clk_path_vote.client_hdl) msm_bus_scale_client_update_request( dd->clk_path_vote.client_hdl, MSM_SPI_CLK_PATH_RESUME_VEC); } static void msm_spi_clk_path_unvote(struct msm_spi *dd) { if (dd->clk_path_vote.client_hdl) msm_bus_scale_client_update_request( dd->clk_path_vote.client_hdl, MSM_SPI_CLK_PATH_SUSPEND_VEC); } static void msm_spi_clk_path_teardown(struct msm_spi *dd) { if (dd->pdata->active_only) msm_spi_clk_path_unvote(dd); if (dd->clk_path_vote.client_hdl) { msm_bus_scale_unregister_client(dd->clk_path_vote.client_hdl); dd->clk_path_vote.client_hdl = 0; } } /** * msm_spi_clk_path_init_structs: internal impl detail of msm_spi_clk_path_init * * allocates and initilizes the bus scaling vectors. 
*/ static int msm_spi_clk_path_init_structs(struct msm_spi *dd) { struct msm_bus_vectors *paths = NULL; struct msm_bus_paths *usecases = NULL; dev_dbg(dd->dev, "initialises path clock voting structs"); paths = devm_kzalloc(dd->dev, sizeof(*paths) * 2, GFP_KERNEL); if (!paths) { dev_err(dd->dev, "msm_bus_paths.paths memory allocation failed"); return -ENOMEM; } usecases = devm_kzalloc(dd->dev, sizeof(*usecases) * 2, GFP_KERNEL); if (!usecases) { dev_err(dd->dev, "msm_bus_scale_pdata.usecases memory allocation failed"); goto path_init_err; } dd->clk_path_vote.pdata = devm_kzalloc(dd->dev, sizeof(*dd->clk_path_vote.pdata), GFP_KERNEL); if (!dd->clk_path_vote.pdata) { dev_err(dd->dev, "msm_bus_scale_pdata memory allocation failed"); goto path_init_err; } paths[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_vectors) { .src = dd->pdata->master_id, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }; paths[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_vectors) { .src = dd->pdata->master_id, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = MSM_SPI_CLK_PATH_AVRG_BW(dd), .ib = MSM_SPI_CLK_PATH_BRST_BW(dd), }; usecases[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_paths) { .num_paths = 1, .vectors = &paths[MSM_SPI_CLK_PATH_SUSPEND_VEC], }; usecases[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_paths) { .num_paths = 1, .vectors = &paths[MSM_SPI_CLK_PATH_RESUME_VEC], }; *dd->clk_path_vote.pdata = (struct msm_bus_scale_pdata) { .active_only = dd->pdata->active_only, .name = dev_name(dd->dev), .num_usecases = 2, .usecase = usecases, }; return 0; path_init_err: devm_kfree(dd->dev, paths); devm_kfree(dd->dev, usecases); devm_kfree(dd->dev, dd->clk_path_vote.pdata); dd->clk_path_vote.pdata = NULL; return -ENOMEM; } /** * msm_spi_clk_path_postponed_register: reg with bus-scaling after it is probed * * @return zero on success * * Workaround: SPI driver may be probed before the bus scaling driver. Calling * msm_bus_scale_register_client() will fail if the bus scaling driver is not * ready yet. 
Thus, this function should be called not from probe but from a
 * later context. Also, this function may be called more than once before
 * registration succeeds. In that case only one error message will be logged.
 * At boot time all clocks are on, so earlier SPI transactions should succeed.
 */
static int msm_spi_clk_path_postponed_register(struct msm_spi *dd)
{
	dd->clk_path_vote.client_hdl = msm_bus_scale_register_client(
						dd->clk_path_vote.pdata);

	if (dd->clk_path_vote.client_hdl) {
		if (dd->clk_path_vote.reg_err) {
			/* log a success message if an error msg was logged */
			dd->clk_path_vote.reg_err = false;
			dev_info(dd->dev,
				"msm_bus_scale_register_client(mstr-id:%d "
				"actv-only:%d):0x%x",
				dd->pdata->master_id, dd->pdata->active_only,
				dd->clk_path_vote.client_hdl);
		}

		if (dd->pdata->active_only)
			msm_spi_clk_path_vote(dd);
	} else {
		/* guard to log only one error on multiple failure */
		if (!dd->clk_path_vote.reg_err) {
			dd->clk_path_vote.reg_err = true;

			dev_info(dd->dev,
				"msm_bus_scale_register_client(mstr-id:%d "
				"actv-only:%d):0",
				dd->pdata->master_id, dd->pdata->active_only);
		}
	}

	return dd->clk_path_vote.client_hdl ? 0 : -EAGAIN;
}

/*
 * msm_spi_clk_path_init: lazily allocate the bus-scaling structs and
 * register the voting client. Safe to call repeatedly; a permanent
 * struct-allocation failure disables voting (master_id = 0), while a
 * registration failure is retried on the next call.
 */
static void msm_spi_clk_path_init(struct msm_spi *dd)
{
	/*
	 * bail out if path voting is disabled (master_id == 0) or if it is
	 * already registered (client_hdl != 0)
	 */
	if (!dd->pdata->master_id || dd->clk_path_vote.client_hdl)
		return;

	/* if fail once then try no more */
	if (!dd->clk_path_vote.pdata && msm_spi_clk_path_init_structs(dd)) {
		dd->pdata->master_id = 0;
		return;
	};

	/* on failure try again later */
	if (msm_spi_clk_path_postponed_register(dd))
		return;

	if (dd->pdata->active_only)
		msm_spi_clk_path_vote(dd);
}

/*
 * msm_spi_calculate_size: decode the FIFO geometry encoded in SPI_IO_MODES.
 * @block selects the block size in words, @mult the FIFO depth as a
 * multiple of the block size.
 * @return 0 on success, -EINVAL for an unrecognized encoding.
 */
static int msm_spi_calculate_size(int *fifo_size,
				  int *block_size,
				  int block,
				  int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

/*
 * get_next_transfer: advance dd->cur_transfer (and the read/write buffer
 * pointers) to the next transfer in the current message, if there is one.
 */
static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
					      struct spi_transfer,
					      transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

/*
 * msm_spi_calculate_fifo_size: probe input/output FIFO and block sizes from
 * SPI_IO_MODES; on an invalid geometry DMA is disabled and an error logged.
 */
static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size,
				   &dd->input_block_size, block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >>
					OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
				   &dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}

	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
		/* DM mode is not available for this block size */
		if (dd->input_block_size == 4 || dd->output_block_size == 4)
			dd->use_dma = 0;

		if (dd->use_dma) {
			dd->input_burst_size = max(dd->input_block_size,
						DM_BURST_SIZE);
			dd->output_burst_size = max(dd->output_block_size,
						DM_BURST_SIZE);
		}
	}

	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}

/*
 * msm_spi_read_word_from_fifo: pop one word from the input FIFO and
 * scatter its bytes (most-significant byte first) into dd->read_buf,
 * decrementing rx_bytes_remaining. Also advances the rx bookkeeping to
 * the next transfer for multi-transfer (WR-RD) messages.
 */
static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->rx_bytes_remaining; i++) {
			/*
			 * The data format depends on bytes_per_word:
			 * 4 bytes: 0x12345678
			 * 3 bytes: 0x00123456
			 * 2 bytes: 0x00001234
			 * 1 byte : 0x00000012
			 */
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		/* No destination buffer: just account for the received word. */
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			/* Current rx transfer is full; move to the next one. */
			struct spi_transfer *t = dd->cur_rx_transfer;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

/* @return true when the QUP state machine reports a valid state. */
static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

/*
 * msm_spi_udelay: delay for @delay_usecs microseconds; sleeps for longer
 * delays and busy-waits for short ones. A zero delay is a no-op.
 */
static inline void msm_spi_udelay(unsigned long delay_usecs)
{
	/*
	 * For smaller values of delay, context switch time
	 * would negate the usage of usleep
	 */
	if (delay_usecs > 20)
		usleep_range(delay_usecs, delay_usecs);
	else if (delay_usecs)
		udelay(delay_usecs);
}
/*
 * msm_spi_wait_valid: poll until the QUP state machine reports a valid
 * state, with a timeout scaled to the current SPI clock speed.
 * @return 0 when valid, -EINVAL if no clock speed is set, -ETIMEDOUT on
 * timeout (in which case cur_msg->status is set to -EIO).
 */
static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			/* Re-check once more to avoid a racy false timeout. */
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state"
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		msm_spi_udelay(delay);
	}
	return 0;
}

/*
 * msm_spi_set_state: move the QUP state machine to @state, waiting for the
 * state-valid bit both before and after the transition.
 * @return 0 on success, -EIO if the state never became valid.
 */
static inline int msm_spi_set_state(struct msm_spi *dd,
				    enum msm_spi_state state)
{
	enum msm_spi_state cur_state;

	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/*
	 * Per spec:
	 * For PAUSE_STATE to RESET_STATE, two writes of (10) are required
	 */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
				dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

/**
 * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
 */
static inline void msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd,
						   u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	/* Unidirectional transfers: suppress the unused direction. */
	if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
				|| (dd->mode == SPI_BAM_MODE)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

/**
 * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
 * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
 * @return calculated value for SPI_CONFIG
 */
static u32
msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
{
	if (mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;

	/* CPHA=0 samples on the leading edge, so input comes first. */
	if (mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;

	return spi_config;
}

/**
 * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
 * next transfer
 */
static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);

	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
					spi_config, dd->cur_msg->spi->mode);

	if (dd->qup_ver == SPI_QUP_VERSION_NONE)
		/* flags removed from SPI_CONFIG in QUP version-2 */
		msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);

	/*
	 * HS_MODE improves signal stability for spi-clk high rates
	 * but is invalid in LOOPBACK mode.
	 */
	if ((dd->clock_speed >= SPI_HS_MIN_RATE) &&
	   !(dd->cur_msg->spi->mode & SPI_LOOP))
		spi_config |= SPI_CFG_HS_MODE;
	else
		spi_config &= ~SPI_CFG_HS_MODE;

	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
}

/**
 * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and the write count
 * for FIFO-mode. set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
 * BAM and DMOV modes.
 * @n_words The number of reads/writes of size N.
 */
static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
{
	/*
	 * n_words cannot exceed fifo_size, and only one READ COUNT
	 * interrupt is generated per transaction, so for transactions
	 * larger than fifo size READ COUNT must be disabled.
	 * For those transactions we usually move to Data Mover mode.
	 */
	if (dd->mode == SPI_FIFO_MODE) {
		if (n_words <= dd->input_fifo_size) {
			writel_relaxed(n_words,
				       dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, n_words);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
			/* must be zero for FIFO */
			writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
		}
	} else {
		/* must be zero for BAM and DMOV */
		writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
		msm_spi_set_write_count(dd, 0);

		/*
		 * for DMA transfers, both QUP_MX_INPUT_COUNT and
		 * QUP_MX_OUTPUT_COUNT must be zero to all cases but one.
		 * That case is a non-balanced transfer when there is
		 * only a read_buf.
		 */
		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
			if (dd->write_buf)
				writel_relaxed(0,
					dd->base + SPI_MX_INPUT_COUNT);
			else
				writel_relaxed(n_words,
					dd->base + SPI_MX_INPUT_COUNT);

			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
		}
	}
}

/* Disconnect a BAM pipe; logs (dev_dbg) and returns the sps error code
 * on failure. */
static int msm_spi_bam_pipe_disconnect(struct msm_spi *dd,
					struct msm_spi_bam_pipe *pipe)
{
	int ret = sps_disconnect(pipe->handle);

	if (ret) {
		dev_dbg(dd->dev, "%s disconnect bam %s pipe failed\n",
			__func__, pipe->name);
		return ret;
	}
	return 0;
}

/*
 * msm_spi_bam_pipe_connect: connect a BAM pipe with @config and register
 * an EOT wait-event that signals dd->transfer_complete. Marks the pipe as
 * requiring teardown on success.
 */
static int msm_spi_bam_pipe_connect(struct msm_spi *dd,
		struct msm_spi_bam_pipe *pipe, struct sps_connect *config)
{
	int ret;
	struct sps_register_event event = {
		.mode      = SPS_TRIGGER_WAIT,
		.options   = SPS_O_EOT,
		.xfer_done = &dd->transfer_complete,
	};

	ret = sps_connect(pipe->handle, config);
	if (ret) {
		dev_err(dd->dev, "%s: sps_connect(%s:0x%p):%d",
			__func__, pipe->name, pipe->handle, ret);
		return ret;
	}

	ret = sps_register_event(pipe->handle, &event);
	if (ret) {
		dev_err(dd->dev, "%s sps_register_event(hndl:0x%p %s):%d",
			__func__, pipe->handle, pipe->name, ret);
		msm_spi_bam_pipe_disconnect(dd, pipe);
		return ret;
	}

	pipe->teardown_required = true;
	return 0;
}

/* Flush a BAM pipe by disconnecting and reconnecting it with its saved
 * configuration. */
static void msm_spi_bam_pipe_flush(struct msm_spi *dd,
					enum msm_spi_pipe_direction pipe_dir)
{
	struct
	       msm_spi_bam_pipe *pipe =
		/*
		 * NOTE(review): CONSUMER selects bam.prod here (and vice
		 * versa) — looks inverted relative to the names; confirm
		 * against the pipe-direction convention used at init.
		 */
		(pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
				(&dd->bam.prod) : (&dd->bam.cons);
	struct sps_connect config = pipe->config;
	int ret;

	ret = msm_spi_bam_pipe_disconnect(dd, pipe);
	if (ret)
		return;

	ret = msm_spi_bam_pipe_connect(dd, pipe, &config);
	if (ret)
		return;
}

/* Recover the BAM hardware by flushing (disconnect + reconnect) both
 * the consumer and the producer pipe. */
static void msm_spi_bam_flush(struct msm_spi *dd)
{
	dev_dbg(dd->dev, "%s flushing bam for recovery\n" , __func__);

	msm_spi_bam_pipe_flush(dd, SPI_BAM_CONSUMER_PIPE);
	msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
}

/**
 * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
 * using BAM.
 * @brief BAM can transfer SPI_MAX_TRFR_BTWN_RESETS bytes at a single
 * transfer. Between transfers QUP must change to reset state. A loop is
 * issuing a single BAM transfer at a time. If another transfer is
 * required, it waits for the transfer to finish, then moves to reset
 * state, and back to run state to issue the next transfer.
 * The function does not wait for the last transfer to end, or if only
 * a single transfer is required, the function does not wait for it to
 * end.
 * @timeout max time in jiffies to wait for a transfer to finish.
 * @return zero on success
 */
static int
msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
{
	u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
	int ret;
	/*
	 * QUP must move to reset mode every 64K-1 bytes of transfer
	 * (counter is 16 bit)
	 */
	if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
		/* assert chip select unconditionally */
		u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
		if (!(spi_ioc & SPI_IO_C_FORCE_CS))
			writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
				dd->base + SPI_IO_CONTROL);
	}

	/* Following flags are required since we are waiting on all transfers */
	cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
	/*
	 * on a balanced transaction, BAM will set the flags on the producer
	 * pipe based on the flags set on the consumer pipe
	 */
	prod_flags = (dd->write_buf) ? 0 : cons_flags;

	while (dd->tx_bytes_remaining > 0) {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		bytes_to_send = min_t(u32, dd->tx_bytes_remaining
						, SPI_MAX_TRFR_BTWN_RESETS);
		n_words_xfr = DIV_ROUND_UP(bytes_to_send
						, dd->bytes_per_word);

		msm_spi_set_mx_counts(dd, n_words_xfr);

		ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
		if (ret < 0) {
			dev_err(dd->dev,
				"%s: Failed to set QUP state to run",
				__func__);
			goto xfr_err;
		}

		/* enqueue read buffer in BAM */
		if (dd->read_buf) {
			ret = sps_transfer_one(dd->bam.prod.handle,
				dd->cur_transfer->rx_dma + bytes_sent,
				bytes_to_send, dd, prod_flags);
			if (ret < 0) {
				dev_err(dd->dev,
				"%s: Failed to queue producer BAM transfer",
					__func__);
				goto xfr_err;
			}
		}

		/* enqueue write buffer in BAM */
		if (dd->write_buf) {
			ret = sps_transfer_one(dd->bam.cons.handle,
				dd->cur_transfer->tx_dma + bytes_sent,
				bytes_to_send, dd, cons_flags);
			if (ret < 0) {
				dev_err(dd->dev,
				"%s: Failed to queue consumer BAM transfer",
					__func__);
				goto xfr_err;
			}
		}

		dd->tx_bytes_remaining -= bytes_to_send;

		/* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
		if (dd->tx_bytes_remaining > 0) {
			if (!wait_for_completion_timeout(
				&dd->transfer_complete, timeout)) {
				dev_err(dd->dev,
					"%s: SPI transaction timeout",
					__func__);
				dd->cur_msg->status = -EIO;
				ret = -EIO;
				goto xfr_err;
			}
			ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
			if (ret < 0) {
				dev_err(dd->dev,
					"%s: Failed to set QUP state to reset",
					__func__);
				goto xfr_err;
			}
			init_completion(&dd->transfer_complete);
		}
	}
	return 0;

xfr_err:
	return ret;
}

/*
 * msm_spi_setup_dm_transfer: program the data-mover (DMOV) command boxes,
 * padding commands and MX counts for the next chunk of the current
 * transfer.
 */
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, bytes_sent;
	int tx_num_rows, rx_num_rows;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	atomic_set(&dd->tx_irq_called, 0);
	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer,
		 * bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}

	/*
	 * We'll send in chunks of SPI_MAX_LEN if larger than
	 * 4K bytes for targets that have only 12 bits in
	 * QUP_MAX_OUTPUT_CNT register. If the target supports
	 * more than 12bits then we send the data in chunks of
	 * the infinite_mode value that is defined in the
	 * corresponding board file.
	 */
	if (!dd->pdata->infinite_mode)
		dd->max_trfr_len = SPI_MAX_LEN;
	else
		dd->max_trfr_len = (dd->pdata->infinite_mode) *
					(dd->bytes_per_word);

	bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
				dd->max_trfr_len);

	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	/* Bytes beyond the last whole burst are sent via the padding cmd. */
	dd->tx_unaligned_len = bytes_to_send % dd->output_burst_size;
	dd->rx_unaligned_len = bytes_to_send % dd->input_burst_size;
	tx_num_rows = bytes_to_send / dd->output_burst_size;
	rx_num_rows = bytes_to_send / dd->input_burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (tx_num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len =
			(dd->output_burst_size << 16) | dd->output_burst_size;
		box->num_rows = (tx_num_rows << 16) | tx_num_rows;
		box->row_offset = (dd->output_burst_size << 16) | 0;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
	} else {
		/* No whole bursts: chain directly to the padding command. */
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (rx_num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len =
			(dd->input_burst_size << 16) | dd->input_burst_size;
		box->num_rows = (rx_num_rows << 16) | rx_num_rows;
		box->row_offset = (0 << 16) | dd->input_burst_size;

		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->tx_unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		/* Copy the unaligned tail into the zeroed padding buffer. */
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		u32 tx_offset = dd->cur_transfer->len - dd->tx_unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			tx_offset = dd->cur_msg_len - dd->tx_unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
		memset(dd->tx_padding, 0, dd->output_burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + tx_offset,
			       dd->tx_unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		tx_cmd->len = dd->output_burst_size;
	}

	if (!dd->rx_unaligned_len) {
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
		memset(dd->rx_padding, 0, dd->input_burst_size);
		rx_cmd->dst = dd->rx_padding_dma;
		rx_cmd->len = dd->input_burst_size;
	}

	/*
	 * This also takes care of the padding dummy buf
	 * Since this is set to the correct length, the dummy bytes won't
	 * be actually sent
	 */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
						       dd->bytes_per_word);
			writel_relaxed(write_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account, that the bytes pertaining to
			 * the write transfer needs to be discarded,
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
						      dd->write_len,
						      dd->bytes_per_word);
			writel_relaxed(read_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
	}
}

/* Enqueue the previously-programmed DMOV commands on the tx/rx channels. */
static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* SPI cores on targets that do not support infinite mode can send a
   maximum of 4K or 64K transfers, depending on the size of the
   MAX_OUTPUT_COUNT register; therefore, we send in several chunks.
   Upon completion we send the next chunk, or complete the transfer if
   everything is finished. On targets that support infinite mode, we send
   all the bytes in a single chunk.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode,
	 * However to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/*
	 * On targets which do not support infinite mode, we need to send
	 * more chunks, if we sent max last time
	 */
	if (dd->tx_bytes_remaining > dd->max_trfr_len) {
		dd->tx_bytes_remaining -= dd->max_trfr_len;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		/* WR-RD case: chain the following read transfer, if any. */
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
					    &dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

/* Acknowledge the max-input/max-output done flags in SPI_OPERATIONAL. */
static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
		       dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure which irq occurred and call the relevant functions */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (pm_runtime_suspended(dd->dev)) {
		dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
		return ret;
	}
	/* Dispatch any pending error flags first. */
	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);
		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}

/* Input-service interrupt: drain the input FIFO (FIFO mode) or complete
 * the DMOV transaction when both done flags are set. */
static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (dd->rx_unaligned_len == 0) {
				/* First of the two expected calls: wait for
				 * the second before completing. */
				if (atomic_inc_return(&dd->rx_irq_called) == 1)
					return IRQ_HANDLED;
			}
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
		       (dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}

/* Pack up to bytes_per_word bytes from dd->write_buf (MSB first) into a
 * single word and push it into the output FIFO; advances the tx transfer
 * bookkeeping for multi-transfer messages. */
static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		/* No source buffer: just account for the dummy word. */
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;
	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			/* Current tx transfer is done; move to the next. */
			struct spi_transfer *t = dd->cur_tx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

/*
 * msm_spi_write_rmn_to_fifo: push remaining write data into the output
 * FIFO until it is full, a FIFO's worth of words was written, or no
 * tx bytes remain.
 * NOTE(review): the loop is bounded by input_fifo_size although it feeds
 * the OUTPUT FIFO — confirm this is intentional (symmetric FIFO sizes?).
 */
static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
		SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}

/* Output-service interrupt: complete TX-only DMOV transactions, or refill
 * the output FIFO in FIFO mode. */
static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* TX_ONLY transaction is handled here
		   This is the only place we send complete at tx and not rx */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			if (atomic_inc_return(&dd->tx_irq_called) == 1)
				return IRQ_HANDLED;

			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

/* Error interrupt: log each asserted SPI/clock error flag, then clear
 * the error flags and acknowledge the clock error. */
static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

/**
 * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
 * @return zero on success or negative error code
 *
 * calls dma_map_single() on the read/write buffers, effectively invalidating
 * their cache entries. For WR-WR and WR-RD transfers, allocates a temporary
 * buffer and copies the data to/from the client buffers
 */
static int msm_spi_dma_map_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
	unsigned tx_len, rx_len;
	int ret = -EINVAL;

	dev = &dd->cur_msg->spi->dev;
	first_xfr = dd->cur_transfer;
	tx_buf = (void *)first_xfr->tx_buf;
	rx_buf = first_xfr->rx_buf;
	tx_len = rx_len = first_xfr->len;

	/*
	 * For WR-WR and WR-RD transfers, we allocate our own temporary
	 * buffer and copy the data to/from the client buffers.
	 */
	if (dd->multi_xfr) {
		dd->temp_buf = kzalloc(dd->cur_msg_len,
				       GFP_KERNEL | __GFP_DMA);
		if (!dd->temp_buf)
			return -ENOMEM;
		nxt_xfr = list_entry(first_xfr->transfer_list.next,
				     struct spi_transfer, transfer_list);

		if (dd->write_len && !dd->read_len) {
			/* WR-WR: concatenate both tx buffers. */
			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
				goto error;
			memcpy(dd->temp_buf, first_xfr->tx_buf,
			       first_xfr->len);
			memcpy(dd->temp_buf + first_xfr->len,
			       nxt_xfr->tx_buf, nxt_xfr->len);
			tx_buf = dd->temp_buf;
			tx_len = dd->cur_msg_len;
		} else {
			/* WR-RD: receive into the temporary buffer. */
			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
				goto error;
			rx_buf = dd->temp_buf;
			rx_len = dd->cur_msg_len;
		}
	}

	if (tx_buf != NULL) {
		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
						   tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'T', tx_len);
			ret = -ENOMEM;
			goto error;
		}
	}
	if (rx_buf != NULL) {
		dma_addr_t dma_handle;
		dma_handle = dma_map_single(dev, rx_buf,
					    rx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_handle)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'R', rx_len);
			/* Undo the tx mapping before bailing out. */
			if (tx_buf != NULL)
				dma_unmap_single(NULL, first_xfr->tx_dma,
						 tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
		if (dd->multi_xfr)
			nxt_xfr->rx_dma = dma_handle;
		else
			first_xfr->rx_dma = dma_handle;
	}
	return 0;

error:
	kfree(dd->temp_buf);
	dd->temp_buf = NULL;
	return ret;
}

/*
 * msm_spi_dmov_unmap_buffers: unmap the DMOV DMA buffers and copy any
 * padded / temporary-buffer data back into the client's rx buffers.
 */
static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
{
	struct device *dev;
	u32 offset;

	dev = &dd->cur_msg->spi->dev;
	/* Mapped by the client: nothing to unmap here. */
	if (dd->cur_msg->is_dma_mapped)
		goto unmap_end;

	if (dd->multi_xfr) {
		if (dd->write_len && !dd->read_len) {
			dma_unmap_single(dev,
					 dd->cur_transfer->tx_dma,
					 dd->cur_msg_len,
					 DMA_TO_DEVICE);
		} else {
			struct spi_transfer *prev_xfr;
			prev_xfr = list_entry(
				   dd->cur_transfer->transfer_list.prev,
				   struct spi_transfer,
				   transfer_list);
			if (dd->cur_transfer->rx_buf) {
				dma_unmap_single(dev,
						 dd->cur_transfer->rx_dma,
						 dd->cur_msg_len,
						 DMA_FROM_DEVICE);
			}
			if (prev_xfr->tx_buf) {
				dma_unmap_single(dev,
						 prev_xfr->tx_dma,
						 prev_xfr->len,
						 DMA_TO_DEVICE);
			}
			if (dd->rx_unaligned_len && dd->read_buf) {
				offset = dd->cur_msg_len -
					 dd->rx_unaligned_len;
				dma_coherent_post_ops();
				memcpy(dd->read_buf + offset,
				       dd->rx_padding,
				       dd->rx_unaligned_len);
				if (dd->cur_transfer->rx_buf)
					memcpy(dd->cur_transfer->rx_buf,
					       dd->read_buf + prev_xfr->len,
					       dd->cur_transfer->len);
			}
		}
		kfree(dd->temp_buf);
		dd->temp_buf = NULL;
		return;
	} else {
		if (dd->cur_transfer->rx_buf)
			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
					 dd->cur_transfer->len,
					 DMA_FROM_DEVICE);
		if (dd->cur_transfer->tx_buf)
			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
					 dd->cur_transfer->len,
					 DMA_TO_DEVICE);
	}

unmap_end:
	/* If we padded the transfer, we copy it from the padding buf */
	if (dd->rx_unaligned_len && dd->read_buf) {
		offset = dd->cur_transfer->len - dd->rx_unaligned_len;
		dma_coherent_post_ops();
		memcpy(dd->read_buf + offset, dd->rx_padding,
		       dd->rx_unaligned_len);
	}
}

/* Unmap the BAM DMA buffers (unless they were mapped by the client). */
static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
{
	struct device *dev;

	/* mapped by client */
	if (dd->cur_msg->is_dma_mapped)
		return;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_transfer->rx_buf)
		dma_unmap_single(dev, dd->cur_transfer->rx_dma,
				dd->cur_transfer->len,
				DMA_FROM_DEVICE);

	if
	   (dd->cur_transfer->tx_buf)
		dma_unmap_single(dev, dd->cur_transfer->tx_dma,
				dd->cur_transfer->len,
				DMA_TO_DEVICE);
}

/* Dispatch the mode-appropriate unmap routine after a DMA transfer. */
static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
{
	if (dd->mode == SPI_DMOV_MODE)
		msm_spi_dmov_unmap_buffers(dd);
	else if (dd->mode == SPI_BAM_MODE)
		msm_spi_bam_unmap_buffers(dd);
}

/**
 * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
 * the given transfer
 * @dd:       device
 * @tr:       transfer
 *
 * Start using DMA if:
 * 1. Is supported by HW
 * 2. Is not disabled by platform data
 * 3. Transfer size is greater than 3*block size.
 * 4. Buffers are aligned to cache line.
 * 5. Bytes-per-word is 8,16 or 32.
 */
static inline bool
msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
{
	if (!dd->use_dma)
		return false;

	/* check constraints from platform data */
	if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
		return false;

	if (dd->cur_msg_len < 3*dd->input_block_size)
		return false;

	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
		return false;

	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
		u32 cache_line = dma_get_cache_alignment();

		if (tr->tx_buf) {
			if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
				return 0;
		}
		if (tr->rx_buf) {
			if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
				return false;
		}

		if (tr->cs_change &&
		   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
			return false;
	}

	return true;
}

/**
 * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
 * prepares to process a transfer.
 */
static void msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw,
				      u32 read_count)
{
	if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
		/* QUP v2+ uses BAM; v1 uses the Data Mover. */
		if (dd->qup_ver) {
			dd->mode = SPI_BAM_MODE;
		} else {
			dd->mode = SPI_DMOV_MODE;
			if (dd->write_len && dd->read_len) {
				dd->tx_bytes_remaining = dd->write_len;
				dd->rx_bytes_remaining = dd->read_len;
			}
		}
	} else {
		dd->mode = SPI_FIFO_MODE;
		if (dd->multi_xfr) {
			dd->read_len = dd->cur_transfer->len;
			dd->write_len = dd->cur_transfer->len;
		}
	}
}

/**
 * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
 * transfer
 */
static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
{
	u32 spi_iom;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	/* Set input and output transfer mode: FIFO, DMOV, or BAM */
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));

	/* Turn on packing for data mover */
	if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
	else
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);

	/*if (dd->mode == SPI_BAM_MODE) {
		spi_iom |= SPI_IO_C_NO_TRI_STATE;
		spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
	}*/
	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
}

/* @return @spi_ioc with the clock-idle polarity bit set from SPI mode
 * bit CPOL. */
static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
{
	if (mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	return spi_ioc;
}

/**
 * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
 * next transfer
 * @return the new set value of SPI_IO_CONTROL
 */
static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
{
	u32 spi_ioc, spi_ioc_orig, chip_select;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;

	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
						, dd->cur_msg->spi->mode);

	/* Set chip-select */
	chip_select = dd->cur_msg->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT)
!= chip_select) spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select; if (!dd->cur_transfer->cs_change) spi_ioc |= SPI_IO_C_MX_CS_MODE; if (spi_ioc != spi_ioc_orig) writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL); return spi_ioc; } /** * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process * the next transfer */ static void msm_spi_set_qup_op_mask(struct msm_spi *dd) { /* mask INPUT and OUTPUT service flags in to prevent IRQs on FIFO status * change in BAM mode */ u32 mask = (dd->mode == SPI_BAM_MODE) ? QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG : 0; writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK); } static void msm_spi_process_transfer(struct msm_spi *dd) { u8 bpw; u32 max_speed; u32 read_count; u32 timeout; u32 spi_ioc; u32 int_loopback = 0; dd->tx_bytes_remaining = dd->cur_msg_len; dd->rx_bytes_remaining = dd->cur_msg_len; dd->read_buf = dd->cur_transfer->rx_buf; dd->write_buf = dd->cur_transfer->tx_buf; init_completion(&dd->transfer_complete); if (dd->cur_transfer->bits_per_word) bpw = dd->cur_transfer->bits_per_word; else if (dd->cur_msg->spi->bits_per_word) bpw = dd->cur_msg->spi->bits_per_word; else bpw = 8; dd->bytes_per_word = (bpw + 7) / 8; if (dd->cur_transfer->speed_hz) max_speed = dd->cur_transfer->speed_hz; else max_speed = dd->cur_msg->spi->max_speed_hz; if (!dd->clock_speed || max_speed != dd->clock_speed) msm_spi_clock_set(dd, max_speed); timeout = 100 * msecs_to_jiffies( DIV_ROUND_UP(dd->cur_msg_len * 8, DIV_ROUND_UP(max_speed, MSEC_PER_SEC))); read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word); if (dd->cur_msg->spi->mode & SPI_LOOP) int_loopback = 1; if (int_loopback && dd->multi_xfr && (read_count > dd->input_fifo_size)) { if (dd->read_len && dd->write_len) pr_err( "%s:Internal Loopback does not support > fifo size" "for write-then-read transactions\n", __func__); else if (dd->write_len && !dd->read_len) pr_err( "%s:Internal Loopback does not support > fifo size" "for 
write-then-write transactions\n", __func__); return; } if (msm_spi_set_state(dd, SPI_OP_STATE_RESET)) dev_err(dd->dev, "%s: Error setting QUP to reset-state", __func__); msm_spi_set_transfer_mode(dd, bpw, read_count); msm_spi_set_mx_counts(dd, read_count); if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE)) if (msm_spi_dma_map_buffers(dd) < 0) { pr_err("Mapping DMA buffers\n"); return; } msm_spi_set_qup_io_modes(dd); msm_spi_set_spi_config(dd, bpw); msm_spi_set_qup_config(dd, bpw); spi_ioc = msm_spi_set_spi_io_control(dd); msm_spi_set_qup_op_mask(dd); if (dd->mode == SPI_DMOV_MODE) { msm_spi_setup_dm_transfer(dd); msm_spi_enqueue_dm_commands(dd); } /* The output fifo interrupt handler will handle all writes after the first. Restricting this to one write avoids contention issues and race conditions between this thread and the int handler */ else if (dd->mode == SPI_FIFO_MODE) { if (msm_spi_prepare_for_write(dd)) goto transfer_end; msm_spi_start_write(dd, read_count); } else if (dd->mode == SPI_BAM_MODE) { if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0) dev_err(dd->dev, "%s: BAM transfer setup failed\n", __func__); } /* * On BAM mode, current state here is run. * Only enter the RUN state after the first word is written into * the output FIFO. Otherwise, the output FIFO EMPTY interrupt * might fire before the first word is written resulting in a * possible race condition. */ if (dd->mode != SPI_BAM_MODE) if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) { dev_warn(dd->dev, "%s: Failed to set QUP to run-state. 
Mode:%d", __func__, dd->mode); goto transfer_end; } /* Assume success, this might change later upon transaction result */ dd->cur_msg->status = 0; do { if (!wait_for_completion_timeout(&dd->transfer_complete, timeout)) { dev_err(dd->dev, "%s: SPI transaction timeout\n", __func__); dd->cur_msg->status = -EIO; if (dd->mode == SPI_DMOV_MODE) { msm_dmov_flush(dd->tx_dma_chan, 1); msm_dmov_flush(dd->rx_dma_chan, 1); } if (dd->mode == SPI_BAM_MODE) msm_spi_bam_flush(dd); break; } } while (msm_spi_dm_send_next(dd)); msm_spi_udelay(dd->cur_transfer->delay_usecs); transfer_end: msm_spi_dma_unmap_buffers(dd); dd->mode = SPI_MODE_NONE; msm_spi_set_state(dd, SPI_OP_STATE_RESET); writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE, dd->base + SPI_IO_CONTROL); } static void get_transfer_length(struct msm_spi *dd) { struct spi_transfer *tr; int num_xfrs = 0; int readlen = 0; int writelen = 0; dd->cur_msg_len = 0; dd->multi_xfr = 0; dd->read_len = dd->write_len = 0; list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) { if (tr->tx_buf) writelen += tr->len; if (tr->rx_buf) readlen += tr->len; dd->cur_msg_len += tr->len; num_xfrs++; } if (num_xfrs == 2) { struct spi_transfer *first_xfr = dd->cur_transfer; dd->multi_xfr = 1; tr = list_entry(first_xfr->transfer_list.next, struct spi_transfer, transfer_list); /* * We update dd->read_len and dd->write_len only * for WR-WR and WR-RD transfers. 
*/ if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) { if (((tr->tx_buf) && (!tr->rx_buf)) || ((!tr->tx_buf) && (tr->rx_buf))) { dd->read_len = readlen; dd->write_len = writelen; } } } else if (num_xfrs > 1) dd->multi_xfr = 1; } static inline int combine_transfers(struct msm_spi *dd) { struct spi_transfer *t = dd->cur_transfer; struct spi_transfer *nxt; int xfrs_grped = 1; dd->cur_msg_len = dd->cur_transfer->len; while (t->transfer_list.next != &dd->cur_msg->transfers) { nxt = list_entry(t->transfer_list.next, struct spi_transfer, transfer_list); if (t->cs_change != nxt->cs_change) return xfrs_grped; dd->cur_msg_len += nxt->len; xfrs_grped++; t = nxt; } return xfrs_grped; } static inline void write_force_cs(struct msm_spi *dd, bool set_flag) { u32 spi_ioc; u32 spi_ioc_orig; spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL); spi_ioc_orig = spi_ioc; if (set_flag) spi_ioc |= SPI_IO_C_FORCE_CS; else spi_ioc &= ~SPI_IO_C_FORCE_CS; if (spi_ioc != spi_ioc_orig) writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL); } static void msm_spi_process_message(struct msm_spi *dd) { int xfrs_grped = 0; int cs_num; int rc; bool xfer_delay = false; struct spi_transfer *tr; dd->write_xfr_cnt = dd->read_xfr_cnt = 0; cs_num = dd->cur_msg->spi->chip_select; if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) && (!(dd->cs_gpios[cs_num].valid)) && (dd->cs_gpios[cs_num].gpio_num >= 0)) { rc = gpio_request(dd->cs_gpios[cs_num].gpio_num, spi_cs_rsrcs[cs_num]); if (rc) { dev_err(dd->dev, "gpio_request for pin %d failed with " "error %d\n", dd->cs_gpios[cs_num].gpio_num, rc); return; } dd->cs_gpios[cs_num].valid = 1; } list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) { if (tr->delay_usecs) { dev_info(dd->dev, "SPI slave requests delay per txn :%d", tr->delay_usecs); xfer_delay = true; break; } } /* Don't combine xfers if delay is needed after every xfer */ if (dd->qup_ver || xfer_delay) { if (dd->qup_ver) write_force_cs(dd, 0); list_for_each_entry(dd->cur_transfer, &dd->cur_msg->transfers, 
transfer_list) { struct spi_transfer *t = dd->cur_transfer; struct spi_transfer *nxt; if (t->transfer_list.next != &dd->cur_msg->transfers) { nxt = list_entry(t->transfer_list.next, struct spi_transfer, transfer_list); if (dd->qup_ver && t->cs_change == nxt->cs_change) write_force_cs(dd, 1); else if (dd->qup_ver) write_force_cs(dd, 0); } dd->cur_msg_len = dd->cur_transfer->len; msm_spi_process_transfer(dd); } } else { dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers, struct spi_transfer, transfer_list); get_transfer_length(dd); if (dd->multi_xfr && !dd->read_len && !dd->write_len) { /* * Handling of multi-transfers. * FIFO mode is used by default */ list_for_each_entry(dd->cur_transfer, &dd->cur_msg->transfers, transfer_list) { if (!dd->cur_transfer->len) goto error; if (xfrs_grped) { xfrs_grped--; continue; } else { dd->read_len = dd->write_len = 0; xfrs_grped = combine_transfers(dd); } dd->cur_tx_transfer = dd->cur_transfer; dd->cur_rx_transfer = dd->cur_transfer; msm_spi_process_transfer(dd); xfrs_grped--; } } else { /* Handling of a single transfer or * WR-WR or WR-RD transfers */ if ((!dd->cur_msg->is_dma_mapped) && (msm_spi_use_dma(dd, dd->cur_transfer, dd->cur_transfer->bits_per_word))) { /* Mapping of DMA buffers */ int ret = msm_spi_dma_map_buffers(dd); if (ret < 0) { dd->cur_msg->status = ret; goto error; } } dd->cur_tx_transfer = dd->cur_transfer; dd->cur_rx_transfer = dd->cur_transfer; msm_spi_process_transfer(dd); } } return; error: if (dd->cs_gpios[cs_num].valid) { gpio_free(dd->cs_gpios[cs_num].gpio_num); dd->cs_gpios[cs_num].valid = 0; } } /* workqueue - pull messages from queue & process */ static void msm_spi_workq(struct work_struct *work) { struct msm_spi *dd = container_of(work, struct msm_spi, work_data); unsigned long flags; u32 status_error = 0; pm_runtime_get_sync(dd->dev); mutex_lock(&dd->core_lock); /* * Counter-part of system-suspend when runtime-pm is not enabled. 
 * This way, resume can be left empty and device will be put in
 * active mode only if client requests anything on the bus
 */
	if (!pm_runtime_enabled(dd->dev))
		msm_spi_pm_resume_runtime(dd->dev);

	/* Serialize against the remote core when a remote spinlock is used */
	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	/* If the QUP is in a bad state, fail every queued message with -EIO */
	if (!msm_spi_is_valid_state(dd)) {
		dev_err(dd->dev, "%s: SPI operational state not valid\n",
			__func__);
		status_error = 1;
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	dd->transfer_pending = 1;
	/*
	 * Drain the message queue. The queue lock is dropped around the
	 * actual transfer and the completion callback, then re-taken to
	 * fetch the next message, so msm_spi_transfer() may append new
	 * messages concurrently while we are processing.
	 */
	while (!list_empty(&dd->queue)) {
		dd->cur_msg = list_entry(dd->queue.next,
					 struct spi_message, queue);
		list_del_init(&dd->cur_msg->queue);
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		if (status_error)
			dd->cur_msg->status = -EIO;
		else
			msm_spi_process_message(dd);
		if (dd->cur_msg->complete)
			dd->cur_msg->complete(dd->cur_msg->context);
		spin_lock_irqsave(&dd->queue_lock, flags);
	}
	dd->transfer_pending = 0;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);

	mutex_unlock(&dd->core_lock);

	/* Balance the pm_runtime_get_sync() taken at the top of the workq */
	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

	/*
	 * If needed, this can be done after the
	 * current message is complete, and work can be
	 * continued upon resume. No motivation for now.
*/ if (dd->suspended) wake_up_interruptible(&dd->continue_suspend); } static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg) { struct msm_spi *dd; unsigned long flags; struct spi_transfer *tr; dd = spi_master_get_devdata(spi->master); if (list_empty(&msg->transfers) || !msg->complete) return -EINVAL; list_for_each_entry(tr, &msg->transfers, transfer_list) { /* Check message parameters */ if (tr->speed_hz > dd->pdata->max_clock_speed || (tr->bits_per_word && (tr->bits_per_word < 4 || tr->bits_per_word > 32)) || (tr->tx_buf == NULL && tr->rx_buf == NULL)) { dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw" "tx=%p, rx=%p\n", tr->speed_hz, tr->bits_per_word, tr->tx_buf, tr->rx_buf); return -EINVAL; } } spin_lock_irqsave(&dd->queue_lock, flags); list_add_tail(&msg->queue, &dd->queue); spin_unlock_irqrestore(&dd->queue_lock, flags); queue_work(dd->workqueue, &dd->work_data); return 0; } static int msm_spi_setup(struct spi_device *spi) { struct msm_spi *dd; int rc = 0; u32 spi_ioc; u32 spi_config; u32 mask; if (spi->bits_per_word < 4 || spi->bits_per_word > 32) { dev_err(&spi->dev, "%s: invalid bits_per_word %d\n", __func__, spi->bits_per_word); rc = -EINVAL; } if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) { dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n", __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1); rc = -EINVAL; } if (rc) goto err_setup_exit; dd = spi_master_get_devdata(spi->master); pm_runtime_get_sync(dd->dev); mutex_lock(&dd->core_lock); /* Counter-part of system-suspend when runtime-pm is not enabled. 
*/ if (!pm_runtime_enabled(dd->dev)) msm_spi_pm_resume_runtime(dd->dev); if (dd->suspended) { mutex_unlock(&dd->core_lock); return -EBUSY; } if (dd->use_rlock) remote_mutex_lock(&dd->r_lock); spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL); mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select; if (spi->mode & SPI_CS_HIGH) spi_ioc |= mask; else spi_ioc &= ~mask; spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode); writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL); spi_config = readl_relaxed(dd->base + SPI_CONFIG); spi_config = msm_spi_calc_spi_config_loopback_and_input_first( spi_config, spi->mode); writel_relaxed(spi_config, dd->base + SPI_CONFIG); /* Ensure previous write completed before disabling the clocks */ mb(); if (dd->use_rlock) remote_mutex_unlock(&dd->r_lock); /* Counter-part of system-resume when runtime-pm is not enabled. */ if (!pm_runtime_enabled(dd->dev)) msm_spi_pm_suspend_runtime(dd->dev); mutex_unlock(&dd->core_lock); pm_runtime_mark_last_busy(dd->dev); pm_runtime_put_autosuspend(dd->dev); err_setup_exit: return rc; } #ifdef CONFIG_DEBUG_FS static int debugfs_iomem_x32_set(void *data, u64 val) { writel_relaxed(val, data); /* Ensure the previous write completed. */ mb(); return 0; } static int debugfs_iomem_x32_get(void *data, u64 *val) { *val = readl_relaxed(data); /* Ensure the previous read completed. 
 */
	mb();
	return 0;
}

/* 32-bit hex read/write fops for the register files created below */
DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

/* Expose every entry of debugfs_spi_regs[] under a per-device debugfs dir */
static void spi_debugfs_init(struct msm_spi *dd)
{
	dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
	if (dd->dent_spi) {
		int i;
		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
			dd->debugfs_spi_regs[i] =
				debugfs_create_file(debugfs_spi_regs[i].name,
						debugfs_spi_regs[i].mode,
						dd->dent_spi,
						dd->base +
							debugfs_spi_regs[i].offset,
						&fops_iomem_x32);
		}
	}
}

static void spi_debugfs_exit(struct msm_spi *dd)
{
	if (dd->dent_spi) {
		int i;
		/* Removing the directory recursively removes the files too */
		debugfs_remove_recursive(dd->dent_spi);
		dd->dent_spi = NULL;
		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
			dd->debugfs_spi_regs[i] = NULL;
	}
}
#else
static void spi_debugfs_init(struct msm_spi *dd) {}
static void spi_debugfs_exit(struct msm_spi *dd) {}
#endif

/* ===Device attributes begin=== */
/* sysfs 'stats' read: dump FIFO/DMA configuration and transfer counters */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	/* drvdata is the spi_master set in probe; unwrap it to the msm_spi */
	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"rx fifo_size = %d spi words\n"
			"tx fifo_size = %d spi words\n"
			"use_dma ? %s\n"
			"rx block size = %d bytes\n"
			"tx block size = %d bytes\n"
			"input burst size = %d bytes\n"
			"output burst size = %d bytes\n"
			"DMA configuration:\n"
			"tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
			"--statistics--\n"
			"Rx isrs = %d\n"
			"Tx isrs = %d\n"
			"DMA error = %d\n"
			"--debug--\n"
			"NA yet\n",
			dev_name(dev),
			dd->input_fifo_size,
			dd->output_fifo_size,
			dd->use_dma ?
"yes" : "no", dd->input_block_size, dd->output_block_size, dd->input_burst_size, dd->output_burst_size, dd->tx_dma_chan, dd->rx_dma_chan, dd->tx_dma_crci, dd->rx_dma_crci, dd->stat_rx + dd->stat_dmov_rx, dd->stat_tx + dd->stat_dmov_tx, dd->stat_dmov_tx_err + dd->stat_dmov_rx_err ); } /* Reset statistics on write */ static ssize_t set_stats(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct msm_spi *dd = dev_get_drvdata(dev); dd->stat_rx = 0; dd->stat_tx = 0; dd->stat_dmov_rx = 0; dd->stat_dmov_tx = 0; dd->stat_dmov_rx_err = 0; dd->stat_dmov_tx_err = 0; return count; } static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats); static struct attribute *dev_attrs[] = { &dev_attr_stats.attr, NULL, }; static struct attribute_group dev_attr_grp = { .attrs = dev_attrs, }; /* ===Device attributes end=== */ /** * spi_dmov_tx_complete_func - DataMover tx completion callback * * Executed in IRQ context (Data Mover's IRQ) DataMover's * spinlock @msm_dmov_lock held. */ static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd, unsigned int result, struct msm_dmov_errdata *err) { struct msm_spi *dd; if (!(result & DMOV_RSLT_VALID)) { pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd); return; } /* restore original context */ dd = container_of(cmd, struct msm_spi, tx_hdr); if (result & DMOV_RSLT_DONE) { dd->stat_dmov_tx++; if ((atomic_inc_return(&dd->tx_irq_called) == 1)) return; complete(&dd->transfer_complete); } else { /* Error or flush */ if (result & DMOV_RSLT_ERROR) { dev_err(dd->dev, "DMA error (0x%08x)\n", result); dd->stat_dmov_tx_err++; } if (result & DMOV_RSLT_FLUSH) { /* * Flushing normally happens in process of * removing, when we are waiting for outstanding * DMA commands to be flushed. 
*/ dev_info(dd->dev, "DMA channel flushed (0x%08x)\n", result); } if (err) dev_err(dd->dev, "Flush data(%08x %08x %08x %08x %08x %08x)\n", err->flush[0], err->flush[1], err->flush[2], err->flush[3], err->flush[4], err->flush[5]); dd->cur_msg->status = -EIO; complete(&dd->transfer_complete); } } /** * spi_dmov_rx_complete_func - DataMover rx completion callback * * Executed in IRQ context (Data Mover's IRQ) * DataMover's spinlock @msm_dmov_lock held. */ static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd, unsigned int result, struct msm_dmov_errdata *err) { struct msm_spi *dd; if (!(result & DMOV_RSLT_VALID)) { pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)", result, cmd); return; } /* restore original context */ dd = container_of(cmd, struct msm_spi, rx_hdr); if (result & DMOV_RSLT_DONE) { dd->stat_dmov_rx++; if (atomic_inc_return(&dd->rx_irq_called) == 1) return; complete(&dd->transfer_complete); } else { /** Error or flush */ if (result & DMOV_RSLT_ERROR) { dev_err(dd->dev, "DMA error(0x%08x)\n", result); dd->stat_dmov_rx_err++; } if (result & DMOV_RSLT_FLUSH) { dev_info(dd->dev, "DMA channel flushed(0x%08x)\n", result); } if (err) dev_err(dd->dev, "Flush data(%08x %08x %08x %08x %08x %08x)\n", err->flush[0], err->flush[1], err->flush[2], err->flush[3], err->flush[4], err->flush[5]); dd->cur_msg->status = -EIO; complete(&dd->transfer_complete); } } static inline u32 get_chunk_size(struct msm_spi *dd, int input_burst_size, int output_burst_size) { u32 cache_line = dma_get_cache_alignment(); int burst_size = (input_burst_size > output_burst_size) ? 
input_burst_size : output_burst_size; return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) + roundup(burst_size, cache_line))*2; } static void msm_spi_dmov_teardown(struct msm_spi *dd) { int limit = 0; if (!dd->use_dma) return; while (dd->mode == SPI_DMOV_MODE && limit++ < 50) { msm_dmov_flush(dd->tx_dma_chan, 1); msm_dmov_flush(dd->rx_dma_chan, 1); msleep(10); } dma_free_coherent(NULL, get_chunk_size(dd, dd->input_burst_size, dd->output_burst_size), dd->tx_dmov_cmd, dd->tx_dmov_cmd_dma); dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL; dd->tx_padding = dd->rx_padding = NULL; } static void msm_spi_bam_pipe_teardown(struct msm_spi *dd, enum msm_spi_pipe_direction pipe_dir) { struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? (&dd->bam.prod) : (&dd->bam.cons); if (!pipe->teardown_required) return; msm_spi_bam_pipe_disconnect(dd, pipe); dma_free_coherent(dd->dev, pipe->config.desc.size, pipe->config.desc.base, pipe->config.desc.phys_base); sps_free_endpoint(pipe->handle); pipe->handle = 0; pipe->teardown_required = false; } static int msm_spi_bam_pipe_init(struct msm_spi *dd, enum msm_spi_pipe_direction pipe_dir) { int rc = 0; struct sps_pipe *pipe_handle; struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? (&dd->bam.prod) : (&dd->bam.cons); struct sps_connect *pipe_conf = &pipe->config; pipe->name = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? 
"cons" : "prod"; pipe->handle = 0; pipe_handle = sps_alloc_endpoint(); if (!pipe_handle) { dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n" , __func__); return -ENOMEM; } memset(pipe_conf, 0, sizeof(*pipe_conf)); rc = sps_get_config(pipe_handle, pipe_conf); if (rc) { dev_err(dd->dev, "%s: Failed to get BAM pipe config\n" , __func__); goto config_err; } if (pipe_dir == SPI_BAM_CONSUMER_PIPE) { pipe_conf->source = dd->bam.handle; pipe_conf->destination = SPS_DEV_HANDLE_MEM; pipe_conf->mode = SPS_MODE_SRC; pipe_conf->src_pipe_index = dd->pdata->bam_producer_pipe_index; pipe_conf->dest_pipe_index = 0; } else { pipe_conf->source = SPS_DEV_HANDLE_MEM; pipe_conf->destination = dd->bam.handle; pipe_conf->mode = SPS_MODE_DEST; pipe_conf->src_pipe_index = 0; pipe_conf->dest_pipe_index = dd->pdata->bam_consumer_pipe_index; } pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE; pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec); pipe_conf->desc.base = dma_alloc_coherent(dd->dev, pipe_conf->desc.size, &pipe_conf->desc.phys_base, GFP_KERNEL); if (!pipe_conf->desc.base) { dev_err(dd->dev, "%s: Failed allocate BAM pipe memory" , __func__); rc = -ENOMEM; goto config_err; } /* zero descriptor FIFO for convenient debugging of first descs */ memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size); pipe->handle = pipe_handle; rc = msm_spi_bam_pipe_connect(dd, pipe, pipe_conf); if (rc) goto connect_err; return 0; connect_err: dma_free_coherent(dd->dev, pipe_conf->desc.size, pipe_conf->desc.base, pipe_conf->desc.phys_base); config_err: sps_free_endpoint(pipe_handle); return rc; } static void msm_spi_bam_teardown(struct msm_spi *dd) { msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE); msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE); if (dd->bam.deregister_required) { sps_deregister_bam_device(dd->bam.handle); dd->bam.deregister_required = false; } } static int msm_spi_bam_init(struct msm_spi *dd) { struct sps_bam_props bam_props = {0}; u32 bam_handle; 
int rc = 0; rc = sps_phy2h(dd->bam.phys_addr, &bam_handle); if (rc || !bam_handle) { bam_props.phys_addr = dd->bam.phys_addr; bam_props.virt_addr = dd->bam.base; bam_props.irq = dd->bam.irq; bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE; bam_props.summing_threshold = 0x10; rc = sps_register_bam_device(&bam_props, &bam_handle); if (rc) { dev_err(dd->dev, "%s: Failed to register BAM device", __func__); return rc; } dd->bam.deregister_required = true; } dd->bam.handle = bam_handle; rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE); if (rc) { dev_err(dd->dev, "%s: Failed to init producer BAM-pipe", __func__); goto bam_init_error; } rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE); if (rc) { dev_err(dd->dev, "%s: Failed to init consumer BAM-pipe", __func__); goto bam_init_error; } return 0; bam_init_error: msm_spi_bam_teardown(dd); return rc; } static __init int msm_spi_dmov_init(struct msm_spi *dd) { dmov_box *box; u32 cache_line = dma_get_cache_alignment(); /* Allocate all as one chunk, since all is smaller than page size */ /* We send NULL device, since it requires coherent_dma_mask id device definition, we're okay with using system pool */ dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd, dd->input_burst_size, dd->output_burst_size), &dd->tx_dmov_cmd_dma, GFP_KERNEL); if (dd->tx_dmov_cmd == NULL) return -ENOMEM; /* DMA addresses should be 64 bit aligned aligned */ dd->rx_dmov_cmd = (struct spi_dmov_cmd *) ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN); dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma + sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN); /* Buffers should be aligned to cache line */ dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line); dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma + sizeof(struct spi_dmov_cmd), cache_line); dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->output_burst_size), cache_line); dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->output_burst_size, cache_line); /* Setup DM commands 
*/ box = &(dd->rx_dmov_cmd->box); box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci); box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO; dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma + offsetof(struct spi_dmov_cmd, cmd_ptr)); dd->rx_hdr.complete_func = spi_dmov_rx_complete_func; box = &(dd->tx_dmov_cmd->box); box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci); box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO; dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma + offsetof(struct spi_dmov_cmd, cmd_ptr)); dd->tx_hdr.complete_func = spi_dmov_tx_complete_func; dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC | CMD_DST_CRCI(dd->tx_dma_crci); dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO; dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC | CMD_SRC_CRCI(dd->rx_dma_crci); dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO; /* Clear remaining activities on channel */ msm_dmov_flush(dd->tx_dma_chan, 1); msm_dmov_flush(dd->rx_dma_chan, 1); return 0; } enum msm_spi_dt_entry_status { DT_REQ, /* Required: fail if missing */ DT_SGST, /* Suggested: warn if missing */ DT_OPT, /* Optional: don't warn if missing */ }; enum msm_spi_dt_entry_type { DT_U32, DT_GPIO, DT_BOOL, }; struct msm_spi_dt_to_pdata_map { const char *dt_name; void *ptr_data; enum msm_spi_dt_entry_status status; enum msm_spi_dt_entry_type type; int default_val; }; static int __init msm_spi_dt_to_pdata_populate(struct platform_device *pdev, struct msm_spi_platform_data *pdata, struct msm_spi_dt_to_pdata_map *itr) { int ret, err = 0; struct device_node *node = pdev->dev.of_node; for (; itr->dt_name ; ++itr) { switch (itr->type) { case DT_GPIO: ret = of_get_named_gpio(node, itr->dt_name, 0); if (ret >= 0) { *((int *) itr->ptr_data) = ret; ret = 0; } break; case DT_U32: ret = of_property_read_u32(node, itr->dt_name, (u32 *) itr->ptr_data); break; case 
DT_BOOL: *((bool *) itr->ptr_data) = of_property_read_bool(node, itr->dt_name); ret = 0; break; default: dev_err(&pdev->dev, "%d is an unknown DT entry type\n", itr->type); ret = -EBADE; } dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n", ret, itr->dt_name, *((int *)itr->ptr_data)); if (ret) { *((int *)itr->ptr_data) = itr->default_val; if (itr->status < DT_OPT) { dev_err(&pdev->dev, "Missing '%s' DT entry\n", itr->dt_name); /* cont on err to dump all missing entries */ if (itr->status == DT_REQ && !err) err = ret; } } } return err; } /** * msm_spi_dt_to_pdata: create pdata and read gpio config from device tree */ struct msm_spi_platform_data * __init msm_spi_dt_to_pdata( struct platform_device *pdev, struct msm_spi *dd) { struct msm_spi_platform_data *pdata; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { pr_err("Unable to allocate platform data\n"); return NULL; } else { struct msm_spi_dt_to_pdata_map map[] = { {"spi-max-frequency", &pdata->max_clock_speed, DT_SGST, DT_U32, 0}, {"qcom,infinite-mode", &pdata->infinite_mode, DT_OPT, DT_U32, 0}, {"qcom,active-only", &pdata->active_only, DT_OPT, DT_BOOL, 0}, {"qcom,master-id", &pdata->master_id, DT_SGST, DT_U32, 0}, {"qcom,ver-reg-exists", &pdata->ver_reg_exists, DT_OPT, DT_BOOL, 0}, {"qcom,use-bam", &pdata->use_bam, DT_OPT, DT_BOOL, 0}, {"qcom,bam-consumer-pipe-index", &pdata->bam_consumer_pipe_index, DT_OPT, DT_U32, 0}, {"qcom,bam-producer-pipe-index", &pdata->bam_producer_pipe_index, DT_OPT, DT_U32, 0}, {"qcom,gpio-clk", &dd->spi_gpios[0], DT_OPT, DT_GPIO, -1}, {"qcom,gpio-miso", &dd->spi_gpios[1], DT_OPT, DT_GPIO, -1}, {"qcom,gpio-mosi", &dd->spi_gpios[2], DT_OPT, DT_GPIO, -1}, {"qcom,gpio-cs0", &dd->cs_gpios[0].gpio_num, DT_OPT, DT_GPIO, -1}, {"qcom,gpio-cs1", &dd->cs_gpios[1].gpio_num, DT_OPT, DT_GPIO, -1}, {"qcom,gpio-cs2", &dd->cs_gpios[2].gpio_num, DT_OPT, DT_GPIO, -1}, {"qcom,gpio-cs3", &dd->cs_gpios[3].gpio_num, DT_OPT, DT_GPIO, -1}, {NULL, NULL, 0, 0, 0}, }; if 
(msm_spi_dt_to_pdata_populate(pdev, pdata, map)) { devm_kfree(&pdev->dev, pdata); return NULL; } } if (pdata->use_bam) { if (!pdata->bam_consumer_pipe_index) { dev_warn(&pdev->dev, "missing qcom,bam-consumer-pipe-index entry in device-tree\n"); pdata->use_bam = false; } if (!pdata->bam_producer_pipe_index) { dev_warn(&pdev->dev, "missing qcom,bam-producer-pipe-index entry in device-tree\n"); pdata->use_bam = false; } } return pdata; } static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd) { u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER); return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM : SPI_QUP_VERSION_NONE; } static int __init msm_spi_bam_get_resources(struct msm_spi *dd, struct platform_device *pdev, struct spi_master *master) { struct resource *resource; size_t bam_mem_size; resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spi_bam_physical"); if (!resource) { dev_warn(&pdev->dev, "%s: Missing spi_bam_physical entry in DT", __func__); return -ENXIO; } dd->bam.phys_addr = resource->start; bam_mem_size = resource_size(resource); dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr, bam_mem_size); if (!dd->bam.base) { dev_warn(&pdev->dev, "%s: Failed to ioremap(spi_bam_physical)", __func__); return -ENXIO; } dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq"); if (dd->bam.irq < 0) { dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT", __func__); return -EINVAL; } dd->dma_init = msm_spi_bam_init; dd->dma_teardown = msm_spi_bam_teardown; return 0; } static int __init msm_spi_probe(struct platform_device *pdev) { struct spi_master *master; struct msm_spi *dd; struct resource *resource; int rc = -ENXIO; int locked = 0; int i = 0; int clk_enabled = 0; int pclk_enabled = 0; struct msm_spi_platform_data *pdata; master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi)); if (!master) { rc = -ENOMEM; dev_err(&pdev->dev, "master allocation failed\n"); goto err_probe_exit; } master->bus_num = 
pdev->id; master->mode_bits = SPI_SUPPORTED_MODES; master->num_chipselect = SPI_NUM_CHIPSELECTS; master->setup = msm_spi_setup; master->transfer = msm_spi_transfer; platform_set_drvdata(pdev, master); dd = spi_master_get_devdata(master); if (pdev->dev.of_node) { dd->qup_ver = SPI_QUP_VERSION_BFAM; master->dev.of_node = pdev->dev.of_node; pdata = msm_spi_dt_to_pdata(pdev, dd); if (!pdata) { rc = -ENOMEM; goto err_probe_exit; } rc = of_alias_get_id(pdev->dev.of_node, "spi"); if (rc < 0) dev_warn(&pdev->dev, "using default bus_num %d\n", pdev->id); else master->bus_num = pdev->id = rc; } else { pdata = pdev->dev.platform_data; dd->qup_ver = SPI_QUP_VERSION_NONE; for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) { resource = platform_get_resource(pdev, IORESOURCE_IO, i); dd->spi_gpios[i] = resource ? resource->start : -1; } for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) { resource = platform_get_resource(pdev, IORESOURCE_IO, i + ARRAY_SIZE(spi_rsrcs)); dd->cs_gpios[i].gpio_num = resource ? resource->start : -1; } } for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) dd->cs_gpios[i].valid = 0; dd->pdata = pdata; resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!resource) { rc = -ENXIO; goto err_probe_res; } dd->mem_phys_addr = resource->start; dd->mem_size = resource_size(resource); if (pdata) { if (pdata->dma_config) { rc = pdata->dma_config(); if (rc) { dev_warn(&pdev->dev, "%s: DM mode not supported\n", __func__); dd->use_dma = 0; goto skip_dma_resources; } } if (dd->qup_ver == SPI_QUP_VERSION_NONE) { resource = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (resource) { dd->rx_dma_chan = resource->start; dd->tx_dma_chan = resource->end; resource = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!resource) { rc = -ENXIO; goto err_probe_res; } dd->rx_dma_crci = resource->start; dd->tx_dma_crci = resource->end; dd->use_dma = 1; master->dma_alignment = dma_get_cache_alignment(); dd->dma_init = msm_spi_dmov_init ; dd->dma_teardown = msm_spi_dmov_teardown; } } 
else { if (!dd->pdata->use_bam) goto skip_dma_resources; rc = msm_spi_bam_get_resources(dd, pdev, master); if (rc) { dev_warn(dd->dev, "%s: Faild to get BAM resources", __func__); goto skip_dma_resources; } dd->use_dma = 1; } } skip_dma_resources: spin_lock_init(&dd->queue_lock); mutex_init(&dd->core_lock); INIT_LIST_HEAD(&dd->queue); INIT_WORK(&dd->work_data, msm_spi_workq); init_waitqueue_head(&dd->continue_suspend); dd->workqueue = create_singlethread_workqueue( dev_name(master->dev.parent)); if (!dd->workqueue) goto err_probe_workq; if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr, dd->mem_size, SPI_DRV_NAME)) { rc = -ENXIO; goto err_probe_reqmem; } dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size); if (!dd->base) { rc = -ENOMEM; goto err_probe_reqmem; } if (pdata && pdata->rsl_id) { struct remote_mutex_id rmid; rmid.r_spinlock_id = pdata->rsl_id; rmid.delay_us = SPI_TRYLOCK_DELAY; rc = remote_mutex_init(&dd->r_lock, &rmid); if (rc) { dev_err(&pdev->dev, "%s: unable to init remote_mutex " "(%s), (rc=%d)\n", rmid.r_spinlock_id, __func__, rc); goto err_probe_rlock_init; } dd->use_rlock = 1; dd->pm_lat = pdata->pm_lat; pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); } mutex_lock(&dd->core_lock); if (dd->use_rlock) remote_mutex_lock(&dd->r_lock); locked = 1; dd->dev = &pdev->dev; dd->clk = clk_get(&pdev->dev, "core_clk"); if (IS_ERR(dd->clk)) { dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__); rc = PTR_ERR(dd->clk); goto err_probe_clk_get; } dd->pclk = clk_get(&pdev->dev, "iface_clk"); if (IS_ERR(dd->pclk)) { dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__); rc = PTR_ERR(dd->pclk); goto err_probe_pclk_get; } if (pdata && pdata->max_clock_speed) msm_spi_clock_set(dd, dd->pdata->max_clock_speed); rc = clk_prepare_enable(dd->clk); if (rc) { dev_err(&pdev->dev, "%s: unable to enable core_clk\n", __func__); goto err_probe_clk_enable; } clk_enabled = 1; rc = 
clk_prepare_enable(dd->pclk); if (rc) { dev_err(&pdev->dev, "%s: unable to enable iface_clk\n", __func__); goto err_probe_pclk_enable; } pclk_enabled = 1; if (pdata && pdata->ver_reg_exists) { enum msm_spi_qup_version ver = msm_spi_get_qup_hw_ver(&pdev->dev, dd); if (dd->qup_ver != ver) dev_warn(&pdev->dev, "%s: HW version different then initially assumed by probe", __func__); } /* GSBI dose not exists on B-family MSM-chips */ if (dd->qup_ver != SPI_QUP_VERSION_BFAM) { rc = msm_spi_configure_gsbi(dd, pdev); if (rc) goto err_probe_gsbi; } msm_spi_calculate_fifo_size(dd); if (dd->use_dma) { rc = dd->dma_init(dd); if (rc) goto err_probe_dma; } msm_spi_register_init(dd); /* * The SPI core generates a bogus input overrun error on some targets, * when a transition from run to reset state occurs and if the FIFO has * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN * bit. */ msm_spi_enable_error_flags(dd); writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL); rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET); if (rc) goto err_probe_state; clk_disable_unprepare(dd->clk); clk_disable_unprepare(dd->pclk); clk_enabled = 0; pclk_enabled = 0; dd->suspended = 1; dd->transfer_pending = 0; dd->multi_xfr = 0; dd->mode = SPI_MODE_NONE; rc = msm_spi_request_irq(dd, pdev, master); if (rc) goto err_probe_irq; msm_spi_disable_irqs(dd); if (dd->use_rlock) remote_mutex_unlock(&dd->r_lock); mutex_unlock(&dd->core_lock); locked = 0; pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_enable(&pdev->dev); rc = spi_register_master(master); if (rc) goto err_probe_reg_master; rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp); if (rc) { dev_err(&pdev->dev, "failed to create dev. 
attrs : %d\n", rc); goto err_attrs; } spi_debugfs_init(dd); return 0; err_attrs: spi_unregister_master(master); err_probe_reg_master: pm_runtime_disable(&pdev->dev); err_probe_irq: err_probe_state: if (dd->dma_teardown) dd->dma_teardown(dd); err_probe_dma: err_probe_gsbi: if (pclk_enabled) clk_disable_unprepare(dd->pclk); err_probe_pclk_enable: if (clk_enabled) clk_disable_unprepare(dd->clk); err_probe_clk_enable: clk_put(dd->pclk); err_probe_pclk_get: clk_put(dd->clk); err_probe_clk_get: if (locked) { if (dd->use_rlock) remote_mutex_unlock(&dd->r_lock); mutex_unlock(&dd->core_lock); } err_probe_rlock_init: err_probe_reqmem: destroy_workqueue(dd->workqueue); err_probe_workq: err_probe_res: spi_master_put(master); err_probe_exit: return rc; } #ifdef CONFIG_PM static int msm_spi_pm_suspend_runtime(struct device *device) { struct platform_device *pdev = to_platform_device(device); struct spi_master *master = platform_get_drvdata(pdev); struct msm_spi *dd; unsigned long flags; dev_dbg(device, "pm_runtime: suspending...\n"); if (!master) goto suspend_exit; dd = spi_master_get_devdata(master); if (!dd) goto suspend_exit; if (dd->suspended) return 0; /* * Make sure nothing is added to the queue while we're * suspending */ spin_lock_irqsave(&dd->queue_lock, flags); dd->suspended = 1; spin_unlock_irqrestore(&dd->queue_lock, flags); /* Wait for transactions to end, or time out */ wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending); msm_spi_disable_irqs(dd); clk_disable_unprepare(dd->clk); clk_disable_unprepare(dd->pclk); if (dd->pdata && !dd->pdata->active_only) msm_spi_clk_path_unvote(dd); /* Free the spi clk, miso, mosi, cs gpio */ if (dd->pdata && dd->pdata->gpio_release) dd->pdata->gpio_release(); msm_spi_free_gpios(dd); if (pm_qos_request_active(&qos_req_list)) pm_qos_update_request(&qos_req_list, PM_QOS_DEFAULT_VALUE); suspend_exit: return 0; } static int msm_spi_pm_resume_runtime(struct device *device) { struct platform_device *pdev = 
to_platform_device(device); struct spi_master *master = platform_get_drvdata(pdev); struct msm_spi *dd; int ret = 0; dev_dbg(device, "pm_runtime: resuming...\n"); if (!master) goto resume_exit; dd = spi_master_get_devdata(master); if (!dd) goto resume_exit; if (!dd->suspended) return 0; if (pm_qos_request_active(&qos_req_list)) pm_qos_update_request(&qos_req_list, dd->pm_lat); /* Configure the spi clk, miso, mosi and cs gpio */ if (dd->pdata->gpio_config) { ret = dd->pdata->gpio_config(); if (ret) { dev_err(dd->dev, "%s: error configuring GPIOs\n", __func__); return ret; } } ret = msm_spi_request_gpios(dd); if (ret) return ret; msm_spi_clk_path_init(dd); if (!dd->pdata->active_only) msm_spi_clk_path_vote(dd); clk_prepare_enable(dd->clk); clk_prepare_enable(dd->pclk); msm_spi_enable_irqs(dd); dd->suspended = 0; resume_exit: return 0; } static int msm_spi_suspend(struct device *device) { if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) { struct platform_device *pdev = to_platform_device(device); struct spi_master *master = platform_get_drvdata(pdev); struct msm_spi *dd; dev_dbg(device, "system suspend"); if (!master) goto suspend_exit; dd = spi_master_get_devdata(master); if (!dd) goto suspend_exit; msm_spi_pm_suspend_runtime(device); /* * set the device's runtime PM status to 'suspended' */ pm_runtime_disable(device); pm_runtime_set_suspended(device); pm_runtime_enable(device); } suspend_exit: return 0; } static int msm_spi_resume(struct device *device) { /* * Rely on runtime-PM to call resume in case it is enabled * Even if it's not enabled, rely on 1st client transaction to do * clock ON and gpio configuration */ dev_dbg(device, "system resume"); return 0; } #else #define msm_spi_suspend NULL #define msm_spi_resume NULL #define msm_spi_pm_suspend_runtime NULL #define msm_spi_pm_resume_runtime NULL #endif /* CONFIG_PM */ static int __devexit msm_spi_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); 
struct msm_spi *dd = spi_master_get_devdata(master); pm_qos_remove_request(&qos_req_list); spi_debugfs_exit(dd); sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp); if (dd->dma_teardown) dd->dma_teardown(dd); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); clk_put(dd->clk); clk_put(dd->pclk); msm_spi_clk_path_teardown(dd); destroy_workqueue(dd->workqueue); platform_set_drvdata(pdev, 0); spi_unregister_master(master); spi_master_put(master); return 0; } static struct of_device_id msm_spi_dt_match[] = { { .compatible = "qcom,spi-qup-v2", }, {} }; static const struct dev_pm_ops msm_spi_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume) SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime, msm_spi_pm_resume_runtime, NULL) }; static struct platform_driver msm_spi_driver = { .driver = { .name = SPI_DRV_NAME, .owner = THIS_MODULE, .pm = &msm_spi_dev_pm_ops, .of_match_table = msm_spi_dt_match, }, .remove = __exit_p(msm_spi_remove), }; static int __init msm_spi_init(void) { return platform_driver_probe(&msm_spi_driver, msm_spi_probe); } module_init(msm_spi_init); static void __exit msm_spi_exit(void) { platform_driver_unregister(&msm_spi_driver); } module_exit(msm_spi_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.4"); MODULE_ALIAS("platform:"SPI_DRV_NAME);
gpl-2.0
jamesbulpin/xcp-linux-2.6.32
net/ipv6/fib6_rules.c
494
7540
/* * net/ipv6/fib6_rules.c IPv6 Routing Policy Rules * * Copyright (C)2003-2006 Helsinki University of Technology * Copyright (C)2003-2006 USAGI/WIDE Project * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * * Authors * Thomas Graf <tgraf@suug.ch> * Ville Nuorvala <vnuorval@tcs.hut.fi> */ #include <linux/netdevice.h> #include <net/fib_rules.h> #include <net/ipv6.h> #include <net/addrconf.h> #include <net/ip6_route.h> #include <net/netlink.h> struct fib6_rule { struct fib_rule common; struct rt6key src; struct rt6key dst; u8 tclass; }; struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl, int flags, pol_lookup_t lookup) { struct fib_lookup_arg arg = { .lookup_ptr = lookup, }; fib_rules_lookup(net->ipv6.fib6_rules_ops, fl, flags, &arg); if (arg.rule) fib_rule_put(arg.rule); if (arg.result) return arg.result; dst_hold(&net->ipv6.ip6_null_entry->u.dst); return &net->ipv6.ip6_null_entry->u.dst; } static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, int flags, struct fib_lookup_arg *arg) { struct rt6_info *rt = NULL; struct fib6_table *table; struct net *net = rule->fr_net; pol_lookup_t lookup = arg->lookup_ptr; switch (rule->action) { case FR_ACT_TO_TBL: break; case FR_ACT_UNREACHABLE: rt = net->ipv6.ip6_null_entry; goto discard_pkt; default: case FR_ACT_BLACKHOLE: rt = net->ipv6.ip6_blk_hole_entry; goto discard_pkt; case FR_ACT_PROHIBIT: rt = net->ipv6.ip6_prohibit_entry; goto discard_pkt; } table = fib6_get_table(net, rule->table); if (table) rt = lookup(net, table, flp, flags); if (rt != net->ipv6.ip6_null_entry) { struct fib6_rule *r = (struct fib6_rule *)rule; /* * If we need to find a source address for this traffic, * we check the result if it meets requirement of the rule. 
*/ if ((rule->flags & FIB_RULE_FIND_SADDR) && r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) { struct in6_addr saddr; unsigned int srcprefs = 0; if (flags & RT6_LOOKUP_F_SRCPREF_TMP) srcprefs |= IPV6_PREFER_SRC_TMP; if (flags & RT6_LOOKUP_F_SRCPREF_PUBLIC) srcprefs |= IPV6_PREFER_SRC_PUBLIC; if (flags & RT6_LOOKUP_F_SRCPREF_COA) srcprefs |= IPV6_PREFER_SRC_COA; if (ipv6_dev_get_saddr(net, ip6_dst_idev(&rt->u.dst)->dev, &flp->fl6_dst, srcprefs, &saddr)) goto again; if (!ipv6_prefix_equal(&saddr, &r->src.addr, r->src.plen)) goto again; ipv6_addr_copy(&flp->fl6_src, &saddr); } goto out; } again: dst_release(&rt->u.dst); rt = NULL; goto out; discard_pkt: dst_hold(&rt->u.dst); out: arg->result = rt; return rt == NULL ? -EAGAIN : 0; } static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) { struct fib6_rule *r = (struct fib6_rule *) rule; if (r->dst.plen && !ipv6_prefix_equal(&fl->fl6_dst, &r->dst.addr, r->dst.plen)) return 0; /* * If FIB_RULE_FIND_SADDR is set and we do not have a * source address for the traffic, we defer check for * source address. 
*/ if (r->src.plen) { if (flags & RT6_LOOKUP_F_HAS_SADDR) { if (!ipv6_prefix_equal(&fl->fl6_src, &r->src.addr, r->src.plen)) return 0; } else if (!(r->common.flags & FIB_RULE_FIND_SADDR)) return 0; } if (r->tclass && r->tclass != ((ntohl(fl->fl6_flowlabel) >> 20) & 0xff)) return 0; return 1; } static const struct nla_policy fib6_rule_policy[FRA_MAX+1] = { FRA_GENERIC_POLICY, }; static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb) { int err = -EINVAL; struct net *net = sock_net(skb->sk); struct fib6_rule *rule6 = (struct fib6_rule *) rule; if (rule->action == FR_ACT_TO_TBL) { if (rule->table == RT6_TABLE_UNSPEC) goto errout; if (fib6_new_table(net, rule->table) == NULL) { err = -ENOBUFS; goto errout; } } if (frh->src_len) nla_memcpy(&rule6->src.addr, tb[FRA_SRC], sizeof(struct in6_addr)); if (frh->dst_len) nla_memcpy(&rule6->dst.addr, tb[FRA_DST], sizeof(struct in6_addr)); rule6->src.plen = frh->src_len; rule6->dst.plen = frh->dst_len; rule6->tclass = frh->tos; err = 0; errout: return err; } static int fib6_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, struct nlattr **tb) { struct fib6_rule *rule6 = (struct fib6_rule *) rule; if (frh->src_len && (rule6->src.plen != frh->src_len)) return 0; if (frh->dst_len && (rule6->dst.plen != frh->dst_len)) return 0; if (frh->tos && (rule6->tclass != frh->tos)) return 0; if (frh->src_len && nla_memcmp(tb[FRA_SRC], &rule6->src.addr, sizeof(struct in6_addr))) return 0; if (frh->dst_len && nla_memcmp(tb[FRA_DST], &rule6->dst.addr, sizeof(struct in6_addr))) return 0; return 1; } static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh) { struct fib6_rule *rule6 = (struct fib6_rule *) rule; frh->family = AF_INET6; frh->dst_len = rule6->dst.plen; frh->src_len = rule6->src.plen; frh->tos = rule6->tclass; if (rule6->dst.plen) NLA_PUT(skb, FRA_DST, sizeof(struct in6_addr), &rule6->dst.addr); if (rule6->src.plen) 
NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr), &rule6->src.addr); return 0; nla_put_failure: return -ENOBUFS; } static u32 fib6_rule_default_pref(struct fib_rules_ops *ops) { return 0x3FFF; } static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule) { return nla_total_size(16) /* dst */ + nla_total_size(16); /* src */ } static struct fib_rules_ops fib6_rules_ops_template = { .family = AF_INET6, .rule_size = sizeof(struct fib6_rule), .addr_size = sizeof(struct in6_addr), .action = fib6_rule_action, .match = fib6_rule_match, .configure = fib6_rule_configure, .compare = fib6_rule_compare, .fill = fib6_rule_fill, .default_pref = fib6_rule_default_pref, .nlmsg_payload = fib6_rule_nlmsg_payload, .nlgroup = RTNLGRP_IPV6_RULE, .policy = fib6_rule_policy, .owner = THIS_MODULE, .fro_net = &init_net, }; static int fib6_rules_net_init(struct net *net) { int err = -ENOMEM; net->ipv6.fib6_rules_ops = kmemdup(&fib6_rules_ops_template, sizeof(*net->ipv6.fib6_rules_ops), GFP_KERNEL); if (!net->ipv6.fib6_rules_ops) goto out; net->ipv6.fib6_rules_ops->fro_net = net; INIT_LIST_HEAD(&net->ipv6.fib6_rules_ops->rules_list); err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0, RT6_TABLE_LOCAL, FIB_RULE_PERMANENT); if (err) goto out_fib6_rules_ops; err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0x7FFE, RT6_TABLE_MAIN, 0); if (err) goto out_fib6_default_rule_add; err = fib_rules_register(net->ipv6.fib6_rules_ops); if (err) goto out_fib6_default_rule_add; out: return err; out_fib6_default_rule_add: fib_rules_cleanup_ops(net->ipv6.fib6_rules_ops); out_fib6_rules_ops: kfree(net->ipv6.fib6_rules_ops); goto out; } static void fib6_rules_net_exit(struct net *net) { fib_rules_unregister(net->ipv6.fib6_rules_ops); kfree(net->ipv6.fib6_rules_ops); } static struct pernet_operations fib6_rules_net_ops = { .init = fib6_rules_net_init, .exit = fib6_rules_net_exit, }; int __init fib6_rules_init(void) { return register_pernet_subsys(&fib6_rules_net_ops); } void fib6_rules_cleanup(void) { 
unregister_pernet_subsys(&fib6_rules_net_ops); }
gpl-2.0
pornmailbox/linux-hi3518
arch/x86/kernel/cpu/mcheck/mce-apei.c
494
4455
/* * Bridge between MCE and APEI * * On some machine, corrected memory errors are reported via APEI * generic hardware error source (GHES) instead of corrected Machine * Check. These corrected memory errors can be reported to user space * through /dev/mcelog via faking a corrected Machine Check, so that * the error memory page can be offlined by /sbin/mcelog if the error * count for one page is beyond the threshold. * * For fatal MCE, save MCE record into persistent storage via ERST, so * that the MCE record can be logged after reboot via ERST. * * Copyright 2010 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/acpi.h> #include <linux/cper.h> #include <acpi/apei.h> #include <acpi/ghes.h> #include <asm/mce.h> #include "mce-internal.h" void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) { struct mce m; if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) return; mce_setup(&m); m.bank = 1; /* Fake a memory read error with unknown channel */ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f; if (severity >= GHES_SEV_RECOVERABLE) m.status |= MCI_STATUS_UC; if (severity >= GHES_SEV_PANIC) m.status |= MCI_STATUS_PCC; m.addr = mem_err->physical_addr; mce_log(&m); } EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); #define CPER_CREATOR_MCE \ UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \ 0x64, 0x90, 0xb8, 0x9d) #define CPER_SECTION_TYPE_MCE \ UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \ 0x04, 0x4a, 0x38, 0xfc) /* * CPER specification (in UEFI specification 2.3 appendix N) requires * byte-packed. 
*/ struct cper_mce_record { struct cper_record_header hdr; struct cper_section_descriptor sec_hdr; struct mce mce; } __packed; int apei_write_mce(struct mce *m) { struct cper_mce_record rcd; memset(&rcd, 0, sizeof(rcd)); memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE); rcd.hdr.revision = CPER_RECORD_REV; rcd.hdr.signature_end = CPER_SIG_END; rcd.hdr.section_count = 1; rcd.hdr.error_severity = CPER_SEV_FATAL; /* timestamp, platform_id, partition_id are all invalid */ rcd.hdr.validation_bits = 0; rcd.hdr.record_length = sizeof(rcd); rcd.hdr.creator_id = CPER_CREATOR_MCE; rcd.hdr.notification_type = CPER_NOTIFY_MCE; rcd.hdr.record_id = cper_next_record_id(); rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR; rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd; rcd.sec_hdr.section_length = sizeof(rcd.mce); rcd.sec_hdr.revision = CPER_SEC_REV; /* fru_id and fru_text is invalid */ rcd.sec_hdr.validation_bits = 0; rcd.sec_hdr.flags = CPER_SEC_PRIMARY; rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE; rcd.sec_hdr.section_severity = CPER_SEV_FATAL; memcpy(&rcd.mce, m, sizeof(*m)); return erst_write(&rcd.hdr); } ssize_t apei_read_mce(struct mce *m, u64 *record_id) { struct cper_mce_record rcd; int rc, pos; rc = erst_get_record_id_begin(&pos); if (rc) return rc; retry: rc = erst_get_record_id_next(&pos, record_id); if (rc) goto out; /* no more record */ if (*record_id == APEI_ERST_INVALID_RECORD_ID) goto out; rc = erst_read(*record_id, &rcd.hdr, sizeof(rcd)); /* someone else has cleared the record, try next one */ if (rc == -ENOENT) goto retry; else if (rc < 0) goto out; /* try to skip other type records in storage */ else if (rc != sizeof(rcd) || uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) goto retry; memcpy(m, &rcd.mce, sizeof(*m)); rc = sizeof(*m); out: erst_get_record_id_end(); return rc; } /* Check whether there is record in ERST */ int apei_check_mce(void) { return erst_get_record_count(); } int apei_clear_mce(u64 record_id) { return 
erst_clear(record_id); }
gpl-2.0
emwno/android_kernel_U8500
arch/arm/mach-sa1100/assabet.c
494
11261
/* * linux/arch/arm/mach-sa1100/assabet.c * * Author: Nicolas Pitre * * This file contains all Assabet-specific tweaks. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/serial_core.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/delay.h> #include <linux/mm.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/irda.h> #include <asm/mach/map.h> #include <asm/mach/serial_sa1100.h> #include <mach/assabet.h> #include <mach/mcp.h> #include "generic.h" #define ASSABET_BCR_DB1110 \ (ASSABET_BCR_SPK_OFF | ASSABET_BCR_QMUTE | \ ASSABET_BCR_LED_GREEN | ASSABET_BCR_LED_RED | \ ASSABET_BCR_RS232EN | ASSABET_BCR_LCD_12RGB | \ ASSABET_BCR_IRDA_MD0) #define ASSABET_BCR_DB1111 \ (ASSABET_BCR_SPK_OFF | ASSABET_BCR_QMUTE | \ ASSABET_BCR_LED_GREEN | ASSABET_BCR_LED_RED | \ ASSABET_BCR_RS232EN | ASSABET_BCR_LCD_12RGB | \ ASSABET_BCR_CF_BUS_OFF | ASSABET_BCR_STEREO_LB | \ ASSABET_BCR_IRDA_MD0 | ASSABET_BCR_CF_RST) unsigned long SCR_value = ASSABET_SCR_INIT; EXPORT_SYMBOL(SCR_value); static unsigned long BCR_value = ASSABET_BCR_DB1110; void ASSABET_BCR_frob(unsigned int mask, unsigned int val) { unsigned long flags; local_irq_save(flags); BCR_value = (BCR_value & ~mask) | val; ASSABET_BCR = BCR_value; local_irq_restore(flags); } EXPORT_SYMBOL(ASSABET_BCR_frob); static void assabet_backlight_power(int on) { #ifndef ASSABET_PAL_VIDEO if (on) ASSABET_BCR_set(ASSABET_BCR_LIGHT_ON); else #endif ASSABET_BCR_clear(ASSABET_BCR_LIGHT_ON); } /* * Turn on/off the 
backlight. When turning the backlight on, * we wait 500us after turning it on so we don't cause the * supplies to droop when we enable the LCD controller (and * cause a hard reset.) */ static void assabet_lcd_power(int on) { #ifndef ASSABET_PAL_VIDEO if (on) { ASSABET_BCR_set(ASSABET_BCR_LCD_ON); udelay(500); } else #endif ASSABET_BCR_clear(ASSABET_BCR_LCD_ON); } /* * Assabet flash support code. */ #ifdef ASSABET_REV_4 /* * Phase 4 Assabet has two 28F160B3 flash parts in bank 0: */ static struct mtd_partition assabet_partitions[] = { { .name = "bootloader", .size = 0x00020000, .offset = 0, .mask_flags = MTD_WRITEABLE, }, { .name = "bootloader params", .size = 0x00020000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE, }, { .name = "jffs", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; #else /* * Phase 5 Assabet has two 28F128J3A flash parts in bank 0: */ static struct mtd_partition assabet_partitions[] = { { .name = "bootloader", .size = 0x00040000, .offset = 0, .mask_flags = MTD_WRITEABLE, }, { .name = "bootloader params", .size = 0x00040000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE, }, { .name = "jffs", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; #endif static struct flash_platform_data assabet_flash_data = { .map_name = "cfi_probe", .parts = assabet_partitions, .nr_parts = ARRAY_SIZE(assabet_partitions), }; static struct resource assabet_flash_resources[] = { { .start = SA1100_CS0_PHYS, .end = SA1100_CS0_PHYS + SZ_32M - 1, .flags = IORESOURCE_MEM, }, { .start = SA1100_CS1_PHYS, .end = SA1100_CS1_PHYS + SZ_32M - 1, .flags = IORESOURCE_MEM, } }; /* * Assabet IrDA support code. 
*/ static int assabet_irda_set_power(struct device *dev, unsigned int state) { static unsigned int bcr_state[4] = { ASSABET_BCR_IRDA_MD0, ASSABET_BCR_IRDA_MD1|ASSABET_BCR_IRDA_MD0, ASSABET_BCR_IRDA_MD1, 0 }; if (state < 4) { state = bcr_state[state]; ASSABET_BCR_clear(state ^ (ASSABET_BCR_IRDA_MD1| ASSABET_BCR_IRDA_MD0)); ASSABET_BCR_set(state); } return 0; } static void assabet_irda_set_speed(struct device *dev, unsigned int speed) { if (speed < 4000000) ASSABET_BCR_clear(ASSABET_BCR_IRDA_FSEL); else ASSABET_BCR_set(ASSABET_BCR_IRDA_FSEL); } static struct irda_platform_data assabet_irda_data = { .set_power = assabet_irda_set_power, .set_speed = assabet_irda_set_speed, }; static struct mcp_plat_data assabet_mcp_data = { .mccr0 = MCCR0_ADM, .sclk_rate = 11981000, }; static void __init assabet_init(void) { /* * Ensure that the power supply is in "high power" mode. */ GPDR |= GPIO_GPIO16; GPSR = GPIO_GPIO16; /* * Ensure that these pins are set as outputs and are driving * logic 0. This ensures that we won't inadvertently toggle * the WS latch in the CPLD, and we don't float causing * excessive power drain. --rmk */ GPDR |= GPIO_SSP_TXD | GPIO_SSP_SCLK | GPIO_SSP_SFRM; GPCR = GPIO_SSP_TXD | GPIO_SSP_SCLK | GPIO_SSP_SFRM; /* * Set up registers for sleep mode. */ PWER = PWER_GPIO0; PGSR = 0; PCFR = 0; PSDR = 0; PPDR |= PPC_TXD3 | PPC_TXD1; PPSR |= PPC_TXD3 | PPC_TXD1; sa1100fb_lcd_power = assabet_lcd_power; sa1100fb_backlight_power = assabet_backlight_power; if (machine_has_neponset()) { /* * Angel sets this, but other bootloaders may not. * * This must precede any driver calls to BCR_set() * or BCR_clear(). 
*/ ASSABET_BCR = BCR_value = ASSABET_BCR_DB1111; #ifndef CONFIG_ASSABET_NEPONSET printk( "Warning: Neponset detected but full support " "hasn't been configured in the kernel\n" ); #endif } sa11x0_register_mtd(&assabet_flash_data, assabet_flash_resources, ARRAY_SIZE(assabet_flash_resources)); sa11x0_register_irda(&assabet_irda_data); sa11x0_register_mcp(&assabet_mcp_data); } /* * On Assabet, we must probe for the Neponset board _before_ * paging_init() has occurred to actually determine the amount * of RAM available. To do so, we map the appropriate IO section * in the page table here in order to access GPIO registers. */ static void __init map_sa1100_gpio_regs( void ) { unsigned long phys = __PREG(GPLR) & PMD_MASK; unsigned long virt = io_p2v(phys); int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO); pmd_t *pmd; pmd = pmd_offset(pgd_offset_k(virt), virt); *pmd = __pmd(phys | prot); flush_pmd_entry(pmd); } /* * Read System Configuration "Register" * (taken from "Intel StrongARM SA-1110 Microprocessor Development Board * User's Guide", section 4.4.1) * * This same scan is performed in arch/arm/boot/compressed/head-sa1100.S * to set up the serial port for decompression status messages. We * repeat it here because the kernel may not be loaded as a zImage, and * also because it's a hassle to communicate the SCR value to the kernel * from the decompressor. * * Note that IRQs are guaranteed to be disabled. */ static void __init get_assabet_scr(void) { unsigned long scr, i; GPDR |= 0x3fc; /* Configure GPIO 9:2 as outputs */ GPSR = 0x3fc; /* Write 0xFF to GPIO 9:2 */ GPDR &= ~(0x3fc); /* Configure GPIO 9:2 as inputs */ for(i = 100; i--; ) /* Read GPIO 9:2 */ scr = GPLR; GPDR |= 0x3fc; /* restore correct pin direction */ scr &= 0x3fc; /* save as system configuration byte. 
*/ SCR_value = scr; } static void __init fixup_assabet(struct machine_desc *desc, struct tag *tags, char **cmdline, struct meminfo *mi) { /* This must be done before any call to machine_has_neponset() */ map_sa1100_gpio_regs(); get_assabet_scr(); if (machine_has_neponset()) printk("Neponset expansion board detected\n"); } static void assabet_uart_pm(struct uart_port *port, u_int state, u_int oldstate) { if (port->mapbase == _Ser1UTCR0) { if (state) ASSABET_BCR_clear(ASSABET_BCR_RS232EN | ASSABET_BCR_COM_RTS | ASSABET_BCR_COM_DTR); else ASSABET_BCR_set(ASSABET_BCR_RS232EN | ASSABET_BCR_COM_RTS | ASSABET_BCR_COM_DTR); } } /* * Assabet uses COM_RTS and COM_DTR for both UART1 (com port) * and UART3 (radio module). We only handle them for UART1 here. */ static void assabet_set_mctrl(struct uart_port *port, u_int mctrl) { if (port->mapbase == _Ser1UTCR0) { u_int set = 0, clear = 0; if (mctrl & TIOCM_RTS) clear |= ASSABET_BCR_COM_RTS; else set |= ASSABET_BCR_COM_RTS; if (mctrl & TIOCM_DTR) clear |= ASSABET_BCR_COM_DTR; else set |= ASSABET_BCR_COM_DTR; ASSABET_BCR_clear(clear); ASSABET_BCR_set(set); } } static u_int assabet_get_mctrl(struct uart_port *port) { u_int ret = 0; u_int bsr = ASSABET_BSR; /* need 2 reads to read current value */ bsr = ASSABET_BSR; if (port->mapbase == _Ser1UTCR0) { if (bsr & ASSABET_BSR_COM_DCD) ret |= TIOCM_CD; if (bsr & ASSABET_BSR_COM_CTS) ret |= TIOCM_CTS; if (bsr & ASSABET_BSR_COM_DSR) ret |= TIOCM_DSR; } else if (port->mapbase == _Ser3UTCR0) { if (bsr & ASSABET_BSR_RAD_DCD) ret |= TIOCM_CD; if (bsr & ASSABET_BSR_RAD_CTS) ret |= TIOCM_CTS; if (bsr & ASSABET_BSR_RAD_DSR) ret |= TIOCM_DSR; if (bsr & ASSABET_BSR_RAD_RI) ret |= TIOCM_RI; } else { ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR; } return ret; } static struct sa1100_port_fns assabet_port_fns __initdata = { .set_mctrl = assabet_set_mctrl, .get_mctrl = assabet_get_mctrl, .pm = assabet_uart_pm, }; static struct map_desc assabet_io_desc[] __initdata = { { /* Board Control Register */ .virtual 
= 0xf1000000, .pfn = __phys_to_pfn(0x12000000), .length = 0x00100000, .type = MT_DEVICE }, { /* MQ200 */ .virtual = 0xf2800000, .pfn = __phys_to_pfn(0x4b800000), .length = 0x00800000, .type = MT_DEVICE } }; static void __init assabet_map_io(void) { sa1100_map_io(); iotable_init(assabet_io_desc, ARRAY_SIZE(assabet_io_desc)); /* * Set SUS bit in SDCR0 so serial port 1 functions. * Its called GPCLKR0 in my SA1110 manual. */ Ser1SDCR0 |= SDCR0_SUS; if (machine_has_neponset()) { #ifdef CONFIG_ASSABET_NEPONSET extern void neponset_map_io(void); /* * We map Neponset registers even if it isn't present since * many drivers will try to probe their stuff (and fail). * This is still more friendly than a kernel paging request * crash. */ neponset_map_io(); #endif } else { sa1100_register_uart_fns(&assabet_port_fns); } /* * When Neponset is attached, the first UART should be * UART3. That's what Angel is doing and many documents * are stating this. * * We do the Neponset mapping even if Neponset support * isn't compiled in so the user will still get something on * the expected physical serial port. * * We no longer do this; not all boot loaders support it, * and UART3 appears to be somewhat unreliable with blob. */ sa1100_register_uart(0, 1); sa1100_register_uart(2, 3); } MACHINE_START(ASSABET, "Intel-Assabet") .boot_params = 0xc0000100, .fixup = fixup_assabet, .map_io = assabet_map_io, .init_irq = sa1100_init_irq, .timer = &sa1100_timer, .init_machine = assabet_init, #ifdef CONFIG_SA1111 .dma_zone_size = SZ_1M, #endif MACHINE_END
gpl-2.0
jianC/android_kernel_htc_msm7x30
drivers/serial/bcm63xx_uart.c
1262
21032
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Derived from many drivers using generic_serial interface. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> * * Serial driver for BCM63xx integrated UART. * * Hardware flow control was _not_ tested since I only have RX/TX on * my board. */ #if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/console.h> #include <linux/clk.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/sysrq.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <bcm63xx_clk.h> #include <bcm63xx_irq.h> #include <bcm63xx_regs.h> #include <bcm63xx_io.h> #define BCM63XX_NR_UARTS 2 static struct uart_port ports[BCM63XX_NR_UARTS]; /* * rx interrupt mask / stat * * mask: * - rx fifo full * - rx fifo above threshold * - rx fifo not empty for too long */ #define UART_RX_INT_MASK (UART_IR_MASK(UART_IR_RXOVER) | \ UART_IR_MASK(UART_IR_RXTHRESH) | \ UART_IR_MASK(UART_IR_RXTIMEOUT)) #define UART_RX_INT_STAT (UART_IR_STAT(UART_IR_RXOVER) | \ UART_IR_STAT(UART_IR_RXTHRESH) | \ UART_IR_STAT(UART_IR_RXTIMEOUT)) /* * tx interrupt mask / stat * * mask: * - tx fifo empty * - tx fifo below threshold */ #define UART_TX_INT_MASK (UART_IR_MASK(UART_IR_TXEMPTY) | \ UART_IR_MASK(UART_IR_TXTRESH)) #define UART_TX_INT_STAT (UART_IR_STAT(UART_IR_TXEMPTY) | \ UART_IR_STAT(UART_IR_TXTRESH)) /* * external input interrupt * * mask: any edge on CTS, DCD */ #define UART_EXTINP_INT_MASK (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \ UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD)) /* * handy uart register accessor */ static inline unsigned int bcm_uart_readl(struct uart_port *port, unsigned int offset) { return 
bcm_readl(port->membase + offset); } static inline void bcm_uart_writel(struct uart_port *port, unsigned int value, unsigned int offset) { bcm_writel(value, port->membase + offset); } /* * serial core request to check if uart tx fifo is empty */ static unsigned int bcm_uart_tx_empty(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_IR_REG); return (val & UART_IR_STAT(UART_IR_TXEMPTY)) ? 1 : 0; } /* * serial core request to set RTS and DTR pin state and loopback mode */ static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) { unsigned int val; val = bcm_uart_readl(port, UART_MCTL_REG); val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK); /* invert of written value is reflected on the pin */ if (!(mctrl & TIOCM_DTR)) val |= UART_MCTL_DTR_MASK; if (!(mctrl & TIOCM_RTS)) val |= UART_MCTL_RTS_MASK; bcm_uart_writel(port, val, UART_MCTL_REG); val = bcm_uart_readl(port, UART_CTL_REG); if (mctrl & TIOCM_LOOP) val |= UART_CTL_LOOPBACK_MASK; else val &= ~UART_CTL_LOOPBACK_MASK; bcm_uart_writel(port, val, UART_CTL_REG); } /* * serial core request to return RI, CTS, DCD and DSR pin state */ static unsigned int bcm_uart_get_mctrl(struct uart_port *port) { unsigned int val, mctrl; mctrl = 0; val = bcm_uart_readl(port, UART_EXTINP_REG); if (val & UART_EXTINP_RI_MASK) mctrl |= TIOCM_RI; if (val & UART_EXTINP_CTS_MASK) mctrl |= TIOCM_CTS; if (val & UART_EXTINP_DCD_MASK) mctrl |= TIOCM_CD; if (val & UART_EXTINP_DSR_MASK) mctrl |= TIOCM_DSR; return mctrl; } /* * serial core request to disable tx ASAP (used for flow control) */ static void bcm_uart_stop_tx(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_CTL_REG); val &= ~(UART_CTL_TXEN_MASK); bcm_uart_writel(port, val, UART_CTL_REG); val = bcm_uart_readl(port, UART_IR_REG); val &= ~UART_TX_INT_MASK; bcm_uart_writel(port, val, UART_IR_REG); } /* * serial core request to (re)enable tx */ static void bcm_uart_start_tx(struct uart_port *port) { unsigned int val; val = 
bcm_uart_readl(port, UART_IR_REG); val |= UART_TX_INT_MASK; bcm_uart_writel(port, val, UART_IR_REG); val = bcm_uart_readl(port, UART_CTL_REG); val |= UART_CTL_TXEN_MASK; bcm_uart_writel(port, val, UART_CTL_REG); } /* * serial core request to stop rx, called before port shutdown */ static void bcm_uart_stop_rx(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_IR_REG); val &= ~UART_RX_INT_MASK; bcm_uart_writel(port, val, UART_IR_REG); } /* * serial core request to enable modem status interrupt reporting */ static void bcm_uart_enable_ms(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_IR_REG); val |= UART_IR_MASK(UART_IR_EXTIP); bcm_uart_writel(port, val, UART_IR_REG); } /* * serial core request to start/stop emitting break char */ static void bcm_uart_break_ctl(struct uart_port *port, int ctl) { unsigned long flags; unsigned int val; spin_lock_irqsave(&port->lock, flags); val = bcm_uart_readl(port, UART_CTL_REG); if (ctl) val |= UART_CTL_XMITBRK_MASK; else val &= ~UART_CTL_XMITBRK_MASK; bcm_uart_writel(port, val, UART_CTL_REG); spin_unlock_irqrestore(&port->lock, flags); } /* * return port type in string format */ static const char *bcm_uart_type(struct uart_port *port) { return (port->type == PORT_BCM63XX) ? 
"bcm63xx_uart" : NULL; } /* * read all chars in rx fifo and send them to core */ static void bcm_uart_do_rx(struct uart_port *port) { struct tty_struct *tty; unsigned int max_count; /* limit number of char read in interrupt, should not be * higher than fifo size anyway since we're much faster than * serial port */ max_count = 32; tty = port->state->port.tty; do { unsigned int iestat, c, cstat; char flag; /* get overrun/fifo empty information from ier * register */ iestat = bcm_uart_readl(port, UART_IR_REG); if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY))) break; cstat = c = bcm_uart_readl(port, UART_FIFO_REG); port->icount.rx++; flag = TTY_NORMAL; c &= 0xff; if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) { /* do stats first */ if (cstat & UART_FIFO_BRKDET_MASK) { port->icount.brk++; if (uart_handle_break(port)) continue; } if (cstat & UART_FIFO_PARERR_MASK) port->icount.parity++; if (cstat & UART_FIFO_FRAMEERR_MASK) port->icount.frame++; /* update flag wrt read_status_mask */ cstat &= port->read_status_mask; if (cstat & UART_FIFO_BRKDET_MASK) flag = TTY_BREAK; if (cstat & UART_FIFO_FRAMEERR_MASK) flag = TTY_FRAME; if (cstat & UART_FIFO_PARERR_MASK) flag = TTY_PARITY; } if (uart_handle_sysrq_char(port, c)) continue; if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) { port->icount.overrun++; tty_insert_flip_char(tty, 0, TTY_OVERRUN); } if ((cstat & port->ignore_status_mask) == 0) tty_insert_flip_char(tty, c, flag); } while (--max_count); tty_flip_buffer_push(tty); } /* * fill tx fifo with chars to send, stop when fifo is about to be full * or when all chars have been sent. 
*/ static void bcm_uart_do_tx(struct uart_port *port) { struct circ_buf *xmit; unsigned int val, max_count; if (port->x_char) { bcm_uart_writel(port, port->x_char, UART_FIFO_REG); port->icount.tx++; port->x_char = 0; return; } if (uart_tx_stopped(port)) { bcm_uart_stop_tx(port); return; } xmit = &port->state->xmit; if (uart_circ_empty(xmit)) goto txq_empty; val = bcm_uart_readl(port, UART_MCTL_REG); val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT; max_count = port->fifosize - val; while (max_count--) { unsigned int c; c = xmit->buf[xmit->tail]; bcm_uart_writel(port, c, UART_FIFO_REG); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; if (uart_circ_empty(xmit)) break; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); if (uart_circ_empty(xmit)) goto txq_empty; return; txq_empty: /* nothing to send, disable transmit interrupt */ val = bcm_uart_readl(port, UART_IR_REG); val &= ~UART_TX_INT_MASK; bcm_uart_writel(port, val, UART_IR_REG); return; } /* * process uart interrupt */ static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id) { struct uart_port *port; unsigned int irqstat; port = dev_id; spin_lock(&port->lock); irqstat = bcm_uart_readl(port, UART_IR_REG); if (irqstat & UART_RX_INT_STAT) bcm_uart_do_rx(port); if (irqstat & UART_TX_INT_STAT) bcm_uart_do_tx(port); if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) { unsigned int estat; estat = bcm_uart_readl(port, UART_EXTINP_REG); if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS)) uart_handle_cts_change(port, estat & UART_EXTINP_CTS_MASK); if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD)) uart_handle_dcd_change(port, estat & UART_EXTINP_DCD_MASK); } spin_unlock(&port->lock); return IRQ_HANDLED; } /* * enable rx & tx operation on uart */ static void bcm_uart_enable(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_CTL_REG); val |= (UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK); bcm_uart_writel(port, val, 
UART_CTL_REG); } /* * disable rx & tx operation on uart */ static void bcm_uart_disable(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_CTL_REG); val &= ~(UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK); bcm_uart_writel(port, val, UART_CTL_REG); } /* * clear all unread data in rx fifo and unsent data in tx fifo */ static void bcm_uart_flush(struct uart_port *port) { unsigned int val; /* empty rx and tx fifo */ val = bcm_uart_readl(port, UART_CTL_REG); val |= UART_CTL_RSTRXFIFO_MASK | UART_CTL_RSTTXFIFO_MASK; bcm_uart_writel(port, val, UART_CTL_REG); /* read any pending char to make sure all irq status are * cleared */ (void)bcm_uart_readl(port, UART_FIFO_REG); } /* * serial core request to initialize uart and start rx operation */ static int bcm_uart_startup(struct uart_port *port) { unsigned int val; int ret; /* mask all irq and flush port */ bcm_uart_disable(port); bcm_uart_writel(port, 0, UART_IR_REG); bcm_uart_flush(port); /* clear any pending external input interrupt */ (void)bcm_uart_readl(port, UART_EXTINP_REG); /* set rx/tx fifo thresh to fifo half size */ val = bcm_uart_readl(port, UART_MCTL_REG); val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK); val |= (port->fifosize / 2) << UART_MCTL_RXFIFOTHRESH_SHIFT; val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT; bcm_uart_writel(port, val, UART_MCTL_REG); /* set rx fifo timeout to 1 char time */ val = bcm_uart_readl(port, UART_CTL_REG); val &= ~UART_CTL_RXTMOUTCNT_MASK; val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT; bcm_uart_writel(port, val, UART_CTL_REG); /* report any edge on dcd and cts */ val = UART_EXTINP_INT_MASK; val |= UART_EXTINP_DCD_NOSENSE_MASK; val |= UART_EXTINP_CTS_NOSENSE_MASK; bcm_uart_writel(port, val, UART_EXTINP_REG); /* register irq and enable rx interrupts */ ret = request_irq(port->irq, bcm_uart_interrupt, 0, bcm_uart_type(port), port); if (ret) return ret; bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); 
bcm_uart_enable(port); return 0; } /* * serial core request to flush & disable uart */ static void bcm_uart_shutdown(struct uart_port *port) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); bcm_uart_writel(port, 0, UART_IR_REG); spin_unlock_irqrestore(&port->lock, flags); bcm_uart_disable(port); bcm_uart_flush(port); free_irq(port->irq, port); } /* * serial core request to change current uart setting */ static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new, struct ktermios *old) { unsigned int ctl, baud, quot, ier; unsigned long flags; spin_lock_irqsave(&port->lock, flags); /* disable uart while changing speed */ bcm_uart_disable(port); bcm_uart_flush(port); /* update Control register */ ctl = bcm_uart_readl(port, UART_CTL_REG); ctl &= ~UART_CTL_BITSPERSYM_MASK; switch (new->c_cflag & CSIZE) { case CS5: ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT); break; case CS6: ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT); break; case CS7: ctl |= (2 << UART_CTL_BITSPERSYM_SHIFT); break; default: ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT); break; } ctl &= ~UART_CTL_STOPBITS_MASK; if (new->c_cflag & CSTOPB) ctl |= UART_CTL_STOPBITS_2; else ctl |= UART_CTL_STOPBITS_1; ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK); if (new->c_cflag & PARENB) ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK); ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK); if (new->c_cflag & PARODD) ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK); bcm_uart_writel(port, ctl, UART_CTL_REG); /* update Baudword register */ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); quot = uart_get_divisor(port, baud) - 1; bcm_uart_writel(port, quot, UART_BAUD_REG); /* update Interrupt register */ ier = bcm_uart_readl(port, UART_IR_REG); ier &= ~UART_IR_MASK(UART_IR_EXTIP); if (UART_ENABLE_MS(port, new->c_cflag)) ier |= UART_IR_MASK(UART_IR_EXTIP); bcm_uart_writel(port, ier, UART_IR_REG); /* update read/ignore mask */ port->read_status_mask = 
UART_FIFO_VALID_MASK; if (new->c_iflag & INPCK) { port->read_status_mask |= UART_FIFO_FRAMEERR_MASK; port->read_status_mask |= UART_FIFO_PARERR_MASK; } if (new->c_iflag & (BRKINT)) port->read_status_mask |= UART_FIFO_BRKDET_MASK; port->ignore_status_mask = 0; if (new->c_iflag & IGNPAR) port->ignore_status_mask |= UART_FIFO_PARERR_MASK; if (new->c_iflag & IGNBRK) port->ignore_status_mask |= UART_FIFO_BRKDET_MASK; if (!(new->c_cflag & CREAD)) port->ignore_status_mask |= UART_FIFO_VALID_MASK; uart_update_timeout(port, new->c_cflag, baud); bcm_uart_enable(port); spin_unlock_irqrestore(&port->lock, flags); } /* * serial core request to claim uart iomem */ static int bcm_uart_request_port(struct uart_port *port) { unsigned int size; size = RSET_UART_SIZE; if (!request_mem_region(port->mapbase, size, "bcm63xx")) { dev_err(port->dev, "Memory region busy\n"); return -EBUSY; } port->membase = ioremap(port->mapbase, size); if (!port->membase) { dev_err(port->dev, "Unable to map registers\n"); release_mem_region(port->mapbase, size); return -EBUSY; } return 0; } /* * serial core request to release uart iomem */ static void bcm_uart_release_port(struct uart_port *port) { release_mem_region(port->mapbase, RSET_UART_SIZE); iounmap(port->membase); } /* * serial core request to do any port required autoconfiguration */ static void bcm_uart_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) { if (bcm_uart_request_port(port)) return; port->type = PORT_BCM63XX; } } /* * serial core request to check that port information in serinfo are * suitable */ static int bcm_uart_verify_port(struct uart_port *port, struct serial_struct *serinfo) { if (port->type != PORT_BCM63XX) return -EINVAL; if (port->irq != serinfo->irq) return -EINVAL; if (port->iotype != serinfo->io_type) return -EINVAL; if (port->mapbase != (unsigned long)serinfo->iomem_base) return -EINVAL; return 0; } /* serial core callbacks */ static struct uart_ops bcm_uart_ops = { .tx_empty = 
bcm_uart_tx_empty, .get_mctrl = bcm_uart_get_mctrl, .set_mctrl = bcm_uart_set_mctrl, .start_tx = bcm_uart_start_tx, .stop_tx = bcm_uart_stop_tx, .stop_rx = bcm_uart_stop_rx, .enable_ms = bcm_uart_enable_ms, .break_ctl = bcm_uart_break_ctl, .startup = bcm_uart_startup, .shutdown = bcm_uart_shutdown, .set_termios = bcm_uart_set_termios, .type = bcm_uart_type, .release_port = bcm_uart_release_port, .request_port = bcm_uart_request_port, .config_port = bcm_uart_config_port, .verify_port = bcm_uart_verify_port, }; #ifdef CONFIG_SERIAL_BCM63XX_CONSOLE static inline void wait_for_xmitr(struct uart_port *port) { unsigned int tmout; /* Wait up to 10ms for the character(s) to be sent. */ tmout = 10000; while (--tmout) { unsigned int val; val = bcm_uart_readl(port, UART_IR_REG); if (val & UART_IR_STAT(UART_IR_TXEMPTY)) break; udelay(1); } /* Wait up to 1s for flow control if necessary */ if (port->flags & UPF_CONS_FLOW) { tmout = 1000000; while (--tmout) { unsigned int val; val = bcm_uart_readl(port, UART_EXTINP_REG); if (val & UART_EXTINP_CTS_MASK) break; udelay(1); } } } /* * output given char */ static void bcm_console_putchar(struct uart_port *port, int ch) { wait_for_xmitr(port); bcm_uart_writel(port, ch, UART_FIFO_REG); } /* * console core request to output given string */ static void bcm_console_write(struct console *co, const char *s, unsigned int count) { struct uart_port *port; unsigned long flags; int locked; port = &ports[co->index]; local_irq_save(flags); if (port->sysrq) { /* bcm_uart_interrupt() already took the lock */ locked = 0; } else if (oops_in_progress) { locked = spin_trylock(&port->lock); } else { spin_lock(&port->lock); locked = 1; } /* call helper to deal with \r\n */ uart_console_write(port, s, count, bcm_console_putchar); /* and wait for char to be transmitted */ wait_for_xmitr(port); if (locked) spin_unlock(&port->lock); local_irq_restore(flags); } /* * console core request to setup given console, find matching uart * port and setup it. 
*/ static int bcm_console_setup(struct console *co, char *options) { struct uart_port *port; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; if (co->index < 0 || co->index >= BCM63XX_NR_UARTS) return -EINVAL; port = &ports[co->index]; if (!port->membase) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(port, co, baud, parity, bits, flow); } static struct uart_driver bcm_uart_driver; static struct console bcm63xx_console = { .name = "ttyS", .write = bcm_console_write, .device = uart_console_device, .setup = bcm_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &bcm_uart_driver, }; static int __init bcm63xx_console_init(void) { register_console(&bcm63xx_console); return 0; } console_initcall(bcm63xx_console_init); #define BCM63XX_CONSOLE (&bcm63xx_console) #else #define BCM63XX_CONSOLE NULL #endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */ static struct uart_driver bcm_uart_driver = { .owner = THIS_MODULE, .driver_name = "bcm63xx_uart", .dev_name = "ttyS", .major = TTY_MAJOR, .minor = 64, .nr = BCM63XX_NR_UARTS, .cons = BCM63XX_CONSOLE, }; /* * platform driver probe/remove callback */ static int __devinit bcm_uart_probe(struct platform_device *pdev) { struct resource *res_mem, *res_irq; struct uart_port *port; struct clk *clk; int ret; if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS) return -EINVAL; if (ports[pdev->id].membase) return -EBUSY; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res_mem) return -ENODEV; res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res_irq) return -ENODEV; clk = clk_get(&pdev->dev, "periph"); if (IS_ERR(clk)) return -ENODEV; port = &ports[pdev->id]; memset(port, 0, sizeof(*port)); port->iotype = UPIO_MEM; port->mapbase = res_mem->start; port->irq = res_irq->start; port->ops = &bcm_uart_ops; port->flags = UPF_BOOT_AUTOCONF; port->dev = &pdev->dev; port->fifosize = 16; port->uartclk = clk_get_rate(clk) / 2; port->line = 
pdev->id; clk_put(clk); ret = uart_add_one_port(&bcm_uart_driver, port); if (ret) { ports[pdev->id].membase = 0; return ret; } platform_set_drvdata(pdev, port); return 0; } static int __devexit bcm_uart_remove(struct platform_device *pdev) { struct uart_port *port; port = platform_get_drvdata(pdev); uart_remove_one_port(&bcm_uart_driver, port); platform_set_drvdata(pdev, NULL); /* mark port as free */ ports[pdev->id].membase = 0; return 0; } /* * platform driver stuff */ static struct platform_driver bcm_uart_platform_driver = { .probe = bcm_uart_probe, .remove = __devexit_p(bcm_uart_remove), .driver = { .owner = THIS_MODULE, .name = "bcm63xx_uart", }, }; static int __init bcm_uart_init(void) { int ret; ret = uart_register_driver(&bcm_uart_driver); if (ret) return ret; ret = platform_driver_register(&bcm_uart_platform_driver); if (ret) uart_unregister_driver(&bcm_uart_driver); return ret; } static void __exit bcm_uart_exit(void) { platform_driver_unregister(&bcm_uart_platform_driver); uart_unregister_driver(&bcm_uart_driver); } module_init(bcm_uart_init); module_exit(bcm_uart_exit); MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); MODULE_DESCRIPTION("Broadcom 63<xx integrated uart driver"); MODULE_LICENSE("GPL");
gpl-2.0
ibladesi/TF101-HighOC-3P2
net/802/hippi.c
1518
6077
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * HIPPI-type device handling. * * Version: @(#)hippi.c 1.0.0 05/29/97 * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Florian La Roche, <rzsfl@rz.uni-sb.de> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Jes Sorensen, <Jes.Sorensen@cern.ch> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/hippidevice.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <net/arp.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/system.h> /* * Create the HIPPI MAC header for an arbitrary protocol layer * * saddr=NULL means use device source address * daddr=NULL means leave destination address (eg unresolved arp) */ static int hippi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN); struct hippi_cb *hcb = (struct hippi_cb *) skb->cb; if (!len){ len = skb->len - HIPPI_HLEN; printk("hippi_header(): length not supplied\n"); } /* * Due to the stupidity of the little endian byte-order we * have to set the fp field this way. 
*/ hip->fp.fixed = htonl(0x04800018); hip->fp.d2_size = htonl(len + 8); hip->le.fc = 0; hip->le.double_wide = 0; /* only HIPPI 800 for the time being */ hip->le.message_type = 0; /* Data PDU */ hip->le.dest_addr_type = 2; /* 12 bit SC address */ hip->le.src_addr_type = 2; /* 12 bit SC address */ memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3); memset(&hip->le.reserved, 0, 16); hip->snap.dsap = HIPPI_EXTENDED_SAP; hip->snap.ssap = HIPPI_EXTENDED_SAP; hip->snap.ctrl = HIPPI_UI_CMD; hip->snap.oui[0] = 0x00; hip->snap.oui[1] = 0x00; hip->snap.oui[2] = 0x00; hip->snap.ethertype = htons(type); if (daddr) { memcpy(hip->le.dest_switch_addr, daddr + 3, 3); memcpy(&hcb->ifield, daddr + 2, 4); return HIPPI_HLEN; } hcb->ifield = 0; return -((int)HIPPI_HLEN); } /* * Rebuild the HIPPI MAC header. This is called after an ARP has * completed on this sk_buff. We now let ARP fill in the other fields. */ static int hippi_rebuild_header(struct sk_buff *skb) { struct hippi_hdr *hip = (struct hippi_hdr *)skb->data; /* * Only IP is currently supported */ if(hip->snap.ethertype != htons(ETH_P_IP)) { printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n",skb->dev->name,ntohs(hip->snap.ethertype)); return 0; } /* * We don't support dynamic ARP on HIPPI, but we use the ARP * static ARP tables to hold the I-FIELDs. */ return arp_find(hip->le.daddr, skb); } /* * Determine the packet's protocol ID. */ __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev) { struct hippi_hdr *hip; /* * This is actually wrong ... question is if we really should * set the raw address here. */ skb->dev = dev; skb_reset_mac_header(skb); hip = (struct hippi_hdr *)skb_mac_header(skb); skb_pull(skb, HIPPI_HLEN); /* * No fancy promisc stuff here now. */ return hip->snap.ethertype; } EXPORT_SYMBOL(hippi_type_trans); int hippi_change_mtu(struct net_device *dev, int new_mtu) { /* * HIPPI's got these nice large MTUs. 
*/ if ((new_mtu < 68) || (new_mtu > 65280)) return -EINVAL; dev->mtu = new_mtu; return(0); } EXPORT_SYMBOL(hippi_change_mtu); /* * For HIPPI we will actually use the lower 4 bytes of the hardware * address as the I-FIELD rather than the actual hardware address. */ int hippi_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); return 0; } EXPORT_SYMBOL(hippi_mac_addr); int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) { /* Never send broadcast/multicast ARP messages */ p->mcast_probes = 0; /* In IPv6 unicast probes are valid even on NBMA, * because they are encapsulated in normal IPv6 protocol. * Should be a generic flag. */ if (p->tbl->family != AF_INET6) p->ucast_probes = 0; return 0; } EXPORT_SYMBOL(hippi_neigh_setup_dev); static const struct header_ops hippi_header_ops = { .create = hippi_header, .rebuild = hippi_rebuild_header, }; static void hippi_setup(struct net_device *dev) { dev->header_ops = &hippi_header_ops; /* * We don't support HIPPI `ARP' for the time being, and probably * never will unless someone else implements it. However we * still need a fake ARPHRD to make ifconfig and friends play ball. */ dev->type = ARPHRD_HIPPI; dev->hard_header_len = HIPPI_HLEN; dev->mtu = 65280; dev->addr_len = HIPPI_ALEN; dev->tx_queue_len = 25 /* 5 */; memset(dev->broadcast, 0xFF, HIPPI_ALEN); /* * HIPPI doesn't support broadcast+multicast and we only use * static ARP tables. ARP is disabled by hippi_neigh_setup_dev. */ dev->flags = 0; } /** * alloc_hippi_dev - Register HIPPI device * @sizeof_priv: Size of additional driver-private structure to be allocated * for this HIPPI device * * Fill in the fields of the device structure with HIPPI-generic values. * * Constructs a new net device, complete with a private data area of * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for * this private data area. 
*/ struct net_device *alloc_hippi_dev(int sizeof_priv) { return alloc_netdev(sizeof_priv, "hip%d", hippi_setup); } EXPORT_SYMBOL(alloc_hippi_dev);
gpl-2.0
farchanrifai/lineage
arch/x86/boot/compressed/eboot.c
1518
23905
/* ----------------------------------------------------------------------- * * Copyright 2011 Intel Corporation; author Matt Fleming * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. * * ----------------------------------------------------------------------- */ #include <linux/efi.h> #include <asm/efi.h> #include <asm/setup.h> #include <asm/desc.h> #undef memcpy /* Use memcpy from misc.c */ #include "eboot.h" static efi_system_table_t *sys_table; static efi_status_t __get_map(efi_memory_desc_t **map, unsigned long *map_size, unsigned long *desc_size) { efi_memory_desc_t *m = NULL; efi_status_t status; unsigned long key; u32 desc_version; *map_size = sizeof(*m) * 32; again: /* * Add an additional efi_memory_desc_t because we're doing an * allocation which may be in a new descriptor region. */ *map_size += sizeof(*m); status = efi_call_phys3(sys_table->boottime->allocate_pool, EFI_LOADER_DATA, *map_size, (void **)&m); if (status != EFI_SUCCESS) goto fail; status = efi_call_phys5(sys_table->boottime->get_memory_map, map_size, m, &key, desc_size, &desc_version); if (status == EFI_BUFFER_TOO_SMALL) { efi_call_phys1(sys_table->boottime->free_pool, m); goto again; } if (status != EFI_SUCCESS) efi_call_phys1(sys_table->boottime->free_pool, m); fail: *map = m; return status; } /* * Allocate at the highest possible address that is not above 'max'. 
*/ static efi_status_t high_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long max) { unsigned long map_size, desc_size; efi_memory_desc_t *map; efi_status_t status; unsigned long nr_pages; u64 max_addr = 0; int i; status = __get_map(&map, &map_size, &desc_size); if (status != EFI_SUCCESS) goto fail; nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; again: for (i = 0; i < map_size / desc_size; i++) { efi_memory_desc_t *desc; unsigned long m = (unsigned long)map; u64 start, end; desc = (efi_memory_desc_t *)(m + (i * desc_size)); if (desc->type != EFI_CONVENTIONAL_MEMORY) continue; if (desc->num_pages < nr_pages) continue; start = desc->phys_addr; end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); if ((start + size) > end || (start + size) > max) continue; if (end - size > max) end = max; if (round_down(end - size, align) < start) continue; start = round_down(end - size, align); /* * Don't allocate at 0x0. It will confuse code that * checks pointers against NULL. */ if (start == 0x0) continue; if (start > max_addr) max_addr = start; } if (!max_addr) status = EFI_NOT_FOUND; else { status = efi_call_phys4(sys_table->boottime->allocate_pages, EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, nr_pages, &max_addr); if (status != EFI_SUCCESS) { max = max_addr; max_addr = 0; goto again; } *addr = max_addr; } free_pool: efi_call_phys1(sys_table->boottime->free_pool, map); fail: return status; } /* * Allocate at the lowest possible address. 
*/ static efi_status_t low_alloc(unsigned long size, unsigned long align, unsigned long *addr) { unsigned long map_size, desc_size; efi_memory_desc_t *map; efi_status_t status; unsigned long nr_pages; int i; status = __get_map(&map, &map_size, &desc_size); if (status != EFI_SUCCESS) goto fail; nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; for (i = 0; i < map_size / desc_size; i++) { efi_memory_desc_t *desc; unsigned long m = (unsigned long)map; u64 start, end; desc = (efi_memory_desc_t *)(m + (i * desc_size)); if (desc->type != EFI_CONVENTIONAL_MEMORY) continue; if (desc->num_pages < nr_pages) continue; start = desc->phys_addr; end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); /* * Don't allocate at 0x0. It will confuse code that * checks pointers against NULL. Skip the first 8 * bytes so we start at a nice even number. */ if (start == 0x0) start += 8; start = round_up(start, align); if ((start + size) > end) continue; status = efi_call_phys4(sys_table->boottime->allocate_pages, EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, nr_pages, &start); if (status == EFI_SUCCESS) { *addr = start; break; } } if (i == map_size / desc_size) status = EFI_NOT_FOUND; free_pool: efi_call_phys1(sys_table->boottime->free_pool, map); fail: return status; } static void low_free(unsigned long size, unsigned long addr) { unsigned long nr_pages; nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; efi_call_phys2(sys_table->boottime->free_pages, addr, size); } static void find_bits(unsigned long mask, u8 *pos, u8 *size) { u8 first, len; first = 0; len = 0; if (mask) { while (!(mask & 0x1)) { mask = mask >> 1; first++; } while (mask & 0x1) { mask = mask >> 1; len++; } } *pos = first; *size = len; } /* * See if we have Graphics Output Protocol */ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, unsigned long size) { struct efi_graphics_output_protocol *gop, *first_gop; struct efi_pixel_bitmask pixel_info; unsigned long nr_gops; efi_status_t status; 
void **gop_handle; u16 width, height; u32 fb_base, fb_size; u32 pixels_per_scan_line; int pixel_format; int i; status = efi_call_phys3(sys_table->boottime->allocate_pool, EFI_LOADER_DATA, size, &gop_handle); if (status != EFI_SUCCESS) return status; status = efi_call_phys5(sys_table->boottime->locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL, &size, gop_handle); if (status != EFI_SUCCESS) goto free_handle; first_gop = NULL; nr_gops = size / sizeof(void *); for (i = 0; i < nr_gops; i++) { struct efi_graphics_output_mode_info *info; efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID; void *pciio; void *h = gop_handle[i]; status = efi_call_phys3(sys_table->boottime->handle_protocol, h, proto, &gop); if (status != EFI_SUCCESS) continue; efi_call_phys3(sys_table->boottime->handle_protocol, h, &pciio_proto, &pciio); status = efi_call_phys4(gop->query_mode, gop, gop->mode->mode, &size, &info); if (status == EFI_SUCCESS && (!first_gop || pciio)) { /* * Apple provide GOPs that are not backed by * real hardware (they're used to handle * multiple displays). The workaround is to * search for a GOP implementing the PCIIO * protocol, and if one isn't found, to just * fallback to the first GOP. */ width = info->horizontal_resolution; height = info->vertical_resolution; fb_base = gop->mode->frame_buffer_base; fb_size = gop->mode->frame_buffer_size; pixel_format = info->pixel_format; pixel_info = info->pixel_information; pixels_per_scan_line = info->pixels_per_scan_line; /* * Once we've found a GOP supporting PCIIO, * don't bother looking any further. */ if (pciio) break; first_gop = gop; } } /* Did we find any GOPs? 
*/ if (!first_gop) goto free_handle; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; si->lfb_width = width; si->lfb_height = height; si->lfb_base = fb_base; si->lfb_size = fb_size; si->pages = 1; if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) { si->lfb_depth = 32; si->lfb_linelength = pixels_per_scan_line * 4; si->red_size = 8; si->red_pos = 0; si->green_size = 8; si->green_pos = 8; si->blue_size = 8; si->blue_pos = 16; si->rsvd_size = 8; si->rsvd_pos = 24; } else if (pixel_format == PIXEL_BGR_RESERVED_8BIT_PER_COLOR) { si->lfb_depth = 32; si->lfb_linelength = pixels_per_scan_line * 4; si->red_size = 8; si->red_pos = 16; si->green_size = 8; si->green_pos = 8; si->blue_size = 8; si->blue_pos = 0; si->rsvd_size = 8; si->rsvd_pos = 24; } else if (pixel_format == PIXEL_BIT_MASK) { find_bits(pixel_info.red_mask, &si->red_pos, &si->red_size); find_bits(pixel_info.green_mask, &si->green_pos, &si->green_size); find_bits(pixel_info.blue_mask, &si->blue_pos, &si->blue_size); find_bits(pixel_info.reserved_mask, &si->rsvd_pos, &si->rsvd_size); si->lfb_depth = si->red_size + si->green_size + si->blue_size + si->rsvd_size; si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8; } else { si->lfb_depth = 4; si->lfb_linelength = si->lfb_width / 2; si->red_size = 0; si->red_pos = 0; si->green_size = 0; si->green_pos = 0; si->blue_size = 0; si->blue_pos = 0; si->rsvd_size = 0; si->rsvd_pos = 0; } free_handle: efi_call_phys1(sys_table->boottime->free_pool, gop_handle); return status; } /* * See if we have Universal Graphics Adapter (UGA) protocol */ static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size) { struct efi_uga_draw_protocol *uga, *first_uga; unsigned long nr_ugas; efi_status_t status; u32 width, height; void **uga_handle = NULL; int i; status = efi_call_phys3(sys_table->boottime->allocate_pool, EFI_LOADER_DATA, size, &uga_handle); if (status != EFI_SUCCESS) return status; status = 
efi_call_phys5(sys_table->boottime->locate_handle, EFI_LOCATE_BY_PROTOCOL, uga_proto, NULL, &size, uga_handle); if (status != EFI_SUCCESS) goto free_handle; first_uga = NULL; nr_ugas = size / sizeof(void *); for (i = 0; i < nr_ugas; i++) { efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID; void *handle = uga_handle[i]; u32 w, h, depth, refresh; void *pciio; status = efi_call_phys3(sys_table->boottime->handle_protocol, handle, uga_proto, &uga); if (status != EFI_SUCCESS) continue; efi_call_phys3(sys_table->boottime->handle_protocol, handle, &pciio_proto, &pciio); status = efi_call_phys5(uga->get_mode, uga, &w, &h, &depth, &refresh); if (status == EFI_SUCCESS && (!first_uga || pciio)) { width = w; height = h; /* * Once we've found a UGA supporting PCIIO, * don't bother looking any further. */ if (pciio) break; first_uga = uga; } } if (!first_uga) goto free_handle; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; si->lfb_depth = 32; si->lfb_width = width; si->lfb_height = height; si->red_size = 8; si->red_pos = 16; si->green_size = 8; si->green_pos = 8; si->blue_size = 8; si->blue_pos = 0; si->rsvd_size = 8; si->rsvd_pos = 24; free_handle: efi_call_phys1(sys_table->boottime->free_pool, uga_handle); return status; } void setup_graphics(struct boot_params *boot_params) { efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; struct screen_info *si; efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; efi_status_t status; unsigned long size; void **gop_handle = NULL; void **uga_handle = NULL; si = &boot_params->screen_info; memset(si, 0, sizeof(*si)); size = 0; status = efi_call_phys5(sys_table->boottime->locate_handle, EFI_LOCATE_BY_PROTOCOL, &graphics_proto, NULL, &size, gop_handle); if (status == EFI_BUFFER_TOO_SMALL) status = setup_gop(si, &graphics_proto, size); if (status != EFI_SUCCESS) { size = 0; status = efi_call_phys5(sys_table->boottime->locate_handle, EFI_LOCATE_BY_PROTOCOL, &uga_proto, NULL, &size, uga_handle); if (status == 
EFI_BUFFER_TOO_SMALL) setup_uga(si, &uga_proto, size); } } struct initrd { efi_file_handle_t *handle; u64 size; }; /* * Check the cmdline for a LILO-style initrd= arguments. * * We only support loading an initrd from the same filesystem as the * kernel image. */ static efi_status_t handle_ramdisks(efi_loaded_image_t *image, struct setup_header *hdr) { struct initrd *initrds; unsigned long initrd_addr; efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; u64 initrd_total; efi_file_io_interface_t *io; efi_file_handle_t *fh; efi_status_t status; int nr_initrds; char *str; int i, j, k; initrd_addr = 0; initrd_total = 0; str = (char *)(unsigned long)hdr->cmd_line_ptr; j = 0; /* See close_handles */ if (!str || !*str) return EFI_SUCCESS; for (nr_initrds = 0; *str; nr_initrds++) { str = strstr(str, "initrd="); if (!str) break; str += 7; /* Skip any leading slashes */ while (*str == '/' || *str == '\\') str++; while (*str && *str != ' ' && *str != '\n') str++; } if (!nr_initrds) return EFI_SUCCESS; status = efi_call_phys3(sys_table->boottime->allocate_pool, EFI_LOADER_DATA, nr_initrds * sizeof(*initrds), &initrds); if (status != EFI_SUCCESS) goto fail; str = (char *)(unsigned long)hdr->cmd_line_ptr; for (i = 0; i < nr_initrds; i++) { struct initrd *initrd; efi_file_handle_t *h; efi_file_info_t *info; efi_char16_t filename_16[256]; unsigned long info_sz; efi_guid_t info_guid = EFI_FILE_INFO_ID; efi_char16_t *p; u64 file_sz; str = strstr(str, "initrd="); if (!str) break; str += 7; initrd = &initrds[i]; p = filename_16; /* Skip any leading slashes */ while (*str == '/' || *str == '\\') str++; while (*str && *str != ' ' && *str != '\n') { if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16)) break; *p++ = *str++; } *p = '\0'; /* Only open the volume once. 
*/ if (!i) { efi_boot_services_t *boottime; boottime = sys_table->boottime; status = efi_call_phys3(boottime->handle_protocol, image->device_handle, &fs_proto, &io); if (status != EFI_SUCCESS) goto free_initrds; status = efi_call_phys2(io->open_volume, io, &fh); if (status != EFI_SUCCESS) goto free_initrds; } status = efi_call_phys5(fh->open, fh, &h, filename_16, EFI_FILE_MODE_READ, (u64)0); if (status != EFI_SUCCESS) goto close_handles; initrd->handle = h; info_sz = 0; status = efi_call_phys4(h->get_info, h, &info_guid, &info_sz, NULL); if (status != EFI_BUFFER_TOO_SMALL) goto close_handles; grow: status = efi_call_phys3(sys_table->boottime->allocate_pool, EFI_LOADER_DATA, info_sz, &info); if (status != EFI_SUCCESS) goto close_handles; status = efi_call_phys4(h->get_info, h, &info_guid, &info_sz, info); if (status == EFI_BUFFER_TOO_SMALL) { efi_call_phys1(sys_table->boottime->free_pool, info); goto grow; } file_sz = info->file_size; efi_call_phys1(sys_table->boottime->free_pool, info); if (status != EFI_SUCCESS) goto close_handles; initrd->size = file_sz; initrd_total += file_sz; } if (initrd_total) { unsigned long addr; /* * Multiple initrd's need to be at consecutive * addresses in memory, so allocate enough memory for * all the initrd's. */ status = high_alloc(initrd_total, 0x1000, &initrd_addr, hdr->initrd_addr_max); if (status != EFI_SUCCESS) goto close_handles; /* We've run out of free low memory. 
*/ if (initrd_addr > hdr->initrd_addr_max) { status = EFI_INVALID_PARAMETER; goto free_initrd_total; } addr = initrd_addr; for (j = 0; j < nr_initrds; j++) { u64 size; size = initrds[j].size; while (size) { u64 chunksize; if (size > EFI_READ_CHUNK_SIZE) chunksize = EFI_READ_CHUNK_SIZE; else chunksize = size; status = efi_call_phys3(fh->read, initrds[j].handle, &chunksize, addr); if (status != EFI_SUCCESS) goto free_initrd_total; addr += chunksize; size -= chunksize; } efi_call_phys1(fh->close, initrds[j].handle); } } efi_call_phys1(sys_table->boottime->free_pool, initrds); hdr->ramdisk_image = initrd_addr; hdr->ramdisk_size = initrd_total; return status; free_initrd_total: low_free(initrd_total, initrd_addr); close_handles: for (k = j; k < nr_initrds; k++) efi_call_phys1(fh->close, initrds[k].handle); free_initrds: efi_call_phys1(sys_table->boottime->free_pool, initrds); fail: hdr->ramdisk_image = 0; hdr->ramdisk_size = 0; return status; } /* * Because the x86 boot code expects to be passed a boot_params we * need to create one ourselves (usually the bootloader would create * one for us). 
*/ static efi_status_t make_boot_params(struct boot_params *boot_params, efi_loaded_image_t *image, void *handle) { struct efi_info *efi = &boot_params->efi_info; struct apm_bios_info *bi = &boot_params->apm_bios_info; struct sys_desc_table *sdt = &boot_params->sys_desc_table; struct e820entry *e820_map = &boot_params->e820_map[0]; struct e820entry *prev = NULL; struct setup_header *hdr = &boot_params->hdr; unsigned long size, key, desc_size, _size; efi_memory_desc_t *mem_map; void *options = image->load_options; u32 load_options_size = image->load_options_size / 2; /* ASCII */ int options_size = 0; efi_status_t status; __u32 desc_version; unsigned long cmdline; u8 nr_entries; u16 *s2; u8 *s1; int i; hdr->type_of_loader = 0x21; /* Convert unicode cmdline to ascii */ cmdline = 0; s2 = (u16 *)options; if (s2) { while (*s2 && *s2 != '\n' && options_size < load_options_size) { s2++; options_size++; } if (options_size) { if (options_size > hdr->cmdline_size) options_size = hdr->cmdline_size; options_size++; /* NUL termination */ status = low_alloc(options_size, 1, &cmdline); if (status != EFI_SUCCESS) goto fail; s1 = (u8 *)(unsigned long)cmdline; s2 = (u16 *)options; for (i = 0; i < options_size - 1; i++) *s1++ = *s2++; *s1 = '\0'; } } hdr->cmd_line_ptr = cmdline; hdr->ramdisk_image = 0; hdr->ramdisk_size = 0; status = handle_ramdisks(image, hdr); if (status != EFI_SUCCESS) goto free_cmdline; setup_graphics(boot_params); /* Clear APM BIOS info */ memset(bi, 0, sizeof(*bi)); memset(sdt, 0, sizeof(*sdt)); memcpy(&efi->efi_loader_signature, EFI_LOADER_SIGNATURE, sizeof(__u32)); size = sizeof(*mem_map) * 32; again: size += sizeof(*mem_map); _size = size; status = low_alloc(size, 1, (unsigned long *)&mem_map); if (status != EFI_SUCCESS) goto free_cmdline; status = efi_call_phys5(sys_table->boottime->get_memory_map, &size, mem_map, &key, &desc_size, &desc_version); if (status == EFI_BUFFER_TOO_SMALL) { low_free(_size, (unsigned long)mem_map); goto again; } if (status != 
EFI_SUCCESS) goto free_mem_map; efi->efi_systab = (unsigned long)sys_table; efi->efi_memdesc_size = desc_size; efi->efi_memdesc_version = desc_version; efi->efi_memmap = (unsigned long)mem_map; efi->efi_memmap_size = size; #ifdef CONFIG_X86_64 efi->efi_systab_hi = (unsigned long)sys_table >> 32; efi->efi_memmap_hi = (unsigned long)mem_map >> 32; #endif /* Might as well exit boot services now */ status = efi_call_phys2(sys_table->boottime->exit_boot_services, handle, key); if (status != EFI_SUCCESS) goto free_mem_map; /* Historic? */ boot_params->alt_mem_k = 32 * 1024; /* * Convert the EFI memory map to E820. */ nr_entries = 0; for (i = 0; i < size / desc_size; i++) { efi_memory_desc_t *d; unsigned int e820_type = 0; unsigned long m = (unsigned long)mem_map; d = (efi_memory_desc_t *)(m + (i * desc_size)); switch (d->type) { case EFI_RESERVED_TYPE: case EFI_RUNTIME_SERVICES_CODE: case EFI_RUNTIME_SERVICES_DATA: case EFI_MEMORY_MAPPED_IO: case EFI_MEMORY_MAPPED_IO_PORT_SPACE: case EFI_PAL_CODE: e820_type = E820_RESERVED; break; case EFI_UNUSABLE_MEMORY: e820_type = E820_UNUSABLE; break; case EFI_ACPI_RECLAIM_MEMORY: e820_type = E820_ACPI; break; case EFI_LOADER_CODE: case EFI_LOADER_DATA: case EFI_BOOT_SERVICES_CODE: case EFI_BOOT_SERVICES_DATA: case EFI_CONVENTIONAL_MEMORY: e820_type = E820_RAM; break; case EFI_ACPI_MEMORY_NVS: e820_type = E820_NVS; break; default: continue; } /* Merge adjacent mappings */ if (prev && prev->type == e820_type && (prev->addr + prev->size) == d->phys_addr) prev->size += d->num_pages << 12; else { e820_map->addr = d->phys_addr; e820_map->size = d->num_pages << 12; e820_map->type = e820_type; prev = e820_map++; nr_entries++; } } boot_params->e820_entries = nr_entries; return EFI_SUCCESS; free_mem_map: low_free(_size, (unsigned long)mem_map); free_cmdline: if (options_size) low_free(options_size, hdr->cmd_line_ptr); fail: return status; } /* * On success we return a pointer to a boot_params structure, and NULL * on failure. 
*/ struct boot_params *efi_main(void *handle, efi_system_table_t *_table) { struct boot_params *boot_params; unsigned long start, nr_pages; struct desc_ptr *gdt, *idt; efi_loaded_image_t *image; struct setup_header *hdr; efi_status_t status; efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID; struct desc_struct *desc; sys_table = _table; /* Check if we were booted by the EFI firmware */ if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) goto fail; status = efi_call_phys3(sys_table->boottime->handle_protocol, handle, &proto, (void *)&image); if (status != EFI_SUCCESS) goto fail; status = low_alloc(0x4000, 1, (unsigned long *)&boot_params); if (status != EFI_SUCCESS) goto fail; memset(boot_params, 0x0, 0x4000); /* Copy first two sectors to boot_params */ memcpy(boot_params, image->image_base, 1024); hdr = &boot_params->hdr; /* * The EFI firmware loader could have placed the kernel image * anywhere in memory, but the kernel has various restrictions * on the max physical address it can run at. Attempt to move * the kernel to boot_params.pref_address, or as low as * possible. 
*/ start = hdr->pref_address; nr_pages = round_up(hdr->init_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; status = efi_call_phys4(sys_table->boottime->allocate_pages, EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, nr_pages, &start); if (status != EFI_SUCCESS) { status = low_alloc(hdr->init_size, hdr->kernel_alignment, &start); if (status != EFI_SUCCESS) goto fail; } hdr->code32_start = (__u32)start; hdr->pref_address = (__u64)(unsigned long)image->image_base; memcpy((void *)start, image->image_base, image->image_size); status = efi_call_phys3(sys_table->boottime->allocate_pool, EFI_LOADER_DATA, sizeof(*gdt), (void **)&gdt); if (status != EFI_SUCCESS) goto fail; gdt->size = 0x800; status = low_alloc(gdt->size, 8, (unsigned long *)&gdt->address); if (status != EFI_SUCCESS) goto fail; status = efi_call_phys3(sys_table->boottime->allocate_pool, EFI_LOADER_DATA, sizeof(*idt), (void **)&idt); if (status != EFI_SUCCESS) goto fail; idt->size = 0; idt->address = 0; status = make_boot_params(boot_params, image, handle); if (status != EFI_SUCCESS) goto fail; memset((char *)gdt->address, 0x0, gdt->size); desc = (struct desc_struct *)gdt->address; /* The first GDT is a dummy and the second is unused. 
*/ desc += 2; desc->limit0 = 0xffff; desc->base0 = 0x0000; desc->base1 = 0x0000; desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ; desc->s = DESC_TYPE_CODE_DATA; desc->dpl = 0; desc->p = 1; desc->limit = 0xf; desc->avl = 0; desc->l = 0; desc->d = SEG_OP_SIZE_32BIT; desc->g = SEG_GRANULARITY_4KB; desc->base2 = 0x00; desc++; desc->limit0 = 0xffff; desc->base0 = 0x0000; desc->base1 = 0x0000; desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE; desc->s = DESC_TYPE_CODE_DATA; desc->dpl = 0; desc->p = 1; desc->limit = 0xf; desc->avl = 0; desc->l = 0; desc->d = SEG_OP_SIZE_32BIT; desc->g = SEG_GRANULARITY_4KB; desc->base2 = 0x00; #ifdef CONFIG_X86_64 /* Task segment value */ desc++; desc->limit0 = 0x0000; desc->base0 = 0x0000; desc->base1 = 0x0000; desc->type = SEG_TYPE_TSS; desc->s = 0; desc->dpl = 0; desc->p = 1; desc->limit = 0x0; desc->avl = 0; desc->l = 0; desc->d = 0; desc->g = SEG_GRANULARITY_4KB; desc->base2 = 0x00; #endif /* CONFIG_X86_64 */ asm volatile ("lidt %0" : : "m" (*idt)); asm volatile ("lgdt %0" : : "m" (*gdt)); asm volatile("cli"); return boot_params; fail: return NULL; }
gpl-2.0
heijingjie/fs_linux_3.10.58
drivers/infiniband/hw/mlx4/qp.c
1774
81309
/* * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/log2.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <rdma/ib_cache.h> #include <rdma/ib_pack.h> #include <rdma/ib_addr.h> #include <rdma/ib_mad.h> #include <linux/mlx4/qp.h> #include "mlx4_ib.h" #include "user.h" enum { MLX4_IB_ACK_REQ_FREQ = 8, }; enum { MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83, MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, MLX4_IB_LINK_TYPE_IB = 0, MLX4_IB_LINK_TYPE_ETH = 1 }; enum { /* * Largest possible UD header: send with GRH and immediate * data plus 18 bytes for an Ethernet header with VLAN/802.1Q * tag. 
(LRH would only use 8 bytes, so Ethernet is the * biggest case) */ MLX4_IB_UD_HEADER_SIZE = 82, MLX4_IB_LSO_HEADER_SPARE = 128, }; enum { MLX4_IB_IBOE_ETHERTYPE = 0x8915 }; struct mlx4_ib_sqp { struct mlx4_ib_qp qp; int pkey_index; u32 qkey; u32 send_psn; struct ib_ud_header ud_header; u8 header_buf[MLX4_IB_UD_HEADER_SIZE]; }; enum { MLX4_IB_MIN_SQ_STRIDE = 6, MLX4_IB_CACHE_LINE_SIZE = 64, }; enum { MLX4_RAW_QP_MTU = 7, MLX4_RAW_QP_MSGMAX = 31, }; static const __be32 mlx4_ib_opcode[] = { [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS), [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), [IB_WR_BIND_MW] = cpu_to_be32(MLX4_OPCODE_BIND_MW), }; static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) { return container_of(mqp, struct mlx4_ib_sqp, qp); } static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { if (!mlx4_is_master(dev->dev)) return 0; return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX; } static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { int proxy_sqp = 0; int real_sqp = 0; int i; /* PPF or Native -- real SQP */ real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) && qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && qp->mqp.qpn <= 
dev->dev->phys_caps.base_sqpn + 3); if (real_sqp) return 1; /* VF or PF -- proxy SQP */ if (mlx4_is_mfunc(dev->dev)) { for (i = 0; i < dev->dev->caps.num_ports; i++) { if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { proxy_sqp = 1; break; } } } return proxy_sqp; } /* used for INIT/CLOSE port logic */ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { int proxy_qp0 = 0; int real_qp0 = 0; int i; /* PPF or Native -- real QP0 */ real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) && qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); if (real_qp0) return 1; /* VF or PF -- proxy QP0 */ if (mlx4_is_mfunc(dev->dev)) { for (i = 0; i < dev->dev->caps.num_ports; i++) { if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { proxy_qp0 = 1; break; } } } return proxy_qp0; } static void *get_wqe(struct mlx4_ib_qp *qp, int offset) { return mlx4_buf_offset(&qp->buf, offset); } static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) { return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); } static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) { return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); } /* * Stamp a SQ WQE so that it is invalid if prefetched by marking the * first four bytes of every 64 byte chunk with * 0x7FFFFFF | (invalid_ownership_value << 31). * * When the max work request size is less than or equal to the WQE * basic block size, as an optimization, we can stamp all WQEs with * 0xffffffff, and skip the very first chunk of each WQE. */ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) { __be32 *wqe; int i; int s; int ind; void *buf; __be32 stamp; struct mlx4_wqe_ctrl_seg *ctrl; if (qp->sq_max_wqes_per_wr > 1) { s = roundup(size, 1U << qp->sq.wqe_shift); for (i = 0; i < s; i += 64) { ind = (i >> qp->sq.wqe_shift) + n; stamp = ind & qp->sq.wqe_cnt ? 
cpu_to_be32(0x7fffffff) : cpu_to_be32(0xffffffff); buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); *wqe = stamp; } } else { ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); s = (ctrl->fence_size & 0x3f) << 4; for (i = 64; i < s; i += 64) { wqe = buf + i; *wqe = cpu_to_be32(0xffffffff); } } } static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) { struct mlx4_wqe_ctrl_seg *ctrl; struct mlx4_wqe_inline_seg *inl; void *wqe; int s; ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); s = sizeof(struct mlx4_wqe_ctrl_seg); if (qp->ibqp.qp_type == IB_QPT_UD) { struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl; struct mlx4_av *av = (struct mlx4_av *)dgram->av; memset(dgram, 0, sizeof *dgram); av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); s += sizeof(struct mlx4_wqe_datagram_seg); } /* Pad the remainder of the WQE with an inline data segment. */ if (size > s) { inl = wqe + s; inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl)); } ctrl->srcrb_flags = 0; ctrl->fence_size = size / 16; /* * Make sure descriptor is fully written before setting ownership bit * (because HW can start executing as soon as we do). */ wmb(); ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) | (n & qp->sq.wqe_cnt ? 
cpu_to_be32(1 << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}

/* Post NOP WQE to prevent wrap-around in the middle of WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	/* Number of WQE basic blocks left before the queue wraps. */
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		/* Fill the tail with a NOP so the next WR starts at 0. */
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}

/*
 * Translate a low-level mlx4 QP event into the corresponding IB event
 * and deliver it to the consumer's event handler, if one is registered.
 * A path-migration event also switches the QP's active port to the
 * previously configured alternate port.
 */
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		/* Map mlx4 hardware event codes onto IB core event codes. */
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			/* Unknown hardware event: log and drop it. */
			pr_warn("Unexpected event type %d "
				"on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

/*
 * Per-WQE fixed overhead (in bytes) preceding the scatter/gather list,
 * as a function of the QP transport type and creation flags.
 */
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ?
MLX4_IB_LSO_HEADER_SPARE : 0); case MLX4_IB_QPT_PROXY_SMI_OWNER: case MLX4_IB_QPT_PROXY_SMI: case MLX4_IB_QPT_PROXY_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg) + 64; case MLX4_IB_QPT_TUN_SMI_OWNER: case MLX4_IB_QPT_TUN_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg); case MLX4_IB_QPT_UC: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_raddr_seg); case MLX4_IB_QPT_RC: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_atomic_seg) + sizeof (struct mlx4_wqe_raddr_seg); case MLX4_IB_QPT_SMI: case MLX4_IB_QPT_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + ALIGN(MLX4_IB_UD_HEADER_SIZE + DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE, MLX4_INLINE_ALIGN) * sizeof (struct mlx4_wqe_inline_seg), sizeof (struct mlx4_wqe_data_seg)) + ALIGN(4 + sizeof (struct mlx4_wqe_inline_seg), sizeof (struct mlx4_wqe_data_seg)); default: return sizeof (struct mlx4_wqe_ctrl_seg); } } static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int is_user, int has_rq, struct mlx4_ib_qp *qp) { /* Sanity check RQ size before proceeding */ if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE || cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)) return -EINVAL; if (!has_rq) { if (cap->max_recv_wr) return -EINVAL; qp->rq.wqe_cnt = qp->rq.max_gs = 0; } else { /* HW requires >= 1 RQ entry with >= 1 gather entry */ if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) return -EINVAL; qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); } /* leave userspace return values as they were, so as not to break ABI */ if (is_user) { cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; cap->max_recv_sge = qp->rq.max_gs; } else { cap->max_recv_wr = qp->rq.max_post = min(dev->dev->caps.max_wqes - 
MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); cap->max_recv_sge = min(qp->rq.max_gs, min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)); } return 0; } static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) { int s; /* Sanity check SQ size before proceeding */ if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) || cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) || cap->max_inline_data + send_wqe_overhead(type, qp->flags) + sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) return -EINVAL; /* * For MLX transport we need 2 extra S/G entries: * one for the header and one for the checksum at the end */ if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI || type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) && cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) return -EINVAL; s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg), cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) + send_wqe_overhead(type, qp->flags); if (s > dev->dev->caps.max_sq_desc_sz) return -EINVAL; /* * Hermon supports shrinking WQEs, such that a single work * request can include multiple units of 1 << wqe_shift. This * way, work requests can differ in size, and do not have to * be a power of 2 in size, saving memory and speeding up send * WR posting. Unfortunately, if we do this then the * wqe_index field in CQEs can't be used to look up the WR ID * anymore, so we do this only if selective signaling is off. * * Further, on 32-bit platforms, we can't use vmap() to make * the QP buffer virtually contiguous. Thus we have to use * constant-sized WRs to make sure a WR is always fully within * a single page-sized chunk. * * Finally, we use NOP work requests to pad the end of the * work queue, to avoid wrap-around in the middle of WR. 
We * set NEC bit to avoid getting completions with error for * these NOP WRs, but since NEC is only supported starting * with firmware 2.2.232, we use constant-sized WRs for older * firmware. * * And, since MLX QPs only support SEND, we use constant-sized * WRs in this case. * * We look for the smallest value of wqe_shift such that the * resulting number of wqes does not exceed device * capabilities. * * We set WQE size to at least 64 bytes, this way stamping * invalidates each WQE. */ if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC && qp->sq_signal_bits && BITS_PER_LONG == 64 && type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI && !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) qp->sq.wqe_shift = ilog2(64); else qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); for (;;) { qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); /* * We need to leave 2 KB + 1 WR of headroom in the SQ to * allow HW to prefetch. 
*/ qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * qp->sq_max_wqes_per_wr + qp->sq_spare_wqes); if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) break; if (qp->sq_max_wqes_per_wr <= 1) return -EINVAL; ++qp->sq.wqe_shift; } qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - send_wqe_overhead(type, qp->flags)) / sizeof (struct mlx4_wqe_data_seg); qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); if (qp->rq.wqe_shift > qp->sq.wqe_shift) { qp->rq.offset = 0; qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; } else { qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; qp->sq.offset = 0; } cap->max_send_wr = qp->sq.max_post = (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; cap->max_send_sge = min(qp->sq.max_gs, min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)); /* We don't support inline sends for kernel QPs (yet) */ cap->max_inline_data = 0; return 0; } static int set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) { /* Sanity check SQ size before proceeding */ if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes || ucmd->log_sq_stride > ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) || ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE) return -EINVAL; qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; qp->sq.wqe_shift = ucmd->log_sq_stride; qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); return 0; } static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) { int i; qp->sqp_proxy_rcv = kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt, GFP_KERNEL); if (!qp->sqp_proxy_rcv) return -ENOMEM; for (i = 0; i < qp->rq.wqe_cnt; i++) { qp->sqp_proxy_rcv[i].addr = kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr), GFP_KERNEL); if (!qp->sqp_proxy_rcv[i].addr) goto err; 
qp->sqp_proxy_rcv[i].map = ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); } return 0; err: while (i > 0) { --i; ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); kfree(qp->sqp_proxy_rcv[i].addr); } kfree(qp->sqp_proxy_rcv); qp->sqp_proxy_rcv = NULL; return -ENOMEM; } static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) { int i; for (i = 0; i < qp->rq.wqe_cnt; i++) { ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); kfree(qp->sqp_proxy_rcv[i].addr); } kfree(qp->sqp_proxy_rcv); } static int qp_has_rq(struct ib_qp_init_attr *attr) { if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT) return 0; return !attr->srq; } static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp) { int qpn; int err; struct mlx4_ib_sqp *sqp; struct mlx4_ib_qp *qp; enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; /* When tunneling special qps, we use a plain UD qp */ if (sqpn) { if (mlx4_is_mfunc(dev->dev) && (!mlx4_is_master(dev->dev) || !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) { if (init_attr->qp_type == IB_QPT_GSI) qp_type = MLX4_IB_QPT_PROXY_GSI; else if (mlx4_is_master(dev->dev)) qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER; else qp_type = MLX4_IB_QPT_PROXY_SMI; } qpn = sqpn; /* add extra sg entry for tunneling */ init_attr->cap.max_recv_sge++; } else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) { struct mlx4_ib_qp_tunnel_init_attr *tnl_init = container_of(init_attr, struct mlx4_ib_qp_tunnel_init_attr, init_attr); if ((tnl_init->proxy_qp_type != IB_QPT_SMI && tnl_init->proxy_qp_type != IB_QPT_GSI) || !mlx4_is_master(dev->dev)) return -EINVAL; if (tnl_init->proxy_qp_type == IB_QPT_GSI) qp_type = MLX4_IB_QPT_TUN_GSI; else if 
(tnl_init->slave == mlx4_master_func_num(dev->dev)) qp_type = MLX4_IB_QPT_TUN_SMI_OWNER; else qp_type = MLX4_IB_QPT_TUN_SMI; /* we are definitely in the PPF here, since we are creating * tunnel QPs. base_tunnel_sqpn is therefore valid. */ qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave + tnl_init->proxy_qp_type * 2 + tnl_init->port - 1; sqpn = qpn; } if (!*caller_qp) { if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI || (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) { sqp = kzalloc(sizeof (struct mlx4_ib_sqp), GFP_KERNEL); if (!sqp) return -ENOMEM; qp = &sqp->qp; } else { qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL); if (!qp) return -ENOMEM; } } else qp = *caller_qp; qp->mlx4_ib_qp_type = qp_type; mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); INIT_LIST_HEAD(&qp->gid_list); INIT_LIST_HEAD(&qp->steering_rules); qp->state = IB_QPS_RESET; if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp); if (err) goto err; if (pd->uobject) { struct mlx4_ib_create_qp ucmd; if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { err = -EFAULT; goto err; } qp->sq_no_prefetch = ucmd.sq_no_prefetch; err = set_user_sq_size(dev, qp, &ucmd); if (err) goto err; qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, qp->buf_size, 0, 0); if (IS_ERR(qp->umem)) { err = PTR_ERR(qp->umem); goto err; } err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem), ilog2(qp->umem->page_size), &qp->mtt); if (err) goto err_buf; err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); if (err) goto err_mtt; if (qp_has_rq(init_attr)) { err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), ucmd.db_addr, &qp->db); if (err) goto err_mtt; } } else { qp->sq_no_prefetch = 0; if (init_attr->create_flags & 
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) qp->flags |= MLX4_IB_QP_LSO; err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); if (err) goto err; if (qp_has_rq(init_attr)) { err = mlx4_db_alloc(dev->dev, &qp->db, 0); if (err) goto err; *qp->db.db = 0; } if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) { err = -ENOMEM; goto err_db; } err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, &qp->mtt); if (err) goto err_buf; err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); if (err) goto err_mtt; qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL); qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL); if (!qp->sq.wrid || !qp->rq.wrid) { err = -ENOMEM; goto err_wrid; } } if (sqpn) { if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { if (alloc_proxy_bufs(pd->device, qp)) { err = -ENOMEM; goto err_wrid; } } } else { /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE * BlueFlame setup flow wrongly causes VLAN insertion. */ if (init_attr->qp_type == IB_QPT_RAW_PACKET) err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn); else err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn); if (err) goto err_proxy; } err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); if (err) goto err_qpn; if (init_attr->qp_type == IB_QPT_XRC_TGT) qp->mqp.qpn |= (1 << 23); /* * Hardware wants QPN written in big-endian order (after * shifting) for send doorbell. Precompute this value to save * a little bit when posting sends. 
*/ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); qp->mqp.event = mlx4_ib_qp_event; if (!*caller_qp) *caller_qp = qp; return 0; err_qpn: if (!sqpn) mlx4_qp_release_range(dev->dev, qpn, 1); err_proxy: if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) free_proxy_bufs(pd->device, qp); err_wrid: if (pd->uobject) { if (qp_has_rq(init_attr)) mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); } else { kfree(qp->sq.wrid); kfree(qp->rq.wrid); } err_mtt: mlx4_mtt_cleanup(dev->dev, &qp->mtt); err_buf: if (pd->uobject) ib_umem_release(qp->umem); else mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); err_db: if (!pd->uobject && qp_has_rq(init_attr)) mlx4_db_free(dev->dev, &qp->db); err: if (!*caller_qp) kfree(qp); return err; } static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state) { switch (state) { case IB_QPS_RESET: return MLX4_QP_STATE_RST; case IB_QPS_INIT: return MLX4_QP_STATE_INIT; case IB_QPS_RTR: return MLX4_QP_STATE_RTR; case IB_QPS_RTS: return MLX4_QP_STATE_RTS; case IB_QPS_SQD: return MLX4_QP_STATE_SQD; case IB_QPS_SQE: return MLX4_QP_STATE_SQER; case IB_QPS_ERR: return MLX4_QP_STATE_ERR; default: return -1; } } static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) __acquires(&send_cq->lock) __acquires(&recv_cq->lock) { if (send_cq == recv_cq) { spin_lock_irq(&send_cq->lock); __acquire(&recv_cq->lock); } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_lock_irq(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { spin_lock_irq(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) __releases(&send_cq->lock) __releases(&recv_cq->lock) { if (send_cq == recv_cq) { __release(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_unlock(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else { spin_unlock(&send_cq->lock); 
		spin_unlock_irq(&recv_cq->lock);
	}
}

/* Free every GID entry chained on the QP's gid_list. */
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}

/*
 * Return the PD associated with a QP.  XRC target QPs have no PD of
 * their own and borrow the one stored in their XRC domain.
 */
static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
	else
		return to_mpd(qp->ibqp.pd);
}

/*
 * Look up the send and receive CQs for a QP.  XRC TGT QPs take their
 * CQ from the XRC domain and XRC INI QPs have no receive CQ; in both
 * cases the same CQ is returned for send and receive.
 */
static void get_cqs(struct mlx4_ib_qp *qp,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;
	}
}

static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	/* Move the HW QP back to RESET before tearing anything down. */
	if (qp->state != IB_QPS_RESET)
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			pr_warn("modify QP %06x to RESET failed.\n",
				qp->mqp.qpn);

	get_cqs(qp, &send_cq, &recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	/* For kernel QPs, flush any stale CQEs belonging to this QPN. */
	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ?
				   to_msrq(qp->ibqp.srq): NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);

	/* Special and tunnel QPNs are fixed; only release ranges we reserved. */
	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp))
		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (qp->rq.wqe_cnt)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
			free_proxy_bufs(&dev->ib_dev, qp);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (qp->rq.wqe_cnt)
			mlx4_db_free(dev->dev, &qp->db);
	}

	del_gid_entries(qp);
}

/*
 * Compute the QP number to use for a special (SMI/GSI) QP on the
 * requested port, depending on whether this is a native/PPF device
 * or a function that must go through proxy QPs.
 */
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
	/* Native or PPF */
	if (!mlx4_is_mfunc(dev->dev) ||
	    (mlx4_is_master(dev->dev) &&
	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
		return dev->dev->phys_caps.base_sqpn +
			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
			attr->port_num - 1;
	}
	/* PF or VF -- creating proxies */
	if (attr->qp_type == IB_QPT_SMI)
		return dev->dev->caps.qp0_proxy[attr->port_num - 1];
	else
		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
}

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = NULL;
	int err;
	u16 xrcdn = 0;

	/*
	 * We only support LSO, vendor flag1, and multicast loopback blocking,
	 * and only for kernel UD QPs.
*/ if (init_attr->create_flags & ~(MLX4_IB_QP_LSO | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK | MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP)) return ERR_PTR(-EINVAL); if (init_attr->create_flags && (udata || ((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) && init_attr->qp_type != IB_QPT_UD) || ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) && init_attr->qp_type > IB_QPT_GSI))) return ERR_PTR(-EINVAL); switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: pd = to_mxrcd(init_attr->xrcd)->pd; xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; /* fall through */ case IB_QPT_XRC_INI: if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) return ERR_PTR(-ENOSYS); init_attr->recv_cq = init_attr->send_cq; /* fall through */ case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_RAW_PACKET: qp = kzalloc(sizeof *qp, GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); /* fall through */ case IB_QPT_UD: { err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 0, &qp); if (err) return ERR_PTR(err); qp->ibqp.qp_num = qp->mqp.qpn; qp->xrcdn = xrcdn; break; } case IB_QPT_SMI: case IB_QPT_GSI: { /* Userspace is not allowed to create special QPs: */ if (udata) return ERR_PTR(-EINVAL); err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, get_sqp_num(to_mdev(pd->device), init_attr), &qp); if (err) return ERR_PTR(err); qp->port = init_attr->port_num; qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 
			0 : 1;
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}

/*
 * Destroy a QP: close the IB port if this is QP0, tear down HW and
 * SW resources via destroy_qp_common(), then free the driver
 * structure (the enclosing sqp container for special QPs).
 * Always returns 0.
 */
int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);
	struct mlx4_ib_pd *pd;

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	pd = get_pd(mqp);
	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}

/*
 * Map a driver QP type to the transport service type programmed into
 * the HW QP context.  Returns -1 for combinations that are invalid,
 * e.g. proxy/tunnel types on a device without multi-function support.
 */
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
MLX4_QP_ST_UD : -1); default: return -1; } } static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask) { u8 dest_rd_atomic; u32 access_flags; u32 hw_access_flags = 0; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) dest_rd_atomic = attr->max_dest_rd_atomic; else dest_rd_atomic = qp->resp_depth; if (attr_mask & IB_QP_ACCESS_FLAGS) access_flags = attr->qp_access_flags; else access_flags = qp->atomic_rd_en; if (!dest_rd_atomic) access_flags &= IB_ACCESS_REMOTE_WRITE; if (access_flags & IB_ACCESS_REMOTE_READ) hw_access_flags |= MLX4_QP_BIT_RRE; if (access_flags & IB_ACCESS_REMOTE_ATOMIC) hw_access_flags |= MLX4_QP_BIT_RAE; if (access_flags & IB_ACCESS_REMOTE_WRITE) hw_access_flags |= MLX4_QP_BIT_RWE; return cpu_to_be32(hw_access_flags); } static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr, int attr_mask) { if (attr_mask & IB_QP_PKEY_INDEX) sqp->pkey_index = attr->pkey_index; if (attr_mask & IB_QP_QKEY) sqp->qkey = attr->qkey; if (attr_mask & IB_QP_SQ_PSN) sqp->send_psn = attr->sq_psn; } static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port) { path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6); } static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, struct mlx4_qp_path *path, u8 port) { int err; int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_ETHERNET; u8 mac[6]; int is_mcast; u16 vlan_tag; int vidx; path->grh_mylmc = ah->src_path_bits & 0x7f; path->rlid = cpu_to_be16(ah->dlid); if (ah->static_rate) { path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET; while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && !(1 << path->static_rate & dev->dev->caps.stat_rate_support)) --path->static_rate; } else path->static_rate = 0; if (ah->ah_flags & IB_AH_GRH) { if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) { pr_err("sgid_index (%u) too large. 
max is %d\n", ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1); return -1; } path->grh_mylmc |= 1 << 7; path->mgid_index = ah->grh.sgid_index; path->hop_limit = ah->grh.hop_limit; path->tclass_flowlabel = cpu_to_be32((ah->grh.traffic_class << 20) | (ah->grh.flow_label)); memcpy(path->rgid, ah->grh.dgid.raw, 16); } if (is_eth) { path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((port - 1) << 6) | ((ah->sl & 7) << 3); if (!(ah->ah_flags & IB_AH_GRH)) return -1; err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port); if (err) return err; memcpy(path->dmac, mac, 6); path->ackto = MLX4_IB_LINK_TYPE_ETH; /* use index 0 into MAC table for IBoE */ path->grh_mylmc &= 0x80; vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]); if (vlan_tag < 0x1000) { if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx)) return -ENOENT; path->vlan_index = vidx; path->fl = 1 << 6; } } else path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((port - 1) << 6) | ((ah->sl & 0xf) << 2); return 0; } static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { struct mlx4_ib_gid_entry *ge, *tmp; list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { ge->added = 1; ge->port = qp->port; } } } static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) { struct mlx4_ib_dev *dev = to_mdev(ibqp->device); struct mlx4_ib_qp *qp = to_mqp(ibqp); struct mlx4_ib_pd *pd; struct mlx4_ib_cq *send_cq, *recv_cq; struct mlx4_qp_context *context; enum mlx4_qp_optpar optpar = 0; int sqd_event; int err = -EINVAL; context = kzalloc(sizeof *context, GFP_KERNEL); if (!context) return -ENOMEM; context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) | (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); if (!(attr_mask & IB_QP_PATH_MIG_STATE)) context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); else { optpar 
|= MLX4_QP_OPTPAR_PM_STATE; switch (attr->path_mig_state) { case IB_MIG_MIGRATED: context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); break; case IB_MIG_REARM: context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11); break; case IB_MIG_ARMED: context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11); break; } } if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) context->mtu_msgmax = (IB_MTU_4096 << 5) | 11; else if (ibqp->qp_type == IB_QPT_RAW_PACKET) context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX; else if (ibqp->qp_type == IB_QPT_UD) { if (qp->flags & MLX4_IB_QP_LSO) context->mtu_msgmax = (IB_MTU_4096 << 5) | ilog2(dev->dev->caps.max_gso_sz); else context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; } else if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { pr_err("path MTU (%u) is invalid\n", attr->path_mtu); goto out; } context->mtu_msgmax = (attr->path_mtu << 5) | ilog2(dev->dev->caps.max_msg_sz); } if (qp->rq.wqe_cnt) context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; context->rq_size_stride |= qp->rq.wqe_shift - 4; if (qp->sq.wqe_cnt) context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; context->sq_size_stride |= qp->sq.wqe_shift - 4; if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { context->sq_size_stride |= !!qp->sq_no_prefetch << 7; context->xrcd = cpu_to_be32((u32) qp->xrcdn); if (ibqp->qp_type == IB_QPT_RAW_PACKET) context->param3 |= cpu_to_be32(1 << 30); } if (qp->ibqp.uobject) context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); else context->usr_page = cpu_to_be32(dev->priv_uar.index); if (attr_mask & IB_QP_DEST_QPN) context->remote_qpn = cpu_to_be32(attr->dest_qp_num); if (attr_mask & IB_QP_PORT) { if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD && !(attr_mask & IB_QP_AV)) { mlx4_set_sched(&context->pri_path, attr->port_num); optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE; } } if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 
if (dev->counters[qp->port - 1] != -1) { context->pri_path.counter_index = dev->counters[qp->port - 1]; optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX; } else context->pri_path.counter_index = 0xff; } if (attr_mask & IB_QP_PKEY_INDEX) { if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) context->pri_path.disable_pkey_check = 0x40; context->pri_path.pkey_index = attr->pkey_index; optpar |= MLX4_QP_OPTPAR_PKEY_INDEX; } if (attr_mask & IB_QP_AV) { if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path, attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) goto out; optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE); } if (attr_mask & IB_QP_TIMEOUT) { context->pri_path.ackto |= attr->timeout << 3; optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT; } if (attr_mask & IB_QP_ALT_PATH) { if (attr->alt_port_num == 0 || attr->alt_port_num > dev->dev->caps.num_ports) goto out; if (attr->alt_pkey_index >= dev->dev->caps.pkey_table_len[attr->alt_port_num]) goto out; if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path, attr->alt_port_num)) goto out; context->alt_path.pkey_index = attr->alt_pkey_index; context->alt_path.ackto = attr->alt_timeout << 3; optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH; } pd = get_pd(qp); get_cqs(qp, &send_cq, &recv_cq); context->pd = cpu_to_be32(pd->pdn); context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn); context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); /* Set "fast registration enabled" for all kernel QPs */ if (!qp->ibqp.uobject) context->params1 |= cpu_to_be32(1 << 11); if (attr_mask & IB_QP_RNR_RETRY) { context->params1 |= cpu_to_be32(attr->rnr_retry << 13); optpar |= MLX4_QP_OPTPAR_RNR_RETRY; } if (attr_mask & IB_QP_RETRY_CNT) { context->params1 |= cpu_to_be32(attr->retry_cnt << 16); optpar |= MLX4_QP_OPTPAR_RETRY_COUNT; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic) context->params1 |= cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); optpar |= 
MLX4_QP_OPTPAR_SRA_MAX; } if (attr_mask & IB_QP_SQ_PSN) context->next_send_psn = cpu_to_be32(attr->sq_psn); if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { if (attr->max_dest_rd_atomic) context->params2 |= cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); optpar |= MLX4_QP_OPTPAR_RRA_MAX; } if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE; } if (ibqp->srq) context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC); if (attr_mask & IB_QP_MIN_RNR_TIMER) { context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT; } if (attr_mask & IB_QP_RQ_PSN) context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */ if (attr_mask & IB_QP_QKEY) { if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) context->qkey = cpu_to_be32(IB_QP_SET_QKEY); else { if (mlx4_is_mfunc(dev->dev) && !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && (attr->qkey & MLX4_RESERVED_QKEY_MASK) == MLX4_RESERVED_QKEY_BASE) { pr_err("Cannot use reserved QKEY" " 0x%x (range 0xffff0000..0xffffffff" " is reserved)\n", attr->qkey); err = -EINVAL; goto out; } context->qkey = cpu_to_be32(attr->qkey); } optpar |= MLX4_QP_OPTPAR_Q_KEY; } if (ibqp->srq) context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn); if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) context->db_rec_addr = cpu_to_be64(qp->db.dma); if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR && (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI || ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_RAW_PACKET)) { context->pri_path.sched_queue = (qp->port - 1) << 6; if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) { 
context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE; if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) context->pri_path.fl = 0x80; } else { if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) context->pri_path.fl = 0x80; context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE; } } if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) context->pri_path.ackto = (context->pri_path.ackto & 0xf8) | MLX4_IB_LINK_TYPE_ETH; if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) sqd_event = 1; else sqd_event = 0; if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) context->rlkey |= (1 << 4); /* * Before passing a kernel QP to the HW, make sure that the * ownership bits of the send queue are set and the SQ * headroom is stamped so that the hardware doesn't start * processing stale work requests. */ if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { struct mlx4_wqe_ctrl_seg *ctrl; int i; for (i = 0; i < qp->sq.wqe_cnt; ++i) { ctrl = get_send_wqe(qp, i); ctrl->owner_opcode = cpu_to_be32(1 << 31); if (qp->sq_max_wqes_per_wr == 1) ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); } } err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), to_mlx4_state(new_state), context, optpar, sqd_event, &qp->mqp); if (err) goto out; qp->state = new_state; if (attr_mask & IB_QP_ACCESS_FLAGS) qp->atomic_rd_en = attr->qp_access_flags; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) qp->resp_depth = attr->max_dest_rd_atomic; if (attr_mask & IB_QP_PORT) { qp->port = attr->port_num; update_mcg_macs(dev, qp); } if (attr_mask & IB_QP_ALT_PATH) qp->alt_port = attr->alt_port_num; if (is_sqp(dev, qp)) store_sqp_attrs(to_msqp(qp), attr, attr_mask); /* * If we moved QP0 to RTR, bring the IB link up; if we moved * QP0 to RESET or ERROR, bring the link back down. 
*/ if (is_qp0(dev, qp)) { if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) if (mlx4_INIT_PORT(dev->dev, qp->port)) pr_warn("INIT_PORT failed for port %d\n", qp->port); if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR)) mlx4_CLOSE_PORT(dev->dev, qp->port); } /* * If we moved a kernel QP to RESET, clean up all old CQ * entries and reinitialize the QP. */ if (new_state == IB_QPS_RESET && !ibqp->uobject) { mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, ibqp->srq ? to_msrq(ibqp->srq): NULL); if (send_cq != recv_cq) mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); qp->rq.head = 0; qp->rq.tail = 0; qp->sq.head = 0; qp->sq.tail = 0; qp->sq_next_wqe = 0; if (qp->rq.wqe_cnt) *qp->db.db = 0; } out: kfree(context); return err; } int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(ibqp->device); struct mlx4_ib_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; int err = -EINVAL; mutex_lock(&qp->mutex); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { pr_debug("qpn 0x%x: invalid attribute mask specified " "for transition %d to %d. qp_type %d," " attr_mask 0x%x\n", ibqp->qp_num, cur_state, new_state, ibqp->qp_type, attr_mask); goto out; } if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->num_ports)) { pr_debug("qpn 0x%x: invalid port number (%d) specified " "for transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->port_num, cur_state, new_state, ibqp->qp_type); goto out; } if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) && (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) != IB_LINK_LAYER_ETHERNET)) goto out; if (attr_mask & IB_QP_PKEY_INDEX) { int p = attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port; if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) { pr_debug("qpn 0x%x: invalid pkey index (%d) specified " "for transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->pkey_index, cur_state, new_state, ibqp->qp_type); goto out; } } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. " "Transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->max_rd_atomic, cur_state, new_state, ibqp->qp_type); goto out; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) { pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. " "Transition %d to %d. qp_type %d\n", ibqp->qp_num, attr->max_dest_rd_atomic, cur_state, new_state, ibqp->qp_type); goto out; } if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; goto out; } err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); out: mutex_unlock(&qp->mutex); return err; } static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) { struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); struct ib_device *ib_dev = &mdev->ib_dev; struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); u16 pkey; u32 qkey; int send_size; int header_size; int spc; int i; if (wr->opcode != IB_WR_SEND) return -EINVAL; send_size = 0; for (i = 0; i < wr->num_sge; ++i) send_size += wr->sg_list[i].length; /* for proxy-qp0 sends, need to add in size of tunnel header */ /* for tunnel-qp0 sends, tunnel header is already in s/g list */ if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) send_size += sizeof (struct mlx4_ib_tunnel_header); ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header); if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { sqp->ud_header.lrh.service_level = 
be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; sqp->ud_header.lrh.destination_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); } mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); /* force loopback */ mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR); mlx->rlid = sqp->ud_header.lrh.destination_lid; sqp->ud_header.lrh.virtual_lane = 0; sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); sqp->ud_header.bth.pkey = cpu_to_be16(pkey); if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); else sqp->ud_header.bth.destination_qpn = cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) return -EINVAL; sqp->ud_header.deth.qkey = cpu_to_be32(qkey); sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; sqp->ud_header.immediate_present = 0; header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); /* * Inline data segments may not cross a 64 byte boundary. If * our UD header is bigger than the space available up to the * next 64 byte boundary in the WQE, use two inline data * segments to hold the UD header. */ spc = MLX4_INLINE_ALIGN - ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); if (header_size <= spc) { inl->byte_count = cpu_to_be32(1 << 31 | header_size); memcpy(inl + 1, sqp->header_buf, header_size); i = 1; } else { inl->byte_count = cpu_to_be32(1 << 31 | spc); memcpy(inl + 1, sqp->header_buf, spc); inl = (void *) (inl + 1) + spc; memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); /* * Need a barrier here to make sure all the data is * visible before the byte_count field is set. 
	 * Otherwise the HCA prefetcher could grab the 64-byte
	 * chunk with this inline segment and get a valid (!=
	 * 0xffffffff) byte count but stale data, and end up
	 * generating a packet with bad headers.
	 *
	 * The first inline segment's byte_count field doesn't
	 * need a barrier, because it comes after a
	 * control/MLX segment and therefore is at an offset
	 * of 16 mod 64.
	 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}

/*
 * Build the raw (MLX-format) transport header for a send on an SMI/GSI
 * special QP and pack it as one or two inline segments directly in the
 * WQE.  For the IB link layer this fills LRH/(optional GRH)/BTH/DETH;
 * for RoCE (Ethernet link layer) it fills Ethernet and optional VLAN
 * headers instead.  On success returns 0 and sets *mlx_seg_len to the
 * 16-byte-aligned space consumed in the WQE; returns -EINVAL for an
 * unsupported opcode or -ENODEV if no netdev is bound to the port.
 */
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = sqp->qp.ibqp.device;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	struct net_device *ndev;
	union ib_gid sgid;
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;
	int err = 0;
	u16 vlan = 0xffff;	/* 0xffff == no VLAN tag resolved yet */
	bool is_eth;
	bool is_vlan = false;
	bool is_grh;

	/* Total payload size = sum of all gather entries. */
	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
	is_grh = mlx4_ib_ah_grh_present(ah);
	if (is_eth) {
		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			sgid.global.subnet_prefix =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
				subnet_prefix;
			sgid.global.interface_id =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
				guid_cache[ah->av.ib.gid_index];
		} else  {
			err = ib_get_cached_gid(ib_dev,
						be32_to_cpu(ah->av.ib.port_pd) >> 24,
						ah->av.ib.gid_index, &sgid);
			if (err)
				return err;
		}

		/* VLAN ids are 12 bits, so < 0x1000 means a tag is present. */
		vlan = rdma_get_vlan_id(&sgid);
		is_vlan = vlan < 0x1000;
	}
	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);

	if (!is_eth) {
		/* LRH: SL lives in the top 4 bits of sl_tclass_flowlabel. */
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	if (is_grh) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label    =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			sqp->ud_header.grh.source_gid.global.subnet_prefix =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
						       subnet_prefix;
			sqp->ud_header.grh.source_gid.global.interface_id =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
					       guid_cache[ah->av.ib.gid_index];
		} else
			ib_get_cached_gid(ib_dev,
					  be32_to_cpu(ah->av.ib.port_pd) >> 24,
					  ah->av.ib.gid_index,
					  &sqp->ud_header.grh.source_gid);
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
	}

	/* Keep only CQ_UPDATE from whatever was in the MLX segment. */
	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (!is_eth) {
		/* VL15 for QP0 traffic; SLR when the dest LID is permissive. */
		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
					  (sqp->ud_header.lrh.destination_lid ==
					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
					  (sqp->ud_header.lrh.service_level << 8));
		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
		mlx->rlid = sqp->ud_header.lrh.destination_lid;
	}

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data    = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	if (is_eth) {
		u8 *smac;
		/* PCP: top 3 bits of the SL field, shifted into VLAN-tag position. */
		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;

		mlx->sched_prio = cpu_to_be16(pcp);

		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
		/* FIXME: cache smac value? */
		ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
		if (!ndev)
			return -ENODEV;
		smac = ndev->dev_addr;
		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
		/* Self-addressed frame: ask HW to loop it back. */
		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		if (!is_vlan) {
			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
		} else {
			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	/* QP0 uses its own pkey index; QP1 takes it from the work request. */
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	/* PSN is 24 bits wide. */
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	/* High bit set in remote_qkey means "use the QP's own qkey". */
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	/* Compile-time-disabled debug dump of the packed header. */
	if (0) {
		pr_err("built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				pr_err("  [%02x] ", i * 4);
			pr_cont(" %08x",
				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				pr_cont("\n");
		}
		pr_err("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}

/*
 * Return nonzero if posting @nreq more work requests would overflow @wq.
 * The fast path compares the cached head/tail; only on apparent overflow
 * is head/tail re-read under the CQ lock — NOTE(review): presumably to
 * serialize against completion processing updating wq->tail; confirm
 * against the CQ poll path.
 */
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

/*
 * Translate IB access flags into the big-endian mlx4 FMR/bind permission
 * bits.  Local read permission is always granted.
 */
static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ?
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
	       cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}

/*
 * Fill a fast-register (FMR) WQE segment from an IB_WR_FAST_REG_MR work
 * request: mark every page in the page list present and program the key,
 * page-list address, iova, length and permissions.
 */
static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
{
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
		mfrpl->mapped_page_list[i] =
			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
				    MLX4_MTT_FLAG_PRESENT);

	fseg->flags		= convert_access(wr->wr.fast_reg.access_flags);
	fseg->mem_key		= cpu_to_be32(wr->wr.fast_reg.rkey);
	fseg->buf_list		= cpu_to_be64(mfrpl->map);
	fseg->start_addr	= cpu_to_be64(wr->wr.fast_reg.iova_start);
	fseg->reg_len		= cpu_to_be64(wr->wr.fast_reg.length);
	fseg->offset		= 0; /* XXX -- is this just for ZBVA?
 */
	fseg->page_size		= cpu_to_be32(wr->wr.fast_reg.page_shift);
	fseg->reserved[0]	= 0;
	fseg->reserved[1]	= 0;
}

/*
 * Fill a memory-window bind segment from an IB_WR_BIND_MW work request.
 * The requested access flags are masked down to the remote/atomic bits
 * the bind segment understands.
 */
static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
{
	bseg->flags1 =
		convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ  |
			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
	bseg->flags2 = 0;
	if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
	if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
	bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
	bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
	bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
	bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
}

/* Fill a local-invalidate segment: zero everything but the key to kill. */
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	memset(iseg, 0, sizeof(*iseg));
	iseg->mem_key	= cpu_to_be32(rkey);
}

/* Fill the remote-address segment used by RDMA and atomic operations. */
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

/*
 * Fill an atomic segment.  Field meaning depends on the opcode:
 * compare-and-swap uses (swap, compare), masked fetch-and-add uses
 * (add value, mask), and plain fetch-and-add uses only the add value.
 */
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}

/* Fill a masked compare-and-swap atomic segment (all four operands). */
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add		= cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask	= cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare		= cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask	= cpu_to_be64(wr->wr.atomic.compare_add_mask);
}

/*
 * Fill a UD datagram segment: copy the address vector and set the
 * destination QPN/qkey, plus the Ethernet MAC/VLAN fields from the AV.
 */
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
}

/*
 * Fill a datagram segment aimed at the QP1 tunnel QP of the port named
 * in the AV, with forced loopback, no GRH and the SL preserved.  Used
 * only when sending on QP1 proxies (paraviritualized special QPs).
 */
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    struct ib_send_wr *wr, enum ib_qp_type qpt)
{
	union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
	struct mlx4_av sqp_av = {0};
	int port = *((u8 *) &av->ib.port_pd) & 0x3;

	/* force loopback */
	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
			cpu_to_be32(0xf0000000);

	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
	/* This function used only for sending on QP1 proxies */
	dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
	/* Use QKEY from the QP context, which is set by master */
	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}

/*
 * Build the tunnel header (AV + original QPN/pkey/qkey) as inline data
 * in the WQE, splitting it across two inline segments if it would cross
 * a 64-byte boundary.  Each byte_count write is preceded by wmb() so the
 * HCA never sees a valid count before the data itself is visible.
 * *mlx_seg_len gets the 16-byte-aligned space consumed.
 */
static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	struct mlx4_ib_tunnel_header hdr;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	int spc;
	int i;

	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
	hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
	hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);

	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (sizeof (hdr) <= spc) {
		memcpy(inl + 1, &hdr, sizeof (hdr));
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
		i = 1;
	} else {
		memcpy(inl + 1, &hdr, spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
		i = 2;
	}

	*mlx_seg_len = ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
}

/*
 * Write the 4-byte inline ICRC placeholder segment used by MLX sends.
 * The barrier ordering below is load-bearing; do not reorder.
 */
static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!=
	 * 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}

/*
 * Fill a scatter/gather data segment, writing byte_count last (after a
 * barrier) so the HCA prefetcher cannot see a valid count with stale
 * lkey/addr.  Compare __set_data_seg(), which has no ordering needs.
 */
static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!=
	 * 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}

/* Unordered variant of set_data_seg() for receive WQEs (no HW races). */
static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

/*
 * Build the LSO (large send offload) segment: copy the packet header
 * inline and report mss/header-length to the HW.  Sets *blh when the
 * segment spills past one cache line; returns -EINVAL when the QP lacks
 * LSO support and the gather list would not fit.
 */
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
			 __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
				   wr->wr.ud.hlen);
	*lso_seg_len = halign;
	return 0;
}

/*
 * Return the immediate/invalidate dword for the control segment, or 0
 * when the opcode carries neither.
 */
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

/* Emit a 16-byte zero-length inline segment (padding/alignment filler). */
static void add_zero_len_inline(void *wqe)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	memset(wqe, 0, 16);
	inl->byte_count = cpu_to_be32(1 << 31);
}

/*
 * Post a chain of send work requests to @ibqp.  For each WR this builds
 * the control segment, the QP-type specific segments (raddr/atomic/
 * datagram/tunnel/MLX headers), then the data segments in reverse order,
 * and finally hands ownership to the HW — the wmb() calls separating
 * those phases are required for correctness.  On error *bad_wr points at
 * the failing WR; previously accepted WRs are still rung via the
 * doorbell in the out: path.  Runs under qp->sq.lock with IRQs off.
 */
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
	unsigned uninitialized_var(seglen);
	__be32 dummy;
	__be32 *lso_wqe;
	__be32 uninitialized_var(lso_hdr_sz);
	__be32 blh;
	int i;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ind = qp->sq_next_wqe;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		/* lso_wqe points at a scratch dword unless an LSO seg is built. */
		lso_wqe = &dummy;
		blh = 0;

		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
			qp->sq_signal_bits;

		ctrl->imm = send_ieth(wr);

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;	/* WQE size is tracked in 16-byte units */

		switch (qp->mlx4_ib_qp_type) {
		case MLX4_IB_QPT_RC:
		case MLX4_IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;

				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_masked_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;

				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			case IB_WR_LOCAL_INV:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
				wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
				break;

			case IB_WR_FAST_REG_MR:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_fmr_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_fmr_seg);
				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
				break;

			case IB_WR_BIND_MW:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_bind_seg(wqe, wr);
				wqe  += sizeof(struct mlx4_wqe_bind_seg);
				size += sizeof(struct mlx4_wqe_bind_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case MLX4_IB_QPT_TUN_SMI_OWNER:
			err =  build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_TUN_SMI:
		case MLX4_IB_QPT_TUN_GSI:
			/* this is a UD qp used in MAD responses to slaves. */
			set_datagram_seg(wqe, wr);
			/* set the forced-loopback bit in the data seg av */
			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			break;
		case MLX4_IB_QPT_UD:
			set_datagram_seg(wqe, wr);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

			if (wr->opcode == IB_WR_LSO) {
				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
				if (unlikely(err)) {
					*bad_wr = wr;
					goto out;
				}
				lso_wqe = (__be32 *) wqe;
				wqe  += seglen;
				size += seglen / 16;
			}
			break;

		case MLX4_IB_QPT_PROXY_SMI_OWNER:
			if (unlikely(!mlx4_is_master(to_mdev(ibqp->device)->dev))) {
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;
			}
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			/* to start tunnel header on a cache-line boundary */
			add_zero_len_inline(wqe);
			wqe += 16;
			size++;
			build_tunnel_header(wr, wqe, &seglen);
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_PROXY_SMI:
			/* don't allow QP0 sends on guests */
			err = -ENOSYS;
			*bad_wr = wr;
			goto out;
		case MLX4_IB_QPT_PROXY_GSI:
			/* If we are tunneling special qps, this is a UD qp.
			 * In this case we first add a UD segment targeting
			 * the tunnel qp, and then add a header with address
			 * information */
			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			build_tunnel_header(wr, wqe, &seglen);
			wqe  += seglen;
			size += seglen / 16;
			break;

		case MLX4_IB_QPT_SMI:
		case MLX4_IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */

		dseg = wqe;
		dseg += wr->num_sge - 1;
		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
			     qp->mlx4_ib_qp_type &
			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
			set_data_seg(dseg, wr->sg_list + i);

		/*
		 * Possibly overwrite stamping in cacheline with LSO
		 * segment only after making sure all data segments
		 * are written.
		 */
		wmb();
		*lso_wqe = lso_hdr_sz;

		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			*bad_wr = wr;
			err = -EINVAL;
			goto out;
		}

		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;

		stamp = ind + qp->sq_spare_wqes;
		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 *
		 * Same optimization applies to padding with NOP wqe
		 * in case of WQE shrinking (used to prevent wrap-around
		 * in the middle of WR).
		 */
		if (wr->next) {
			stamp_send_wqe(qp, stamp, size * 16);
			ind = pad_wraparound(qp, ind);
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel(qp->doorbell_qpn,
		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

		stamp_send_wqe(qp, stamp, size * 16);

		ind = pad_wraparound(qp, ind);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

/*
 * Post a chain of receive work requests to @ibqp.  For proxy (SR-IOV
 * tunneled) QP types the first scatter entry of each WQE is reserved for
 * the tunnel header buffer, shrinking the usable gather count by one.
 * The doorbell record is updated once, after a wmb(), in the out: path.
 * Runs under qp->rq.lock with IRQs off.
 */
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int max_gs;
	int i;

	max_gs = qp->rq.max_gs;
	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			ib_dma_sync_single_for_device(ibqp->device,
						      qp->sqp_proxy_rcv[ind].map,
						      sizeof (struct mlx4_ib_proxy_sqp_hdr),
						      DMA_FROM_DEVICE);
			scat->byte_count =
				cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
			/* use dma lkey from upper layer entry */
			scat->lkey = cpu_to_be32(wr->sg_list->lkey);
			scat->addr =
cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
			scat++;
			max_gs--;
		}

		for (i = 0; i < wr->num_sge; ++i)
			__set_data_seg(scat + i, wr->sg_list + i);

		/* Terminate a short gather list with an invalid-lkey sentinel. */
		if (i < max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}

/* Map an mlx4 HW QP state onto the IB verbs QP state (-1 if unknown). */
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

/* Map an mlx4 path-migration state onto the IB value (-1 if unknown). */
static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default: return -1;
	}
}

/* Translate mlx4 QP context access bits into IB access flags. */
static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

/*
 * Decode an mlx4 HW path record into an ib_ah_attr.  Port number is
 * taken from bit 6 of sched_queue; SL decoding differs between the
 * Ethernet and IB link layers.  Bails out early (leaving the attr
 * zeroed apart from port_num) if the decoded port is out of range.
 */
static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
				struct mlx4_qp_path *path)
{
	struct mlx4_dev *dev = ibdev->dev;
	int is_eth;

	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num	  = path->sched_queue & 0x40 ? 2 : 1;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
		IB_LINK_LAYER_ETHERNET;
	if (is_eth)
		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
		((path->sched_queue & 4) << 1);
	else
		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
	/* NOTE(review): rate encoding offset of 5 matches the mlx4 static
	 * rate format; confirm against the path-record definition. */
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw, path->rgid,
		       sizeof ib_ah_attr->grh.dgid.raw);
	}
}

/*
 * Query QP attributes.  For a QP in RESET no firmware query is needed;
 * otherwise the HW QP context is fetched via mlx4_qp_query() and its
 * bitfields are unpacked into ib_qp_attr / ib_qp_init_attr.  Serialized
 * against modify_qp by qp->mutex.  Returns 0 or -EINVAL on query
 * failure.
 */
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx4_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context.qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX4_IB_QP_LSO)
		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	qp_init_attr->sq_sig_type =
		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}
gpl-2.0
thenameisnigel/android_kernel_lge_ls840
drivers/leds/leds-lp5521.c
1774
21327
/* * LP5521 LED chip driver. * * Copyright (C) 2010 Nokia Corporation * * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/leds.h> #include <linux/leds-lp5521.h> #include <linux/workqueue.h> #include <linux/slab.h> #define LP5521_PROGRAM_LENGTH 32 /* in bytes */ #define LP5521_MAX_LEDS 3 /* Maximum number of LEDs */ #define LP5521_MAX_ENGINES 3 /* Maximum number of engines */ #define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */ #define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */ #define LP5521_CMD_LOAD 0x15 /* 00010101 */ #define LP5521_CMD_RUN 0x2a /* 00101010 */ #define LP5521_CMD_DIRECT 0x3f /* 00111111 */ #define LP5521_CMD_DISABLED 0x00 /* 00000000 */ /* Registers */ #define LP5521_REG_ENABLE 0x00 #define LP5521_REG_OP_MODE 0x01 #define LP5521_REG_R_PWM 0x02 #define LP5521_REG_G_PWM 0x03 #define LP5521_REG_B_PWM 0x04 #define LP5521_REG_R_CURRENT 0x05 #define LP5521_REG_G_CURRENT 0x06 #define LP5521_REG_B_CURRENT 0x07 #define LP5521_REG_CONFIG 0x08 #define LP5521_REG_R_CHANNEL_PC 0x09 #define LP5521_REG_G_CHANNEL_PC 0x0A #define LP5521_REG_B_CHANNEL_PC 0x0B #define 
LP5521_REG_STATUS		0x0C
#define LP5521_REG_RESET		0x0D
#define LP5521_REG_GPO			0x0E
#define LP5521_REG_R_PROG_MEM		0x10
#define LP5521_REG_G_PROG_MEM		0x30
#define LP5521_REG_B_PROG_MEM		0x50

#define LP5521_PROG_MEM_BASE		LP5521_REG_R_PROG_MEM
#define LP5521_PROG_MEM_SIZE		0x20

/* Base register to set LED current */
#define LP5521_REG_LED_CURRENT_BASE	LP5521_REG_R_CURRENT

/* Base register to set the brightness */
#define LP5521_REG_LED_PWM_BASE		LP5521_REG_R_PWM

/* Bits in ENABLE register */
#define LP5521_MASTER_ENABLE		0x40	/* Chip master enable */
#define LP5521_LOGARITHMIC_PWM		0x80	/* Logarithmic PWM adjustment */
#define LP5521_EXEC_RUN			0x2A

/* Bits in CONFIG register */
#define LP5521_PWM_HF			0x40	/* PWM: 0 = 256Hz, 1 = 558Hz */
#define LP5521_PWRSAVE_EN		0x20	/* 1 = Power save mode */
#define LP5521_CP_MODE_OFF		0	/* Charge pump (CP) off */
#define LP5521_CP_MODE_BYPASS		8	/* CP forced to bypass mode */
#define LP5521_CP_MODE_1X5		0x10	/* CP forced to 1.5x mode */
#define LP5521_CP_MODE_AUTO		0x18	/* Automatic mode selection */
#define LP5521_R_TO_BATT		4	/* R out: 0 = CP, 1 = Vbat */
#define LP5521_CLK_SRC_EXT		0	/* Ext-clk source (CLK_32K) */
#define LP5521_CLK_INT			1	/* Internal clock */
#define LP5521_CLK_AUTO			2	/* Automatic clock selection */

/* Status */
#define LP5521_EXT_CLK_USED		0x08

/* One of the chip's three pattern-execution engines. */
struct lp5521_engine {
	int		id;		/* 1-based engine number */
	u8		mode;
	u8		prog_page;	/* program memory page for this engine */
	u8		engine_mask;	/* this engine's bits in OP_MODE/ENABLE */
};

/* Per-channel LED state exposed through the LED class device. */
struct lp5521_led {
	int			id;		/* index into chip->leds[] */
	u8			chan_nr;	/* hardware channel (R/G/B) */
	u8			led_current;
	u8			max_current;
	struct led_classdev	cdev;
	struct work_struct	brightness_work; /* defers I2C out of atomic ctx */
	u8			brightness;	/* last requested brightness */
};

/* Driver-private chip state. */
struct lp5521_chip {
	struct lp5521_platform_data *pdata;
	struct mutex		lock; /* Serialize control */
	struct i2c_client	*client;
	struct lp5521_engine	engines[LP5521_MAX_ENGINES];
	struct lp5521_led	leds[LP5521_MAX_LEDS];
	u8			num_channels;
	u8			num_leds;
};

/* Recover the lp5521_led wrapping a given LED class device. */
static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev)
{
	return container_of(cdev, struct lp5521_led, cdev);
}

/* Recover the chip from one of its engines (engine ids are 1-based). */
static inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine)
{
	return container_of(engine, struct lp5521_chip,
			    engines[engine->id - 1]);
}

/* Recover the chip from one of its LED entries. */
static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led)
{
	return container_of(led, struct lp5521_chip, leds[led->id]);
}

static void lp5521_led_brightness_work(struct work_struct *work);

/* Write one chip register over SMBus. */
static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
{
	return i2c_smbus_write_byte_data(client, reg, value);
}

/* Read one chip register over SMBus; returns 0 or -EIO. */
static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
{
	s32 ret;

	ret = i2c_smbus_read_byte_data(client, reg);
	if (ret < 0)
		return -EIO;

	*buf = ret;
	return 0;
}

/*
 * Set the OP_MODE bits of one engine without disturbing the others.
 * LOAD transitions are handled by lp5521_load_program(), so they are a
 * no-op here; DISABLED is mapped to DIRECT.  Returns 0 or an OR of the
 * underlying I2C error codes.
 */
static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
{
	struct lp5521_chip *chip = engine_to_lp5521(engine);
	struct i2c_client *client = chip->client;
	int ret;
	u8 engine_state;

	/* Only transition between RUN and DIRECT mode are handled here */
	if (mode == LP5521_CMD_LOAD)
		return 0;

	if (mode == LP5521_CMD_DISABLED)
		mode = LP5521_CMD_DIRECT;

	ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);

	/* set mode only for this engine */
	engine_state &= ~(engine->engine_mask);
	mode &= engine->engine_mask;
	engine_state |= mode;
	ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state);

	return ret;
}

/*
 * Load a 32-byte pattern into @eng's program memory page.  The chip
 * requires the engines to pass through DIRECT and then LOAD mode, with
 * a minimum 500 us settle after every mode change — hence the
 * usleep_range() calls between writes; do not remove them.
 */
static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
{
	struct lp5521_chip *chip = engine_to_lp5521(eng);
	struct i2c_client *client = chip->client;
	int ret;
	int addr;
	u8 mode;

	/* move current engine to direct mode and remember the state */
	ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
	/* Mode change requires min 500 us delay. 1 - 2 ms  with margin */
	usleep_range(1000, 2000);
	ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);

	/* For loading, all the engines to load mode */
	lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
	/* Mode change requires min 500 us delay. 1 - 2 ms  with margin */
	usleep_range(1000, 2000);
	lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
	/* Mode change requires min 500 us delay. 1 - 2 ms  with margin */
	usleep_range(1000, 2000);

	addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
	i2c_smbus_write_i2c_block_data(client,
				addr,
				LP5521_PROG_MEM_SIZE,
				pattern);

	/* Restore the OP_MODE captured before loading. */
	ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
	return ret;
}

/* Program the output-current register of one LED channel. */
static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
{
	return lp5521_write(chip->client,
		    LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
		    curr);
}

/* Assign ids, OP_MODE bit masks, and program pages to the 3 engines. */
static void lp5521_init_engine(struct lp5521_chip *chip)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
		chip->engines[i].id = i + 1;
		chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
		chip->engines[i].prog_page = i;
	}
}

/*
 * One-time chip configuration: direct PWM control, power-save/charge
 * pump setup, all channels off, master enable.  Returns 0 or an OR of
 * the I2C error codes from the individual writes.
 */
static int lp5521_configure(struct i2c_client *client)
{
	struct lp5521_chip *chip = i2c_get_clientdata(client);
	int ret;

	lp5521_init_engine(chip);

	/* Set all PWMs to direct control mode */
	ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);

	/* Enable auto-powersave, set charge pump to auto, red to battery */
	ret |= lp5521_write(client, LP5521_REG_CONFIG,
		LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);

	/* Initialize all channels PWM to zero -> leds off */
	ret |= lp5521_write(client, LP5521_REG_R_PWM, 0);
	ret |= lp5521_write(client, LP5521_REG_G_PWM, 0);
	ret |= lp5521_write(client, LP5521_REG_B_PWM, 0);

	/* Set engines are set to run state when OP_MODE enables engines */
	ret |= lp5521_write(client, LP5521_REG_ENABLE,
			LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM |
			LP5521_EXEC_RUN);
	/* enable takes 500us. 1 - 2 ms leaves some margin */
	usleep_range(1000, 2000);

	return ret;
}

/*
 * Sanity-check the chip: the STATUS register must be readable, and if
 * the platform requested an external 32 kHz clock, the chip must report
 * that it is actually using it.  @buf is unused here.
 */
static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf)
{
	int ret;
	u8 status;

	ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status);
	if (ret < 0)
		return ret;

	/* Check that ext clock is really in use if requested */
	if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT)
		if  ((status & LP5521_EXT_CLK_USED) == 0)
			return -EIO;
	return 0;
}

/*
 * LED-class brightness hook.  May be called from atomic context, so the
 * actual I2C write is deferred to lp5521_led_brightness_work().
 */
static void lp5521_set_brightness(struct led_classdev *cdev,
			     enum led_brightness brightness)
{
	struct lp5521_led *led = cdev_to_led(cdev);
	led->brightness = (u8)brightness;
	schedule_work(&led->brightness_work);
}

/* Workqueue body: push the cached brightness to the channel's PWM reg. */
static void lp5521_led_brightness_work(struct work_struct *work)
{
	struct lp5521_led *led = container_of(work,
					      struct lp5521_led,
					      brightness_work);
	struct lp5521_chip *chip = led_to_lp5521(led);
	struct i2c_client *client = chip->client;

	mutex_lock(&chip->lock);
	lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr,
		led->brightness);
	mutex_unlock(&chip->lock);
}

/* Detect the chip by setting its ENABLE register and reading it back. */
static int lp5521_detect(struct i2c_client  *client)
{
	int ret;
	u8 buf;

	ret = lp5521_write(client, LP5521_REG_ENABLE,
			LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM);
	if (ret)
		return ret;
	/* enable takes 500us. 1 - 2 ms leaves some margin */
	usleep_range(1000, 2000);
	ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
	if (ret)
		return ret;
	if (buf != (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM))
		return -ENODEV;

	return 0;
}

/* Set engine mode and create appropriate sysfs attributes, if required.
*/
static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
{
	int ret = 0;

	/* if in that mode already do nothing, except for run */
	if (mode == engine->mode && mode != LP5521_CMD_RUN)
		return 0;

	if (mode == LP5521_CMD_RUN) {
		ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
	} else if (mode == LP5521_CMD_LOAD) {
		/* LOAD is entered via DISABLED so any running program stops first */
		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
		lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
	} else if (mode == LP5521_CMD_DISABLED) {
		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
	}

	/* Cache the requested mode; read back by the sysfs show/store paths */
	engine->mode = mode;

	return ret;
}

/*
 * Parse a program written to sysfs as whitespace-separated two-digit hex
 * bytes ("01 aa ff ...") and load it into the engine's program memory.
 * The engine must already be in LOAD mode. Returns @len on success,
 * -EINVAL on a malformed pattern or wrong engine mode.
 */
static int lp5521_do_store_load(struct lp5521_engine *engine,
				const char *buf, size_t len)
{
	struct lp5521_chip *chip = engine_to_lp5521(engine);
	struct i2c_client *client = chip->client;
	int ret, nrchars, offset = 0, i = 0;
	char c[3];
	unsigned cmd;
	u8 pattern[LP5521_PROGRAM_LENGTH] = {0};

	/* NOTE(review): sysfs guarantees len >= 1 here; len - 1 on a size_t
	 * would wrap for len == 0 — presumed unreachable, verify callers. */
	while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
		/* separate sscanfs because length is working only for %s */
		ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
		if (ret != 2)
			goto fail;
		ret = sscanf(c, "%2x", &cmd);
		if (ret != 1)
			goto fail;
		pattern[i] = (u8)cmd;

		offset += nrchars;
		i++;
	}

	/* Each instruction is 16bit long. Check that length is even */
	if (i % 2)
		goto fail;

	mutex_lock(&chip->lock);

	if (engine->mode == LP5521_CMD_LOAD)
		ret = lp5521_load_program(engine, pattern);
	else
		ret = -EINVAL;

	mutex_unlock(&chip->lock);

	if (ret) {
		dev_err(&client->dev, "failed loading pattern\n");
		return ret;
	}

	return len;
fail:
	dev_err(&client->dev, "wrong pattern format\n");
	return -EINVAL;
}

/* Common backend for the engineN_load sysfs attributes; @nr is 1-based. */
static ssize_t store_engine_load(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len, int nr)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct lp5521_chip *chip = i2c_get_clientdata(client);
	return lp5521_do_store_load(&chip->engines[nr - 1], buf, len);
}

/* Stamp out one store callback per engine (engine1_load..engine3_load) */
#define store_load(nr)							\
static ssize_t store_engine##nr##_load(struct device *dev,		\
				       struct device_attribute *attr,	\
				       const char *buf, size_t len)	\
{									\
	return store_engine_load(dev, attr, buf, len, nr);		\
}
store_load(1)
store_load(2)
store_load(3)

/* Common backend for the engineN_mode "show" attributes; @nr is 1-based. */
static ssize_t show_engine_mode(struct device *dev,
				struct device_attribute *attr,
				char *buf, int nr)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct lp5521_chip *chip = i2c_get_clientdata(client);

	switch (chip->engines[nr - 1].mode) {
	case LP5521_CMD_RUN:
		return sprintf(buf, "run\n");
	case LP5521_CMD_LOAD:
		return sprintf(buf, "load\n");
	case LP5521_CMD_DISABLED:
		return sprintf(buf, "disabled\n");
	default:
		/* any unknown cached mode is reported as disabled */
		return sprintf(buf, "disabled\n");
	}
}

/* Stamp out one show callback per engine (engine1_mode..engine3_mode) */
#define show_mode(nr)							\
static ssize_t show_engine##nr##_mode(struct device *dev,		\
				      struct device_attribute *attr,	\
				      char *buf)			\
{									\
	return show_engine_mode(dev, attr, buf, nr);			\
}
show_mode(1)
show_mode(2)
show_mode(3)

/*
 * Common backend for the engineN_mode "store" attributes. Accepts the
 * strings "run", "load" and "disabled"; anything else is silently ignored
 * (the write still reports success).
 */
static ssize_t store_engine_mode(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len, int nr)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct lp5521_chip *chip = i2c_get_clientdata(client);
	struct lp5521_engine *engine = &chip->engines[nr - 1];

	mutex_lock(&chip->lock);

	if (!strncmp(buf, "run", 3))
		lp5521_set_mode(engine, LP5521_CMD_RUN);
	else if (!strncmp(buf, "load", 4))
		lp5521_set_mode(engine, LP5521_CMD_LOAD);
	else if (!strncmp(buf, "disabled", 8))
		lp5521_set_mode(engine, LP5521_CMD_DISABLED);

	mutex_unlock(&chip->lock);
	return len;
}

/* Stamp out one mode-store callback per engine */
#define store_mode(nr)							\
static ssize_t store_engine##nr##_mode(struct device *dev,		\
				       struct device_attribute *attr,	\
				       const char *buf, size_t len)	\
{									\
	return store_engine_mode(dev, attr, buf, len, nr);		\
}
store_mode(1)
store_mode(2)
store_mode(3)

/* sysfs: read-only maximum allowed LED current (mA units per platform data) */
static ssize_t show_max_current(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lp5521_led *led = cdev_to_led(led_cdev);

	return sprintf(buf, "%d\n", led->max_current);
}

/* sysfs: currently configured LED current */
static ssize_t show_current(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lp5521_led *led = cdev_to_led(led_cdev);

	return sprintf(buf, "%d\n", led->led_current);
}

/* sysfs: set LED current, clamped by validation against max_current */
static ssize_t store_current(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lp5521_led *led = cdev_to_led(led_cdev);
	struct lp5521_chip *chip = led_to_lp5521(led);
	ssize_t ret;
	unsigned long curr;

	if (strict_strtoul(buf, 0, &curr))
		return -EINVAL;

	if (curr > led->max_current)
		return -EINVAL;

	mutex_lock(&chip->lock);
	ret = lp5521_set_led_current(chip, led->id, curr);
	mutex_unlock(&chip->lock);

	if (ret < 0)
		return ret;

	/* only commit the cached value once the chip write succeeded */
	led->led_current = (u8)curr;

	return len;
}

/* sysfs: run the chip selftest and report "OK" or "FAIL" */
static ssize_t lp5521_selftest(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct lp5521_chip *chip = i2c_get_clientdata(client);
	int ret;

	mutex_lock(&chip->lock);
	ret = lp5521_run_selftest(chip, buf);
	mutex_unlock(&chip->lock);
	return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
}

/* led class device attributes */
static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);

static struct attribute *lp5521_led_attributes[] = {
	&dev_attr_led_current.attr,
	&dev_attr_max_current.attr,
	NULL,
};

static struct attribute_group lp5521_led_attribute_group = {
	.attrs = lp5521_led_attributes
};

/* device attributes */
static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
		   show_engine1_mode, store_engine1_mode);
static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
		   show_engine2_mode, store_engine2_mode);
static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
		   show_engine3_mode, store_engine3_mode);
static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);

static struct attribute *lp5521_attributes[] = {
	&dev_attr_engine1_mode.attr,
	&dev_attr_engine2_mode.attr,
	&dev_attr_engine3_mode.attr,
	&dev_attr_selftest.attr,
	&dev_attr_engine1_load.attr,
	&dev_attr_engine2_load.attr,
	&dev_attr_engine3_load.attr,
	NULL
};

static const struct attribute_group lp5521_group = {
	.attrs = lp5521_attributes,
};

/* Create the per-chip sysfs attribute group on the i2c device. */
static int lp5521_register_sysfs(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	return sysfs_create_group(&dev->kobj, &lp5521_group);
}

/* Tear down chip-level and per-LED sysfs attribute groups. */
static void lp5521_unregister_sysfs(struct i2c_client *client)
{
	struct lp5521_chip *chip = i2c_get_clientdata(client);
	struct device *dev = &client->dev;
	int i;

	sysfs_remove_group(&dev->kobj, &lp5521_group);

	for (i = 0; i < chip->num_leds; i++)
		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
				   &lp5521_led_attribute_group);
}

/*
 * Register one LED class device for channel @chan and attach its current
 * attributes. Returns 0 (also for unconnected channels, i.e. those with
 * led_current == 0) or a negative error.
 */
static int __devinit lp5521_init_led(struct lp5521_led *led,
				     struct i2c_client *client,
				     int chan, struct lp5521_platform_data *pdata)
{
	struct device *dev = &client->dev;
	char name[32];
	int res;

	if (chan >= LP5521_MAX_LEDS)
		return -EINVAL;

	/* led_current == 0 marks an unconnected channel: nothing to do */
	if (pdata->led_config[chan].led_current == 0)
		return 0;

	led->led_current = pdata->led_config[chan].led_current;
	led->max_current = pdata->led_config[chan].max_current;
	led->chan_nr = pdata->led_config[chan].chan_nr;

	if (led->chan_nr >= LP5521_MAX_LEDS) {
		dev_err(dev, "Use channel numbers between 0 and %d\n",
			LP5521_MAX_LEDS - 1);
		return -EINVAL;
	}

	snprintf(name, sizeof(name), "%s:channel%d",
		 pdata->label ?: client->name, chan);
	led->cdev.brightness_set = lp5521_set_brightness;
	led->cdev.name = name;
	res = led_classdev_register(dev, &led->cdev);
	if (res < 0) {
		dev_err(dev, "couldn't register led on channel %d\n", chan);
		return res;
	}

	res = sysfs_create_group(&led->cdev.dev->kobj,
				 &lp5521_led_attribute_group);
	if (res < 0) {
		dev_err(dev, "couldn't register current attribute\n");
		led_classdev_unregister(&led->cdev);
		return res;
	}
	return 0;
}

/*
 * Probe: allocate per-chip state, power-cycle/reset the chip via platform
 * hooks, detect and configure it, then register LEDs and sysfs. Error
 * unwinding goes through fail3/fail2/fail1 in reverse acquisition order.
 */
static int __devinit lp5521_probe(struct i2c_client *client,
				  const struct i2c_device_id *id)
{
	struct lp5521_chip *chip;
	struct lp5521_platform_data *pdata;
	int ret, i, led;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	i2c_set_clientdata(client, chip);
	chip->client = client;

	pdata = client->dev.platform_data;

	if (!pdata) {
		dev_err(&client->dev, "no platform data\n");
		ret = -EINVAL;
		goto fail1;
	}

	mutex_init(&chip->lock);

	chip->pdata = pdata;

	if (pdata->setup_resources) {
		ret = pdata->setup_resources();
		if (ret < 0)
			goto fail1;
	}

	if (pdata->enable) {
		pdata->enable(0);
		usleep_range(1000, 2000); /* Keep enable down at least 1ms */
		pdata->enable(1);
		usleep_range(1000, 2000); /* 500us abs min. */
	}

	lp5521_write(client, LP5521_REG_RESET, 0xff);
	usleep_range(10000, 20000); /*
				     * Exact value is not available. 10 - 20ms
				     * appears to be enough for reset.
				     */
	ret = lp5521_detect(client);

	if (ret) {
		dev_err(&client->dev, "Chip not found\n");
		goto fail2;
	}

	dev_info(&client->dev, "%s programmable led chip found\n", id->name);

	ret = lp5521_configure(client);
	if (ret < 0) {
		dev_err(&client->dev, "error configuring chip\n");
		goto fail2;
	}

	/* Initialize leds */
	chip->num_channels = pdata->num_channels;
	chip->num_leds = 0;
	led = 0;
	for (i = 0; i < pdata->num_channels; i++) {
		/* Do not initialize channels that are not connected */
		if (pdata->led_config[i].led_current == 0)
			continue;

		ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
		if (ret) {
			dev_err(&client->dev, "error initializing leds\n");
			goto fail3;
		}
		chip->num_leds++;

		chip->leds[led].id = led;
		/* Set initial LED current */
		lp5521_set_led_current(chip, led,
				       chip->leds[led].led_current);

		INIT_WORK(&(chip->leds[led].brightness_work),
			  lp5521_led_brightness_work);

		led++;
	}

	ret = lp5521_register_sysfs(client);
	if (ret) {
		dev_err(&client->dev, "registering sysfs failed\n");
		goto fail3;
	}
	return ret;
fail3:
	for (i = 0; i < chip->num_leds; i++) {
		led_classdev_unregister(&chip->leds[i].cdev);
		cancel_work_sync(&chip->leds[i].brightness_work);
	}
fail2:
	if (pdata->enable)
		pdata->enable(0);
	if (pdata->release_resources)
		pdata->release_resources();
fail1:
	kfree(chip);
	return ret;
}

/* Remove: unwind everything probe set up, then power the chip down. */
static int lp5521_remove(struct i2c_client *client)
{
	struct lp5521_chip *chip = i2c_get_clientdata(client);
	int i;

	lp5521_unregister_sysfs(client);

	for (i = 0; i < chip->num_leds; i++) {
		led_classdev_unregister(&chip->leds[i].cdev);
		cancel_work_sync(&chip->leds[i].brightness_work);
	}

	if (chip->pdata->enable)
		chip->pdata->enable(0);
	if (chip->pdata->release_resources)
		chip->pdata->release_resources();
	kfree(chip);
	return 0;
}

static const struct i2c_device_id lp5521_id[] = {
	{ "lp5521", 0 }, /* Three channel chip */
	{ }
};
MODULE_DEVICE_TABLE(i2c, lp5521_id);

static struct i2c_driver lp5521_driver = {
	.driver = {
		.name	= "lp5521",
	},
	.probe		= lp5521_probe,
	.remove		= lp5521_remove,
	.id_table	= lp5521_id,
};

static int __init lp5521_init(void)
{
	int ret;

	ret = i2c_add_driver(&lp5521_driver);

	if (ret < 0)
		printk(KERN_ALERT "Adding lp5521 driver failed\n");

	return ret;
}

static void __exit lp5521_exit(void)
{
	i2c_del_driver(&lp5521_driver);
}

module_init(lp5521_init);
module_exit(lp5521_exit);

MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
MODULE_DESCRIPTION("LP5521 LED engine");
MODULE_LICENSE("GPL v2");
gpl-2.0
HRTKernel/Hacker_Kernel_SM-G920P
drivers/s390/scsi/zfcp_ccw.c
2286
9177
/* * zfcp device driver * * Registration and callback for the s390 common I/O layer. * * Copyright IBM Corp. 2002, 2010 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include "zfcp_ext.h" #include "zfcp_reqlist.h" #define ZFCP_MODEL_PRIV 0x4 static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock); struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev) { struct zfcp_adapter *adapter; unsigned long flags; spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags); adapter = dev_get_drvdata(&cdev->dev); if (adapter) kref_get(&adapter->ref); spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); return adapter; } void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter) { unsigned long flags; spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags); kref_put(&adapter->ref, zfcp_adapter_release); spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); } /** * zfcp_ccw_activate - activate adapter and wait for it to finish * @cdev: pointer to belonging ccw device * @clear: Status flags to clear. 
* @tag: s390dbf trace record tag */ static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; zfcp_erp_clear_adapter_status(adapter, clear); zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, tag); zfcp_erp_wait(adapter); flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ zfcp_ccw_adapter_put(adapter); return 0; } static struct ccw_device_id zfcp_ccw_device_id[] = { { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) }, { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) }, {}, }; MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); /** * zfcp_ccw_priv_sch - check if subchannel is privileged * @adapter: Adapter/Subchannel to check */ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter) { return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV; } /** * zfcp_ccw_probe - probe function of zfcp driver * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer for each FCP * device found on the current system. This is only a stub to make cio * work: To only allocate adapter resources for devices actually used, * the allocation is deferred to the first call to ccw_set_online. */ static int zfcp_ccw_probe(struct ccw_device *cdev) { return 0; } /** * zfcp_ccw_remove - remove function of zfcp driver * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer and removes an adapter * from the system. Task of this function is to get rid of all units and * ports that belong to this adapter. And in addition all resources of this * adapter will be freed too. 
*/ static void zfcp_ccw_remove(struct ccw_device *cdev) { struct zfcp_adapter *adapter; struct zfcp_port *port, *p; struct zfcp_unit *unit, *u; LIST_HEAD(unit_remove_lh); LIST_HEAD(port_remove_lh); ccw_device_set_offline(cdev); adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return; write_lock_irq(&adapter->port_list_lock); list_for_each_entry_safe(port, p, &adapter->port_list, list) { write_lock(&port->unit_list_lock); list_for_each_entry_safe(unit, u, &port->unit_list, list) list_move(&unit->list, &unit_remove_lh); write_unlock(&port->unit_list_lock); list_move(&port->list, &port_remove_lh); } write_unlock_irq(&adapter->port_list_lock); zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */ list_for_each_entry_safe(unit, u, &unit_remove_lh, list) zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); list_for_each_entry_safe(port, p, &port_remove_lh, list) zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); zfcp_adapter_unregister(adapter); } /** * zfcp_ccw_set_online - set_online function of zfcp driver * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer and sets an * adapter into state online. The first call will allocate all * adapter resources that will be retained until the device is removed * via zfcp_ccw_remove. * * Setting an fcp device online means that it will be registered with * the SCSI stack, that the QDIO queues will be set up and that the * adapter will be opened. 
*/ static int zfcp_ccw_set_online(struct ccw_device *cdev) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) { adapter = zfcp_adapter_enqueue(cdev); if (IS_ERR(adapter)) { dev_err(&cdev->dev, "Setting up data structures for the " "FCP adapter failed\n"); return PTR_ERR(adapter); } kref_get(&adapter->ref); } /* initialize request counter */ BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); adapter->req_no = 0; zfcp_ccw_activate(cdev, 0, "ccsonl1"); /* scan for remote ports either at the end of any successful adapter recovery or only after the adapter recovery for setting a device online */ zfcp_fc_inverse_conditional_port_scan(adapter); flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ zfcp_ccw_adapter_put(adapter); return 0; } /** * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish * @cdev: pointer to belonging ccw device * @set: Status flags to set. * @tag: s390dbf trace record tag * * This function gets called by the common i/o layer and sets an adapter * into state offline. */ static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; zfcp_erp_set_adapter_status(adapter, set); zfcp_erp_adapter_shutdown(adapter, 0, tag); zfcp_erp_wait(adapter); zfcp_ccw_adapter_put(adapter); return 0; } /** * zfcp_ccw_set_offline - set_offline function of zfcp driver * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer and sets an adapter * into state offline. */ static int zfcp_ccw_set_offline(struct ccw_device *cdev) { return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1"); } /** * zfcp_ccw_notify - ccw notify function * @cdev: pointer to belonging ccw device * @event: indicates if adapter was detached or attached * * This function gets called by the common i/o layer if an adapter has gone * or reappeared. 
*/ static int zfcp_ccw_notify(struct ccw_device *cdev, int event) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 1; switch (event) { case CIO_GONE: if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */ zfcp_dbf_hba_basic("ccnigo1", adapter); break; } dev_warn(&cdev->dev, "The FCP device has been detached\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1"); break; case CIO_NO_PATH: dev_warn(&cdev->dev, "The CHPID for the FCP device is offline\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2"); break; case CIO_OPER: if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */ zfcp_dbf_hba_basic("ccniop1", adapter); break; } dev_info(&cdev->dev, "The FCP device is operational again\n"); zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "ccnoti4"); break; case CIO_BOXED: dev_warn(&cdev->dev, "The FCP device did not respond within " "the specified time\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5"); break; } zfcp_ccw_adapter_put(adapter); return 1; } /** * zfcp_ccw_shutdown - handle shutdown from cio * @cdev: device for adapter to shutdown. 
*/ static void zfcp_ccw_shutdown(struct ccw_device *cdev) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return; zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1"); zfcp_erp_wait(adapter); zfcp_erp_thread_kill(adapter); zfcp_ccw_adapter_put(adapter); } static int zfcp_ccw_suspend(struct ccw_device *cdev) { zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1"); return 0; } static int zfcp_ccw_thaw(struct ccw_device *cdev) { /* trace records for thaw and final shutdown during suspend can only be found in system dump until the end of suspend but not after resume because it's based on the memory image right after the very first suspend (freeze) callback */ zfcp_ccw_activate(cdev, 0, "ccthaw1"); return 0; } static int zfcp_ccw_resume(struct ccw_device *cdev) { zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1"); return 0; } struct ccw_driver zfcp_ccw_driver = { .driver = { .owner = THIS_MODULE, .name = "zfcp", }, .ids = zfcp_ccw_device_id, .probe = zfcp_ccw_probe, .remove = zfcp_ccw_remove, .set_online = zfcp_ccw_set_online, .set_offline = zfcp_ccw_set_offline, .notify = zfcp_ccw_notify, .shutdown = zfcp_ccw_shutdown, .freeze = zfcp_ccw_suspend, .thaw = zfcp_ccw_thaw, .restore = zfcp_ccw_resume, };
gpl-2.0
klquicksall/Galaxy-Nexus-JB
net/sched/act_nat.c
2542
7389
/*
 * Stateless NAT actions
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tc_act/tc_nat.h>
#include <net/act_api.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/tc_act/tc_nat.h>
#include <net/tcp.h>
#include <net/udp.h>


#define NAT_TAB_MASK	15
static struct tcf_common *tcf_nat_ht[NAT_TAB_MASK + 1];
static u32 nat_idx_gen;
static DEFINE_RWLOCK(nat_lock);

static struct tcf_hashinfo nat_hash_info = {
	.htab	=	tcf_nat_ht,
	.hmask	=	NAT_TAB_MASK,
	.lock	=	&nat_lock,
};

static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
	[TCA_NAT_PARMS]	= { .len = sizeof(struct tc_nat) },
};

/*
 * Create or update a NAT action instance from netlink attributes.
 * Returns ACT_P_CREATED for a new instance, 0 for an in-place update,
 * or a negative error (-EEXIST if the instance exists and @ovr is not set).
 */
static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
			struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_NAT_MAX + 1];
	struct tc_nat *parm;
	int ret = 0, err;
	struct tcf_nat *p;
	struct tcf_common *pc;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy);
	if (err < 0)
		return err;

	if (tb[TCA_NAT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_NAT_PARMS]);

	pc = tcf_hash_check(parm->index, a, bind, &nat_hash_info);
	if (!pc) {
		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
				     &nat_idx_gen, &nat_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);
		p = to_tcf_nat(pc);
		ret = ACT_P_CREATED;
	} else {
		p = to_tcf_nat(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &nat_hash_info);
			return -EEXIST;
		}
	}

	/* update the translation parameters under the per-action lock */
	spin_lock_bh(&p->tcf_lock);
	p->old_addr = parm->old_addr;
	p->new_addr = parm->new_addr;
	p->mask = parm->mask;
	p->flags = parm->flags;

	p->tcf_action = parm->action;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &nat_hash_info);

	return ret;
}

/* Release one binding/reference on the action instance. */
static int tcf_nat_cleanup(struct tc_action *a, int bind)
{
	struct tcf_nat *p = a->priv;

	return tcf_hash_release(&p->common, bind, &nat_hash_info);
}

/*
 * Per-packet action: rewrite the source (egress) or destination (ingress)
 * IPv4 address when it matches old_addr under mask, fixing up the IP header
 * checksum and, for first fragments, the TCP/UDP/ICMP checksums as well.
 * For ICMP errors the embedded inner IP header is also translated.
 * Returns the configured tc action code, or TC_ACT_SHOT on malformed
 * packets.
 */
static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
		   struct tcf_result *res)
{
	struct tcf_nat *p = a->priv;
	struct iphdr *iph;
	__be32 old_addr;
	__be32 new_addr;
	__be32 mask;
	__be32 addr;
	int egress;
	int action;
	int ihl;
	int noff;

	/* snapshot the config under the lock so the fast path can run
	 * unlocked afterwards */
	spin_lock(&p->tcf_lock);

	p->tcf_tm.lastuse = jiffies;
	old_addr = p->old_addr;
	new_addr = p->new_addr;
	mask = p->mask;
	egress = p->flags & TCA_NAT_FLAG_EGRESS;
	action = p->tcf_action;

	bstats_update(&p->tcf_bstats, skb);

	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	noff = skb_network_offset(skb);
	if (!pskb_may_pull(skb, sizeof(*iph) + noff))
		goto drop;

	iph = ip_hdr(skb);

	if (egress)
		addr = iph->saddr;
	else
		addr = iph->daddr;

	if (!((old_addr ^ addr) & mask)) {
		/* make the header writable before rewriting it */
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + noff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* Rewrite IP header */
		iph = ip_hdr(skb);
		if (egress)
			iph->saddr = new_addr;
		else
			iph->daddr = new_addr;

		csum_replace4(&iph->check, addr, new_addr);
	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
		   iph->protocol != IPPROTO_ICMP) {
		/* outer address did not match and this is not an ICMP
		 * error that might embed a matching inner header */
		goto out;
	}

	ihl = iph->ihl * 4;

	/* It would be nice to share code with stateful NAT. */
	/* non-first fragments carry no L4 header: skip checksum fixup */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_TCP:
	{
		struct tcphdr *tcph;

		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		tcph = (void *)(skb_network_header(skb) + ihl);
		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
		break;
	}
	case IPPROTO_UDP:
	{
		struct udphdr *udph;

		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		udph = (void *)(skb_network_header(skb) + ihl);
		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
			inet_proto_csum_replace4(&udph->check, skb, addr,
						 new_addr, 1);
			/* a computed zero UDP checksum must be sent as
			 * all-ones (zero means "no checksum") */
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
		break;
	}
	case IPPROTO_ICMP:
	{
		struct icmphdr *icmph;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);

		if ((icmph->type != ICMP_DEST_UNREACH) &&
		    (icmph->type != ICMP_TIME_EXCEEDED) &&
		    (icmph->type != ICMP_PARAMETERPROB))
			break;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
					noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);

		/* the embedded header is the *original* datagram, so the
		 * direction of the address to match is inverted */
		if (egress)
			addr = iph->daddr;
		else
			addr = iph->saddr;

		if ((old_addr ^ addr) & mask)
			break;

		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, ihl + sizeof(*icmph) +
					     sizeof(*iph) + noff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* XXX Fix up the inner checksums. */
		if (egress)
			iph->daddr = new_addr;
		else
			iph->saddr = new_addr;

		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
					 0);
		break;
	}
	default:
		break;
	}

out:
	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}

/* Dump the action's parameters and timestamps into a netlink message. */
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = a->priv;
	struct tc_nat opt = {
		.old_addr = p->old_addr,
		.new_addr = p->new_addr,
		.mask     = p->mask,
		.flags    = p->flags,

		.index    = p->tcf_index,
		.action   = p->tcf_action,
		.refcnt   = p->tcf_refcnt - ref,
		.bindcnt  = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);

	return skb->len;

nla_put_failure:
	/* roll back anything partially written to the message */
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_nat_ops = {
	.kind		=	"nat",
	.hinfo		=	&nat_hash_info,
	.type		=	TCA_ACT_NAT,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_nat,
	.dump		=	tcf_nat_dump,
	.cleanup	=	tcf_nat_cleanup,
	.lookup		=	tcf_hash_search,
	.init		=	tcf_nat_init,
	.walk		=	tcf_generic_walker
};

MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

static int __init nat_init_module(void)
{
	return tcf_register_action(&act_nat_ops);
}

static void __exit nat_cleanup_module(void)
{
	tcf_unregister_action(&act_nat_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);
gpl-2.0
flar2/xoom-ElementalX
net/rds/iw_send.c
2542
29845
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "iw.h"

/*
 * Translate an IB work-completion status into an RDS RDMA notification
 * code and deliver it to the RDS core.  Flush errors (connection being
 * torn down) are deliberately not reported to the application.
 */
static void rds_iw_send_rdma_complete(struct rds_message *rm,
				      int wc_status)
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	rds_rdma_send_complete(rm, notify_status);
}

/* DMA-unmap the scatterlist of an RDMA op, if it was mapped.  The DMA
 * direction must match the one used when mapping (write -> to device). */
static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
				   struct rm_rdma_op *op)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}
}

/*
 * Tear down the DMA state of a completed (or flushed) send work request:
 * unmap the data payload, unmap and notify any accompanying RDMA op,
 * then drop the send's reference on the message.
 */
static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
				 struct rds_iw_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	ib_dma_unmap_sg(ic->i_cm_id->device,
			rm->data.op_sg, rm->data.op_nents,
			DMA_TO_DEVICE);

	if (rm->rdma.op_active) {
		rds_iw_send_unmap_rdma(ic, &rm->rdma);

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 *  1.	Notify when we received the ACK on the RDS message
		 *	that was queued with the RDMA. This provides reliable
		 *	notification of RDMA status at the expense of a one-way
		 *	packet delay.
		 *  2.	Notify when the IB stack gives us the completion
		 *	event for the RDMA operation.
		 *  3.	Notify when the IB stack gives us the completion
		 *	event for the accompanying RDS messages.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To
		 * implement #1, don't call rds_rdma_send_complete at all, and
		 * fall back to the notify handling in the ACK processing
		 * code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers
		 * using ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes
		 * care of synching.
		 */
		rds_iw_send_rdma_complete(rm, wc_status);

		if (rm->rdma.op_write)
			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now */
	rds_message_unmapped(rm);

	rds_message_put(rm);
	send->s_rm = NULL;
}

/*
 * Initialize every entry of the send work ring: reset per-send state,
 * pre-fill the invariant parts of the work request, point the header SGE
 * at this slot's place in the long-lived DMA-mapped header array, and
 * allocate the fast-registration MR and page list used for RDMA reads.
 * On allocation failure we warn and stop initializing further entries.
 */
void rds_iw_send_init_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_rm = NULL;
		send->s_op = NULL;
		send->s_mapping = NULL;

		send->s_wr.next = NULL;
		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.num_sge = 1;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.send_flags = 0;
		send->s_wr.ex.imm_data = 0;

		sge = rds_iw_data_sge(ic, send->s_sge);
		sge->lkey = 0;

		sge = rds_iw_header_sge(ic, send->s_sge);
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = 0;

		send->s_mr = ib_alloc_fast_reg_mr(ic->i_pd, fastreg_message_size);
		if (IS_ERR(send->s_mr)) {
			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed\n");
			break;
		}

		send->s_page_list = ib_alloc_fast_reg_page_list(
			ic->i_cm_id->device, fastreg_message_size);
		if (IS_ERR(send->s_page_list)) {
			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed\n");
			break;
		}
	}
}

/*
 * Release every entry of the send work ring: free the fastreg MR and page
 * list, and unmap any message or RDMA op still attached to an entry.
 * Entries already retired by the completion handler are marked with the
 * 0xdead opcode sentinel and are skipped.
 */
void rds_iw_send_clear_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		BUG_ON(!send->s_mr);
		ib_dereg_mr(send->s_mr);
		BUG_ON(!send->s_page_list);
		ib_free_fast_reg_page_list(send->s_page_list);
		if (send->s_wr.opcode == 0xdead)
			continue;
		if (send->s_rm)
			rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
		if (send->s_op)
			rds_iw_send_unmap_rdma(ic, send->s_op);
	}
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_iw_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_iw_stats_inc(s_iw_tx_cq_call);
	/* Re-arm before polling so no completion is missed between the
	 * poll loop ending and the next interrupt. */
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_iw_stats_inc(s_iw_tx_cq_event);

		if (wc.status != IB_WC_SUCCESS) {
			printk(KERN_ERR "WC Error:  status = %d opcode = %d\n", wc.status, wc.opcode);
			break;
		}

		/* Special wr_ids mark internal work requests that carry no
		 * ring entry: local invalidate / fast-reg of the connection
		 * MR, and the dedicated ACK send. */
		if (wc.opcode == IB_WC_LOCAL_INV && wc.wr_id == RDS_IW_LOCAL_INV_WR_ID) {
			ic->i_fastreg_posted = 0;
			continue;
		}

		if (wc.opcode == IB_WC_FAST_REG_MR && wc.wr_id == RDS_IW_FAST_REG_WR_ID) {
			ic->i_fastreg_posted = 1;
			continue;
		}

		if (wc.wr_id == RDS_IW_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_iw_stats_inc(s_iw_tx_stalled);
			rds_iw_ack_send_complete(ic);
			continue;
		}

		oldest = rds_iw_ring_oldest(&ic->i_send_ring);

		completed = rds_iw_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_iw_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_FAST_REG_MR:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_READ_WITH_INV:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
						"RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
						__func__, send->s_wr.opcode);
				break;
			}

			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_iw_stats_inc(s_iw_tx_stalled);

			/* If a RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with ERR_WFLUSH, and the application
			 * never learn that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm)
					rds_iw_send_rdma_complete(rm, wc.status);
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_iw_ring_free(&ic->i_send_ring, completed);

		/* Freeing ring entries may have unblocked a stalled sender. */
		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_iw_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  -	send credits: this tells us how many WRs we're allowed
 *	to submit without overrunning the receiver's queue. For
 *	each SEND WR we post, we decrement this by one.
 *
 *  -	posted credits: this tells us how many WRs we recently
 *	posted to the receive queue. This value is transferred
 *	to the peer as a "credit update" in a RDS header field.
 *	Every time we transmit credits to the peer, we subtract
 *	the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_iw_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * grabs c_send_lock to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter.  Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
int rds_iw_send_grab_credits(struct rds_iw_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_iw_send_grab_credits(%u): credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants
	 * the posted regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

/*
 * Add send credits granted by the peer and kick the send worker if a
 * sender previously stalled for lack of credits.
 */
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_iw_send_add_credits(%u): current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_iw_stats_inc(s_iw_rx_credit_updates);
}

/*
 * Account newly posted receive buffers as credits to advertise, and
 * request an ACK once enough have accumulated to be worth sending.
 */
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and has to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

/*
 * Fill in a send work request for one message fragment at ring slot
 * @pos.  With a payload, two SGEs are used (data + header); a zero
 * length payload uses a single header-only SGE.  The header SGE always
 * points into the connection's long-lived mapped header array.
 */
static inline void
rds_iw_xmit_populate_wr(struct rds_iw_connection *ic,
		struct rds_iw_send_work *send, unsigned int pos,
		unsigned long buffer, unsigned int length,
		int send_flags)
{
	struct ib_sge *sge;

	WARN_ON(pos != send - ic->i_sends);

	send->s_wr.send_flags = send_flags;
	send->s_wr.opcode = IB_WR_SEND;
	send->s_wr.num_sge = 2;
	send->s_wr.next = NULL;
	send->s_queued = jiffies;
	send->s_op = NULL;

	if (length != 0) {
		sge = rds_iw_data_sge(ic, send->s_sge);
		sge->addr = buffer;
		sge->length = length;
		sge->lkey = rds_iw_local_dma_lkey(ic);

		sge = rds_iw_header_sge(ic, send->s_sge);
	} else {
		/* We're sending a packet with no payload. There is only
		 * one SGE */
		send->s_wr.num_sge = 1;
		sge = &send->s_sge[0];
	}

	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
	sge->length = sizeof(struct rds_header);
	sge->lkey = rds_iw_local_dma_lkey(ic);
}

/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_iw_send_work *send = NULL;
	struct rds_iw_send_work *first;
	struct rds_iw_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int sent;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Fastreg support */
	if (rds_rdma_cookie_key(rm->m_rdma_cookie) && !ic->i_fastreg_posted) {
		ret = -EAGAIN;
		goto out;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_iw_stats_inc(s_iw_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	credit_alloc = work_alloc;
	if (ic->i_flowctl) {
		credit_alloc = rds_iw_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled++;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_iw_stats_inc(s_iw_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_rm) {
		/*
		printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(rm->m_inc.i_hdr.h_dport),
				rm->m_inc.i_hdr.h_flags,
				be32_to_cpu(rm->m_inc.i_hdr.h_len));
		   */
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
				rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
		ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
		rds_message_addref(rm);
		ic->i_rm = rm;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has a RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_iw_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_iw_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_iw_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
		adv_credits += posted;
		BUG_ON(adv_credits > 255);
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->data.op_sg[sg];
	sent = 0;
	i = 0;

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/*
	 * We could be copying the header into the unused tail of the page.
	 * That would need to be changed in the future when those pages might
	 * be mapped userspace pages or page cache pages.  So instead we always
	 * use a second sge and our long-lived ring of mapped headers.  We send
	 * the header after the data so that the data payload can be aligned on
	 * the receiver.
	 */

	/* handle a 0-len message */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
		rds_iw_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
		goto add_header;
	}

	/* if there's data reference it with a chain of work reqs */
	for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) {
		unsigned int len;

		send = &ic->i_sends[pos];

		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
		rds_iw_xmit_populate_wr(ic, send, pos,
				ib_sg_dma_address(dev, scat) + off, len,
				send_flags);

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time
		 * on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		ic->i_unsignaled_bytes -= len;
		if (ic->i_unsignaled_bytes <= 0) {
			ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		sent += len;
		off += len;
		if (off == ib_sg_dma_len(dev, scat)) {
			scat++;
			off = 0;
		}

add_header:
		/* Tack on the header after the data. The header SGE should already
		 * have been set up to point to the right header buffer. */
		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		if (0) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(hdr->h_dport),
				hdr->h_flags,
				be32_to_cpu(hdr->h_len));
		}
		if (adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_iw_stats_inc(s_iw_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
	}

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}

	if (i < work_alloc) {
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_iw_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}
		goto out;
	}

	ret = sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Build the fast-registration WR for an RDMA read: each individual page
 * in the sg list is added to the fast reg page list and placed
 * inside the fast_reg_mr WR.
 */
static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rds_iw_connection *ic, struct rds_iw_send_work *send, int nent, int len, u64 sg_addr)
{
	BUG_ON(nent > send->s_page_list->max_page_list_len);
	/*
	 * Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR.
	 */
	send->s_wr.opcode = IB_WR_FAST_REG_MR;
	send->s_wr.wr.fast_reg.length = len;
	send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey;
	send->s_wr.wr.fast_reg.page_list = send->s_page_list;
	send->s_wr.wr.fast_reg.page_list_len = nent;
	send->s_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE;
	send->s_wr.wr.fast_reg.iova_start = sg_addr;

	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
}

/*
 * Post the chain of RDMA work requests for @op.  Writes use plain
 * IB_WR_RDMA_WRITE; reads use IB_WR_RDMA_READ_WITH_INV plus an extra
 * fastreg WR (built last, posted first at fr_pos) because putting the
 * lkey on the wire is unsafe on iWARP - see the comment further down.
 */
int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_send_work *send = NULL;
	struct rds_iw_send_work *first;
	struct rds_iw_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_iw_device *rds_iwdev;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 pos, fr_pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;

	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);

	/* map the message the first time we see it */
	if (!op->op_mapped) {
		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
					     op->op_sg, op->op_nents, (op->op_write) ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
		if (op->op_count == 0) {
			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->op_mapped = 1;
	}

	if (!op->op_write) {
		/* Alloc space on the send queue for the fastreg */
		work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
		if (work_alloc != 1) {
			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
			rds_iw_stats_inc(s_iw_tx_ring_full);
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->op_count, rds_iwdev->max_sge);

	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_iw_stats_inc(s_iw_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	if (!op->op_write) {
		first = prev = &ic->i_sends[fr_pos];
	} else {
		first = send;
		prev = NULL;
	}
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
			send->s_wr.send_flags = IB_SEND_SIGNALED;
		}

		/* To avoid the need to have the plumbing to invalidate the fastreg_mr used
		 * for local access after RDS is finished with it, using
		 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
		 */
		if (op->op_write)
			send->s_wr.opcode = IB_WR_RDMA_WRITE;
		else
			send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;

		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->op_rkey;
		send->s_op = op;

		if (num_sge > rds_iwdev->max_sge) {
			send->s_wr.num_sge = rds_iwdev->max_sge;
			num_sge -= rds_iwdev->max_sge;
		} else
			send->s_wr.num_sge = num_sge;

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);

			if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
				send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat);
			else {
				send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat);
				send->s_sge[j].length = len;
				send->s_sge[j].lkey = rds_iw_local_dma_lkey(ic);
			}

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
			remote_addr += len;

			scat++;
		}

		if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) {
			send->s_wr.num_sge = 1;
			send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr;
			send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes;
			send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* if we finished the message then send completion owns it */
	if (scat == &op->op_sg[op->op_count])
		first->s_wr.send_flags = IB_SEND_SIGNALED;

	if (i < work_alloc) {
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	/* On iWARP, local memory access by a remote system (ie, RDMA Read) is not
	 * recommended.  Putting the lkey on the wire is a security hole, as it can
	 * allow for memory access to all of memory on the remote system.  Some
	 * adapters do not allow using the lkey for this at all.  To bypass this use a
	 * fastreg_mr (or possibly a dma_mr)
	 */
	if (!op->op_write) {
		rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
					  op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
		work_alloc++;
	}

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

out:
	return ret;
}

/*
 * Transmit-complete hook: retry any ACK or window update that was
 * previously blocked by flow control.
 */
void rds_iw_xmit_complete(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_iw_attempt_ack(ic);
}
gpl-2.0
aopp/android_kernel_google_msm
drivers/mmc/core/sdio_bus.c
4334
7633
/*
 *  linux/drivers/mmc/core/sdio_bus.c
 *
 *  Copyright 2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * SDIO function driver model
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>

#include "sdio_cis.h"
#include "sdio_bus.h"

#ifdef CONFIG_MMC_EMBEDDED_SDIO
#include <linux/mmc/host.h>
#endif

/* show configuration fields */
#define sdio_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf)	\
{									\
	struct sdio_func *func;						\
									\
	func = dev_to_sdio_func(dev);					\
	return sprintf(buf, format_string, func->field);		\
}

sdio_config_attr(class, "0x%02x\n");
sdio_config_attr(vendor, "0x%04x\n");
sdio_config_attr(device, "0x%04x\n");

/* Expose the modalias so userspace (udev/modprobe) can match a driver
 * module; the format mirrors sdio_bus_uevent() below. */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sdio_func *func = dev_to_sdio_func(dev);

	return sprintf(buf, "sdio:c%02Xv%04Xd%04X\n",
			func->class, func->vendor, func->device);
}

static struct device_attribute sdio_dev_attrs[] = {
	__ATTR_RO(class),
	__ATTR_RO(vendor),
	__ATTR_RO(device),
	__ATTR_RO(modalias),
	__ATTR_NULL,
};

/* Match one id-table entry against a function; SDIO_ANY_ID wildcards
 * each field independently.  Returns the entry on match, else NULL. */
static const struct sdio_device_id *sdio_match_one(struct sdio_func *func,
	const struct sdio_device_id *id)
{
	if (id->class != (__u8)SDIO_ANY_ID && id->class != func->class)
		return NULL;
	if (id->vendor != (__u16)SDIO_ANY_ID && id->vendor != func->vendor)
		return NULL;
	if (id->device != (__u16)SDIO_ANY_ID && id->device != func->device)
		return NULL;
	return id;
}

/* Walk a driver's id table (terminated by an all-zero entry) looking
 * for an entry that matches @func. */
static const struct sdio_device_id *sdio_match_device(struct sdio_func *func,
	struct sdio_driver *sdrv)
{
	const struct sdio_device_id *ids;

	ids = sdrv->id_table;

	if (ids) {
		while (ids->class || ids->vendor || ids->device) {
			if (sdio_match_one(func, ids))
				return ids;
			ids++;
		}
	}

	return NULL;
}

/* Bus-level match callback: non-zero iff @drv's id table covers @dev. */
static int sdio_bus_match(struct device *dev, struct device_driver *drv)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct sdio_driver *sdrv = to_sdio_driver(drv);

	if (sdio_match_device(func, sdrv))
		return 1;

	return 0;
}

/* Emit the hotplug environment variables for a new SDIO function. */
static int
sdio_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct sdio_func *func = dev_to_sdio_func(dev);

	if (add_uevent_var(env,
			"SDIO_CLASS=%02X", func->class))
		return -ENOMEM;

	if (add_uevent_var(env, 
			"SDIO_ID=%04X:%04X", func->vendor, func->device))
		return -ENOMEM;

	if (add_uevent_var(env,
			"MODALIAS=sdio:c%02Xv%04Xd%04X",
			func->class, func->vendor, func->device))
		return -ENOMEM;

	return 0;
}

/*
 * Bind a matched driver to an SDIO function: power the card up (runtime
 * PM, only when the host supports powering the card off), set a sane
 * default block size, then call the driver's probe.
 */
static int sdio_bus_probe(struct device *dev)
{
	struct sdio_driver *drv = to_sdio_driver(dev->driver);
	struct sdio_func *func = dev_to_sdio_func(dev);
	const struct sdio_device_id *id;
	int ret;

	id = sdio_match_device(func, drv);
	if (!id)
		return -ENODEV;

	/* Unbound SDIO functions are always suspended.
	 * During probe, the function is set active and the usage count
	 * is incremented.  If the driver supports runtime PM,
	 * it should call pm_runtime_put_noidle() in its probe routine and
	 * pm_runtime_get_noresume() in its remove routine.
	 */
	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
		ret = pm_runtime_get_sync(dev);
		if (ret < 0)
			goto out;
	}

	/* Set the default block size so the driver is sure it's something
	 * sensible. */
	sdio_claim_host(func);
	ret = sdio_set_block_size(func, 0);
	sdio_release_host(func);
	if (ret)
		goto disable_runtimepm;

	ret = drv->probe(func, id);
	if (ret)
		goto disable_runtimepm;

	return 0;

disable_runtimepm:
	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
		pm_runtime_put_noidle(dev);
out:
	return ret;
}

/*
 * Unbind a driver from an SDIO function.  The card is kept powered for
 * the driver's ->remove(); a leftover IRQ handler is released with a
 * warning, then the probe-time runtime-PM accounting is undone.
 */
static int sdio_bus_remove(struct device *dev)
{
	struct sdio_driver *drv = to_sdio_driver(dev->driver);
	struct sdio_func *func = dev_to_sdio_func(dev);
	int ret = 0;

	/* Make sure card is powered before invoking ->remove() */
	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
		pm_runtime_get_sync(dev);

	drv->remove(func);

	if (func->irq_handler) {
		pr_warning("WARNING: driver %s did not remove "
			"its interrupt handler!\n", drv->name);
		sdio_claim_host(func);
		sdio_release_irq(func);
		sdio_release_host(func);
	}

	/* First, undo the increment made directly above */
	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
		pm_runtime_put_noidle(dev);

	/* Then undo the runtime PM settings in sdio_bus_probe() */
	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
		pm_runtime_put_sync(dev);

	return ret;
}

#ifdef CONFIG_PM

/* System sleep is a no-op at the bus level; runtime PM delegates to the
 * generic helpers. */
static int pm_no_operation(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops sdio_bus_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_no_operation, pm_no_operation)
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		pm_generic_runtime_idle
	)
};

#define SDIO_PM_OPS_PTR	(&sdio_bus_pm_ops)

#else /* !CONFIG_PM */

#define SDIO_PM_OPS_PTR	NULL

#endif /* !CONFIG_PM */

static struct bus_type sdio_bus_type = {
	.name		= "sdio",
	.dev_attrs	= sdio_dev_attrs,
	.match		= sdio_bus_match,
	.uevent		= sdio_bus_uevent,
	.probe		= sdio_bus_probe,
	.remove		= sdio_bus_remove,
	.pm		= SDIO_PM_OPS_PTR,
};

int sdio_register_bus(void)
{
	return bus_register(&sdio_bus_type);
}

void sdio_unregister_bus(void)
{
	bus_unregister(&sdio_bus_type);
}

/**
 *	sdio_register_driver - register a function driver
 *	@drv: SDIO function driver
 */
int sdio_register_driver(struct sdio_driver *drv)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &sdio_bus_type;
	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(sdio_register_driver);

/**
 *	sdio_unregister_driver - unregister a function driver
 *	@drv: SDIO function driver
 */
void sdio_unregister_driver(struct sdio_driver *drv)
{
	drv->drv.bus = &sdio_bus_type;
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(sdio_unregister_driver);

/* Device-model release callback: free the function's CIS tables (unless
 * they belong to embedded-SDIO platform data), its info and itself. */
static void sdio_release_func(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);

#ifdef CONFIG_MMC_EMBEDDED_SDIO
	/*
	 * If this device is embedded then we never allocated
	 * cis tables for this func
	 */
	if (!func->card->host->embedded_sdio_data.funcs)
#endif
		sdio_free_func_cis(func);

	if (func->info)
		kfree(func->info);

	kfree(func);
}

/*
 * Allocate and initialise a new SDIO function structure.
 */
struct sdio_func *sdio_alloc_func(struct mmc_card *card)
{
	struct sdio_func *func;

	func = kzalloc(sizeof(struct sdio_func), GFP_KERNEL);
	if (!func)
		return ERR_PTR(-ENOMEM);

	func->card = card;

	device_initialize(&func->dev);

	func->dev.parent = &card->dev;
	func->dev.bus = &sdio_bus_type;
	func->dev.release = sdio_release_func;

	return func;
}

/*
 * Register a new SDIO function with the driver model.
 */
int sdio_add_func(struct sdio_func *func)
{
	int ret;

	dev_set_name(&func->dev, "%s:%d", mmc_card_id(func->card), func->num);

	ret = device_add(&func->dev);
	if (ret == 0)
		sdio_func_set_present(func);

	return ret;
}

/*
 * Unregister a SDIO function with the driver model, and
 * (eventually) free it.
 * This function can be called through error paths where sdio_add_func() was
 * never executed (because a failure occurred at an earlier point).
 */
void sdio_remove_func(struct sdio_func *func)
{
	if (!sdio_func_present(func))
		return;

	device_del(&func->dev);
	put_device(&func->dev);
}
gpl-2.0
mathkid95/linux_lg_lollipop
arch/powerpc/kernel/of_platform.c
4590
3009
/* * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * and Arnd Bergmann, IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #undef DEBUG #include <linux/string.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/export.h> #include <linux/mod_devicetable.h> #include <linux/pci.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/atomic.h> #include <asm/errno.h> #include <asm/topology.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/eeh.h> #ifdef CONFIG_PPC_OF_PLATFORM_PCI /* The probing of PCI controllers from of_platform is currently * 64 bits only, mostly due to gratuitous differences between * the 32 and 64 bits PCI code on PowerPC and the 32 bits one * lacking some bits needed here. */ static int __devinit of_pci_phb_probe(struct platform_device *dev) { struct pci_controller *phb; /* Check if we can do that ... 
*/ if (ppc_md.pci_setup_phb == NULL) return -ENODEV; pr_info("Setting up PCI bus %s\n", dev->dev.of_node->full_name); /* Alloc and setup PHB data structure */ phb = pcibios_alloc_controller(dev->dev.of_node); if (!phb) return -ENODEV; /* Setup parent in sysfs */ phb->parent = &dev->dev; /* Setup the PHB using arch provided callback */ if (ppc_md.pci_setup_phb(phb)) { pcibios_free_controller(phb); return -ENODEV; } /* Process "ranges" property */ pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0); /* Init pci_dn data structures */ pci_devs_phb_init_dynamic(phb); /* Create EEH devices for the PHB */ eeh_dev_phb_init_dynamic(phb); /* Register devices with EEH */ #ifdef CONFIG_EEH if (dev->dev.of_node->child) eeh_add_device_tree_early(dev->dev.of_node); #endif /* CONFIG_EEH */ /* Scan the bus */ pcibios_scan_phb(phb); if (phb->bus == NULL) return -ENXIO; /* Claim resources. This might need some rework as well depending * wether we are doing probe-only or not, like assigning unassigned * resources etc... */ pcibios_claim_one_bus(phb->bus); /* Finish EEH setup */ #ifdef CONFIG_EEH eeh_add_device_tree_late(phb->bus); #endif /* Add probed PCI devices to the device model */ pci_bus_add_devices(phb->bus); return 0; } static struct of_device_id of_pci_phb_ids[] = { { .type = "pci", }, { .type = "pcix", }, { .type = "pcie", }, { .type = "pciex", }, { .type = "ht", }, {} }; static struct platform_driver of_pci_phb_driver = { .probe = of_pci_phb_probe, .driver = { .name = "of-pci", .owner = THIS_MODULE, .of_match_table = of_pci_phb_ids, }, }; static __init int of_pci_phb_init(void) { return platform_driver_register(&of_pci_phb_driver); } device_initcall(of_pci_phb_init); #endif /* CONFIG_PPC_OF_PLATFORM_PCI */
gpl-2.0
laufersteppenwolf/android_kernel_lge_d680
arch/arm/mach-omap1/dma.c
4846
8656
/*
 * OMAP1/OMAP7xx - specific DMA driver
 *
 * Copyright (C) 2003 - 2008 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 * Graphics DMA and LCD DMA graphics tranformations
 * by Imre Deak <imre.deak@nokia.com>
 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 * Converted DMA library into platform driver
 *	- G, Manjunath Kondaiah <manjugk@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/io.h>

#include <plat/dma.h>
#include <plat/tc.h>
#include <plat/irqs.h>

#define OMAP1_DMA_BASE			(0xfffed800)
#define OMAP1_LOGICAL_DMA_CH_COUNT	17
#define OMAP1_DMA_STRIDE		0x40

static u32 errata;			/* DMA_ERRATA_* bits, see configure_dma_errata() */
static u32 enable_1510_mode;		/* non-zero when running in OMAP15xx compat mode */
static u8 dma_stride;			/* byte stride between per-channel register banks */
static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;

/*
 * Register offsets from OMAP1_DMA_BASE, indexed by the generic
 * enum omap_reg_offsets.  Entries from CSDP on are per-logical-channel
 * and are additionally offset by (dma_stride * lch).
 */
static u16 reg_map[] = {
	[GCR]		= 0x400,
	[GSCR]		= 0x404,
	[GRST1]		= 0x408,
	[HW_ID]		= 0x442,
	[PCH2_ID]	= 0x444,
	[PCH0_ID]	= 0x446,
	[PCH1_ID]	= 0x448,
	[PCHG_ID]	= 0x44a,
	[PCHD_ID]	= 0x44c,
	[CAPS_0]	= 0x44e,
	[CAPS_1]	= 0x452,
	[CAPS_2]	= 0x456,
	[CAPS_3]	= 0x458,
	[CAPS_4]	= 0x45a,
	[PCH2_SR]	= 0x460,
	[PCH0_SR]	= 0x480,
	[PCH1_SR]	= 0x482,
	[PCHD_SR]	= 0x4c0,

	/* Common Registers */
	[CSDP]		= 0x00,
	[CCR]		= 0x02,
	[CICR]		= 0x04,
	[CSR]		= 0x06,
	[CEN]		= 0x10,
	[CFN]		= 0x12,
	[CSFI]		= 0x14,
	[CSEI]		= 0x16,
	[CPC]		= 0x18,	/* 15xx only */
	[CSAC]		= 0x18,
	[CDAC]		= 0x1a,
	[CDEI]		= 0x1c,
	[CDFI]		= 0x1e,
	[CLNK_CTRL]	= 0x28,

	/* Channel specific register offsets */
	[CSSA]		= 0x08,
	[CDSA]		= 0x0c,
	[COLOR]		= 0x20,
	[CCR2]		= 0x24,
	[LCH_CTRL]	= 0x2a,
};

/* MMIO window plus one IRQ per logical channel (17 on OMAP16xx/7xx). */
static struct resource res[] __initdata = {
	[0] = {
		.start	= OMAP1_DMA_BASE,
		.end	= OMAP1_DMA_BASE + SZ_2K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.name	= "0",
		.start	= INT_DMA_CH0_6,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.name	= "1",
		.start	= INT_DMA_CH1_7,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.name	= "2",
		.start	= INT_DMA_CH2_8,
		.flags	= IORESOURCE_IRQ,
	},
	[4] = {
		.name	= "3",
		.start	= INT_DMA_CH3,
		.flags	= IORESOURCE_IRQ,
	},
	[5] = {
		.name	= "4",
		.start	= INT_DMA_CH4,
		.flags	= IORESOURCE_IRQ,
	},
	[6] = {
		.name	= "5",
		.start	= INT_DMA_CH5,
		.flags	= IORESOURCE_IRQ,
	},
	/* Handled in lcd_dma.c */
	[7] = {
		.name	= "6",
		.start	= INT_1610_DMA_CH6,
		.flags	= IORESOURCE_IRQ,
	},
	/* irq's for omap16xx and omap7xx */
	[8] = {
		.name	= "7",
		.start	= INT_1610_DMA_CH7,
		.flags	= IORESOURCE_IRQ,
	},
	[9] = {
		.name	= "8",
		.start	= INT_1610_DMA_CH8,
		.flags	= IORESOURCE_IRQ,
	},
	[10] = {
		.name	= "9",
		.start	= INT_1610_DMA_CH9,
		.flags	= IORESOURCE_IRQ,
	},
	[11] = {
		.name	= "10",
		.start	= INT_1610_DMA_CH10,
		.flags	= IORESOURCE_IRQ,
	},
	[12] = {
		.name	= "11",
		.start	= INT_1610_DMA_CH11,
		.flags	= IORESOURCE_IRQ,
	},
	[13] = {
		.name	= "12",
		.start	= INT_1610_DMA_CH12,
		.flags	= IORESOURCE_IRQ,
	},
	[14] = {
		.name	= "13",
		.start	= INT_1610_DMA_CH13,
		.flags	= IORESOURCE_IRQ,
	},
	[15] = {
		.name	= "14",
		.start	= INT_1610_DMA_CH14,
		.flags	= IORESOURCE_IRQ,
	},
	[16] = {
		.name	= "15",
		.start	= INT_1610_DMA_CH15,
		.flags	= IORESOURCE_IRQ,
	},
	[17] = {
		.name	= "16",
		.start	= INT_DMA_LCD,
		.flags	= IORESOURCE_IRQ,
	},
};

static void __iomem *dma_base;

/*
 * Write a (possibly 32-bit) value to a 16-bit-wide DMA register.
 * Registers >= dma_common_ch_start are per-channel and get the
 * channel stride added.  Registers in the two listed ranges are
 * 32 bits wide and need a second 16-bit write for the high half.
 */
static inline void dma_write(u32 val, int reg, int lch)
{
	u8  stride;
	u32 offset;

	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
	offset = reg_map[reg] + (stride * lch);

	__raw_writew(val, dma_base + offset);
	if ((reg > CLNK_CTRL && reg < CCEN) ||
			(reg > PCHD_ID && reg < CAPS_2)) {
		u32 offset2 = reg_map[reg] + 2 + (stride * lch);
		__raw_writew(val >> 16, dma_base + offset2);
	}
}

/* Counterpart of dma_write(): reassembles 32-bit registers from two
 * 16-bit halves. */
static inline u32 dma_read(int reg, int lch)
{
	u8  stride;
	u32 offset, val;

	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
	offset = reg_map[reg] + (stride * lch);

	val = __raw_readw(dma_base + offset);
	if ((reg > CLNK_CTRL && reg < CCEN) ||
			(reg > PCHD_ID && reg < CAPS_2)) {
		u16 upper;
		u32 offset2 = reg_map[reg] + 2 + (stride * lch);
		upper = __raw_readw(dma_base + offset2);
		val |= (upper << 16);
	}
	return val;
}

/* Zero all per-channel registers of one logical channel. */
static void omap1_clear_lch_regs(int lch)
{
	int i = dma_common_ch_start;

	for (; i <= dma_common_ch_end; i += 1)
		dma_write(0, i, lch);
}

/* Disable a channel and acknowledge any pending interrupt status. */
static void omap1_clear_dma(int lch)
{
	u32 l;

	l = dma_read(CCR, lch);
	l &= ~OMAP_DMA_CCR_EN;
	dma_write(l, CCR, lch);

	/* Clear pending interrupts */
	l = dma_read(CSR, lch);
}

/* Print controller version/capabilities; on 16xx-class parts also
 * disable the OMAP 3.0/3.1 compatibility mode. */
static void omap1_show_dma_caps(void)
{
	if (enable_1510_mode) {
		printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
	} else {
		u16 w;
		printk(KERN_INFO "OMAP DMA hardware version %d\n",
							dma_read(HW_ID, 0));
		printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
			dma_read(CAPS_0, 0), dma_read(CAPS_1, 0),
			dma_read(CAPS_2, 0), dma_read(CAPS_3, 0),
			dma_read(CAPS_4, 0));

		/* Disable OMAP 3.0/3.1 compatibility mode. */
		w = dma_read(GSCR, 0);
		w |= 1 << 3;
		dma_write(w, GSCR, 0);
	}
	return;
}

static u32 configure_dma_errata(void)
{
	/*
	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
*/ if (!cpu_is_omap15xx()) SET_DMA_ERRATA(DMA_ERRATA_3_3); return errata; } static int __init omap1_system_dma_init(void) { struct omap_system_dma_plat_info *p; struct omap_dma_dev_attr *d; struct platform_device *pdev; int ret; pdev = platform_device_alloc("omap_dma_system", 0); if (!pdev) { pr_err("%s: Unable to device alloc for dma\n", __func__); return -ENOMEM; } dma_base = ioremap(res[0].start, resource_size(&res[0])); if (!dma_base) { pr_err("%s: Unable to ioremap\n", __func__); ret = -ENODEV; goto exit_device_put; } ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); goto exit_device_put; } p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); if (!p) { dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", __func__, pdev->name); ret = -ENOMEM; goto exit_device_del; } d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); if (!d) { dev_err(&pdev->dev, "%s: Unable to allocate 'd' for %s\n", __func__, pdev->name); ret = -ENOMEM; goto exit_release_p; } d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; /* Valid attributes for omap1 plus processors */ if (cpu_is_omap15xx()) d->dev_caps = ENABLE_1510_MODE; enable_1510_mode = d->dev_caps & ENABLE_1510_MODE; d->dev_caps |= SRC_PORT; d->dev_caps |= DST_PORT; d->dev_caps |= SRC_INDEX; d->dev_caps |= DST_INDEX; d->dev_caps |= IS_BURST_ONLY4; d->dev_caps |= CLEAR_CSR_ON_READ; d->dev_caps |= IS_WORD_16; d->chan = kzalloc(sizeof(struct omap_dma_lch) * (d->lch_count), GFP_KERNEL); if (!d->chan) { dev_err(&pdev->dev, "%s: Memory allocation failed" "for d->chan!!!\n", __func__); goto exit_release_d; } if (cpu_is_omap15xx()) d->chan_count = 9; else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { if (!(d->dev_caps & ENABLE_1510_MODE)) d->chan_count = 16; else d->chan_count = 9; } p->dma_attr = d; p->show_dma_caps = omap1_show_dma_caps; p->clear_lch_regs = omap1_clear_lch_regs; p->clear_dma = 
omap1_clear_dma; p->dma_write = dma_write; p->dma_read = dma_read; p->disable_irq_lch = NULL; p->errata = configure_dma_errata(); ret = platform_device_add_data(pdev, p, sizeof(*p)); if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); goto exit_release_chan; } ret = platform_device_add(pdev); if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); goto exit_release_chan; } dma_stride = OMAP1_DMA_STRIDE; dma_common_ch_start = CPC; dma_common_ch_end = COLOR; return ret; exit_release_chan: kfree(d->chan); exit_release_d: kfree(d); exit_release_p: kfree(p); exit_device_del: platform_device_del(pdev); exit_device_put: platform_device_put(pdev); return ret; } arch_initcall(omap1_system_dma_init);
gpl-2.0
MCP1/android_kernel_motorola_msm8960dt-common
drivers/mfd/da9052-core.c
4846
15426
/*
 * Device access for Dialog DA9052 PMICs.
 *
 * Copyright(c) 2011 Dialog Semiconductor Ltd.
 *
 * Author: David Dajun Chen <dchen@diasemi.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/pdata.h>
#include <linux/mfd/da9052/reg.h>

#define DA9052_NUM_IRQ_REGS		4
/* Bit positions within one 8-bit event/mask register. */
#define DA9052_IRQ_MASK_POS_1		0x01
#define DA9052_IRQ_MASK_POS_2		0x02
#define DA9052_IRQ_MASK_POS_3		0x04
#define DA9052_IRQ_MASK_POS_4		0x08
#define DA9052_IRQ_MASK_POS_5		0x10
#define DA9052_IRQ_MASK_POS_6		0x20
#define DA9052_IRQ_MASK_POS_7		0x40
#define DA9052_IRQ_MASK_POS_8		0x80

/* regmap predicate: registers the host may read. */
static bool da9052_reg_readable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case DA9052_PAGE0_CON_REG:
	case DA9052_STATUS_A_REG: case DA9052_STATUS_B_REG:
	case DA9052_STATUS_C_REG: case DA9052_STATUS_D_REG:
	case DA9052_EVENT_A_REG: case DA9052_EVENT_B_REG:
	case DA9052_EVENT_C_REG: case DA9052_EVENT_D_REG:
	case DA9052_FAULTLOG_REG:
	case DA9052_IRQ_MASK_A_REG: case DA9052_IRQ_MASK_B_REG:
	case DA9052_IRQ_MASK_C_REG: case DA9052_IRQ_MASK_D_REG:
	case DA9052_CONTROL_A_REG: case DA9052_CONTROL_B_REG:
	case DA9052_CONTROL_C_REG: case DA9052_CONTROL_D_REG:
	case DA9052_PDDIS_REG:
	case DA9052_INTERFACE_REG:
	case DA9052_RESET_REG:
	case DA9052_GPIO_0_1_REG: case DA9052_GPIO_2_3_REG:
	case DA9052_GPIO_4_5_REG: case DA9052_GPIO_6_7_REG:
	case DA9052_GPIO_14_15_REG:
	case DA9052_ID_0_1_REG: case DA9052_ID_2_3_REG:
	case DA9052_ID_4_5_REG: case DA9052_ID_6_7_REG:
	case DA9052_ID_8_9_REG: case DA9052_ID_10_11_REG:
	case DA9052_ID_12_13_REG: case DA9052_ID_14_15_REG:
	case DA9052_ID_16_17_REG: case DA9052_ID_18_19_REG:
	case DA9052_ID_20_21_REG:
	case DA9052_SEQ_STATUS_REG: case DA9052_SEQ_A_REG:
	case DA9052_SEQ_B_REG: case DA9052_SEQ_TIMER_REG:
	case DA9052_BUCKA_REG: case DA9052_BUCKB_REG:
	case DA9052_BUCKCORE_REG: case DA9052_BUCKPRO_REG:
	case DA9052_BUCKMEM_REG: case DA9052_BUCKPERI_REG:
	case DA9052_LDO1_REG: case DA9052_LDO2_REG:
	case DA9052_LDO3_REG: case DA9052_LDO4_REG:
	case DA9052_LDO5_REG: case DA9052_LDO6_REG:
	case DA9052_LDO7_REG: case DA9052_LDO8_REG:
	case DA9052_LDO9_REG: case DA9052_LDO10_REG:
	case DA9052_SUPPLY_REG:
	case DA9052_PULLDOWN_REG:
	case DA9052_CHGBUCK_REG:
	case DA9052_WAITCONT_REG:
	case DA9052_ISET_REG:
	case DA9052_BATCHG_REG:
	case DA9052_CHG_CONT_REG:
	case DA9052_INPUT_CONT_REG:
	case DA9052_CHG_TIME_REG:
	case DA9052_BBAT_CONT_REG:
	case DA9052_BOOST_REG:
	case DA9052_LED_CONT_REG:
	case DA9052_LEDMIN123_REG:
	case DA9052_LED1_CONF_REG: case DA9052_LED2_CONF_REG:
	case DA9052_LED3_CONF_REG:
	case DA9052_LED1CONT_REG: case DA9052_LED2CONT_REG:
	case DA9052_LED3CONT_REG:
	case DA9052_LED_CONT_4_REG: case DA9052_LED_CONT_5_REG:
	case DA9052_ADC_MAN_REG: case DA9052_ADC_CONT_REG:
	case DA9052_ADC_RES_L_REG: case DA9052_ADC_RES_H_REG:
	case DA9052_VDD_RES_REG: case DA9052_VDD_MON_REG:
	case DA9052_ICHG_AV_REG: case DA9052_ICHG_THD_REG:
	case DA9052_ICHG_END_REG:
	case DA9052_TBAT_RES_REG: case DA9052_TBAT_HIGHP_REG:
	case DA9052_TBAT_HIGHN_REG: case DA9052_TBAT_LOW_REG:
	case DA9052_T_OFFSET_REG:
	case DA9052_ADCIN4_RES_REG: case DA9052_AUTO4_HIGH_REG:
	case DA9052_AUTO4_LOW_REG:
	case DA9052_ADCIN5_RES_REG: case DA9052_AUTO5_HIGH_REG:
	case DA9052_AUTO5_LOW_REG:
	case DA9052_ADCIN6_RES_REG: case DA9052_AUTO6_HIGH_REG:
	case DA9052_AUTO6_LOW_REG:
	case DA9052_TJUNC_RES_REG:
	case DA9052_TSI_CONT_A_REG: case DA9052_TSI_CONT_B_REG:
	case DA9052_TSI_X_MSB_REG: case DA9052_TSI_Y_MSB_REG:
	case DA9052_TSI_LSB_REG: case DA9052_TSI_Z_MSB_REG:
	case DA9052_COUNT_S_REG: case DA9052_COUNT_MI_REG:
	case DA9052_COUNT_H_REG: case DA9052_COUNT_D_REG:
	case DA9052_COUNT_MO_REG: case DA9052_COUNT_Y_REG:
	case DA9052_ALARM_MI_REG: case DA9052_ALARM_H_REG:
	case DA9052_ALARM_D_REG: case DA9052_ALARM_MO_REG:
	case DA9052_ALARM_Y_REG:
	case DA9052_SECOND_A_REG: case DA9052_SECOND_B_REG:
	case DA9052_SECOND_C_REG: case DA9052_SECOND_D_REG:
	case DA9052_PAGE1_CON_REG:
		return true;
	default:
		return false;
	}
}

/* regmap predicate: registers the host may write (readable set minus
 * read-only status/result registers). */
static bool da9052_reg_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case DA9052_PAGE0_CON_REG:
	case DA9052_EVENT_A_REG: case DA9052_EVENT_B_REG:
	case DA9052_EVENT_C_REG: case DA9052_EVENT_D_REG:
	case DA9052_IRQ_MASK_A_REG: case DA9052_IRQ_MASK_B_REG:
	case DA9052_IRQ_MASK_C_REG: case DA9052_IRQ_MASK_D_REG:
	case DA9052_CONTROL_A_REG: case DA9052_CONTROL_B_REG:
	case DA9052_CONTROL_C_REG: case DA9052_CONTROL_D_REG:
	case DA9052_PDDIS_REG:
	case DA9052_RESET_REG:
	case DA9052_GPIO_0_1_REG: case DA9052_GPIO_2_3_REG:
	case DA9052_GPIO_4_5_REG: case DA9052_GPIO_6_7_REG:
	case DA9052_GPIO_14_15_REG:
	case DA9052_ID_0_1_REG: case DA9052_ID_2_3_REG:
	case DA9052_ID_4_5_REG: case DA9052_ID_6_7_REG:
	case DA9052_ID_8_9_REG: case DA9052_ID_10_11_REG:
	case DA9052_ID_12_13_REG: case DA9052_ID_14_15_REG:
	case DA9052_ID_16_17_REG: case DA9052_ID_18_19_REG:
	case DA9052_ID_20_21_REG:
	case DA9052_SEQ_STATUS_REG: case DA9052_SEQ_A_REG:
	case DA9052_SEQ_B_REG: case DA9052_SEQ_TIMER_REG:
	case DA9052_BUCKA_REG: case DA9052_BUCKB_REG:
	case DA9052_BUCKCORE_REG: case DA9052_BUCKPRO_REG:
	case DA9052_BUCKMEM_REG: case DA9052_BUCKPERI_REG:
	case DA9052_LDO1_REG: case DA9052_LDO2_REG:
	case DA9052_LDO3_REG: case DA9052_LDO4_REG:
	case DA9052_LDO5_REG: case DA9052_LDO6_REG:
	case DA9052_LDO7_REG: case DA9052_LDO8_REG:
	case DA9052_LDO9_REG: case DA9052_LDO10_REG:
	case DA9052_SUPPLY_REG:
	case DA9052_PULLDOWN_REG:
	case DA9052_CHGBUCK_REG:
	case DA9052_WAITCONT_REG:
	case DA9052_ISET_REG:
	case DA9052_BATCHG_REG:
	case DA9052_CHG_CONT_REG:
	case DA9052_INPUT_CONT_REG:
	case DA9052_BBAT_CONT_REG:
	case DA9052_BOOST_REG:
	case DA9052_LED_CONT_REG:
	case DA9052_LEDMIN123_REG:
	case DA9052_LED1_CONF_REG: case DA9052_LED2_CONF_REG:
	case DA9052_LED3_CONF_REG:
	case DA9052_LED1CONT_REG: case DA9052_LED2CONT_REG:
	case DA9052_LED3CONT_REG:
	case DA9052_LED_CONT_4_REG: case DA9052_LED_CONT_5_REG:
	case DA9052_ADC_MAN_REG: case DA9052_ADC_CONT_REG:
	case DA9052_ADC_RES_L_REG: case DA9052_ADC_RES_H_REG:
	case DA9052_VDD_RES_REG: case DA9052_VDD_MON_REG:
	case DA9052_ICHG_THD_REG: case DA9052_ICHG_END_REG:
	case DA9052_TBAT_HIGHP_REG: case DA9052_TBAT_HIGHN_REG:
	case DA9052_TBAT_LOW_REG:
	case DA9052_T_OFFSET_REG:
	case DA9052_AUTO4_HIGH_REG: case DA9052_AUTO4_LOW_REG:
	case DA9052_AUTO5_HIGH_REG: case DA9052_AUTO5_LOW_REG:
	case DA9052_AUTO6_HIGH_REG: case DA9052_AUTO6_LOW_REG:
	case DA9052_TSI_CONT_A_REG: case DA9052_TSI_CONT_B_REG:
	case DA9052_COUNT_S_REG: case DA9052_COUNT_MI_REG:
	case DA9052_COUNT_H_REG: case DA9052_COUNT_D_REG:
	case DA9052_COUNT_MO_REG: case DA9052_COUNT_Y_REG:
	case DA9052_ALARM_MI_REG: case DA9052_ALARM_H_REG:
	case DA9052_ALARM_D_REG: case DA9052_ALARM_MO_REG:
	case DA9052_ALARM_Y_REG:
	case DA9052_PAGE1_CON_REG:
		return true;
	default:
		return false;
	}
}

/* regmap predicate: registers that must bypass the register cache
 * (hardware-updated status, ADC results, RTC counters). */
static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case DA9052_STATUS_A_REG: case DA9052_STATUS_B_REG:
	case DA9052_STATUS_C_REG: case DA9052_STATUS_D_REG:
	case DA9052_EVENT_A_REG: case DA9052_EVENT_B_REG:
	case DA9052_EVENT_C_REG: case DA9052_EVENT_D_REG:
	case DA9052_FAULTLOG_REG:
	case DA9052_CHG_TIME_REG:
	case DA9052_ADC_RES_L_REG: case DA9052_ADC_RES_H_REG:
	case DA9052_VDD_RES_REG:
	case DA9052_ICHG_AV_REG:
	case DA9052_TBAT_RES_REG:
	case DA9052_ADCIN4_RES_REG:
	case DA9052_ADCIN5_RES_REG:
	case DA9052_ADCIN6_RES_REG:
	case DA9052_TJUNC_RES_REG:
	case DA9052_TSI_X_MSB_REG: case DA9052_TSI_Y_MSB_REG:
	case DA9052_TSI_LSB_REG: case DA9052_TSI_Z_MSB_REG:
	case DA9052_COUNT_S_REG: case DA9052_COUNT_MI_REG:
	case DA9052_COUNT_H_REG: case DA9052_COUNT_D_REG:
	case DA9052_COUNT_MO_REG: case DA9052_COUNT_Y_REG:
	case DA9052_ALARM_MI_REG:
		return true;
	default:
		return false;
	}
}

/* IRQ resources handed to the MFD sub-devices below. */
static struct resource da9052_rtc_resource = {
	.name = "ALM",
	.start = DA9052_IRQ_ALARM,
	.end   = DA9052_IRQ_ALARM,
	.flags = IORESOURCE_IRQ,
};

static struct resource da9052_onkey_resource = {
	.name = "ONKEY",
	.start = DA9052_IRQ_NONKEY,
	.end   = DA9052_IRQ_NONKEY,
	.flags = IORESOURCE_IRQ,
};

static struct resource da9052_bat_resources[] = {
	{
		.name = "BATT TEMP",
		.start = DA9052_IRQ_TBAT,
		.end   = DA9052_IRQ_TBAT,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name = "DCIN DET",
		.start = DA9052_IRQ_DCIN,
		.end   = DA9052_IRQ_DCIN,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name = "DCIN REM",
		.start = DA9052_IRQ_DCINREM,
		.end   = DA9052_IRQ_DCINREM,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name = "VBUS DET",
		.start = DA9052_IRQ_VBUS,
		.end   = DA9052_IRQ_VBUS,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name = "VBUS REM",
		.start = DA9052_IRQ_VBUSREM,
		.end   = DA9052_IRQ_VBUSREM,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name = "CHG END",
		.start = DA9052_IRQ_CHGEND,
		.end   = DA9052_IRQ_CHGEND,
		.flags = IORESOURCE_IRQ,
	},
};

static struct resource da9052_tsi_resources[] = {
	{
		.name = "PENDWN",
		.start = DA9052_IRQ_PENDOWN,
		.end   = DA9052_IRQ_PENDOWN,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name = "TSIRDY",
		.start = DA9052_IRQ_TSIREADY,
		.end   = DA9052_IRQ_TSIREADY,
		.flags = IORESOURCE_IRQ,
	},
};

/* Child devices instantiated by mfd_add_devices() in da9052_device_init(). */
static struct mfd_cell __devinitdata da9052_subdev_info[] = {
	{
		.name = "da9052-regulator",
		.id = 1,
	},
	{
		.name = "da9052-regulator",
		.id = 2,
	},
	{
		.name = "da9052-regulator",
		.id = 3,
	},
	{
		.name = "da9052-regulator",
		.id = 4,
	},
	{
		.name = "da9052-regulator",
		.id = 5,
	},
	{
		.name = "da9052-regulator",
		.id = 6,
	},
	{
		.name = "da9052-regulator",
		.id = 7,
	},
	{
		.name = "da9052-regulator",
		.id = 8,
	},
	{
		.name = "da9052-regulator",
		.id = 9,
	},
	{
		.name = "da9052-regulator",
		.id = 10,
	},
	{
		.name = "da9052-regulator",
		.id = 11,
	},
	{
		.name = "da9052-regulator",
		.id = 12,
	},
	{
		.name = "da9052-regulator",
		.id = 13,
	},
	{
		.name = "da9052-regulator",
		.id = 14,
	},
	{
		.name = "da9052-onkey",
		.resources = &da9052_onkey_resource,
		.num_resources = 1,
	},
	{
		.name = "da9052-rtc",
		.resources = &da9052_rtc_resource,
		.num_resources = 1,
	},
	{
		.name = "da9052-gpio",
	},
	{
		.name = "da9052-hwmon",
	},
	{
		.name = "da9052-leds",
	},
	{
		.name = "da9052-wled1",
	},
	{
		.name = "da9052-wled2",
	},
	{
		.name = "da9052-wled3",
	},
	{
		.name = "da9052-tsi",
		.resources = da9052_tsi_resources,
		.num_resources = ARRAY_SIZE(da9052_tsi_resources),
	},
	{
		.name = "da9052-bat",
		.resources = da9052_bat_resources,
		.num_resources = ARRAY_SIZE(da9052_bat_resources),
	},
	{
		.name = "da9052-watchdog",
	},
};

/* One regmap_irq per DA9052 event bit: four 8-bit event registers
 * (reg_offset 0..3), one mask bit per event. */
static struct regmap_irq da9052_irqs[] = {
	[DA9052_IRQ_DCIN] = {
		.reg_offset = 0,
		.mask = DA9052_IRQ_MASK_POS_1,
	},
	[DA9052_IRQ_VBUS] = {
		.reg_offset = 0,
		.mask = DA9052_IRQ_MASK_POS_2,
	},
	[DA9052_IRQ_DCINREM] = {
		.reg_offset = 0,
		.mask = DA9052_IRQ_MASK_POS_3,
	},
	[DA9052_IRQ_VBUSREM] = {
		.reg_offset = 0,
		.mask = DA9052_IRQ_MASK_POS_4,
	},
	[DA9052_IRQ_VDDLOW] = {
		.reg_offset = 0,
		.mask = DA9052_IRQ_MASK_POS_5,
	},
	[DA9052_IRQ_ALARM] = {
		.reg_offset = 0,
		.mask = DA9052_IRQ_MASK_POS_6,
	},
	[DA9052_IRQ_SEQRDY] = {
		.reg_offset = 0,
		.mask = DA9052_IRQ_MASK_POS_7,
	},
	[DA9052_IRQ_COMP1V2] = {
		.reg_offset = 0,
		.mask = DA9052_IRQ_MASK_POS_8,
	},
	[DA9052_IRQ_NONKEY] = {
		.reg_offset = 1,
		.mask = DA9052_IRQ_MASK_POS_1,
	},
	[DA9052_IRQ_IDFLOAT] = {
		.reg_offset = 1,
		.mask = DA9052_IRQ_MASK_POS_2,
	},
	[DA9052_IRQ_IDGND] = {
		.reg_offset = 1,
		.mask = DA9052_IRQ_MASK_POS_3,
	},
	[DA9052_IRQ_CHGEND] = {
		.reg_offset = 1,
		.mask = DA9052_IRQ_MASK_POS_4,
	},
	[DA9052_IRQ_TBAT] = {
		.reg_offset = 1,
		.mask = DA9052_IRQ_MASK_POS_5,
	},
	[DA9052_IRQ_ADC_EOM] = {
		.reg_offset = 1,
		.mask = DA9052_IRQ_MASK_POS_6,
	},
	[DA9052_IRQ_PENDOWN] = {
		.reg_offset = 1,
		.mask = DA9052_IRQ_MASK_POS_7,
	},
	[DA9052_IRQ_TSIREADY] = {
		.reg_offset = 1,
		.mask = DA9052_IRQ_MASK_POS_8,
	},
	[DA9052_IRQ_GPI0] = {
		.reg_offset = 2,
		.mask = DA9052_IRQ_MASK_POS_1,
	},
	[DA9052_IRQ_GPI1] = {
		.reg_offset = 2,
		.mask = DA9052_IRQ_MASK_POS_2,
	},
	[DA9052_IRQ_GPI2] = {
		.reg_offset = 2,
		.mask = DA9052_IRQ_MASK_POS_3,
	},
	[DA9052_IRQ_GPI3] = {
		.reg_offset = 2,
		.mask = DA9052_IRQ_MASK_POS_4,
	},
	[DA9052_IRQ_GPI4] = {
		.reg_offset = 2,
		.mask = DA9052_IRQ_MASK_POS_5,
	},
	[DA9052_IRQ_GPI5] = {
		.reg_offset = 2,
		.mask = DA9052_IRQ_MASK_POS_6,
	},
	[DA9052_IRQ_GPI6] = {
		.reg_offset = 2,
		.mask = DA9052_IRQ_MASK_POS_7,
	},
	[DA9052_IRQ_GPI7] = {
		.reg_offset = 2,
		.mask = DA9052_IRQ_MASK_POS_8,
	},
	[DA9052_IRQ_GPI8] = {
		.reg_offset = 3,
		.mask = DA9052_IRQ_MASK_POS_1,
	},
	[DA9052_IRQ_GPI9] = {
		.reg_offset = 3,
		.mask = DA9052_IRQ_MASK_POS_2,
	},
	[DA9052_IRQ_GPI10] = {
		.reg_offset = 3,
		.mask = DA9052_IRQ_MASK_POS_3,
	},
	[DA9052_IRQ_GPI11] = {
		.reg_offset = 3,
		.mask = DA9052_IRQ_MASK_POS_4,
	},
	[DA9052_IRQ_GPI12] = {
		.reg_offset = 3,
		.mask = DA9052_IRQ_MASK_POS_5,
	},
	[DA9052_IRQ_GPI13] = {
		.reg_offset = 3,
		.mask = DA9052_IRQ_MASK_POS_6,
	},
	[DA9052_IRQ_GPI14] = {
		.reg_offset = 3,
		.mask = DA9052_IRQ_MASK_POS_7,
	},
	[DA9052_IRQ_GPI15] = {
		.reg_offset = 3,
		.mask = DA9052_IRQ_MASK_POS_8,
	},
};

static struct regmap_irq_chip da9052_regmap_irq_chip = {
	.name = "da9052_irq",
	.status_base = DA9052_EVENT_A_REG,
	.mask_base = DA9052_IRQ_MASK_A_REG,
	.ack_base = DA9052_EVENT_A_REG,
	.num_regs = DA9052_NUM_IRQ_REGS,
	.irqs = da9052_irqs,
	.num_irqs = ARRAY_SIZE(da9052_irqs),
};

/* Shared by the I2C and SPI bus glue, hence exported. */
struct regmap_config da9052_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,

	.cache_type = REGCACHE_RBTREE,

	.max_register = DA9052_PAGE1_CON_REG,
	.readable_reg = da9052_reg_readable,
	.writeable_reg = da9052_reg_writeable,
	.volatile_reg = da9052_reg_volatile,
};
EXPORT_SYMBOL_GPL(da9052_regmap_config);

/*
 * Common init called by the bus-specific probe: run the board init
 * hook, register the IRQ chip and instantiate all MFD sub-devices.
 */
int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
{
	struct da9052_pdata *pdata = da9052->dev->platform_data;
	struct irq_desc *desc;
	int ret;

	if (pdata && pdata->init != NULL)
		pdata->init(da9052);

	da9052->chip_id = chip_id;

	if (!pdata || !pdata->irq_base)
		da9052->irq_base = -1;
	else
		da9052->irq_base = pdata->irq_base;

	ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
				  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				  da9052->irq_base, &da9052_regmap_irq_chip,
				  NULL);
	if (ret < 0)
		goto regmap_err;

	/*
	 * NOTE(review): digging the irq_chip_data out of the irq_desc
	 * action looks fragile; presumably regmap_add_irq_chip() should
	 * hand back the data directly — verify against the regmap API of
	 * this kernel version.
	 */
	desc = irq_to_desc(da9052->chip_irq);
	da9052->irq_base = regmap_irq_chip_get_base(desc->action->dev_id);

	ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
			      ARRAY_SIZE(da9052_subdev_info), NULL, 0);
	if (ret)
		goto err;

	return 0;

err:
	mfd_remove_devices(da9052->dev);
regmap_err:
	return ret;
}

/* Tear-down counterpart of da9052_device_init(). */
void da9052_device_exit(struct da9052 *da9052)
{
	regmap_del_irq_chip(da9052->chip_irq,
			    irq_get_irq_data(da9052->irq_base)->chip_data);
	mfd_remove_devices(da9052->dev);
}

MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
MODULE_DESCRIPTION("DA9052 MFD Core");
MODULE_LICENSE("GPL");
gpl-2.0
find7a/android_kernel_oppo_msm8974
arch/s390/kernel/module.c
5102
12744
/*
 * arch/s390/kernel/module.c - Kernel module help for s390.
 *
 * S390 version
 *   Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *   Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  based on i386 version
 *    Copyright (C) 2001 Rusty Russell.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif

/* Size of one generated PLT stub, see apply_rela(). */
#ifndef CONFIG_64BIT
#define PLT_ENTRY_SIZE 12
#else /* CONFIG_64BIT */
#define PLT_ENTRY_SIZE 20
#endif /* CONFIG_64BIT */

/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
	if (mod) {
		vfree(mod->arch.syminfo);
		mod->arch.syminfo = NULL;
	}
	vfree(module_region);
}

/*
 * First pass over one relocation: reserve GOT and/or PLT space for the
 * referenced symbol so module_frob_arch_sections() can size both tables.
 */
static void check_rela(Elf_Rela *rela, struct module *me)
{
	struct mod_arch_syminfo *info;

	info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
	switch (ELF_R_TYPE (rela->r_info)) {
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.	*/
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.	*/
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		if (info->got_offset == -1UL) {
			info->got_offset = me->arch.got_size;
			me->arch.got_size += sizeof(void*);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
		if (info->plt_offset == -1UL) {
			info->plt_offset = me->arch.plt_size;
			me->arch.plt_size += PLT_ENTRY_SIZE;
		}
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:
	case R_390_JMP_SLOT:
	case R_390_RELATIVE:
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	}
}

/*
 * Account for GOT and PLT relocations. We can't add sections for
 * got and plt but we can increase the core module size.
 */
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *me)
{
	Elf_Shdr *symtab;
	Elf_Sym *symbols;
	Elf_Rela *rela;
	char *strings;
	int nrela, i, j;

	/* Find symbol table and string table. */
	symtab = NULL;
	for (i = 0; i < hdr->e_shnum; i++)
		switch (sechdrs[i].sh_type) {
		case SHT_SYMTAB:
			symtab = sechdrs + i;
			break;
		}
	if (!symtab) {
		printk(KERN_ERR "module %s: no symbol table\n", me->name);
		return -ENOEXEC;
	}

	/* Allocate one syminfo structure per symbol. */
	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
	me->arch.syminfo = vmalloc(me->arch.nsyms *
				   sizeof(struct mod_arch_syminfo));
	if (!me->arch.syminfo)
		return -ENOMEM;
	symbols = (void *) hdr + symtab->sh_offset;
	strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
	for (i = 0; i < me->arch.nsyms; i++) {
		if (symbols[i].st_shndx == SHN_UNDEF &&
		    strcmp(strings + symbols[i].st_name,
			   "_GLOBAL_OFFSET_TABLE_") == 0)
			/* "Define" it as absolute. */
			symbols[i].st_shndx = SHN_ABS;
		me->arch.syminfo[i].got_offset = -1UL;
		me->arch.syminfo[i].plt_offset = -1UL;
		me->arch.syminfo[i].got_initialized = 0;
		me->arch.syminfo[i].plt_initialized = 0;
	}

	/* Search for got/plt relocations. */
	me->arch.got_size = me->arch.plt_size = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_RELA)
			continue;
		nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
		rela = (void *) hdr + sechdrs[i].sh_offset;
		for (j = 0; j < nrela; j++)
			check_rela(rela + j, me);
	}

	/* Increase core size by size of got & plt and set start
	   offsets for got and plt. */
	me->core_size = ALIGN(me->core_size, 4);
	me->arch.got_offset = me->core_size;
	me->core_size += me->arch.got_size;
	me->arch.plt_offset = me->core_size;
	me->core_size += me->arch.plt_size;
	return 0;
}

/*
 * Apply a single RELA relocation to the module image, filling in GOT
 * entries and emitting PLT stubs on first use.  (Truncated at the end
 * of this chunk.)
 */
static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
		      struct module *me)
{
	struct mod_arch_syminfo *info;
	Elf_Addr loc, val;
	int r_type, r_sym;

	/* This is where to make the change */
	loc = base + rela->r_offset;
	/* This is the symbol it is referring to.  Note that all
	   undefined symbols have been resolved. */
	r_sym = ELF_R_SYM(rela->r_info);
	r_type = ELF_R_TYPE(rela->r_info);
	info = me->arch.syminfo + r_sym;
	val = symtab[r_sym].st_value;

	switch (r_type) {
	case R_390_8:		/* Direct 8 bit.   */
	case R_390_12:		/* Direct 12 bit.  */
	case R_390_16:		/* Direct 16 bit.  */
	case R_390_20:		/* Direct 20 bit.  */
	case R_390_32:		/* Direct 32 bit.  */
	case R_390_64:		/* Direct 64 bit.  */
		val += rela->r_addend;
		if (r_type == R_390_8)
			*(unsigned char *) loc = val;
		else if (r_type == R_390_12)
			*(unsigned short *) loc = (val & 0xfff) |
				(*(unsigned short *) loc & 0xf000);
		else if (r_type == R_390_16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_20)
			*(unsigned int *) loc =
				(*(unsigned int *) loc & 0xf00000ff) |
				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
		else if (r_type == R_390_32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_64)
			*(unsigned long *) loc = val;
		break;
	case R_390_PC16:	/* PC relative 16 bit.  */
	case R_390_PC16DBL:	/* PC relative 16 bit shifted by 1.  */
	case R_390_PC32DBL:	/* PC relative 32 bit shifted by 1.  */
	case R_390_PC32:	/* PC relative 32 bit.  */
	case R_390_PC64:	/* PC relative 64 bit.	*/
		val += rela->r_addend - loc;
		if (r_type == R_390_PC16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_PC16DBL)
			*(unsigned short *) loc = val >> 1;
		else if (r_type == R_390_PC32DBL)
			*(unsigned int *) loc = val >> 1;
		else if (r_type == R_390_PC32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_PC64)
			*(unsigned long *) loc = val;
		break;
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.	*/
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.	*/
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		if (info->got_initialized == 0) {
			Elf_Addr *gotent;

			gotent = me->module_core + me->arch.got_offset +
				info->got_offset;
			*gotent = val;
			info->got_initialized = 1;
		}
		val = info->got_offset + rela->r_addend;
		if (r_type == R_390_GOT12 ||
		    r_type == R_390_GOTPLT12)
			*(unsigned short *) loc = (val & 0xfff) |
				(*(unsigned short *) loc & 0xf000);
		else if (r_type == R_390_GOT16 ||
			 r_type == R_390_GOTPLT16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_GOT20 ||
			 r_type == R_390_GOTPLT20)
			*(unsigned int *) loc =
				(*(unsigned int *) loc & 0xf00000ff) |
				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
		else if (r_type == R_390_GOT32 ||
			 r_type == R_390_GOTPLT32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_GOTENT ||
			 r_type == R_390_GOTPLTENT)
			*(unsigned int *) loc =
				(val + (Elf_Addr) me->module_core - loc) >> 1;
		else if (r_type == R_390_GOT64 ||
			 r_type == R_390_GOTPLT64)
			*(unsigned long *) loc = val;
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
		if (info->plt_initialized == 0) {
			unsigned int *ip;
			ip = me->module_core + me->arch.plt_offset +
				info->plt_offset;
#ifndef CONFIG_64BIT
			ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
			ip[1] = 0x100607f1;
			ip[2] = val;
#else /* CONFIG_64BIT */
			ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
			ip[1] = 0x100a0004;
			ip[2] = 0x07f10000;
			ip[3] = (unsigned int) (val >> 32);
			ip[4] = (unsigned int) val;
#endif /* CONFIG_64BIT */
			info->plt_initialized = 1;
		}
		if (r_type == R_390_PLTOFF16 ||
		    r_type == R_390_PLTOFF32 ||
		    r_type == R_390_PLTOFF64)
			val = me->arch.plt_offset - me->arch.got_offset +
				info->plt_offset + rela->r_addend;
		else {
			if (!((r_type == R_390_PLT16DBL &&
			       val - loc + 0xffffUL < 0x1ffffeUL) ||
			      (r_type == R_390_PLT32DBL &&
			       val - loc + 0xffffffffULL < 0x1fffffffeULL)))
				val = (Elf_Addr) me->module_core +
					me->arch.plt_offset +
					info->plt_offset;
			val += rela->r_addend - loc;
		}
		if (r_type == R_390_PLT16DBL)
			*(unsigned short *) loc = val >> 1;
		else if (r_type == R_390_PLTOFF16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_PLT32DBL)
			*(unsigned int *) loc = val >> 1;
		else if (r_type == R_390_PLT32 ||
			 r_type == R_390_PLTOFF32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_PLT64 ||
			 r_type == R_390_PLTOFF64)
			*(unsigned long *) loc = val;
		break;
	case R_390_GOTOFF16:	/* 16 bit offset to GOT.  */
	case R_390_GOTOFF32:	/* 32 bit offset to GOT.  */
	case R_390_GOTOFF64:	/* 64 bit offset to GOT.  */
		val = val + rela->r_addend -
			((Elf_Addr) me->module_core + me->arch.got_offset);
		if (r_type == R_390_GOTOFF16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_GOTOFF32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_GOTOFF64)
			*(unsigned long *) loc = val;
		break;
	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1.
*/ val = (Elf_Addr) me->module_core + me->arch.got_offset + rela->r_addend - loc; if (r_type == R_390_GOTPC) *(unsigned int *) loc = val; else if (r_type == R_390_GOTPCDBL) *(unsigned int *) loc = val >> 1; break; case R_390_COPY: case R_390_GLOB_DAT: /* Create GOT entry. */ case R_390_JMP_SLOT: /* Create PLT entry. */ case R_390_RELATIVE: /* Adjust by program base. */ /* Only needed if we want to support loading of modules linked with -shared. */ break; default: printk(KERN_ERR "module %s: Unknown relocation: %u\n", me->name, r_type); return -ENOEXEC; } return 0; } int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { Elf_Addr base; Elf_Sym *symtab; Elf_Rela *rela; unsigned long i, n; int rc; DEBUGP("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); base = sechdrs[sechdrs[relsec].sh_info].sh_addr; symtab = (Elf_Sym *) sechdrs[symindex].sh_addr; rela = (Elf_Rela *) sechdrs[relsec].sh_addr; n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); for (i = 0; i < n; i++, rela++) { rc = apply_rela(rela, base, symtab, me); if (rc) return rc; } return 0; } int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { vfree(me->arch.syminfo); me->arch.syminfo = NULL; return 0; }
gpl-2.0
ali-filth/android_kernel_samsung_msm8226
drivers/hid/hid-holtekff.c
5102
6427
/* * Force feedback support for Holtek On Line Grip based gamepads * * These include at least a Brazilian "Clone Joypad Super Power Fire" * which uses vendor ID 0x1241 and identifies as "HOLTEK On Line Grip". * * Copyright (c) 2011 Anssi Hannula <anssi.hannula@iki.fi> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/hid.h> #include <linux/input.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include "hid-ids.h" #ifdef CONFIG_HOLTEK_FF #include "usbhid/usbhid.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>"); MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices"); /* * These commands and parameters are currently known: * * byte 0: command id: * 01 set effect parameters * 02 play specified effect * 03 stop specified effect * 04 stop all effects * 06 stop all effects * (the difference between 04 and 06 isn't known; win driver * sends 06,04 on application init, and 06 otherwise) * * Commands 01 and 02 need to be sent as pairs, i.e. you need to send 01 * before each 02. * * The rest of the bytes are parameters. Command 01 takes all of them, and * commands 02,03 take only the effect id. 
* * byte 1: * bits 0-3: effect id: * 1: very strong rumble * 2: periodic rumble, short intervals * 3: very strong rumble * 4: periodic rumble, long intervals * 5: weak periodic rumble, long intervals * 6: weak periodic rumble, short intervals * 7: periodic rumble, short intervals * 8: strong periodic rumble, short intervals * 9: very strong rumble * a: causes an error * b: very strong periodic rumble, very short intervals * c-f: nothing * bit 6: right (weak) motor enabled * bit 7: left (strong) motor enabled * * bytes 2-3: time in milliseconds, big-endian * bytes 5-6: unknown (win driver seems to use at least 10e0 with effect 1 * and 0014 with effect 6) * byte 7: * bits 0-3: effect magnitude */ #define HOLTEKFF_MSG_LENGTH 7 static const u8 start_effect_1[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 stop_all4[] = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 stop_all6[] = { 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; struct holtekff_device { struct hid_field *field; }; static void holtekff_send(struct holtekff_device *holtekff, struct hid_device *hid, const u8 data[HOLTEKFF_MSG_LENGTH]) { int i; for (i = 0; i < HOLTEKFF_MSG_LENGTH; i++) { holtekff->field->value[i] = data[i]; } dbg_hid("sending %02x %02x %02x %02x %02x %02x %02x\n", data[0], data[1], data[2], data[3], data[4], data[5], data[6]); usbhid_submit_report(hid, holtekff->field->report, USB_DIR_OUT); } static int holtekff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct holtekff_device *holtekff = data; int left, right; /* effect type 1, length 65535 msec */ u8 buf[HOLTEKFF_MSG_LENGTH] = { 0x01, 0x01, 0xff, 0xff, 0x10, 0xe0, 0x00 }; left = effect->u.rumble.strong_magnitude; right = effect->u.rumble.weak_magnitude; dbg_hid("called with 0x%04x 0x%04x\n", left, right); if (!left && !right) { holtekff_send(holtekff, hid, stop_all6); return 0; } if (left) buf[1] |= 0x80; if (right) buf[1] |= 0x40; /* 
The device takes a single magnitude, so we just sum them up. */ buf[6] = min(0xf, (left >> 12) + (right >> 12)); holtekff_send(holtekff, hid, buf); holtekff_send(holtekff, hid, start_effect_1); return 0; } static int holtekff_init(struct hid_device *hid) { struct holtekff_device *holtekff; struct hid_report *report; struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct input_dev *dev = hidinput->input; int error; if (list_empty(report_list)) { hid_err(hid, "no output report found\n"); return -ENODEV; } report = list_entry(report_list->next, struct hid_report, list); if (report->maxfield < 1 || report->field[0]->report_count != 7) { hid_err(hid, "unexpected output report layout\n"); return -ENODEV; } holtekff = kzalloc(sizeof(*holtekff), GFP_KERNEL); if (!holtekff) return -ENOMEM; set_bit(FF_RUMBLE, dev->ffbit); holtekff->field = report->field[0]; /* initialize the same way as win driver does */ holtekff_send(holtekff, hid, stop_all4); holtekff_send(holtekff, hid, stop_all6); error = input_ff_create_memless(dev, holtekff, holtekff_play); if (error) { kfree(holtekff); return error; } hid_info(hid, "Force feedback for Holtek On Line Grip based devices by Anssi Hannula <anssi.hannula@iki.fi>\n"); return 0; } #else static inline int holtekff_init(struct hid_device *hid) { return 0; } #endif static int holtek_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed\n"); goto err; } holtekff_init(hdev); return 0; err: return ret; } static const struct hid_device_id holtek_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, { } }; MODULE_DEVICE_TABLE(hid, holtek_devices); static struct hid_driver holtek_driver = { .name 
= "holtek", .id_table = holtek_devices, .probe = holtek_probe, }; static int __init holtek_init(void) { return hid_register_driver(&holtek_driver); } static void __exit holtek_exit(void) { hid_unregister_driver(&holtek_driver); } module_init(holtek_init); module_exit(holtek_exit);
gpl-2.0
Trinityhaxxor/Trinity_Kernel_msm8660_XperiaS
arch/arm/mach-s3c24xx/dma-s3c2440.c
5102
4876
/* linux/arch/arm/mach-s3c2440/dma.c * * Copyright (c) 2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2440 DMA selection * * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/serial_core.h> #include <mach/map.h> #include <mach/dma.h> #include <plat/dma-s3c24xx.h> #include <plat/cpu.h> #include <plat/regs-serial.h> #include <mach/regs-gpio.h> #include <plat/regs-ac97.h> #include <plat/regs-dma.h> #include <mach/regs-mem.h> #include <mach/regs-lcd.h> #include <mach/regs-sdi.h> #include <plat/regs-iis.h> #include <plat/regs-spi.h> static struct s3c24xx_dma_map __initdata s3c2440_dma_mappings[] = { [DMACH_XD0] = { .name = "xdreq0", .channels[0] = S3C2410_DCON_CH0_XDREQ0 | DMA_CH_VALID, }, [DMACH_XD1] = { .name = "xdreq1", .channels[1] = S3C2410_DCON_CH1_XDREQ1 | DMA_CH_VALID, }, [DMACH_SDI] = { .name = "sdi", .channels[0] = S3C2410_DCON_CH0_SDI | DMA_CH_VALID, .channels[1] = S3C2440_DCON_CH1_SDI | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_SDI | DMA_CH_VALID, .channels[3] = S3C2410_DCON_CH3_SDI | DMA_CH_VALID, }, [DMACH_SPI0] = { .name = "spi0", .channels[1] = S3C2410_DCON_CH1_SPI | DMA_CH_VALID, }, [DMACH_SPI1] = { .name = "spi1", .channels[3] = S3C2410_DCON_CH3_SPI | DMA_CH_VALID, }, [DMACH_UART0] = { .name = "uart0", .channels[0] = S3C2410_DCON_CH0_UART0 | DMA_CH_VALID, }, [DMACH_UART1] = { .name = "uart1", .channels[1] = S3C2410_DCON_CH1_UART1 | DMA_CH_VALID, }, [DMACH_UART2] = { .name = "uart2", .channels[3] = S3C2410_DCON_CH3_UART2 | DMA_CH_VALID, }, [DMACH_TIMER] = { .name = "timer", .channels[0] = S3C2410_DCON_CH0_TIMER | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_TIMER | DMA_CH_VALID, .channels[3] = S3C2410_DCON_CH3_TIMER | DMA_CH_VALID, }, [DMACH_I2S_IN] = { .name = "i2s-sdi", 
.channels[1] = S3C2410_DCON_CH1_I2SSDI | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_I2SSDI | DMA_CH_VALID, }, [DMACH_I2S_OUT] = { .name = "i2s-sdo", .channels[0] = S3C2440_DCON_CH0_I2SSDO | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_I2SSDO | DMA_CH_VALID, }, [DMACH_PCM_IN] = { .name = "pcm-in", .channels[0] = S3C2440_DCON_CH0_PCMIN | DMA_CH_VALID, .channels[2] = S3C2440_DCON_CH2_PCMIN | DMA_CH_VALID, }, [DMACH_PCM_OUT] = { .name = "pcm-out", .channels[1] = S3C2440_DCON_CH1_PCMOUT | DMA_CH_VALID, .channels[3] = S3C2440_DCON_CH3_PCMOUT | DMA_CH_VALID, }, [DMACH_MIC_IN] = { .name = "mic-in", .channels[2] = S3C2440_DCON_CH2_MICIN | DMA_CH_VALID, .channels[3] = S3C2440_DCON_CH3_MICIN | DMA_CH_VALID, }, [DMACH_USB_EP1] = { .name = "usb-ep1", .channels[0] = S3C2410_DCON_CH0_USBEP1 | DMA_CH_VALID, }, [DMACH_USB_EP2] = { .name = "usb-ep2", .channels[1] = S3C2410_DCON_CH1_USBEP2 | DMA_CH_VALID, }, [DMACH_USB_EP3] = { .name = "usb-ep3", .channels[2] = S3C2410_DCON_CH2_USBEP3 | DMA_CH_VALID, }, [DMACH_USB_EP4] = { .name = "usb-ep4", .channels[3] = S3C2410_DCON_CH3_USBEP4 | DMA_CH_VALID, }, }; static void s3c2440_dma_select(struct s3c2410_dma_chan *chan, struct s3c24xx_dma_map *map) { chan->dcon = map->channels[chan->number] & ~DMA_CH_VALID; } static struct s3c24xx_dma_selection __initdata s3c2440_dma_sel = { .select = s3c2440_dma_select, .dcon_mask = 7 << 24, .map = s3c2440_dma_mappings, .map_size = ARRAY_SIZE(s3c2440_dma_mappings), }; static struct s3c24xx_dma_order __initdata s3c2440_dma_order = { .channels = { [DMACH_SDI] = { .list = { [0] = 3 | DMA_CH_VALID, [1] = 2 | DMA_CH_VALID, [2] = 1 | DMA_CH_VALID, [3] = 0 | DMA_CH_VALID, }, }, [DMACH_I2S_IN] = { .list = { [0] = 1 | DMA_CH_VALID, [1] = 2 | DMA_CH_VALID, }, }, [DMACH_I2S_OUT] = { .list = { [0] = 2 | DMA_CH_VALID, [1] = 1 | DMA_CH_VALID, }, }, [DMACH_PCM_IN] = { .list = { [0] = 2 | DMA_CH_VALID, [1] = 1 | DMA_CH_VALID, }, }, [DMACH_PCM_OUT] = { .list = { [0] = 1 | DMA_CH_VALID, [1] = 3 | DMA_CH_VALID, }, }, 
[DMACH_MIC_IN] = { .list = { [0] = 3 | DMA_CH_VALID, [1] = 2 | DMA_CH_VALID, }, }, }, }; static int __init s3c2440_dma_add(struct device *dev, struct subsys_interface *sif) { s3c2410_dma_init(); s3c24xx_dma_order_set(&s3c2440_dma_order); return s3c24xx_dma_init_map(&s3c2440_dma_sel); } static struct subsys_interface s3c2440_dma_interface = { .name = "s3c2440_dma", .subsys = &s3c2440_subsys, .add_dev = s3c2440_dma_add, }; static int __init s3c2440_dma_init(void) { return subsys_interface_register(&s3c2440_dma_interface); } arch_initcall(s3c2440_dma_init);
gpl-2.0
yajnab/linux-sunxi
drivers/scsi/libsas/sas_dump.c
8174
2318
/* * Serial Attached SCSI (SAS) Dump/Debugging routines * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include "sas_dump.h" static const char *sas_hae_str[] = { [0] = "HAE_RESET", }; static const char *sas_porte_str[] = { [0] = "PORTE_BYTES_DMAED", [1] = "PORTE_BROADCAST_RCVD", [2] = "PORTE_LINK_RESET_ERR", [3] = "PORTE_TIMER_EVENT", [4] = "PORTE_HARD_RESET", }; static const char *sas_phye_str[] = { [0] = "PHYE_LOSS_OF_SIGNAL", [1] = "PHYE_OOB_DONE", [2] = "PHYE_OOB_ERROR", [3] = "PHYE_SPINUP_HOLD", }; void sas_dprint_porte(int phyid, enum port_event pe) { SAS_DPRINTK("phy%d: port event: %s\n", phyid, sas_porte_str[pe]); } void sas_dprint_phye(int phyid, enum phy_event pe) { SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]); } void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he) { SAS_DPRINTK("ha %s: %s event\n", dev_name(sas_ha->dev), sas_hae_str[he]); } void sas_dump_port(struct asd_sas_port *port) { SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class); SAS_DPRINTK("port%d: sas_addr:%llx\n", port->id, SAS_ADDR(port->sas_addr)); SAS_DPRINTK("port%d: attached_sas_addr:%llx\n", port->id, SAS_ADDR(port->attached_sas_addr)); 
SAS_DPRINTK("port%d: iproto:0x%x\n", port->id, port->iproto); SAS_DPRINTK("port%d: tproto:0x%x\n", port->id, port->tproto); SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode); SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys); }
gpl-2.0
bashrc/linux-sunxi
drivers/mfd/wm8350-regmap.c
9198
126729
/* * wm8350-regmap.c -- Wolfson Microelectronics WM8350 register map * * This file splits out the tables describing the defaults and access * status of the WM8350 registers since they are rather large. * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/mfd/wm8350/core.h> #ifdef CONFIG_MFD_WM8350_CONFIG_MODE_0 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8350_mode0_defaults[] = { 0x17FF, /* R0 - Reset/ID */ 0x1000, /* R1 - ID */ 0x0000, /* R2 */ 0x1002, /* R3 - System Control 1 */ 0x0004, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 - Power Up Interrupt Status */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 - Power Up Interrupt Status Mask */ 
0x0000, /* R36 - Under Voltage Interrupt status Mask */ 0x0000, /* R37 - Over Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3B00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer Volume */ 0x0000, /* 
R101 */ 0x0000, /* R102 */ 0x0000, /* R103 */ 0x00E4, /* R104 - LOUT1 Volume */ 0x00E4, /* R105 - ROUT1 Volume */ 0x00E4, /* R106 - LOUT2 Volume */ 0x02E4, /* R107 - ROUT2 Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 - AIF Test */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0000, /* R129 - GPIO Pin pull up Control */ 0x03FC, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x0FFC, /* R134 - GPIO Configuration (i/o) */ 0x0FFC, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x0013, /* R140 - GPIO Function Select 1 */ 0x0000, /* R141 - GPIO Function Select 2 */ 0x0000, /* R142 - GPIO Function Select 3 */ 0x0003, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic comparator 4 */ 
0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* R169 - Battery Charger Control 2 */ 0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 - Current Sink Driver B */ 0x0000, /* R175 - CSB Flash control */ 0x0000, /* R176 - DCDC/LDO requested */ 0x002D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x000E, /* R180 - DCDC1 Control */ 0x0000, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x0000, /* R186 - DCDC3 Control */ 0x0000, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x0000, /* R189 - DCDC4 Control */ 0x0000, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 - DCDC5 Control */ 0x0000, /* R193 - DCDC5 Timeouts */ 0x0000, /* R194 */ 0x0000, /* R195 - DCDC6 Control */ 0x0000, /* R196 - DCDC6 Timeouts */ 0x0006, /* R197 - DCDC6 Low Power */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x001C, /* R200 - LDO1 Control */ 0x0000, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x001B, /* R203 - LDO2 Control */ 0x0000, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x001B, /* R206 - LDO3 Control */ 0x0000, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x001B, /* R209 - LDO4 Control */ 0x0000, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 */ 0x4000, /* R220 - RAM BIST 1 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 */ 0x0000, /* R227 */ 0x0000, 
/* R228 */ 0x0000, /* R229 */ 0xE000, /* R230 - GPIO Pin Status */ 0x0000, /* R231 */ 0x0000, /* R232 */ 0x0000, /* R233 */ 0x0000, /* R234 */ 0x0000, /* R235 */ 0x0000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0000, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0000, /* R243 */ 0x0000, /* R244 */ 0x0000, /* R245 */ 0x0000, /* R246 */ 0x0000, /* R247 */ 0x0000, /* R248 */ 0x0000, /* R249 */ 0x0000, /* R250 */ 0x0000, /* R251 */ 0x0000, /* R252 */ 0x0000, /* R253 */ 0x0000, /* R254 */ 0x0000, /* R255 */ }; #endif #ifdef CONFIG_MFD_WM8350_CONFIG_MODE_1 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8350_mode1_defaults[] = { 0x17FF, /* R0 - Reset/ID */ 0x1000, /* R1 - ID */ 0x0000, /* R2 */ 0x1002, /* R3 - System Control 1 */ 0x0014, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 - Power Up Interrupt Status */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 - Power Up Interrupt Status 
Mask */ 0x0000, /* R36 - Under Voltage Interrupt status Mask */ 0x0000, /* R37 - Over Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3B00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer Volume */ 
0x0000, /* R101 */ 0x0000, /* R102 */ 0x0000, /* R103 */ 0x00E4, /* R104 - LOUT1 Volume */ 0x00E4, /* R105 - ROUT1 Volume */ 0x00E4, /* R106 - LOUT2 Volume */ 0x02E4, /* R107 - ROUT2 Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 - AIF Test */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0000, /* R129 - GPIO Pin pull up Control */ 0x03FC, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x00FB, /* R134 - GPIO Configuration (i/o) */ 0x04FE, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x0312, /* R140 - GPIO Function Select 1 */ 0x1003, /* R141 - GPIO Function Select 2 */ 0x1331, /* R142 - GPIO Function Select 3 */ 0x0003, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic 
comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* R169 - Battery Charger Control 2 */ 0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 - Current Sink Driver B */ 0x0000, /* R175 - CSB Flash control */ 0x0000, /* R176 - DCDC/LDO requested */ 0x002D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x0062, /* R180 - DCDC1 Control */ 0x0400, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x0026, /* R186 - DCDC3 Control */ 0x0400, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x0062, /* R189 - DCDC4 Control */ 0x0400, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 - DCDC5 Control */ 0x0000, /* R193 - DCDC5 Timeouts */ 0x0000, /* R194 */ 0x0026, /* R195 - DCDC6 Control */ 0x0800, /* R196 - DCDC6 Timeouts */ 0x0006, /* R197 - DCDC6 Low Power */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x0006, /* R200 - LDO1 Control */ 0x0400, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x0006, /* R203 - LDO2 Control */ 0x0400, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x001B, /* R206 - LDO3 Control */ 0x0000, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x001B, /* R209 - LDO4 Control */ 0x0000, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 */ 0x4000, /* R220 - RAM BIST 1 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 */ 0x0000, /* 
R227 */ 0x0000, /* R228 */ 0x0000, /* R229 */ 0xE000, /* R230 - GPIO Pin Status */ 0x0000, /* R231 */ 0x0000, /* R232 */ 0x0000, /* R233 */ 0x0000, /* R234 */ 0x0000, /* R235 */ 0x0000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0000, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0000, /* R243 */ 0x0000, /* R244 */ 0x0000, /* R245 */ 0x0000, /* R246 */ 0x0000, /* R247 */ 0x0000, /* R248 */ 0x0000, /* R249 */ 0x0000, /* R250 */ 0x0000, /* R251 */ 0x0000, /* R252 */ 0x0000, /* R253 */ 0x0000, /* R254 */ 0x0000, /* R255 */ }; #endif #ifdef CONFIG_MFD_WM8350_CONFIG_MODE_2 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8350_mode2_defaults[] = { 0x17FF, /* R0 - Reset/ID */ 0x1000, /* R1 - ID */ 0x0000, /* R2 */ 0x1002, /* R3 - System Control 1 */ 0x0014, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 - Power Up Interrupt Status */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 - Power Up 
Interrupt Status Mask */ 0x0000, /* R36 - Under Voltage Interrupt status Mask */ 0x0000, /* R37 - Over Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3B00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer 
Volume */ 0x0000, /* R101 */ 0x0000, /* R102 */ 0x0000, /* R103 */ 0x00E4, /* R104 - LOUT1 Volume */ 0x00E4, /* R105 - ROUT1 Volume */ 0x00E4, /* R106 - LOUT2 Volume */ 0x02E4, /* R107 - ROUT2 Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 - AIF Test */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0000, /* R129 - GPIO Pin pull up Control */ 0x03FC, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x08FB, /* R134 - GPIO Configuration (i/o) */ 0x0CFE, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x0312, /* R140 - GPIO Function Select 1 */ 0x0003, /* R141 - GPIO Function Select 2 */ 0x2331, /* R142 - GPIO Function Select 3 */ 0x0003, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic 
comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* R169 - Battery Charger Control 2 */ 0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 - Current Sink Driver B */ 0x0000, /* R175 - CSB Flash control */ 0x0000, /* R176 - DCDC/LDO requested */ 0x002D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x000E, /* R180 - DCDC1 Control */ 0x0400, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x002E, /* R186 - DCDC3 Control */ 0x0800, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x000E, /* R189 - DCDC4 Control */ 0x0800, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 - DCDC5 Control */ 0x0000, /* R193 - DCDC5 Timeouts */ 0x0000, /* R194 */ 0x0026, /* R195 - DCDC6 Control */ 0x0C00, /* R196 - DCDC6 Timeouts */ 0x0006, /* R197 - DCDC6 Low Power */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x001A, /* R200 - LDO1 Control */ 0x0800, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x0010, /* R203 - LDO2 Control */ 0x0800, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x000A, /* R206 - LDO3 Control */ 0x0C00, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x001A, /* R209 - LDO4 Control */ 0x0800, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 */ 0x4000, /* R220 - RAM BIST 1 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 */ 0x0000, /* 
R227 */ 0x0000, /* R228 */ 0x0000, /* R229 */ 0xE000, /* R230 - GPIO Pin Status */ 0x0000, /* R231 */ 0x0000, /* R232 */ 0x0000, /* R233 */ 0x0000, /* R234 */ 0x0000, /* R235 */ 0x0000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0000, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0000, /* R243 */ 0x0000, /* R244 */ 0x0000, /* R245 */ 0x0000, /* R246 */ 0x0000, /* R247 */ 0x0000, /* R248 */ 0x0000, /* R249 */ 0x0000, /* R250 */ 0x0000, /* R251 */ 0x0000, /* R252 */ 0x0000, /* R253 */ 0x0000, /* R254 */ 0x0000, /* R255 */ }; #endif #ifdef CONFIG_MFD_WM8350_CONFIG_MODE_3 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8350_mode3_defaults[] = { 0x17FF, /* R0 - Reset/ID */ 0x1000, /* R1 - ID */ 0x0000, /* R2 */ 0x1000, /* R3 - System Control 1 */ 0x0004, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 - Power Up Interrupt Status */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 - Power Up 
Interrupt Status Mask */ 0x0000, /* R36 - Under Voltage Interrupt status Mask */ 0x0000, /* R37 - Over Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3B00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer 
Volume */ 0x0000, /* R101 */ 0x0000, /* R102 */ 0x0000, /* R103 */ 0x00E4, /* R104 - LOUT1 Volume */ 0x00E4, /* R105 - ROUT1 Volume */ 0x00E4, /* R106 - LOUT2 Volume */ 0x02E4, /* R107 - ROUT2 Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 - AIF Test */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0000, /* R129 - GPIO Pin pull up Control */ 0x03FC, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x0A7B, /* R134 - GPIO Configuration (i/o) */ 0x06FE, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x1312, /* R140 - GPIO Function Select 1 */ 0x1030, /* R141 - GPIO Function Select 2 */ 0x2231, /* R142 - GPIO Function Select 3 */ 0x0003, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic 
comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* R169 - Battery Charger Control 2 */ 0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 - Current Sink Driver B */ 0x0000, /* R175 - CSB Flash control */ 0x0000, /* R176 - DCDC/LDO requested */ 0x002D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x000E, /* R180 - DCDC1 Control */ 0x0400, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x000E, /* R186 - DCDC3 Control */ 0x0400, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x0026, /* R189 - DCDC4 Control */ 0x0400, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 - DCDC5 Control */ 0x0000, /* R193 - DCDC5 Timeouts */ 0x0000, /* R194 */ 0x0026, /* R195 - DCDC6 Control */ 0x0400, /* R196 - DCDC6 Timeouts */ 0x0006, /* R197 - DCDC6 Low Power */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x001C, /* R200 - LDO1 Control */ 0x0000, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x001C, /* R203 - LDO2 Control */ 0x0400, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x001C, /* R206 - LDO3 Control */ 0x0400, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x001F, /* R209 - LDO4 Control */ 0x0400, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 */ 0x4000, /* R220 - RAM BIST 1 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 */ 0x0000, /* 
R227 */ 0x0000, /* R228 */ 0x0000, /* R229 */ 0xE000, /* R230 - GPIO Pin Status */ 0x0000, /* R231 */ 0x0000, /* R232 */ 0x0000, /* R233 */ 0x0000, /* R234 */ 0x0000, /* R235 */ 0x0000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0000, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0000, /* R243 */ 0x0000, /* R244 */ 0x0000, /* R245 */ 0x0000, /* R246 */ 0x0000, /* R247 */ 0x0000, /* R248 */ 0x0000, /* R249 */ 0x0000, /* R250 */ 0x0000, /* R251 */ 0x0000, /* R252 */ 0x0000, /* R253 */ 0x0000, /* R254 */ 0x0000, /* R255 */ }; #endif #ifdef CONFIG_MFD_WM8351_CONFIG_MODE_0 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8351_mode0_defaults[] = { 0x6143, /* R0 - Reset/ID */ 0x0000, /* R1 - ID */ 0x0001, /* R2 - Revision */ 0x1C02, /* R3 - System Control 1 */ 0x0004, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 */ 0x0000, /* R36 - Under 
Voltage Interrupt status Mask */ 0x0000, /* R37 - Over Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3A00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer Volume */ 0x0000, /* R101 */ 0x0000, /* R102 */ 
0x0000, /* R103 */ 0x00E4, /* R104 - OUT1L Volume */ 0x00E4, /* R105 - OUT1R Volume */ 0x00E4, /* R106 - OUT2L Volume */ 0x02E4, /* R107 - OUT2R Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0000, /* R129 - GPIO Pin pull up Control */ 0x0000, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x0FFC, /* R134 - GPIO Configuration (i/o) */ 0x0FFC, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x0013, /* R140 - GPIO Function Select 1 */ 0x0000, /* R141 - GPIO Function Select 2 */ 0x0000, /* R142 - GPIO Function Select 3 */ 0x0003, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 
1 */ 0x0B06, /* R169 - Battery Charger Control 2 */ 0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 */ 0x0000, /* R175 */ 0x0000, /* R176 - DCDC/LDO requested */ 0x032D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x000E, /* R180 - DCDC1 Control */ 0x0000, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x0000, /* R186 - DCDC3 Control */ 0x0000, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x0000, /* R189 - DCDC4 Control */ 0x0000, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 */ 0x0000, /* R193 */ 0x0000, /* R194 */ 0x0000, /* R195 */ 0x0000, /* R196 */ 0x0006, /* R197 */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x001C, /* R200 - LDO1 Control */ 0x0000, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x001B, /* R203 - LDO2 Control */ 0x0000, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x001B, /* R206 - LDO3 Control */ 0x0000, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x001B, /* R209 - LDO4 Control */ 0x0000, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 - Security1 */ 0x4000, /* R220 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 - Signal overrides */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 - Charger Overides/status */ 0x0000, /* R227 - misc overrides */ 0x0000, /* R228 - Supply overrides/status 1 */ 0x0000, /* R229 - Supply overrides/status 2 */ 0xE000, /* R230 - GPIO 
Pin Status */ 0x0000, /* R231 - comparotor overrides */ 0x0000, /* R232 */ 0x0000, /* R233 - State Machine status */ 0x1200, /* R234 - FLL Test 1 */ 0x0000, /* R235 */ 0x8000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0003, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0004, /* R243 */ 0x0300, /* R244 */ 0x0000, /* R245 */ 0x0200, /* R246 */ 0x0000, /* R247 */ 0x1000, /* R248 - DCDC1 Test Controls */ 0x1000, /* R249 */ 0x1000, /* R250 - DCDC3 Test Controls */ 0x1000, /* R251 - DCDC4 Test Controls */ }; #endif #ifdef CONFIG_MFD_WM8351_CONFIG_MODE_1 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8351_mode1_defaults[] = { 0x6143, /* R0 - Reset/ID */ 0x0000, /* R1 - ID */ 0x0001, /* R2 - Revision */ 0x1C02, /* R3 - System Control 1 */ 0x0204, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 */ 0x0000, /* R36 - Under Voltage Interrupt 
status Mask */ 0x0000, /* R37 - Over Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3A00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer Volume */ 0x0000, /* R101 */ 0x0000, /* R102 */ 0x0000, /* R103 
*/ 0x00E4, /* R104 - OUT1L Volume */ 0x00E4, /* R105 - OUT1R Volume */ 0x00E4, /* R106 - OUT2L Volume */ 0x02E4, /* R107 - OUT2R Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0000, /* R129 - GPIO Pin pull up Control */ 0x0000, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x0CFB, /* R134 - GPIO Configuration (i/o) */ 0x0C1F, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x0300, /* R140 - GPIO Function Select 1 */ 0x1110, /* R141 - GPIO Function Select 2 */ 0x0013, /* R142 - GPIO Function Select 3 */ 0x0003, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* 
R169 - Battery Charger Control 2 */ 0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 */ 0x0000, /* R175 */ 0x0000, /* R176 - DCDC/LDO requested */ 0x032D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x000E, /* R180 - DCDC1 Control */ 0x0C00, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x0026, /* R186 - DCDC3 Control */ 0x0400, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x0062, /* R189 - DCDC4 Control */ 0x0800, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 */ 0x0000, /* R193 */ 0x0000, /* R194 */ 0x000A, /* R195 */ 0x1000, /* R196 */ 0x0006, /* R197 */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x0006, /* R200 - LDO1 Control */ 0x0000, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x0010, /* R203 - LDO2 Control */ 0x0C00, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x001F, /* R206 - LDO3 Control */ 0x0800, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x000A, /* R209 - LDO4 Control */ 0x0800, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 - Security1 */ 0x4000, /* R220 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 - Signal overrides */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 - Charger Overides/status */ 0x0000, /* R227 - misc overrides */ 0x0000, /* R228 - Supply overrides/status 1 */ 0x0000, /* R229 - Supply overrides/status 2 */ 0xE000, /* R230 - GPIO Pin Status */ 
0x0000, /* R231 - comparotor overrides */ 0x0000, /* R232 */ 0x0000, /* R233 - State Machine status */ 0x1200, /* R234 - FLL Test 1 */ 0x0000, /* R235 */ 0x8000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0003, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0004, /* R243 */ 0x0300, /* R244 */ 0x0000, /* R245 */ 0x0200, /* R246 */ 0x1000, /* R247 */ 0x1000, /* R248 - DCDC1 Test Controls */ 0x1000, /* R249 */ 0x1000, /* R250 - DCDC3 Test Controls */ 0x1000, /* R251 - DCDC4 Test Controls */ }; #endif #ifdef CONFIG_MFD_WM8351_CONFIG_MODE_2 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8351_mode2_defaults[] = { 0x6143, /* R0 - Reset/ID */ 0x0000, /* R1 - ID */ 0x0001, /* R2 - Revision */ 0x1C02, /* R3 - System Control 1 */ 0x0214, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 */ 0x0000, /* R36 - Under Voltage Interrupt status Mask */ 
0x0000, /* R37 - Over Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3A00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer Volume */ 0x0000, /* R101 */ 0x0000, /* R102 */ 0x0000, /* R103 */ 0x00E4, /* 
R104 - OUT1L Volume */ 0x00E4, /* R105 - OUT1R Volume */ 0x00E4, /* R106 - OUT2L Volume */ 0x02E4, /* R107 - OUT2R Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0000, /* R129 - GPIO Pin pull up Control */ 0x0110, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x09FA, /* R134 - GPIO Configuration (i/o) */ 0x0DF6, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x1310, /* R140 - GPIO Function Select 1 */ 0x0003, /* R141 - GPIO Function Select 2 */ 0x2000, /* R142 - GPIO Function Select 3 */ 0x0000, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* R169 - Battery 
Charger Control 2 */ 0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 */ 0x0000, /* R175 */ 0x0000, /* R176 - DCDC/LDO requested */ 0x032D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x001A, /* R180 - DCDC1 Control */ 0x0800, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x0056, /* R186 - DCDC3 Control */ 0x0400, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x0026, /* R189 - DCDC4 Control */ 0x0C00, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 */ 0x0000, /* R193 */ 0x0000, /* R194 */ 0x0026, /* R195 */ 0x0C00, /* R196 */ 0x0006, /* R197 */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x001C, /* R200 - LDO1 Control */ 0x0400, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x0010, /* R203 - LDO2 Control */ 0x0C00, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x0015, /* R206 - LDO3 Control */ 0x0000, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x001A, /* R209 - LDO4 Control */ 0x0000, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 - Security1 */ 0x4000, /* R220 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 - Signal overrides */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 - Charger Overides/status */ 0x0000, /* R227 - misc overrides */ 0x0000, /* R228 - Supply overrides/status 1 */ 0x0000, /* R229 - Supply overrides/status 2 */ 0xE000, /* R230 - GPIO Pin Status */ 0x0000, /* R231 - 
comparotor overrides */ 0x0000, /* R232 */ 0x0000, /* R233 - State Machine status */ 0x1200, /* R234 - FLL Test 1 */ 0x0000, /* R235 */ 0x8000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0003, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0004, /* R243 */ 0x0300, /* R244 */ 0x0000, /* R245 */ 0x0200, /* R246 */ 0x0000, /* R247 */ 0x1000, /* R248 - DCDC1 Test Controls */ 0x1000, /* R249 */ 0x1000, /* R250 - DCDC3 Test Controls */ 0x1000, /* R251 - DCDC4 Test Controls */ }; #endif #ifdef CONFIG_MFD_WM8351_CONFIG_MODE_3 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8351_mode3_defaults[] = { 0x6143, /* R0 - Reset/ID */ 0x0000, /* R1 - ID */ 0x0001, /* R2 - Revision */ 0x1C02, /* R3 - System Control 1 */ 0x0204, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 */ 0x0000, /* R36 - Under Voltage Interrupt status Mask */ 0x0000, /* R37 - Over 
Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3A00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer Volume */ 0x0000, /* R101 */ 0x0000, /* R102 */ 0x0000, /* R103 */ 0x00E4, /* R104 - OUT1L Volume */ 
0x00E4, /* R105 - OUT1R Volume */ 0x00E4, /* R106 - OUT2L Volume */ 0x02E4, /* R107 - OUT2R Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0010, /* R129 - GPIO Pin pull up Control */ 0x0000, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x0BFB, /* R134 - GPIO Configuration (i/o) */ 0x0FFD, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x0310, /* R140 - GPIO Function Select 1 */ 0x0001, /* R141 - GPIO Function Select 2 */ 0x2300, /* R142 - GPIO Function Select 3 */ 0x0003, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* R169 - Battery Charger Control 2 */ 
0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 */ 0x0000, /* R175 */ 0x0000, /* R176 - DCDC/LDO requested */ 0x032D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x000E, /* R180 - DCDC1 Control */ 0x0400, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x0026, /* R186 - DCDC3 Control */ 0x0800, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x0062, /* R189 - DCDC4 Control */ 0x1400, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 */ 0x0000, /* R193 */ 0x0000, /* R194 */ 0x0026, /* R195 */ 0x0400, /* R196 */ 0x0006, /* R197 */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x0006, /* R200 - LDO1 Control */ 0x0C00, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x0016, /* R203 - LDO2 Control */ 0x0000, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x0019, /* R206 - LDO3 Control */ 0x0000, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x001A, /* R209 - LDO4 Control */ 0x1000, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 - Security1 */ 0x4000, /* R220 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 - Signal overrides */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 - Charger Overides/status */ 0x0000, /* R227 - misc overrides */ 0x0000, /* R228 - Supply overrides/status 1 */ 0x0000, /* R229 - Supply overrides/status 2 */ 0xE000, /* R230 - GPIO Pin Status */ 0x0000, /* R231 - comparotor overrides 
*/ 0x0000, /* R232 */ 0x0000, /* R233 - State Machine status */ 0x1200, /* R234 - FLL Test 1 */ 0x0000, /* R235 */ 0x8000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0003, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0004, /* R243 */ 0x0300, /* R244 */ 0x0000, /* R245 */ 0x0200, /* R246 */ 0x0000, /* R247 */ 0x1000, /* R248 - DCDC1 Test Controls */ 0x1000, /* R249 */ 0x1000, /* R250 - DCDC3 Test Controls */ 0x1000, /* R251 - DCDC4 Test Controls */ }; #endif #ifdef CONFIG_MFD_WM8352_CONFIG_MODE_0 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8352_mode0_defaults[] = { 0x6143, /* R0 - Reset/ID */ 0x0000, /* R1 - ID */ 0x0002, /* R2 - Revision */ 0x1C02, /* R3 - System Control 1 */ 0x0004, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 */ 0x0000, /* R36 - Under Voltage Interrupt status Mask */ 0x0000, /* R37 - Over Current Interrupt 
status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3A00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer Volume */ 0x0000, /* R101 */ 0x0000, /* R102 */ 0x0000, /* R103 */ 0x00E4, /* R104 - OUT1L Volume */ 0x00E4, /* R105 - 
OUT1R Volume */ 0x00E4, /* R106 - OUT2L Volume */ 0x02E4, /* R107 - OUT2R Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0000, /* R129 - GPIO Pin pull up Control */ 0x0000, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x0FFC, /* R134 - GPIO Configuration (i/o) */ 0x0FFC, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x0013, /* R140 - GPIO Function Select 1 */ 0x0000, /* R141 - GPIO Function Select 2 */ 0x0000, /* R142 - GPIO Function Select 3 */ 0x0003, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* R169 - Battery Charger Control 2 */ 0x0000, /* R170 - 
Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 - Current Sink Driver B */ 0x0000, /* R175 - CSB Flash control */ 0x0000, /* R176 - DCDC/LDO requested */ 0x032D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x000E, /* R180 - DCDC1 Control */ 0x0000, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x0000, /* R186 - DCDC3 Control */ 0x0000, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x0000, /* R189 - DCDC4 Control */ 0x0000, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 - DCDC5 Control */ 0x0000, /* R193 - DCDC5 Timeouts */ 0x0000, /* R194 */ 0x0000, /* R195 - DCDC6 Control */ 0x0000, /* R196 - DCDC6 Timeouts */ 0x0006, /* R197 - DCDC6 Low Power */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x001C, /* R200 - LDO1 Control */ 0x0000, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x001B, /* R203 - LDO2 Control */ 0x0000, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x001B, /* R206 - LDO3 Control */ 0x0000, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x001B, /* R209 - LDO4 Control */ 0x0000, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 - Security1 */ 0x4000, /* R220 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 - Signal overrides */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 - Charger Overides/status */ 0x0000, /* R227 - misc overrides */ 0x0000, /* R228 - Supply overrides/status 1 */ 0x0000, /* 
R229 - Supply overrides/status 2 */ 0xE000, /* R230 - GPIO Pin Status */ 0x0000, /* R231 - comparotor overrides */ 0x0000, /* R232 */ 0x0000, /* R233 - State Machine status */ 0x1200, /* R234 */ 0x0000, /* R235 */ 0x8000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0003, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0004, /* R243 */ 0x0300, /* R244 */ 0x0000, /* R245 */ 0x0200, /* R246 */ 0x0000, /* R247 */ 0x1000, /* R248 - DCDC1 Test Controls */ 0x5000, /* R249 */ 0x1000, /* R250 - DCDC3 Test Controls */ 0x1000, /* R251 - DCDC4 Test Controls */ 0x5100, /* R252 */ 0x1000, /* R253 - DCDC6 Test Controls */ }; #endif #ifdef CONFIG_MFD_WM8352_CONFIG_MODE_1 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8352_mode1_defaults[] = { 0x6143, /* R0 - Reset/ID */ 0x0000, /* R1 - ID */ 0x0002, /* R2 - Revision */ 0x1C02, /* R3 - System Control 1 */ 0x0204, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 
0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 */ 0x0000, /* R36 - Under Voltage Interrupt status Mask */ 0x0000, /* R37 - Over Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3A00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer 
Volume R */ 0x0000, /* R100 - Input Mixer Volume */ 0x0000, /* R101 */ 0x0000, /* R102 */ 0x0000, /* R103 */ 0x00E4, /* R104 - OUT1L Volume */ 0x00E4, /* R105 - OUT1R Volume */ 0x00E4, /* R106 - OUT2L Volume */ 0x02E4, /* R107 - OUT2R Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0000, /* R129 - GPIO Pin pull up Control */ 0x0000, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x0BFB, /* R134 - GPIO Configuration (i/o) */ 0x0FFF, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x0300, /* R140 - GPIO Function Select 1 */ 0x0000, /* R141 - GPIO Function Select 2 */ 0x2300, /* R142 - GPIO Function Select 3 */ 0x0003, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 
3 */ 0x0000, /* R167 - Generic comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* R169 - Battery Charger Control 2 */ 0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 - Current Sink Driver B */ 0x0000, /* R175 - CSB Flash control */ 0x0000, /* R176 - DCDC/LDO requested */ 0x032D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x0062, /* R180 - DCDC1 Control */ 0x0400, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x0006, /* R186 - DCDC3 Control */ 0x0800, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x0006, /* R189 - DCDC4 Control */ 0x0C00, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 - DCDC5 Control */ 0x0000, /* R193 - DCDC5 Timeouts */ 0x0000, /* R194 */ 0x0026, /* R195 - DCDC6 Control */ 0x1000, /* R196 - DCDC6 Timeouts */ 0x0006, /* R197 - DCDC6 Low Power */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x0002, /* R200 - LDO1 Control */ 0x0000, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x001A, /* R203 - LDO2 Control */ 0x0000, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x001F, /* R206 - LDO3 Control */ 0x0000, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x001F, /* R209 - LDO4 Control */ 0x0000, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 - Security1 */ 0x4000, /* R220 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 - Signal overrides */ 0x0000, /* R225 - 
DCDC/LDO status */ 0x0000, /* R226 - Charger Overides/status */ 0x0000, /* R227 - misc overrides */ 0x0000, /* R228 - Supply overrides/status 1 */ 0x0000, /* R229 - Supply overrides/status 2 */ 0xE000, /* R230 - GPIO Pin Status */ 0x0000, /* R231 - comparotor overrides */ 0x0000, /* R232 */ 0x0000, /* R233 - State Machine status */ 0x1200, /* R234 */ 0x0000, /* R235 */ 0x8000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0003, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0004, /* R243 */ 0x0300, /* R244 */ 0x0000, /* R245 */ 0x0200, /* R246 */ 0x0000, /* R247 */ 0x1000, /* R248 - DCDC1 Test Controls */ 0x5000, /* R249 */ 0x1000, /* R250 - DCDC3 Test Controls */ 0x1000, /* R251 - DCDC4 Test Controls */ 0x5100, /* R252 */ 0x1000, /* R253 - DCDC6 Test Controls */ }; #endif #ifdef CONFIG_MFD_WM8352_CONFIG_MODE_2 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8352_mode2_defaults[] = { 0x6143, /* R0 - Reset/ID */ 0x0000, /* R1 - ID */ 0x0002, /* R2 - Revision */ 0x1C02, /* R3 - System Control 1 */ 0x0204, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt Status 2 */ 0x0000, /* R27 */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO 
Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 */ 0x0000, /* R36 - Under Voltage Interrupt status Mask */ 0x0000, /* R37 - Over Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3A00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, /* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* 
R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer Volume */ 0x0000, /* R101 */ 0x0000, /* R102 */ 0x0000, /* R103 */ 0x00E4, /* R104 - OUT1L Volume */ 0x00E4, /* R105 - OUT1R Volume */ 0x00E4, /* R106 - OUT2L Volume */ 0x02E4, /* R107 - OUT2R Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0000, /* R129 - GPIO Pin pull up Control */ 0x0110, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x09DA, /* R134 - GPIO Configuration (i/o) */ 0x0DD6, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x1310, /* R140 - GPIO Function Select 1 */ 0x0033, /* R141 - GPIO Function Select 2 */ 0x2000, /* R142 - GPIO Function Select 3 */ 0x0000, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - 
Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* R169 - Battery Charger Control 2 */ 0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 - Current Sink Driver B */ 0x0000, /* R175 - CSB Flash control */ 0x0000, /* R176 - DCDC/LDO requested */ 0x032D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x000E, /* R180 - DCDC1 Control */ 0x0800, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x0056, /* R186 - DCDC3 Control */ 0x1800, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x000E, /* R189 - DCDC4 Control */ 0x1000, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 - DCDC5 Control */ 0x0000, /* R193 - DCDC5 Timeouts */ 0x0000, /* R194 */ 0x0026, /* R195 - DCDC6 Control */ 0x0C00, /* R196 - DCDC6 Timeouts */ 0x0006, /* R197 - DCDC6 Low Power */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x001C, /* R200 - LDO1 Control */ 0x0000, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x0006, /* R203 - LDO2 Control */ 0x0400, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x001C, /* R206 - LDO3 Control */ 0x1400, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x001A, /* R209 - LDO4 Control */ 0x0000, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* 
R219 - Security1 */ 0x4000, /* R220 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 - Signal overrides */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 - Charger Overides/status */ 0x0000, /* R227 - misc overrides */ 0x0000, /* R228 - Supply overrides/status 1 */ 0x0000, /* R229 - Supply overrides/status 2 */ 0xE000, /* R230 - GPIO Pin Status */ 0x0000, /* R231 - comparotor overrides */ 0x0000, /* R232 */ 0x0000, /* R233 - State Machine status */ 0x1200, /* R234 */ 0x0000, /* R235 */ 0x8000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0003, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0004, /* R243 */ 0x0300, /* R244 */ 0x0000, /* R245 */ 0x0200, /* R246 */ 0x0000, /* R247 */ 0x1000, /* R248 - DCDC1 Test Controls */ 0x5000, /* R249 */ 0x1000, /* R250 - DCDC3 Test Controls */ 0x1000, /* R251 - DCDC4 Test Controls */ 0x5100, /* R252 */ 0x1000, /* R253 - DCDC6 Test Controls */ }; #endif #ifdef CONFIG_MFD_WM8352_CONFIG_MODE_3 #undef WM8350_HAVE_CONFIG_MODE #define WM8350_HAVE_CONFIG_MODE const u16 wm8352_mode3_defaults[] = { 0x6143, /* R0 - Reset/ID */ 0x0000, /* R1 - ID */ 0x0002, /* R2 - Revision */ 0x1C02, /* R3 - System Control 1 */ 0x0204, /* R4 - System Control 2 */ 0x0000, /* R5 - System Hibernate */ 0x8A00, /* R6 - Interface Control */ 0x0000, /* R7 */ 0x8000, /* R8 - Power mgmt (1) */ 0x0000, /* R9 - Power mgmt (2) */ 0x0000, /* R10 - Power mgmt (3) */ 0x2000, /* R11 - Power mgmt (4) */ 0x0E00, /* R12 - Power mgmt (5) */ 0x0000, /* R13 - Power mgmt (6) */ 0x0000, /* R14 - Power mgmt (7) */ 0x0000, /* R15 */ 0x0000, /* R16 - RTC Seconds/Minutes */ 0x0100, /* R17 - RTC Hours/Day */ 0x0101, /* R18 - RTC Date/Month */ 0x1400, /* R19 - RTC Year */ 0x0000, /* R20 - Alarm Seconds/Minutes */ 0x0000, /* R21 - Alarm Hours/Day */ 0x0000, /* R22 - Alarm Date/Month */ 0x0320, /* R23 - RTC Time Control */ 0x0000, /* R24 - System Interrupts */ 0x0000, /* R25 - Interrupt Status 1 */ 0x0000, /* R26 - Interrupt 
Status 2 */ 0x0000, /* R27 */ 0x0000, /* R28 - Under Voltage Interrupt status */ 0x0000, /* R29 - Over Current Interrupt status */ 0x0000, /* R30 - GPIO Interrupt Status */ 0x0000, /* R31 - Comparator Interrupt Status */ 0x3FFF, /* R32 - System Interrupts Mask */ 0x0000, /* R33 - Interrupt Status 1 Mask */ 0x0000, /* R34 - Interrupt Status 2 Mask */ 0x0000, /* R35 */ 0x0000, /* R36 - Under Voltage Interrupt status Mask */ 0x0000, /* R37 - Over Current Interrupt status Mask */ 0x0000, /* R38 - GPIO Interrupt Status Mask */ 0x0000, /* R39 - Comparator Interrupt Status Mask */ 0x0040, /* R40 - Clock Control 1 */ 0x0000, /* R41 - Clock Control 2 */ 0x3A00, /* R42 - FLL Control 1 */ 0x7086, /* R43 - FLL Control 2 */ 0xC226, /* R44 - FLL Control 3 */ 0x0000, /* R45 - FLL Control 4 */ 0x0000, /* R46 */ 0x0000, /* R47 */ 0x0000, /* R48 - DAC Control */ 0x0000, /* R49 */ 0x00C0, /* R50 - DAC Digital Volume L */ 0x00C0, /* R51 - DAC Digital Volume R */ 0x0000, /* R52 */ 0x0040, /* R53 - DAC LR Rate */ 0x0000, /* R54 - DAC Clock Control */ 0x0000, /* R55 */ 0x0000, /* R56 */ 0x0000, /* R57 */ 0x4000, /* R58 - DAC Mute */ 0x0000, /* R59 - DAC Mute Volume */ 0x0000, /* R60 - DAC Side */ 0x0000, /* R61 */ 0x0000, /* R62 */ 0x0000, /* R63 */ 0x8000, /* R64 - ADC Control */ 0x0000, /* R65 */ 0x00C0, /* R66 - ADC Digital Volume L */ 0x00C0, /* R67 - ADC Digital Volume R */ 0x0000, /* R68 - ADC Divider */ 0x0000, /* R69 */ 0x0040, /* R70 - ADC LR Rate */ 0x0000, /* R71 */ 0x0303, /* R72 - Input Control */ 0x0000, /* R73 - IN3 Input Control */ 0x0000, /* R74 - Mic Bias Control */ 0x0000, /* R75 */ 0x0000, /* R76 - Output Control */ 0x0000, /* R77 - Jack Detect */ 0x0000, /* R78 - Anti Pop Control */ 0x0000, /* R79 */ 0x0040, /* R80 - Left Input Volume */ 0x0040, /* R81 - Right Input Volume */ 0x0000, /* R82 */ 0x0000, /* R83 */ 0x0000, /* R84 */ 0x0000, /* R85 */ 0x0000, /* R86 */ 0x0000, /* R87 */ 0x0800, /* R88 - Left Mixer Control */ 0x1000, /* R89 - Right Mixer Control */ 0x0000, 
/* R90 */ 0x0000, /* R91 */ 0x0000, /* R92 - OUT3 Mixer Control */ 0x0000, /* R93 - OUT4 Mixer Control */ 0x0000, /* R94 */ 0x0000, /* R95 */ 0x0000, /* R96 - Output Left Mixer Volume */ 0x0000, /* R97 - Output Right Mixer Volume */ 0x0000, /* R98 - Input Mixer Volume L */ 0x0000, /* R99 - Input Mixer Volume R */ 0x0000, /* R100 - Input Mixer Volume */ 0x0000, /* R101 */ 0x0000, /* R102 */ 0x0000, /* R103 */ 0x00E4, /* R104 - OUT1L Volume */ 0x00E4, /* R105 - OUT1R Volume */ 0x00E4, /* R106 - OUT2L Volume */ 0x02E4, /* R107 - OUT2R Volume */ 0x0000, /* R108 */ 0x0000, /* R109 */ 0x0000, /* R110 */ 0x0000, /* R111 - BEEP Volume */ 0x0A00, /* R112 - AI Formating */ 0x0000, /* R113 - ADC DAC COMP */ 0x0020, /* R114 - AI ADC Control */ 0x0020, /* R115 - AI DAC Control */ 0x0000, /* R116 */ 0x0000, /* R117 */ 0x0000, /* R118 */ 0x0000, /* R119 */ 0x0000, /* R120 */ 0x0000, /* R121 */ 0x0000, /* R122 */ 0x0000, /* R123 */ 0x0000, /* R124 */ 0x0000, /* R125 */ 0x0000, /* R126 */ 0x0000, /* R127 */ 0x1FFF, /* R128 - GPIO Debounce */ 0x0010, /* R129 - GPIO Pin pull up Control */ 0x0000, /* R130 - GPIO Pull down Control */ 0x0000, /* R131 - GPIO Interrupt Mode */ 0x0000, /* R132 */ 0x0000, /* R133 - GPIO Control */ 0x0BFB, /* R134 - GPIO Configuration (i/o) */ 0x0FFD, /* R135 - GPIO Pin Polarity / Type */ 0x0000, /* R136 */ 0x0000, /* R137 */ 0x0000, /* R138 */ 0x0000, /* R139 */ 0x0310, /* R140 - GPIO Function Select 1 */ 0x0001, /* R141 - GPIO Function Select 2 */ 0x2300, /* R142 - GPIO Function Select 3 */ 0x0003, /* R143 - GPIO Function Select 4 */ 0x0000, /* R144 - Digitiser Control (1) */ 0x0002, /* R145 - Digitiser Control (2) */ 0x0000, /* R146 */ 0x0000, /* R147 */ 0x0000, /* R148 */ 0x0000, /* R149 */ 0x0000, /* R150 */ 0x0000, /* R151 */ 0x7000, /* R152 - AUX1 Readback */ 0x7000, /* R153 - AUX2 Readback */ 0x7000, /* R154 - AUX3 Readback */ 0x7000, /* R155 - AUX4 Readback */ 0x0000, /* R156 - USB Voltage Readback */ 0x0000, /* R157 - LINE Voltage Readback */ 
0x0000, /* R158 - BATT Voltage Readback */ 0x0000, /* R159 - Chip Temp Readback */ 0x0000, /* R160 */ 0x0000, /* R161 */ 0x0000, /* R162 */ 0x0000, /* R163 - Generic Comparator Control */ 0x0000, /* R164 - Generic comparator 1 */ 0x0000, /* R165 - Generic comparator 2 */ 0x0000, /* R166 - Generic comparator 3 */ 0x0000, /* R167 - Generic comparator 4 */ 0xA00F, /* R168 - Battery Charger Control 1 */ 0x0B06, /* R169 - Battery Charger Control 2 */ 0x0000, /* R170 - Battery Charger Control 3 */ 0x0000, /* R171 */ 0x0000, /* R172 - Current Sink Driver A */ 0x0000, /* R173 - CSA Flash control */ 0x0000, /* R174 - Current Sink Driver B */ 0x0000, /* R175 - CSB Flash control */ 0x0000, /* R176 - DCDC/LDO requested */ 0x032D, /* R177 - DCDC Active options */ 0x0000, /* R178 - DCDC Sleep options */ 0x0025, /* R179 - Power-check comparator */ 0x0006, /* R180 - DCDC1 Control */ 0x0400, /* R181 - DCDC1 Timeouts */ 0x1006, /* R182 - DCDC1 Low Power */ 0x0018, /* R183 - DCDC2 Control */ 0x0000, /* R184 - DCDC2 Timeouts */ 0x0000, /* R185 */ 0x0050, /* R186 - DCDC3 Control */ 0x0C00, /* R187 - DCDC3 Timeouts */ 0x0006, /* R188 - DCDC3 Low Power */ 0x000E, /* R189 - DCDC4 Control */ 0x0400, /* R190 - DCDC4 Timeouts */ 0x0006, /* R191 - DCDC4 Low Power */ 0x0008, /* R192 - DCDC5 Control */ 0x0000, /* R193 - DCDC5 Timeouts */ 0x0000, /* R194 */ 0x0029, /* R195 - DCDC6 Control */ 0x0800, /* R196 - DCDC6 Timeouts */ 0x0006, /* R197 - DCDC6 Low Power */ 0x0000, /* R198 */ 0x0003, /* R199 - Limit Switch Control */ 0x001D, /* R200 - LDO1 Control */ 0x1000, /* R201 - LDO1 Timeouts */ 0x001C, /* R202 - LDO1 Low Power */ 0x0017, /* R203 - LDO2 Control */ 0x1000, /* R204 - LDO2 Timeouts */ 0x001C, /* R205 - LDO2 Low Power */ 0x0006, /* R206 - LDO3 Control */ 0x1000, /* R207 - LDO3 Timeouts */ 0x001C, /* R208 - LDO3 Low Power */ 0x0010, /* R209 - LDO4 Control */ 0x1000, /* R210 - LDO4 Timeouts */ 0x001C, /* R211 - LDO4 Low Power */ 0x0000, /* R212 */ 0x0000, /* R213 */ 0x0000, /* R214 */ 
0x0000, /* R215 - VCC_FAULT Masks */ 0x001F, /* R216 - Main Bandgap Control */ 0x0000, /* R217 - OSC Control */ 0x9000, /* R218 - RTC Tick Control */ 0x0000, /* R219 - Security1 */ 0x4000, /* R220 */ 0x0000, /* R221 */ 0x0000, /* R222 */ 0x0000, /* R223 */ 0x0000, /* R224 - Signal overrides */ 0x0000, /* R225 - DCDC/LDO status */ 0x0000, /* R226 - Charger Overides/status */ 0x0000, /* R227 - misc overrides */ 0x0000, /* R228 - Supply overrides/status 1 */ 0x0000, /* R229 - Supply overrides/status 2 */ 0xE000, /* R230 - GPIO Pin Status */ 0x0000, /* R231 - comparotor overrides */ 0x0000, /* R232 */ 0x0000, /* R233 - State Machine status */ 0x1200, /* R234 */ 0x0000, /* R235 */ 0x8000, /* R236 */ 0x0000, /* R237 */ 0x0000, /* R238 */ 0x0000, /* R239 */ 0x0003, /* R240 */ 0x0000, /* R241 */ 0x0000, /* R242 */ 0x0004, /* R243 */ 0x0300, /* R244 */ 0x0000, /* R245 */ 0x0200, /* R246 */ 0x0000, /* R247 */ 0x1000, /* R248 - DCDC1 Test Controls */ 0x5000, /* R249 */ 0x1000, /* R250 - DCDC3 Test Controls */ 0x1000, /* R251 - DCDC4 Test Controls */ 0x5100, /* R252 */ 0x1000, /* R253 - DCDC6 Test Controls */ }; #endif /* * Access masks. 
*/
/*
 * Per-register access masks for the WM8350/WM8351/WM8352 family.
 *
 * One entry per 16-bit register Rn.  Columns give the bits that may be
 * read, the bits that may be written, and the bits that are volatile
 * (can change on their own, e.g. status/readback bits; presumably these
 * must not be served from a register cache -- confirm against the users
 * of this table).  An all-zero row marks an unused/reserved register.
 */
const struct wm8350_reg_access wm8350_reg_io_map[] = {
	/*   read    write  volatile */
	{ 0xFFFF, 0xFFFF, 0xFFFF }, /* R0   - Reset/ID */
	{ 0x7CFF, 0x0C00, 0x7FFF }, /* R1   - ID */
	{ 0x007F, 0x0000, 0x0000 }, /* R2   - ROM Mask ID */
	{ 0xBE3B, 0xBE3B, 0x8000 }, /* R3   - System Control 1 */
	{ 0xFEF7, 0xFEF7, 0xF800 }, /* R4   - System Control 2 */
	{ 0x80FF, 0x80FF, 0x8000 }, /* R5   - System Hibernate */
	{ 0xFB0E, 0xFB0E, 0x0000 }, /* R6   - Interface Control */
	{ 0x0000, 0x0000, 0x0000 }, /* R7 */
	{ 0xE537, 0xE537, 0xFFFF }, /* R8   - Power mgmt (1) */
	{ 0x0FF3, 0x0FF3, 0xFFFF }, /* R9   - Power mgmt (2) */
	{ 0x008F, 0x008F, 0xFFFF }, /* R10  - Power mgmt (3) */
	{ 0x6D3C, 0x6D3C, 0xFFFF }, /* R11  - Power mgmt (4) */
	{ 0x1F8F, 0x1F8F, 0xFFFF }, /* R12  - Power mgmt (5) */
	{ 0x8F3F, 0x8F3F, 0xFFFF }, /* R13  - Power mgmt (6) */
	{ 0x0003, 0x0003, 0xFFFF }, /* R14  - Power mgmt (7) */
	{ 0x0000, 0x0000, 0x0000 }, /* R15 */
	{ 0x7F7F, 0x7F7F, 0xFFFF }, /* R16  - RTC Seconds/Minutes */
	{ 0x073F, 0x073F, 0xFFFF }, /* R17  - RTC Hours/Day */
	{ 0x1F3F, 0x1F3F, 0xFFFF }, /* R18  - RTC Date/Month */
	{ 0x3FFF, 0x00FF, 0xFFFF }, /* R19  - RTC Year */
	{ 0x7F7F, 0x7F7F, 0x0000 }, /* R20  - Alarm Seconds/Minutes */
	{ 0x0F3F, 0x0F3F, 0x0000 }, /* R21  - Alarm Hours/Day */
	{ 0x1F3F, 0x1F3F, 0x0000 }, /* R22  - Alarm Date/Month */
	{ 0xEF7F, 0xEA7F, 0xFFFF }, /* R23  - RTC Time Control */
	{ 0x3BFF, 0x0000, 0xFFFF }, /* R24  - System Interrupts */
	{ 0xFEE7, 0x0000, 0xFFFF }, /* R25  - Interrupt Status 1 */
	{ 0x35FF, 0x0000, 0xFFFF }, /* R26  - Interrupt Status 2 */
	{ 0x0F3F, 0x0000, 0xFFFF }, /* R27  - Power Up Interrupt Status */
	{ 0x0F3F, 0x0000, 0xFFFF }, /* R28  - Under Voltage Interrupt status */
	{ 0x8000, 0x0000, 0xFFFF }, /* R29  - Over Current Interrupt status */
	{ 0x1FFF, 0x0000, 0xFFFF }, /* R30  - GPIO Interrupt Status */
	{ 0xEF7F, 0x0000, 0xFFFF }, /* R31  - Comparator Interrupt Status */
	{ 0x3FFF, 0x3FFF, 0x0000 }, /* R32  - System Interrupts Mask */
	{ 0xFEE7, 0xFEE7, 0x0000 }, /* R33  - Interrupt Status 1 Mask */
	{ 0xF5FF, 0xF5FF, 0x0000 }, /* R34  - Interrupt Status 2 Mask */
	{ 0x0F3F, 0x0F3F, 0x0000 }, /* R35  - Power Up Interrupt Status Mask */
	{ 0x0F3F, 0x0F3F, 0x0000 }, /* R36  - Under Voltage Int status Mask */
	{ 0x8000, 0x8000, 0x0000 }, /* R37  - Over Current Int status Mask */
	{ 0x1FFF, 0x1FFF, 0x0000 }, /* R38  - GPIO Interrupt Status Mask */
	{ 0xEF7F, 0xEF7F, 0x0000 }, /* R39  - Comparator IntStatus Mask */
	{ 0xC9F7, 0xC9F7, 0xFFFF }, /* R40  - Clock Control 1 */
	{ 0x8001, 0x8001, 0x0000 }, /* R41  - Clock Control 2 */
	{ 0xFFF7, 0xFFF7, 0xFFFF }, /* R42  - FLL Control 1 */
	{ 0xFBFF, 0xFBFF, 0x0000 }, /* R43  - FLL Control 2 */
	{ 0xFFFF, 0xFFFF, 0x0000 }, /* R44  - FLL Control 3 */
	{ 0x0033, 0x0033, 0x0000 }, /* R45  - FLL Control 4 */
	{ 0x0000, 0x0000, 0x0000 }, /* R46 */
	{ 0x0000, 0x0000, 0x0000 }, /* R47 */
	{ 0x3033, 0x3033, 0x0000 }, /* R48  - DAC Control */
	{ 0x0000, 0x0000, 0x0000 }, /* R49 */
	{ 0x81FF, 0x81FF, 0xFFFF }, /* R50  - DAC Digital Volume L */
	{ 0x81FF, 0x81FF, 0xFFFF }, /* R51  - DAC Digital Volume R */
	{ 0x0000, 0x0000, 0x0000 }, /* R52 */
	{ 0x0FFF, 0x0FFF, 0xFFFF }, /* R53  - DAC LR Rate */
	{ 0x0017, 0x0017, 0x0000 }, /* R54  - DAC Clock Control */
	{ 0x0000, 0x0000, 0x0000 }, /* R55 */
	{ 0x0000, 0x0000, 0x0000 }, /* R56 */
	{ 0x0000, 0x0000, 0x0000 }, /* R57 */
	{ 0x4000, 0x4000, 0x0000 }, /* R58  - DAC Mute */
	{ 0x7000, 0x7000, 0x0000 }, /* R59  - DAC Mute Volume */
	{ 0x3C00, 0x3C00, 0x0000 }, /* R60  - DAC Side */
	{ 0x0000, 0x0000, 0x0000 }, /* R61 */
	{ 0x0000, 0x0000, 0x0000 }, /* R62 */
	{ 0x0000, 0x0000, 0x0000 }, /* R63 */
	{ 0x8303, 0x8303, 0xFFFF }, /* R64  - ADC Control */
	{ 0x0000, 0x0000, 0x0000 }, /* R65 */
	{ 0x81FF, 0x81FF, 0xFFFF }, /* R66  - ADC Digital Volume L */
	{ 0x81FF, 0x81FF, 0xFFFF }, /* R67  - ADC Digital Volume R */
	{ 0x0FFF, 0x0FFF, 0x0000 }, /* R68  - ADC Divider */
	{ 0x0000, 0x0000, 0x0000 }, /* R69 */
	{ 0x0FFF, 0x0FFF, 0xFFFF }, /* R70  - ADC LR Rate */
	{ 0x0000, 0x0000, 0x0000 }, /* R71 */
	{ 0x0707, 0x0707, 0xFFFF }, /* R72  - Input Control */
	{ 0xC0C0, 0xC0C0, 0xFFFF }, /* R73  - IN3 Input Control */
	{ 0xC09F, 0xC09F, 0xFFFF }, /* R74  - Mic Bias Control */
	{ 0x0000, 0x0000, 0x0000 }, /* R75 */
	{ 0x0F15, 0x0F15, 0xFFFF }, /* R76  - Output Control */
	{ 0xC000, 0xC000, 0xFFFF }, /* R77  - Jack Detect */
	{ 0x03FF, 0x03FF, 0x0000 }, /* R78  - Anti Pop Control */
	{ 0x0000, 0x0000, 0x0000 }, /* R79 */
	{ 0xE1FC, 0xE1FC, 0x8000 }, /* R80  - Left Input Volume */
	{ 0xE1FC, 0xE1FC, 0x8000 }, /* R81  - Right Input Volume */
	{ 0x0000, 0x0000, 0x0000 }, /* R82 */
	{ 0x0000, 0x0000, 0x0000 }, /* R83 */
	{ 0x0000, 0x0000, 0x0000 }, /* R84 */
	{ 0x0000, 0x0000, 0x0000 }, /* R85 */
	{ 0x0000, 0x0000, 0x0000 }, /* R86 */
	{ 0x0000, 0x0000, 0x0000 }, /* R87 */
	{ 0x9807, 0x9807, 0xFFFF }, /* R88  - Left Mixer Control */
	{ 0x980B, 0x980B, 0xFFFF }, /* R89  - Right Mixer Control */
	{ 0x0000, 0x0000, 0x0000 }, /* R90 */
	{ 0x0000, 0x0000, 0x0000 }, /* R91 */
	{ 0x8909, 0x8909, 0xFFFF }, /* R92  - OUT3 Mixer Control */
	{ 0x9E07, 0x9E07, 0xFFFF }, /* R93  - OUT4 Mixer Control */
	{ 0x0000, 0x0000, 0x0000 }, /* R94 */
	{ 0x0000, 0x0000, 0x0000 }, /* R95 */
	{ 0x0EEE, 0x0EEE, 0x0000 }, /* R96  - Output Left Mixer Volume */
	{ 0xE0EE, 0xE0EE, 0x0000 }, /* R97  - Output Right Mixer Volume */
	{ 0x0E0F, 0x0E0F, 0x0000 }, /* R98  - Input Mixer Volume L */
	{ 0xE0E1, 0xE0E1, 0x0000 }, /* R99  - Input Mixer Volume R */
	{ 0x800E, 0x800E, 0x0000 }, /* R100 - Input Mixer Volume */
	{ 0x0000, 0x0000, 0x0000 }, /* R101 */
	{ 0x0000, 0x0000, 0x0000 }, /* R102 */
	{ 0x0000, 0x0000, 0x0000 }, /* R103 */
	{ 0xE1FC, 0xE1FC, 0xFFFF }, /* R104 - LOUT1 Volume */
	{ 0xE1FC, 0xE1FC, 0xFFFF }, /* R105 - ROUT1 Volume */
	{ 0xE1FC, 0xE1FC, 0xFFFF }, /* R106 - LOUT2 Volume */
	{ 0xE7FC, 0xE7FC, 0xFFFF }, /* R107 - ROUT2 Volume */
	{ 0x0000, 0x0000, 0x0000 }, /* R108 */
	{ 0x0000, 0x0000, 0x0000 }, /* R109 */
	{ 0x0000, 0x0000, 0x0000 }, /* R110 */
	{ 0x80E0, 0x80E0, 0xFFFF }, /* R111 - BEEP Volume */
	{ 0xBF00, 0xBF00, 0x0000 }, /* R112 - AI Formating */
	{ 0x00F1, 0x00F1, 0x0000 }, /* R113 - ADC DAC COMP */
	{ 0x00F8, 0x00F8, 0x0000 }, /* R114 - AI ADC Control */
	{ 0x40FB, 0x40FB, 0x0000 }, /* R115 - AI DAC Control */
	{ 0x7C30, 0x7C30, 0x0000 }, /* R116 - AIF Test */
	{ 0x0000, 0x0000, 0x0000 }, /* R117 */
	{ 0x0000, 0x0000, 0x0000 }, /* R118 */
	{ 0x0000, 0x0000, 0x0000 }, /* R119 */
	{ 0x0000, 0x0000, 0x0000 }, /* R120 */
	{ 0x0000, 0x0000, 0x0000 }, /* R121 */
	{ 0x0000, 0x0000, 0x0000 }, /* R122 */
	{ 0x0000, 0x0000, 0x0000 }, /* R123 */
	{ 0x0000, 0x0000, 0x0000 }, /* R124 */
	{ 0x0000, 0x0000, 0x0000 }, /* R125 */
	{ 0x0000, 0x0000, 0x0000 }, /* R126 */
	{ 0x0000, 0x0000, 0x0000 }, /* R127 */
	{ 0x1FFF, 0x1FFF, 0x0000 }, /* R128 - GPIO Debounce */
	{ 0x1FFF, 0x1FFF, 0x0000 }, /* R129 - GPIO Pin pull up Control */
	{ 0x1FFF, 0x1FFF, 0x0000 }, /* R130 - GPIO Pull down Control */
	{ 0x1FFF, 0x1FFF, 0x0000 }, /* R131 - GPIO Interrupt Mode */
	{ 0x0000, 0x0000, 0x0000 }, /* R132 */
	{ 0x00C0, 0x00C0, 0x0000 }, /* R133 - GPIO Control */
	{ 0x1FFF, 0x1FFF, 0x0000 }, /* R134 - GPIO Configuration (i/o) */
	{ 0x1FFF, 0x1FFF, 0x0000 }, /* R135 - GPIO Pin Polarity / Type */
	{ 0x0000, 0x0000, 0x0000 }, /* R136 */
	{ 0x0000, 0x0000, 0x0000 }, /* R137 */
	{ 0x0000, 0x0000, 0x0000 }, /* R138 */
	{ 0x0000, 0x0000, 0x0000 }, /* R139 */
	{ 0xFFFF, 0xFFFF, 0x0000 }, /* R140 - GPIO Function Select 1 */
	{ 0xFFFF, 0xFFFF, 0x0000 }, /* R141 - GPIO Function Select 2 */
	{ 0xFFFF, 0xFFFF, 0x0000 }, /* R142 - GPIO Function Select 3 */
	{ 0x000F, 0x000F, 0x0000 }, /* R143 - GPIO Function Select 4 */
	{ 0xF0FF, 0xF0FF, 0xA000 }, /* R144 - Digitiser Control (1) */
	{ 0x3707, 0x3707, 0x0000 }, /* R145 - Digitiser Control (2) */
	{ 0x0000, 0x0000, 0x0000 }, /* R146 */
	{ 0x0000, 0x0000, 0x0000 }, /* R147 */
	{ 0x0000, 0x0000, 0x0000 }, /* R148 */
	{ 0x0000, 0x0000, 0x0000 }, /* R149 */
	{ 0x0000, 0x0000, 0x0000 }, /* R150 */
	{ 0x0000, 0x0000, 0x0000 }, /* R151 */
	{ 0x7FFF, 0x7000, 0xFFFF }, /* R152 - AUX1 Readback */
	{ 0x7FFF, 0x7000, 0xFFFF }, /* R153 - AUX2 Readback */
	{ 0x7FFF, 0x7000, 0xFFFF }, /* R154 - AUX3 Readback */
	{ 0x7FFF, 0x7000, 0xFFFF }, /* R155 - AUX4 Readback */
	{ 0x0FFF, 0x0000, 0xFFFF }, /* R156 - USB Voltage Readback */
	{ 0x0FFF, 0x0000, 0xFFFF }, /* R157 - LINE Voltage Readback */
	{ 0x0FFF, 0x0000, 0xFFFF }, /* R158 - BATT Voltage Readback */
	{ 0x0FFF, 0x0000, 0xFFFF }, /* R159 - Chip Temp Readback */
	{ 0x0000, 0x0000, 0x0000 }, /* R160 */
	{ 0x0000, 0x0000, 0x0000 }, /* R161 */
	{ 0x0000, 0x0000, 0x0000 }, /* R162 */
	{ 0x000F, 0x000F, 0x0000 }, /* R163 - Generic Comparator Control */
	{ 0xFFFF, 0xFFFF, 0x0000 }, /* R164 - Generic comparator 1 */
	{ 0xFFFF, 0xFFFF, 0x0000 }, /* R165 - Generic comparator 2 */
	{ 0xFFFF, 0xFFFF, 0x0000 }, /* R166 - Generic comparator 3 */
	{ 0xFFFF, 0xFFFF, 0x0000 }, /* R167 - Generic comparator 4 */
	{ 0xBFFF, 0xBFFF, 0x8000 }, /* R168 - Battery Charger Control 1 */
	{ 0xFFFF, 0x4FFF, 0xB000 }, /* R169 - Battery Charger Control 2 */
	{ 0x007F, 0x007F, 0x0000 }, /* R170 - Battery Charger Control 3 */
	{ 0x0000, 0x0000, 0x0000 }, /* R171 */
	{ 0x903F, 0x903F, 0xFFFF }, /* R172 - Current Sink Driver A */
	{ 0xE333, 0xE333, 0xFFFF }, /* R173 - CSA Flash control */
	{ 0x903F, 0x903F, 0xFFFF }, /* R174 - Current Sink Driver B */
	{ 0xE333, 0xE333, 0xFFFF }, /* R175 - CSB Flash control */
	{ 0x8F3F, 0x8F3F, 0xFFFF }, /* R176 - DCDC/LDO requested */
	{ 0x332D, 0x332D, 0x0000 }, /* R177 - DCDC Active options */
	{ 0x002D, 0x002D, 0x0000 }, /* R178 - DCDC Sleep options */
	{ 0x5177, 0x5177, 0x8000 }, /* R179 - Power-check comparator */
	{ 0x047F, 0x047F, 0x0000 }, /* R180 - DCDC1 Control */
	{ 0xFFC0, 0xFFC0, 0x0000 }, /* R181 - DCDC1 Timeouts */
	{ 0x737F, 0x737F, 0x0000 }, /* R182 - DCDC1 Low Power */
	{ 0x535B, 0x535B, 0x0000 }, /* R183 - DCDC2 Control */
	{ 0xFFC0, 0xFFC0, 0x0000 }, /* R184 - DCDC2 Timeouts */
	{ 0x0000, 0x0000, 0x0000 }, /* R185 */
	{ 0x047F, 0x047F, 0x0000 }, /* R186 - DCDC3 Control */
	{ 0xFFC0, 0xFFC0, 0x0000 }, /* R187 - DCDC3 Timeouts */
	{ 0x737F, 0x737F, 0x0000 }, /* R188 - DCDC3 Low Power */
	{ 0x047F, 0x047F, 0x0000 }, /* R189 - DCDC4 Control */
	{ 0xFFC0, 0xFFC0, 0x0000 }, /* R190 - DCDC4 Timeouts */
	{ 0x737F, 0x737F, 0x0000 }, /* R191 - DCDC4 Low Power */
	{ 0x535B, 0x535B, 0x0000 }, /* R192 - DCDC5 Control */
	{ 0xFFC0, 0xFFC0, 0x0000 }, /* R193 - DCDC5 Timeouts */
	{ 0x0000, 0x0000, 0x0000 }, /* R194 */
	{ 0x047F, 0x047F, 0x0000 }, /* R195 - DCDC6 Control */
	{ 0xFFC0, 0xFFC0, 0x0000 }, /* R196 - DCDC6 Timeouts */
	{ 0x737F, 0x737F, 0x0000 }, /* R197 - DCDC6 Low Power */
	{ 0x0000, 0x0000, 0x0000 }, /* R198 */
	{ 0xFFD3, 0xFFD3, 0x0000 }, /* R199 - Limit Switch Control */
	{ 0x441F, 0x441F, 0x0000 }, /* R200 - LDO1 Control */
	{ 0xFFC0, 0xFFC0, 0x0000 }, /* R201 - LDO1 Timeouts */
	{ 0x331F, 0x331F, 0x0000 }, /* R202 - LDO1 Low Power */
	{ 0x441F, 0x441F, 0x0000 }, /* R203 - LDO2 Control */
	{ 0xFFC0, 0xFFC0, 0x0000 }, /* R204 - LDO2 Timeouts */
	{ 0x331F, 0x331F, 0x0000 }, /* R205 - LDO2 Low Power */
	{ 0x441F, 0x441F, 0x0000 }, /* R206 - LDO3 Control */
	{ 0xFFC0, 0xFFC0, 0x0000 }, /* R207 - LDO3 Timeouts */
	{ 0x331F, 0x331F, 0x0000 }, /* R208 - LDO3 Low Power */
	{ 0x441F, 0x441F, 0x0000 }, /* R209 - LDO4 Control */
	{ 0xFFC0, 0xFFC0, 0x0000 }, /* R210 - LDO4 Timeouts */
	{ 0x331F, 0x331F, 0x0000 }, /* R211 - LDO4 Low Power */
	{ 0x0000, 0x0000, 0x0000 }, /* R212 */
	{ 0x0000, 0x0000, 0x0000 }, /* R213 */
	{ 0x0000, 0x0000, 0x0000 }, /* R214 */
	{ 0x8F3F, 0x8F3F, 0x0000 }, /* R215 - VCC_FAULT Masks */
	{ 0xFF3F, 0xE03F, 0x0000 }, /* R216 - Main Bandgap Control */
	{ 0xEF2F, 0xE02F, 0x0000 }, /* R217 - OSC Control */
	{ 0xF3FF, 0xB3FF, 0xc000 }, /* R218 - RTC Tick Control */
	{ 0xFFFF, 0xFFFF, 0x0000 }, /* R219 - Security */
	{ 0x09FF, 0x01FF, 0x0000 }, /* R220 - RAM BIST 1 */
	{ 0x0000, 0x0000, 0x0000 }, /* R221 */
	{ 0xFFFF, 0xFFFF, 0xFFFF }, /* R222 */
	{ 0xFFFF, 0xFFFF, 0xFFFF }, /* R223 */
	{ 0x0000, 0x0000, 0x0000 }, /* R224 */
	{ 0x8F3F, 0x0000, 0xFFFF }, /* R225 - DCDC/LDO status */
	{ 0x0000, 0x0000, 0xFFFF }, /* R226 - Charger status */
	{ 0x34FE, 0x0000, 0xFFFF }, /* R227 */
	{ 0x0000, 0x0000, 0x0000 }, /* R228 */
	{ 0x0000, 0x0000, 0x0000 }, /* R229 */
	{ 0xFFFF, 0x1FFF, 0xFFFF }, /* R230 - GPIO Pin Status */
	{ 0xFFFF, 0x1FFF, 0xFFFF }, /* R231 */
	{ 0xFFFF, 0x1FFF, 0xFFFF }, /* R232 */
	{ 0xFFFF, 0x1FFF, 0xFFFF }, /* R233 */
	{ 0x0000, 0x0000, 0x0000 }, /* R234 */
	{ 0x0000, 0x0000, 0x0000 }, /* R235 */
	{ 0x0000, 0x0000, 0x0000 }, /* R236 */
	{ 0x0000, 0x0000, 0x0000 }, /* R237 */
	{ 0x0000, 0x0000, 0x0000 }, /* R238 */
	{ 0x0000, 0x0000, 0x0000 }, /* R239 */
	{ 0x0000, 0x0000, 0x0000 }, /* R240 */
	{ 0x0000, 0x0000, 0x0000 }, /* R241 */
	{ 0x0000, 0x0000, 0x0000 }, /* R242 */
	{ 0x0000, 0x0000, 0x0000 }, /* R243 */
	{ 0x0000, 0x0000, 0x0000 }, /* R244 */
	{ 0x0000, 0x0000, 0x0000 }, /* R245 */
	{ 0x0000, 0x0000, 0x0000 }, /* R246 */
	{ 0x0000, 0x0000, 0x0000 }, /* R247 */
	{ 0xFFFF, 0x0010, 0xFFFF }, /* R248 */
	{ 0x0000, 0x0000, 0x0000 }, /* R249 */
	{ 0xFFFF, 0x0010, 0xFFFF }, /* R250 */
	{ 0xFFFF, 0x0010, 0xFFFF }, /* R251 */
	{ 0x0000, 0x0000, 0x0000 }, /* R252 */
	{ 0xFFFF, 0x0010, 0xFFFF }, /* R253 */
	{ 0x0000, 0x0000, 0x0000 }, /* R254 */
	{ 0x0000, 0x0000, 0x0000 }, /* R255 */
};
gpl-2.0
ElectryDev/octokitty
sound/pci/echoaudio/darla20_dsp.c
12526
3506
/*************************************************************************** Copyright Echo Digital Audio Corporation (c) 1998 - 2004 All rights reserved www.echoaudio.com This file is part of Echo Digital Audio's generic driver library. Echo Digital Audio's generic driver library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ************************************************************************* Translation from C++ and adaptation for use in ALSA-Driver were made by Giuliano Pochini <pochini@shiny.it> ****************************************************************************/ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id) { int err; DE_INIT(("init_hw() - Darla20\n")); if (snd_BUG_ON((subdevice_id & 0xfff0) != DARLA20)) return -ENODEV; if ((err = init_dsp_comm_page(chip))) { DE_INIT(("init_hw - could not initialize DSP comm page\n")); return err; } chip->device_id = device_id; chip->subdevice_id = subdevice_id; chip->bad_board = TRUE; chip->dsp_code_to_load = FW_DARLA20_DSP; chip->spdif_status = GD_SPDIF_STATUS_UNDEF; chip->clock_state = GD_CLOCK_UNDEF; /* Since this card has no ASIC, mark it as loaded so everything works OK */ chip->asic_loaded = TRUE; chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL; if ((err = load_firmware(chip)) < 0) return err; chip->bad_board = FALSE; DE_INIT(("init_hw done\n")); return err; } static int set_mixer_defaults(struct echoaudio *chip) { return 
init_line_levels(chip); } /* The Darla20 has no external clock sources */ static u32 detect_input_clocks(const struct echoaudio *chip) { return ECHO_CLOCK_BIT_INTERNAL; } /* The Darla20 has no ASIC. Just do nothing */ static int load_asic(struct echoaudio *chip) { return 0; } static int set_sample_rate(struct echoaudio *chip, u32 rate) { u8 clock_state, spdif_status; if (wait_handshake(chip)) return -EIO; switch (rate) { case 44100: clock_state = GD_CLOCK_44; spdif_status = GD_SPDIF_STATUS_44; break; case 48000: clock_state = GD_CLOCK_48; spdif_status = GD_SPDIF_STATUS_48; break; default: clock_state = GD_CLOCK_NOCHANGE; spdif_status = GD_SPDIF_STATUS_NOCHANGE; break; } if (chip->clock_state == clock_state) clock_state = GD_CLOCK_NOCHANGE; if (spdif_status == chip->spdif_status) spdif_status = GD_SPDIF_STATUS_NOCHANGE; chip->comm_page->sample_rate = cpu_to_le32(rate); chip->comm_page->gd_clock_state = clock_state; chip->comm_page->gd_spdif_status = spdif_status; chip->comm_page->gd_resampler_state = 3; /* magic number - should always be 3 */ /* Save the new audio state if it changed */ if (clock_state != GD_CLOCK_NOCHANGE) chip->clock_state = clock_state; if (spdif_status != GD_SPDIF_STATUS_NOCHANGE) chip->spdif_status = spdif_status; chip->sample_rate = rate; clear_handshake(chip); return send_vector(chip, DSP_VC_SET_GD_AUDIO_STATE); }
gpl-2.0
javelinanddart/android_kernel_htc_msm8974
net/rxrpc/ar-accept.c
13550
12529
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Tell the peer we are busy: transmit a single BUSY packet (serial 1)
 * back to the source address of an incoming call.  Returns 0 on
 * success, -EAGAIN if the sendmsg failed.
 */
static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
		      struct rxrpc_header *hdr)
{
	struct msghdr msg;
	struct kvec iov[1];
	size_t len;
	int ret;

	_enter("%d,,", local->debug_id);

	msg.msg_name = &srx->transport.sin;
	msg.msg_namelen = sizeof(srx->transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* reuse the incoming packet's header, rewritten as a BUSY reply */
	hdr->seq = 0;
	hdr->type = RXRPC_PACKET_TYPE_BUSY;
	hdr->flags = 0;
	hdr->userStatus = 0;
	hdr->_rsvd = 0;

	iov[0].iov_base = hdr;
	iov[0].iov_len = sizeof(*hdr);

	len = iov[0].iov_len;

	hdr->serial = htonl(1);
	_proto("Tx BUSY %%%u", ntohl(hdr->serial));

	ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
	if (ret < 0) {
		_leave(" = -EAGAIN [sendmsg failed: %d]", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}

/*
 * accept an incoming call that needs peer, transport and/or connection setting
 * up
 *
 * Builds the peer/transport/connection/call chain for the first packet of
 * a new incoming call, queues a NEW_CALL notification skb to the server
 * socket (or parks the call on the secure queue if the connection still
 * needs security negotiation), then processes the packet itself.
 * Returns 0 on success or a negative errno.
 */
static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
				      struct rxrpc_sock *rx,
				      struct sk_buff *skb,
				      struct sockaddr_rxrpc *srx)
{
	struct rxrpc_connection *conn;
	struct rxrpc_transport *trans;
	struct rxrpc_skb_priv *sp, *nsp;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct sk_buff *notification;
	int ret;

	_enter("");

	sp = rxrpc_skb(skb);

	/* get a notification message to send to the server app */
	notification = alloc_skb(0, GFP_NOFS);
	if (!notification) {
		_debug("no memory");
		ret = -ENOMEM;
		goto error_nofree;
	}
	rxrpc_new_skb(notification);
	notification->mark = RXRPC_SKB_MARK_NEW_CALL;

	peer = rxrpc_get_peer(srx, GFP_NOIO);
	if (IS_ERR(peer)) {
		_debug("no peer");
		ret = -EBUSY;
		goto error;
	}

	trans = rxrpc_get_transport(local, peer, GFP_NOIO);
	rxrpc_put_peer(peer);
	if (IS_ERR(trans)) {
		_debug("no trans");
		ret = -EBUSY;
		goto error;
	}

	conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO);
	rxrpc_put_transport(trans);
	if (IS_ERR(conn)) {
		_debug("no conn");
		ret = PTR_ERR(conn);
		goto error;
	}

	call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO);
	rxrpc_put_connection(conn);
	if (IS_ERR(call)) {
		_debug("no call");
		ret = PTR_ERR(call);
		goto error;
	}

	/* attach the call to the socket */
	read_lock_bh(&local->services_lock);
	if (rx->sk.sk_state == RXRPC_CLOSE)
		goto invalid_service;

	write_lock(&rx->call_lock);
	if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
		rxrpc_get_call(call);

		spin_lock(&call->conn->state_lock);
		if (sp->hdr.securityIndex > 0 &&
		    call->conn->state == RXRPC_CONN_SERVER_UNSECURED) {
			/* secured service: challenge the client before the
			 * call can be accepted */
			_debug("await conn sec");
			list_add_tail(&call->accept_link, &rx->secureq);
			call->conn->state = RXRPC_CONN_SERVER_CHALLENGING;
			atomic_inc(&call->conn->usage);
			set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events);
			rxrpc_queue_conn(call->conn);
		} else {
			_debug("conn ready");
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
			list_add_tail(&call->accept_link, &rx->acceptq);
			rxrpc_get_call(call);
			nsp = rxrpc_skb(notification);
			nsp->call = call;

			ASSERTCMP(atomic_read(&call->usage), >=, 3);

			_debug("notify");
			spin_lock(&call->lock);
			ret = rxrpc_queue_rcv_skb(call, notification, true,
						  false);
			spin_unlock(&call->lock);
			/* ownership of the notification skb passed to the
			 * receive queue; make the later free a no-op */
			notification = NULL;
			BUG_ON(ret < 0);
		}
		spin_unlock(&call->conn->state_lock);
		_debug("queued");
	}
	write_unlock(&rx->call_lock);

	_debug("process");
	rxrpc_fast_process_packet(call, skb);
	_debug("done");
	read_unlock_bh(&local->services_lock);
	rxrpc_free_skb(notification);
	rxrpc_put_call(call);
	_leave(" = 0");
	return 0;

invalid_service:
	_debug("invalid");
	read_unlock_bh(&local->services_lock);

	/* the socket closed under us: schedule the call for release */
	read_lock_bh(&call->state_lock);
	if (!test_bit(RXRPC_CALL_RELEASE, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) {
		rxrpc_get_call(call);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
	ret = -ECONNREFUSED;
error:
	rxrpc_free_skb(notification);
error_nofree:
	_leave(" = %d", ret);
	return ret;
}

/*
 * accept incoming calls that need peer, transport and/or connection setting up
 * - the packets we get are all incoming client DATA packets that have seq == 1
 */
void rxrpc_accept_incoming_calls(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, acceptor);
	struct rxrpc_skb_priv *sp;
	struct sockaddr_rxrpc srx;
	struct rxrpc_sock *rx;
	struct sk_buff *skb;
	__be16 service_id;
	int ret;

	_enter("%d", local->debug_id);

	/* take a ref on the local endpoint, unless it is already dying */
	read_lock_bh(&rxrpc_local_lock);
	if (atomic_read(&local->usage) > 0)
		rxrpc_get_local(local);
	else
		local = NULL;
	read_unlock_bh(&rxrpc_local_lock);
	if (!local) {
		_leave(" [local dead]");
		return;
	}

process_next_packet:
	skb = skb_dequeue(&local->accept_queue);
	if (!skb) {
		rxrpc_put_local(local);
		_leave("\n");
		return;
	}

	_net("incoming call skb %p", skb);

	sp = rxrpc_skb(skb);

	/* determine the remote address */
	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.transport.family = local->srx.transport.family;
	srx.transport_type = local->srx.transport_type;
	switch (srx.transport.family) {
	case AF_INET:
		srx.transport_len = sizeof(struct sockaddr_in);
		srx.transport.sin.sin_port = udp_hdr(skb)->source;
		srx.transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		break;
	default:
		/* unsupported address family: answer with BUSY */
		goto busy;
	}

	/* get the socket providing the service */
	service_id = sp->hdr.serviceId;
	read_lock_bh(&local->services_lock);
	list_for_each_entry(rx, &local->services, listen_link) {
		if (rx->service_id == service_id &&
		    rx->sk.sk_state != RXRPC_CLOSE)
goto found_service; } read_unlock_bh(&local->services_lock); goto invalid_service; found_service: _debug("found service %hd", ntohs(rx->service_id)); if (sk_acceptq_is_full(&rx->sk)) goto backlog_full; sk_acceptq_added(&rx->sk); sock_hold(&rx->sk); read_unlock_bh(&local->services_lock); ret = rxrpc_accept_incoming_call(local, rx, skb, &srx); if (ret < 0) sk_acceptq_removed(&rx->sk); sock_put(&rx->sk); switch (ret) { case -ECONNRESET: /* old calls are ignored */ case -ECONNABORTED: /* aborted calls are reaborted or ignored */ case 0: goto process_next_packet; case -ECONNREFUSED: goto invalid_service; case -EBUSY: goto busy; case -EKEYREJECTED: goto security_mismatch; default: BUG(); } backlog_full: read_unlock_bh(&local->services_lock); busy: rxrpc_busy(local, &srx, &sp->hdr); rxrpc_free_skb(skb); goto process_next_packet; invalid_service: skb->priority = RX_INVALID_OPERATION; rxrpc_reject_packet(local, skb); goto process_next_packet; /* can't change connection security type mid-flow */ security_mismatch: skb->priority = RX_PROTOCOL_ERROR; rxrpc_reject_packet(local, skb); goto process_next_packet; } /* * handle acceptance of a call by userspace * - assign the user call ID to the call at the front of the queue */ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, unsigned long user_call_ID) { struct rxrpc_call *call; struct rb_node *parent, **pp; int ret; _enter(",%lx", user_call_ID); ASSERT(!irqs_disabled()); write_lock(&rx->call_lock); ret = -ENODATA; if (list_empty(&rx->acceptq)) goto out; /* check the user ID isn't already in use */ ret = -EBADSLT; pp = &rx->calls.rb_node; parent = NULL; while (*pp) { parent = *pp; call = rb_entry(parent, struct rxrpc_call, sock_node); if (user_call_ID < call->user_call_ID) pp = &(*pp)->rb_left; else if (user_call_ID > call->user_call_ID) pp = &(*pp)->rb_right; else goto out; } /* dequeue the first call and check it's still valid */ call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link); 
list_del_init(&call->accept_link); sk_acceptq_removed(&rx->sk); write_lock_bh(&call->state_lock); switch (call->state) { case RXRPC_CALL_SERVER_ACCEPTING: call->state = RXRPC_CALL_SERVER_RECV_REQUEST; break; case RXRPC_CALL_REMOTELY_ABORTED: case RXRPC_CALL_LOCALLY_ABORTED: ret = -ECONNABORTED; goto out_release; case RXRPC_CALL_NETWORK_ERROR: ret = call->conn->error; goto out_release; case RXRPC_CALL_DEAD: ret = -ETIME; goto out_discard; default: BUG(); } /* formalise the acceptance */ call->user_call_ID = user_call_ID; rb_link_node(&call->sock_node, parent, pp); rb_insert_color(&call->sock_node, &rx->calls); if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags)) BUG(); if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events)) BUG(); rxrpc_queue_call(call); rxrpc_get_call(call); write_unlock_bh(&call->state_lock); write_unlock(&rx->call_lock); _leave(" = %p{%d}", call, call->debug_id); return call; /* if the call is already dying or dead, then we leave the socket's ref * on it to be released by rxrpc_dead_call_expired() as induced by * rxrpc_release_call() */ out_release: _debug("release %p", call); if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) rxrpc_queue_call(call); out_discard: write_unlock_bh(&call->state_lock); _debug("discard %p", call); out: write_unlock(&rx->call_lock); _leave(" = %d", ret); return ERR_PTR(ret); } /* * handle rejectance of a call by userspace * - reject the call at the front of the queue */ int rxrpc_reject_call(struct rxrpc_sock *rx) { struct rxrpc_call *call; int ret; _enter(""); ASSERT(!irqs_disabled()); write_lock(&rx->call_lock); ret = -ENODATA; if (list_empty(&rx->acceptq)) goto out; /* dequeue the first call and check it's still valid */ call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link); list_del_init(&call->accept_link); sk_acceptq_removed(&rx->sk); write_lock_bh(&call->state_lock); switch (call->state) { case RXRPC_CALL_SERVER_ACCEPTING: call->state = 
RXRPC_CALL_SERVER_BUSY; if (test_and_set_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) rxrpc_queue_call(call); ret = 0; goto out_release; case RXRPC_CALL_REMOTELY_ABORTED: case RXRPC_CALL_LOCALLY_ABORTED: ret = -ECONNABORTED; goto out_release; case RXRPC_CALL_NETWORK_ERROR: ret = call->conn->error; goto out_release; case RXRPC_CALL_DEAD: ret = -ETIME; goto out_discard; default: BUG(); } /* if the call is already dying or dead, then we leave the socket's ref * on it to be released by rxrpc_dead_call_expired() as induced by * rxrpc_release_call() */ out_release: _debug("release %p", call); if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) rxrpc_queue_call(call); out_discard: write_unlock_bh(&call->state_lock); _debug("discard %p", call); out: write_unlock(&rx->call_lock); _leave(" = %d", ret); return ret; } /** * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call * @sock: The socket on which the impending call is waiting * @user_call_ID: The tag to attach to the call * * Allow a kernel service to accept an incoming call, assuming the incoming * call is still valid. */ struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock, unsigned long user_call_ID) { struct rxrpc_call *call; _enter(",%lx", user_call_ID); call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID); _leave(" = %p", call); return call; } EXPORT_SYMBOL(rxrpc_kernel_accept_call); /** * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call * @sock: The socket on which the impending call is waiting * * Allow a kernel service to reject an incoming call with a BUSY message, * assuming the incoming call is still valid. */ int rxrpc_kernel_reject_call(struct socket *sock) { int ret; _enter(""); ret = rxrpc_reject_call(rxrpc_sk(sock->sk)); _leave(" = %d", ret); return ret; } EXPORT_SYMBOL(rxrpc_kernel_reject_call);
gpl-2.0
crazyi/android_kernel_oppo_find7a
drivers/bluetooth/ath3k.c
239
11241
/* * Copyright (c) 2008-2009 Atheros Communications Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/usb.h> #include <net/bluetooth/bluetooth.h> #define VERSION "1.0" #define ATH3K_DNLOAD 0x01 #define ATH3K_GETSTATE 0x05 #define ATH3K_SET_NORMAL_MODE 0x07 #define ATH3K_GETVERSION 0x09 #define USB_REG_SWITCH_VID_PID 0x0a #define ATH3K_MODE_MASK 0x3F #define ATH3K_NORMAL_MODE 0x0E #define ATH3K_PATCH_UPDATE 0x80 #define ATH3K_SYSCFG_UPDATE 0x40 #define ATH3K_XTAL_FREQ_26M 0x00 #define ATH3K_XTAL_FREQ_40M 0x01 #define ATH3K_XTAL_FREQ_19P2 0x02 #define ATH3K_NAME_LEN 0xFF struct ath3k_version { unsigned int rom_version; unsigned int build_version; unsigned int ram_version; unsigned char ref_clock; unsigned char reserved[0x07]; }; static struct usb_device_id ath3k_table[] = { /* Atheros AR3011 */ { USB_DEVICE(0x0CF3, 0x3000) }, /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x3002) }, { USB_DEVICE(0x13d3, 0x3304) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0489, 0xE03D) }, { USB_DEVICE(0x0489, 0xE027) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, /* 
Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, { USB_DEVICE(0x0CF3, 0x311D) }, { USB_DEVICE(0x0CF3, 0x817a) }, { USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x04CA, 0x3005) }, { USB_DEVICE(0x13d3, 0x3362) }, { USB_DEVICE(0x0CF3, 0xE004) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ath3k_table); #define BTUSB_ATH3012 0x80 /* This table is to load patch and sysconfig files * for AR3012 */ static struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { } /* Terminating entry */ }; #define USB_REQ_DFU_DNLOAD 1 #define BULK_SIZE 4096 #define FW_HDR_SIZE 20 static int ath3k_load_firmware(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; int err, pipe, len, size, sent = 0; int count = firmware->size; BT_DBG("udev %p", udev); pipe = usb_sndctrlpipe(udev, 0); send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } memcpy(send_buf, firmware->data, 20); if ((err = usb_control_msg(udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR, 0, 0, send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) { BT_ERR("Can't change to loading configuration err"); goto error; } sent += 20; count -= 20; while (count) { size = min_t(uint, count, BULK_SIZE); pipe = usb_sndbulkpipe(udev, 0x02); memcpy(send_buf, firmware->data + sent, size); err = 
usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || (len != size)) { BT_ERR("Error in firmware loading err = %d," "len = %d, size = %d", err, len, size); goto error; } sent += size; count -= size; } error: kfree(send_buf); return err; } static int ath3k_get_state(struct usb_device *udev, unsigned char *state) { int pipe = 0; pipe = usb_rcvctrlpipe(udev, 0); return usb_control_msg(udev, pipe, ATH3K_GETSTATE, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, state, 0x01, USB_CTRL_SET_TIMEOUT); } static int ath3k_get_version(struct usb_device *udev, struct ath3k_version *version) { int pipe = 0; pipe = usb_rcvctrlpipe(udev, 0); return usb_control_msg(udev, pipe, ATH3K_GETVERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, sizeof(struct ath3k_version), USB_CTRL_SET_TIMEOUT); } static int ath3k_load_fwfile(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; int err, pipe, len, size, count, sent = 0; int ret; count = firmware->size; send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } size = min_t(uint, count, FW_HDR_SIZE); memcpy(send_buf, firmware->data, size); pipe = usb_sndctrlpipe(udev, 0); ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD, USB_TYPE_VENDOR, 0, 0, send_buf, size, USB_CTRL_SET_TIMEOUT); if (ret < 0) { BT_ERR("Can't change to loading configuration err"); kfree(send_buf); return ret; } sent += size; count -= size; while (count) { size = min_t(uint, count, BULK_SIZE); pipe = usb_sndbulkpipe(udev, 0x02); memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || (len != size)) { BT_ERR("Error in firmware loading err = %d," "len = %d, size = %d", err, len, size); kfree(send_buf); return err; } sent += size; count -= size; } kfree(send_buf); return 0; } static int ath3k_switch_pid(struct usb_device *udev) { int pipe = 0; pipe = usb_sndctrlpipe(udev, 0); return usb_control_msg(udev, pipe, 
USB_REG_SWITCH_VID_PID, USB_TYPE_VENDOR, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int ath3k_set_normal_mode(struct usb_device *udev) { unsigned char fw_state; int pipe = 0, ret; ret = ath3k_get_state(udev, &fw_state); if (ret < 0) { BT_ERR("Can't get state to change to normal mode err"); return ret; } if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) { BT_DBG("firmware was already in normal mode"); return 0; } pipe = usb_sndctrlpipe(udev, 0); return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE, USB_TYPE_VENDOR, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int ath3k_load_patch(struct usb_device *udev) { unsigned char fw_state; char filename[ATH3K_NAME_LEN] = {0}; const struct firmware *firmware; struct ath3k_version fw_version, pt_version; int ret; ret = ath3k_get_state(udev, &fw_state); if (ret < 0) { BT_ERR("Can't get state to change to load ram patch err"); return ret; } if (fw_state & ATH3K_PATCH_UPDATE) { BT_DBG("Patch was already downloaded"); return 0; } ret = ath3k_get_version(udev, &fw_version); if (ret < 0) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu", fw_version.rom_version); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { BT_ERR("Patch file not found %s", filename); return ret; } pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8); pt_version.build_version = *(int *) (firmware->data + firmware->size - 4); if ((pt_version.rom_version != fw_version.rom_version) || (pt_version.build_version <= fw_version.build_version)) { BT_ERR("Patch file version did not match with firmware"); release_firmware(firmware); return -EINVAL; } ret = ath3k_load_fwfile(udev, firmware); release_firmware(firmware); return ret; } static int ath3k_load_syscfg(struct usb_device *udev) { unsigned char fw_state; char filename[ATH3K_NAME_LEN] = {0}; const struct firmware *firmware; struct ath3k_version fw_version; int clk_value, ret; 
ret = ath3k_get_state(udev, &fw_state); if (ret < 0) { BT_ERR("Can't get state to change to load configration err"); return -EBUSY; } ret = ath3k_get_version(udev, &fw_version); if (ret < 0) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } switch (fw_version.ref_clock) { case ATH3K_XTAL_FREQ_26M: clk_value = 26; break; case ATH3K_XTAL_FREQ_40M: clk_value = 40; break; case ATH3K_XTAL_FREQ_19P2: clk_value = 19; break; default: clk_value = 0; break; } snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s", fw_version.rom_version, clk_value, ".dfu"); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { BT_ERR("Configuration file not found %s", filename); return ret; } ret = ath3k_load_fwfile(udev, firmware); release_firmware(firmware); return ret; } static int ath3k_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); int ret; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; /* match device ID in ath3k blacklist table */ if (!id->driver_info) { const struct usb_device_id *match; match = usb_match_id(intf, ath3k_blist_tbl); if (match) id = match; } /* load patch and sysconfig files for AR3012 */ if (id->driver_info & BTUSB_ATH3012) { /* New firmware with patch and sysconfig files already loaded */ if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001) return -ENODEV; ret = ath3k_load_patch(udev); if (ret < 0) { BT_ERR("Loading patch file failed"); return ret; } ret = ath3k_load_syscfg(udev); if (ret < 0) { BT_ERR("Loading sysconfig file failed"); return ret; } ret = ath3k_set_normal_mode(udev); if (ret < 0) { BT_ERR("Set normal mode failed"); return ret; } ath3k_switch_pid(udev); return 0; } if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { BT_ERR("Error loading firmware"); return -EIO; } ret = ath3k_load_firmware(udev, firmware); 
release_firmware(firmware); return ret; } static void ath3k_disconnect(struct usb_interface *intf) { BT_DBG("ath3k_disconnect intf %p", intf); } static struct usb_driver ath3k_driver = { .name = "ath3k", .probe = ath3k_probe, .disconnect = ath3k_disconnect, .id_table = ath3k_table, }; static int __init ath3k_init(void) { BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION); return usb_register(&ath3k_driver); } static void __exit ath3k_exit(void) { usb_deregister(&ath3k_driver); } module_init(ath3k_init); module_exit(ath3k_exit); MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Atheros AR30xx firmware driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("ath3k-1.fw");
gpl-2.0
lexi6725/linux-3.17.1
sound/pci/hda/hda_tegra.c
495
13337
/* * * Implementation of primary ALSA driver code base for NVIDIA Tegra HDA. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ #include <linux/clk.h> #include <linux/clocksource.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <linux/slab.h> #include <linux/time.h> #include <sound/core.h> #include <sound/initval.h> #include "hda_codec.h" #include "hda_controller.h" #include "hda_priv.h" /* Defines for Nvidia Tegra HDA support */ #define HDA_BAR0 0x8000 #define HDA_CFG_CMD 0x1004 #define HDA_CFG_BAR0 0x1010 #define HDA_ENABLE_IO_SPACE (1 << 0) #define HDA_ENABLE_MEM_SPACE (1 << 1) #define HDA_ENABLE_BUS_MASTER (1 << 2) #define HDA_ENABLE_SERR (1 << 8) #define HDA_DISABLE_INTR (1 << 10) #define HDA_BAR0_INIT_PROGRAM 0xFFFFFFFF #define HDA_BAR0_FINAL_PROGRAM (1 << 14) /* IPFS */ #define HDA_IPFS_CONFIG 0x180 #define HDA_IPFS_EN_FPCI 0x1 #define HDA_IPFS_FPCI_BAR0 0x80 #define HDA_FPCI_BAR0_START 0x40 #define HDA_IPFS_INTR_MASK 0x188 #define HDA_IPFS_EN_INTR (1 << 16) /* max number of SDs */ #define NUM_CAPTURE_SD 1 #define NUM_PLAYBACK_SD 1 struct hda_tegra { struct azx chip; struct device *dev; struct clk *hda_clk; struct clk *hda2codec_2x_clk; struct clk *hda2hdmi_clk; void __iomem 
*regs; }; #ifdef CONFIG_PM static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT; module_param(power_save, bint, 0644); MODULE_PARM_DESC(power_save, "Automatic power-saving timeout (in seconds, 0 = disable)."); #else static int power_save = 0; #endif /* * DMA page allocation ops. */ static int dma_alloc_pages(struct azx *chip, int type, size_t size, struct snd_dma_buffer *buf) { return snd_dma_alloc_pages(type, chip->card->dev, size, buf); } static void dma_free_pages(struct azx *chip, struct snd_dma_buffer *buf) { snd_dma_free_pages(buf); } static int substream_alloc_pages(struct azx *chip, struct snd_pcm_substream *substream, size_t size) { struct azx_dev *azx_dev = get_azx_dev(substream); azx_dev->bufsize = 0; azx_dev->period_bytes = 0; azx_dev->format_val = 0; return snd_pcm_lib_malloc_pages(substream, size); } static int substream_free_pages(struct azx *chip, struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } /* * Register access ops. Tegra HDA register access is DWORD only. 
*/ static void hda_tegra_writel(u32 value, u32 *addr) { writel(value, addr); } static u32 hda_tegra_readl(u32 *addr) { return readl(addr); } static void hda_tegra_writew(u16 value, u16 *addr) { unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); u32 v; v = readl(dword_addr); v &= ~(0xffff << shift); v |= value << shift; writel(v, dword_addr); } static u16 hda_tegra_readw(u16 *addr) { unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); u32 v; v = readl(dword_addr); return (v >> shift) & 0xffff; } static void hda_tegra_writeb(u8 value, u8 *addr) { unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); u32 v; v = readl(dword_addr); v &= ~(0xff << shift); v |= value << shift; writel(v, dword_addr); } static u8 hda_tegra_readb(u8 *addr) { unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); u32 v; v = readl(dword_addr); return (v >> shift) & 0xff; } static const struct hda_controller_ops hda_tegra_ops = { .reg_writel = hda_tegra_writel, .reg_readl = hda_tegra_readl, .reg_writew = hda_tegra_writew, .reg_readw = hda_tegra_readw, .reg_writeb = hda_tegra_writeb, .reg_readb = hda_tegra_readb, .dma_alloc_pages = dma_alloc_pages, .dma_free_pages = dma_free_pages, .substream_alloc_pages = substream_alloc_pages, .substream_free_pages = substream_free_pages, }; static void hda_tegra_init(struct hda_tegra *hda) { u32 v; /* Enable PCI access */ v = readl(hda->regs + HDA_IPFS_CONFIG); v |= HDA_IPFS_EN_FPCI; writel(v, hda->regs + HDA_IPFS_CONFIG); /* Enable MEM/IO space and bus master */ v = readl(hda->regs + HDA_CFG_CMD); v &= ~HDA_DISABLE_INTR; v |= HDA_ENABLE_MEM_SPACE | HDA_ENABLE_IO_SPACE | HDA_ENABLE_BUS_MASTER | HDA_ENABLE_SERR; writel(v, hda->regs + HDA_CFG_CMD); writel(HDA_BAR0_INIT_PROGRAM, hda->regs + HDA_CFG_BAR0); 
writel(HDA_BAR0_FINAL_PROGRAM, hda->regs + HDA_CFG_BAR0); writel(HDA_FPCI_BAR0_START, hda->regs + HDA_IPFS_FPCI_BAR0); v = readl(hda->regs + HDA_IPFS_INTR_MASK); v |= HDA_IPFS_EN_INTR; writel(v, hda->regs + HDA_IPFS_INTR_MASK); } static int hda_tegra_enable_clocks(struct hda_tegra *data) { int rc; rc = clk_prepare_enable(data->hda_clk); if (rc) return rc; rc = clk_prepare_enable(data->hda2codec_2x_clk); if (rc) goto disable_hda; rc = clk_prepare_enable(data->hda2hdmi_clk); if (rc) goto disable_codec_2x; return 0; disable_codec_2x: clk_disable_unprepare(data->hda2codec_2x_clk); disable_hda: clk_disable_unprepare(data->hda_clk); return rc; } #ifdef CONFIG_PM_SLEEP static void hda_tegra_disable_clocks(struct hda_tegra *data) { clk_disable_unprepare(data->hda2hdmi_clk); clk_disable_unprepare(data->hda2codec_2x_clk); clk_disable_unprepare(data->hda_clk); } /* * power management */ static int hda_tegra_suspend(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); struct azx *chip = card->private_data; struct azx_pcm *p; struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip); snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); list_for_each_entry(p, &chip->pcm_list, list) snd_pcm_suspend_all(p->pcm); if (chip->initialized) snd_hda_suspend(chip->bus); azx_stop_chip(chip); azx_enter_link_reset(chip); hda_tegra_disable_clocks(hda); return 0; } static int hda_tegra_resume(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); struct azx *chip = card->private_data; struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip); hda_tegra_enable_clocks(hda); hda_tegra_init(hda); azx_init_chip(chip, 1); snd_hda_resume(chip->bus); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops hda_tegra_pm = { SET_SYSTEM_SLEEP_PM_OPS(hda_tegra_suspend, hda_tegra_resume) }; /* * destructor */ static int hda_tegra_dev_free(struct snd_device *device) { int i; struct azx *chip = 
device->device_data; azx_notifier_unregister(chip); if (chip->initialized) { for (i = 0; i < chip->num_streams; i++) azx_stream_stop(chip, &chip->azx_dev[i]); azx_stop_chip(chip); } azx_free_stream_pages(chip); return 0; } static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev) { struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip); struct device *dev = hda->dev; struct resource *res; int err; hda->hda_clk = devm_clk_get(dev, "hda"); if (IS_ERR(hda->hda_clk)) return PTR_ERR(hda->hda_clk); hda->hda2codec_2x_clk = devm_clk_get(dev, "hda2codec_2x"); if (IS_ERR(hda->hda2codec_2x_clk)) return PTR_ERR(hda->hda2codec_2x_clk); hda->hda2hdmi_clk = devm_clk_get(dev, "hda2hdmi"); if (IS_ERR(hda->hda2hdmi_clk)) return PTR_ERR(hda->hda2hdmi_clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hda->regs = devm_ioremap_resource(dev, res); if (IS_ERR(chip->remap_addr)) return PTR_ERR(chip->remap_addr); chip->remap_addr = hda->regs + HDA_BAR0; chip->addr = res->start + HDA_BAR0; err = hda_tegra_enable_clocks(hda); if (err) return err; hda_tegra_init(hda); return 0; } /* * The codecs were powered up in snd_hda_codec_new(). 
* Now all initialization done, so turn them down if possible */ static void power_down_all_codecs(struct azx *chip) { struct hda_codec *codec; list_for_each_entry(codec, &chip->bus->codec_list, list) snd_hda_power_down(codec); } static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev) { struct snd_card *card = chip->card; int err; unsigned short gcap; int irq_id = platform_get_irq(pdev, 0); err = hda_tegra_init_chip(chip, pdev); if (err) return err; err = devm_request_irq(chip->card->dev, irq_id, azx_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip); if (err) { dev_err(chip->card->dev, "unable to request IRQ %d, disabling device\n", irq_id); return err; } chip->irq = irq_id; synchronize_irq(chip->irq); gcap = azx_readw(chip, GCAP); dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap); /* read number of streams from GCAP register instead of using * hardcoded value */ chip->capture_streams = (gcap >> 8) & 0x0f; chip->playback_streams = (gcap >> 12) & 0x0f; if (!chip->playback_streams && !chip->capture_streams) { /* gcap didn't give any info, switching to old method */ chip->playback_streams = NUM_PLAYBACK_SD; chip->capture_streams = NUM_CAPTURE_SD; } chip->capture_index_offset = 0; chip->playback_index_offset = chip->capture_streams; chip->num_streams = chip->playback_streams + chip->capture_streams; chip->azx_dev = devm_kcalloc(card->dev, chip->num_streams, sizeof(*chip->azx_dev), GFP_KERNEL); if (!chip->azx_dev) return -ENOMEM; err = azx_alloc_stream_pages(chip); if (err < 0) return err; /* initialize streams */ azx_init_stream(chip); /* initialize chip */ azx_init_chip(chip, 1); /* codec detection */ if (!chip->codec_mask) { dev_err(card->dev, "no codecs found!\n"); return -ENODEV; } strcpy(card->driver, "tegra-hda"); strcpy(card->shortname, "tegra-hda"); snprintf(card->longname, sizeof(card->longname), "%s at 0x%lx irq %i", card->shortname, chip->addr, chip->irq); return 0; } /* * constructor */ static int hda_tegra_create(struct 
snd_card *card, unsigned int driver_caps, const struct hda_controller_ops *hda_ops, struct hda_tegra *hda) { static struct snd_device_ops ops = { .dev_free = hda_tegra_dev_free, }; struct azx *chip; int err; chip = &hda->chip; spin_lock_init(&chip->reg_lock); mutex_init(&chip->open_mutex); chip->card = card; chip->ops = hda_ops; chip->irq = -1; chip->driver_caps = driver_caps; chip->driver_type = driver_caps & 0xff; chip->dev_index = 0; INIT_LIST_HEAD(&chip->pcm_list); chip->codec_probe_mask = -1; chip->single_cmd = false; chip->snoop = true; err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { dev_err(card->dev, "Error creating device\n"); return err; } return 0; } static const struct of_device_id hda_tegra_match[] = { { .compatible = "nvidia,tegra30-hda" }, {}, }; MODULE_DEVICE_TABLE(of, hda_tegra_match); static int hda_tegra_probe(struct platform_device *pdev) { struct snd_card *card; struct azx *chip; struct hda_tegra *hda; int err; const unsigned int driver_flags = AZX_DCAPS_RIRB_DELAY; hda = devm_kzalloc(&pdev->dev, sizeof(*hda), GFP_KERNEL); if (!hda) return -ENOMEM; hda->dev = &pdev->dev; chip = &hda->chip; err = snd_card_new(&pdev->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, THIS_MODULE, 0, &card); if (err < 0) { dev_err(&pdev->dev, "Error creating card!\n"); return err; } err = hda_tegra_create(card, driver_flags, &hda_tegra_ops, hda); if (err < 0) goto out_free; card->private_data = chip; dev_set_drvdata(&pdev->dev, card); err = hda_tegra_first_init(chip, pdev); if (err < 0) goto out_free; /* create codec instances */ err = azx_codec_create(chip, NULL, 0, &power_save); if (err < 0) goto out_free; err = azx_codec_configure(chip); if (err < 0) goto out_free; /* create PCM streams */ err = snd_hda_build_pcms(chip->bus); if (err < 0) goto out_free; /* create mixer controls */ err = azx_mixer_create(chip); if (err < 0) goto out_free; err = snd_card_register(chip->card); if (err < 0) goto out_free; chip->running = 1; 
power_down_all_codecs(chip); azx_notifier_register(chip); return 0; out_free: snd_card_free(card); return err; } static int hda_tegra_remove(struct platform_device *pdev) { return snd_card_free(dev_get_drvdata(&pdev->dev)); } static struct platform_driver tegra_platform_hda = { .driver = { .name = "tegra-hda", .pm = &hda_tegra_pm, .of_match_table = hda_tegra_match, }, .probe = hda_tegra_probe, .remove = hda_tegra_remove, }; module_platform_driver(tegra_platform_hda); MODULE_DESCRIPTION("Tegra HDA bus driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
lujiefeng/gzsd210_Android4.0.4_kernel
sound/core/pcm_lib.c
751
61806
/*
 *  Digital Audio (PCM) abstract layer
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Abramo Bagnara <abramo@alsa-project.org>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/slab.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>

/*
 * fill ring buffer with silence
 * runtime->silence_start: starting pointer to silence area
 * runtime->silence_filled: size filled with silence
 * runtime->silence_threshold: threshold from application
 * runtime->silence_size: maximal size from application
 *
 * when runtime->silence_size >= runtime->boundary - fill processed area with
 * silence immediately
 */
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t frames, ofs, transfer;

	if (runtime->silence_size < runtime->boundary) {
		/* "threshold" mode: keep at least silence_threshold frames
		 * of silence/valid data ahead of the hardware pointer */
		snd_pcm_sframes_t noise_dist, n;
		if (runtime->silence_start != runtime->control->appl_ptr) {
			/* the application has written since the last call;
			 * frames it overwrote are no longer silenced */
			n = runtime->control->appl_ptr - runtime->silence_start;
			if (n < 0)
				n += runtime->boundary;
			if ((snd_pcm_uframes_t)n < runtime->silence_filled)
				runtime->silence_filled -= n;
			else
				runtime->silence_filled = 0;
			runtime->silence_start = runtime->control->appl_ptr;
		}
		if (runtime->silence_filled >= runtime->buffer_size)
			return;
		/* distance from hw_ptr to the first non-silenced frame */
		noise_dist = snd_pcm_playback_hw_avail(runtime) +
			     runtime->silence_filled;
		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
			return;
		frames = runtime->silence_threshold - noise_dist;
		if (frames > runtime->silence_size)
			frames = runtime->silence_size;
	} else {
		/* "full" mode: silence everything the hardware has already
		 * played, as soon as it is played */
		if (new_hw_ptr == ULONG_MAX) {	/* initialization */
			snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
			if (avail > runtime->buffer_size)
				avail = runtime->buffer_size;
			runtime->silence_filled = avail > 0 ? avail : 0;
			runtime->silence_start = (runtime->status->hw_ptr +
						  runtime->silence_filled) %
						 runtime->boundary;
		} else {
			/* hw_ptr advanced by 'frames'; that many previously
			 * silenced frames have been consumed */
			ofs = runtime->status->hw_ptr;
			frames = new_hw_ptr - ofs;
			if ((snd_pcm_sframes_t)frames < 0)
				frames += runtime->boundary;
			runtime->silence_filled -= frames;
			if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
				runtime->silence_filled = 0;
				runtime->silence_start = new_hw_ptr;
			} else {
				runtime->silence_start = ofs;
			}
		}
		frames = runtime->buffer_size - runtime->silence_filled;
	}
	if (snd_BUG_ON(frames > runtime->buffer_size))
		return;
	if (frames == 0)
		return;
	/* fill 'frames' frames starting at silence_start, splitting the
	 * transfer at the ring-buffer wrap point */
	ofs = runtime->silence_start % runtime->buffer_size;
	while (frames > 0) {
		transfer = ofs + frames > runtime->buffer_size ?
			runtime->buffer_size - ofs : frames;
		if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
		    runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED) {
			/* interleaved: one contiguous region; channel -1 asks
			 * the driver to silence all channels at once */
			if (substream->ops->silence) {
				int err;
				err = substream->ops->silence(substream, -1, ofs, transfer);
				snd_BUG_ON(err < 0);
			} else {
				char *hwbuf = runtime->dma_area +
					frames_to_bytes(runtime, ofs);
				snd_pcm_format_set_silence(runtime->format, hwbuf,
							   transfer * runtime->channels);
			}
		} else {
			/* non-interleaved: one region per channel */
			unsigned int c;
			unsigned int channels = runtime->channels;
			if (substream->ops->silence) {
				for (c = 0; c < channels; ++c) {
					int err;
					err = substream->ops->silence(substream, c, ofs, transfer);
					snd_BUG_ON(err < 0);
				}
			} else {
				size_t dma_csize = runtime->dma_bytes / channels;
				for (c = 0; c < channels; ++c) {
					char *hwbuf = runtime->dma_area + (c * dma_csize) +
						samples_to_bytes(runtime, ofs);
					snd_pcm_format_set_silence(runtime->format, hwbuf,
								   transfer);
				}
			}
		}
		runtime->silence_filled += transfer;
		frames -= transfer;
		ofs = 0;	/* subsequent iterations start at buffer head */
	}
}

/* build a short "pcmC<card>D<dev><c|p>:<substream>" tag for debug prints */
static void pcm_debug_name(struct snd_pcm_substream *substream,
			   char *name, size_t len)
{
	snprintf(name, len, "pcmC%dD%d%c:%d",
		 substream->pcm->card->number,
		 substream->pcm->device,
		 substream->stream ?
		 'c' : 'p', substream->number);
}

#define XRUN_DEBUG_BASIC	(1<<0)
#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */
#define XRUN_DEBUG_PERIODUPDATE	(1<<3)	/* full period update info */
#define XRUN_DEBUG_HWPTRUPDATE	(1<<4)	/* full hwptr update info */
#define XRUN_DEBUG_LOG		(1<<5)	/* show last 10 positions on err */
#define XRUN_DEBUG_LOGONCE	(1<<6)	/* do above only once */

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define xrun_debug(substream, mask) \
			((substream)->pstr->xrun_debug & (mask))
#else
#define xrun_debug(substream, mask)	0
#endif

#define dump_stack_on_xrun(substream) do {			\
		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
			dump_stack();				\
	} while (0)

/* handle an under/overrun: timestamp it, stop the stream in XRUN state,
 * and optionally emit debug info */
static void xrun(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
		snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
		char name[16];
		pcm_debug_name(substream, name, sizeof(name));
		snd_printd(KERN_DEBUG "XRUN: %s\n", name);
		dump_stack_on_xrun(substream);
	}
}

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define hw_ptr_error(substream, fmt, args...)				\
	do {								\
		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
			xrun_log_show(substream);			\
			if (printk_ratelimit()) {			\
				snd_printd("PCM: " fmt, ##args);	\
			}						\
			dump_stack_on_xrun(substream);			\
		}							\
	} while (0)

#define XRUN_LOG_CNT	10

/* one snapshot of hw-pointer state, recorded per position query */
struct hwptr_log_entry {
	unsigned int in_interrupt;
	unsigned long jiffies;
	snd_pcm_uframes_t pos;
	snd_pcm_uframes_t period_size;
	snd_pcm_uframes_t buffer_size;
	snd_pcm_uframes_t old_hw_ptr;
	snd_pcm_uframes_t hw_ptr_base;
};

/* circular log of the last XRUN_LOG_CNT snapshots */
struct snd_pcm_hwptr_log {
	unsigned int idx;		/* next slot to write */
	unsigned int hit: 1;		/* already dumped once */
	struct hwptr_log_entry entries[XRUN_LOG_CNT];
};

/* record one hw-pointer snapshot; allocates the log lazily (GFP_ATOMIC
 * because this may run in IRQ context) and silently skips on OOM */
static void xrun_log(struct snd_pcm_substream *substream,
		     snd_pcm_uframes_t pos, int in_interrupt)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hwptr_log *log = runtime->hwptr_log;
	struct hwptr_log_entry *entry;

	if (log == NULL) {
		log = kzalloc(sizeof(*log), GFP_ATOMIC);
		if (log == NULL)
			return;
		runtime->hwptr_log = log;
	} else {
		if (xrun_debug(substream, XRUN_DEBUG_LOGONCE) && log->hit)
			return;
	}
	entry = &log->entries[log->idx];
	entry->in_interrupt = in_interrupt;
	entry->jiffies = jiffies;
	entry->pos = pos;
	entry->period_size = runtime->period_size;
	entry->buffer_size = runtime->buffer_size;
	entry->old_hw_ptr = runtime->status->hw_ptr;
	entry->hw_ptr_base = runtime->hw_ptr_base;
	log->idx = (log->idx + 1) % XRUN_LOG_CNT;
}

/* dump the recorded hw-pointer log, oldest entry first */
static void xrun_log_show(struct snd_pcm_substream *substream)
{
	struct snd_pcm_hwptr_log *log = substream->runtime->hwptr_log;
	struct hwptr_log_entry *entry;
	char name[16];
	unsigned int idx;
	int cnt;

	if (log == NULL)
		return;
	if (xrun_debug(substream, XRUN_DEBUG_LOGONCE) && log->hit)
		return;
	pcm_debug_name(substream, name, sizeof(name));
	for (cnt = 0, idx = log->idx; cnt < XRUN_LOG_CNT; cnt++) {
		entry = &log->entries[idx];
		if (entry->period_size == 0)	/* never-written slot */
			break;
		snd_printd("hwptr log: %s: %sj=%lu, pos=%ld/%ld/%ld, "
			   "hwptr=%ld/%ld\n",
			   name, entry->in_interrupt ? "[Q] " : "",
			   entry->jiffies,
			   (unsigned long)entry->pos,
			   (unsigned long)entry->period_size,
			   (unsigned long)entry->buffer_size,
			   (unsigned long)entry->old_hw_ptr,
			   (unsigned long)entry->hw_ptr_base);
		idx++;
		idx %= XRUN_LOG_CNT;
	}
	log->hit = 1;
}

#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */

#define hw_ptr_error(substream, fmt, args...) do { } while (0)
#define xrun_log(substream, pos, in_interrupt)	do { } while (0)
#define xrun_log_show(substream)	do { } while (0)

#endif

/* re-evaluate the stream after a hw_ptr move: track avail_max, finish a
 * drain or signal an xrun, and wake up sleeping readers/writers.
 * Returns 0, or -EPIPE when the stream was stopped. */
int snd_pcm_update_state(struct snd_pcm_substream *substream,
			 struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t avail;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		avail = snd_pcm_playback_avail(runtime);
	else
		avail = snd_pcm_capture_avail(runtime);
	if (avail > runtime->avail_max)
		runtime->avail_max = avail;
	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
		if (avail >= runtime->buffer_size) {
			snd_pcm_drain_done(substream);
			return -EPIPE;
		}
	} else {
		if (avail >= runtime->stop_threshold) {
			xrun(substream);
			return -EPIPE;
		}
	}
	if (runtime->twake) {
		/* transfer in progress: wake only its private waitqueue */
		if (avail >= runtime->twake)
			wake_up(&runtime->tsleep);
	} else if (avail >= runtime->control->avail_min)
		wake_up(&runtime->sleep);
	return 0;
}

/* query the driver for the current DMA position and fold it into the
 * linear hw_ptr, detecting wraps, xruns and suspicious jumps.
 * in_interrupt is non-zero when called from the period-elapsed IRQ path. */
static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
				  unsigned int in_interrupt)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t pos;
	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
	snd_pcm_sframes_t hdelta, delta;
	unsigned long jdelta;

	old_hw_ptr = runtime->status->hw_ptr;
	pos = substream->ops->pointer(substream);
	if (pos == SNDRV_PCM_POS_XRUN) {
		xrun(substream);
		return -EPIPE;
	}
	if (pos >= runtime->buffer_size) {
		/* driver bug: the reported position must stay inside the
		 * ring buffer; clamp to 0 and complain (rate-limited) */
		if (printk_ratelimit()) {
			char name[16];
			pcm_debug_name(substream, name, sizeof(name));
			xrun_log_show(substream);
			snd_printd(KERN_ERR "BUG: %s, pos = %ld, "
				   "buffer size = %ld, period size = %ld\n",
				   name, pos, runtime->buffer_size,
				   runtime->period_size);
		}
		pos = 0;
	}
	pos -= pos % runtime->min_align;
	if (xrun_debug(substream, XRUN_DEBUG_LOG))
		xrun_log(substream, pos, in_interrupt);
	hw_base = runtime->hw_ptr_base;
	new_hw_ptr = hw_base + pos;
	if (in_interrupt) {
		/* we know that one period was processed */
		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
		delta = runtime->hw_ptr_interrupt + runtime->period_size;
		if (delta > new_hw_ptr) {
			/* check for double acknowledged interrupts */
			hdelta = jiffies - runtime->hw_ptr_jiffies;
			if (hdelta > runtime->hw_ptr_buffer_jiffies/2) {
				/* enough time elapsed for a full buffer:
				 * assume the pointer really wrapped */
				hw_base += runtime->buffer_size;
				if (hw_base >= runtime->boundary)
					hw_base = 0;
				new_hw_ptr = hw_base + pos;
				goto __delta;
			}
		}
	}
	/* new_hw_ptr might be lower than old_hw_ptr in case when */
	/* pointer crosses the end of the ring buffer */
	if (new_hw_ptr < old_hw_ptr) {
		hw_base += runtime->buffer_size;
		if (hw_base >= runtime->boundary)
			hw_base = 0;
		new_hw_ptr = hw_base + pos;
	}
      __delta:
	delta = new_hw_ptr - old_hw_ptr;
	if (delta < 0)
		delta += runtime->boundary;
	if (xrun_debug(substream,
		       in_interrupt ? XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) {
		char name[16];
		pcm_debug_name(substream, name, sizeof(name));
		snd_printd("%s_update: %s: pos=%u/%u/%u, "
			   "hwptr=%ld/%ld/%ld/%ld\n",
			   in_interrupt ? "period" : "hwptr",
			   name,
			   (unsigned int)pos,
			   (unsigned int)runtime->period_size,
			   (unsigned int)runtime->buffer_size,
			   (unsigned long)delta,
			   (unsigned long)old_hw_ptr,
			   (unsigned long)new_hw_ptr,
			   (unsigned long)runtime->hw_ptr_base);
	}

	if (runtime->no_period_wakeup) {
		snd_pcm_sframes_t xrun_threshold;
		/*
		 * Without regular period interrupts, we have to check
		 * the elapsed time to detect xruns.
		 */
		jdelta = jiffies - runtime->hw_ptr_jiffies;
		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
			goto no_delta_check;
		hdelta = jdelta - delta * HZ / runtime->rate;
		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
		while (hdelta > xrun_threshold) {
			/* more wall-clock time passed than the reported delta
			 * accounts for: the pointer must have wrapped */
			delta += runtime->buffer_size;
			hw_base += runtime->buffer_size;
			if (hw_base >= runtime->boundary)
				hw_base = 0;
			new_hw_ptr = hw_base + pos;
			hdelta -= runtime->hw_ptr_buffer_jiffies;
		}
		goto no_delta_check;
	}

	/* something must be really wrong */
	if (delta >= runtime->buffer_size + runtime->period_size) {
		hw_ptr_error(substream,
			     "Unexpected hw_pointer value %s"
			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, "
			     "old_hw_ptr=%ld)\n",
			     in_interrupt ? "[Q] " : "[P]",
			     substream->stream, (long)pos,
			     (long)new_hw_ptr, (long)old_hw_ptr);
		return 0;
	}

	/* Do jiffies check only in xrun_debug mode */
	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
		goto no_jiffies_check;

	/* Skip the jiffies check for hardwares with BATCH flag.
	 * Such hardware usually just increases the position at each IRQ,
	 * thus it can't give any strange position.
	 */
	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
		goto no_jiffies_check;
	hdelta = delta;
	if (hdelta < runtime->delay)
		goto no_jiffies_check;
	hdelta -= runtime->delay;
	jdelta = jiffies - runtime->hw_ptr_jiffies;
	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
		/* the reported advance is faster than wall-clock time
		 * allows: re-derive the pointer from elapsed jiffies */
		delta = jdelta /
			(((runtime->period_size * HZ) / runtime->rate)
								+ HZ/100);
		/* move new_hw_ptr according jiffies not pos variable */
		new_hw_ptr = old_hw_ptr;
		hw_base = delta;
		/* use loop to avoid checks for delta overflows */
		/* the delta value is small or zero in most cases */
		while (delta > 0) {
			new_hw_ptr += runtime->period_size;
			if (new_hw_ptr >= runtime->boundary)
				new_hw_ptr -= runtime->boundary;
			delta--;
		}
		/* align hw_base to buffer_size */
		hw_ptr_error(substream,
			     "hw_ptr skipping! %s"
			     "(pos=%ld, delta=%ld, period=%ld, "
			     "jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
			     in_interrupt ? "[Q] " : "",
			     (long)pos, (long)hdelta,
			     (long)runtime->period_size, jdelta,
			     ((hdelta * HZ) / runtime->rate), hw_base,
			     (unsigned long)old_hw_ptr,
			     (unsigned long)new_hw_ptr);
		/* reset values to proper state */
		delta = 0;
		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
	}
 no_jiffies_check:
	if (delta > runtime->period_size + runtime->period_size / 2) {
		hw_ptr_error(substream,
			     "Lost interrupts? %s"
			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, "
			     "old_hw_ptr=%ld)\n",
			     in_interrupt ? "[Q] " : "",
			     substream->stream, (long)delta,
			     (long)new_hw_ptr,
			     (long)old_hw_ptr);
	}

 no_delta_check:
	if (runtime->status->hw_ptr == new_hw_ptr)
		return 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, new_hw_ptr);

	if (in_interrupt) {
		/* advance hw_ptr_interrupt by whole periods only */
		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
		if (delta < 0)
			delta += runtime->boundary;
		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
		runtime->hw_ptr_interrupt += delta;
		if (runtime->hw_ptr_interrupt >= runtime->boundary)
			runtime->hw_ptr_interrupt -= runtime->boundary;
	}
	runtime->hw_ptr_base = hw_base;
	runtime->status->hw_ptr = new_hw_ptr;
	runtime->hw_ptr_jiffies = jiffies;
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
		snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);

	return snd_pcm_update_state(substream, runtime);
}

/* CAUTION: call it with irq disabled */
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
{
	return snd_pcm_update_hw_ptr0(substream, 0);
}

/**
 * snd_pcm_set_ops - set the PCM operators
 * @pcm: the pcm instance
 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
 * @ops: the operator table
 *
 * Sets the given PCM operators to the pcm instance.
 */
void snd_pcm_set_ops(struct snd_pcm *pcm, int direction, struct snd_pcm_ops *ops)
{
	struct snd_pcm_str *stream = &pcm->streams[direction];
	struct snd_pcm_substream *substream;

	/* all substreams of one direction share the same operator table */
	for (substream = stream->substream; substream != NULL; substream = substream->next)
		substream->ops = ops;
}
EXPORT_SYMBOL(snd_pcm_set_ops);

/**
 * snd_pcm_sync - set the PCM sync id
 * @substream: the pcm substream
 *
 * Sets the PCM sync identifier for the card.
 */
void snd_pcm_set_sync(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	runtime->sync.id32[0] = substream->pcm->card->number;
	runtime->sync.id32[1] = -1;
	runtime->sync.id32[2] = -1;
	runtime->sync.id32[3] = -1;
}
EXPORT_SYMBOL(snd_pcm_set_sync);

/*
 *  Standard ioctl routine
 */

/* saturating unsigned division; returns UINT_MAX (and *r = 0) on /0 */
static inline unsigned int div32(unsigned int a, unsigned int b,
				 unsigned int *r)
{
	if (b == 0) {
		*r = 0;
		return UINT_MAX;
	}
	*r = a % b;
	return a / b;
}

/* division rounding down; UINT_MAX on division by zero */
static inline unsigned int div_down(unsigned int a, unsigned int b)
{
	if (b == 0)
		return UINT_MAX;
	return a / b;
}

/* division rounding up; UINT_MAX on division by zero */
static inline unsigned int div_up(unsigned int a, unsigned int b)
{
	unsigned int r;
	unsigned int q;
	if (b == 0)
		return UINT_MAX;
	q = div32(a, b, &r);
	if (r)
		++q;
	return q;
}

/* multiplication saturating at UINT_MAX instead of wrapping */
static inline unsigned int mul(unsigned int a, unsigned int b)
{
	if (a == 0)
		return 0;
	if (div_down(UINT_MAX, a) < b)
		return UINT_MAX;
	return a * b;
}

/* a * b / c in 64-bit intermediate precision, saturating at UINT_MAX;
 * remainder returned via *r */
static inline unsigned int muldiv32(unsigned int a, unsigned int b,
				    unsigned int c, unsigned int *r)
{
	u_int64_t n = (u_int64_t) a * b;
	if (c == 0) {
		snd_BUG_ON(!n);
		*r = 0;
		return UINT_MAX;
	}
	n = div_u64_rem(n, c, r);
	if (n >= UINT_MAX) {
		*r = 0;
		return UINT_MAX;
	}
	return n;
}

/**
 * snd_interval_refine - refine the interval value of configurator
 * @i: the interval value to refine
 * @v: the interval value to refer to
 *
 * Refines the interval value with the reference value.
 * The interval is changed to the range satisfying both intervals.
 * The interval status (min, max, integer, etc.) are evaluated.
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
{
	int changed = 0;
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	/* intersect the lower bounds, keeping the stricter open flag */
	if (i->min < v->min) {
		i->min = v->min;
		i->openmin = v->openmin;
		changed = 1;
	} else if (i->min == v->min && !i->openmin && v->openmin) {
		i->openmin = 1;
		changed = 1;
	}
	/* intersect the upper bounds */
	if (i->max > v->max) {
		i->max = v->max;
		i->openmax = v->openmax;
		changed = 1;
	} else if (i->max == v->max && !i->openmax && v->openmax) {
		i->openmax = 1;
		changed = 1;
	}
	if (!i->integer && v->integer) {
		i->integer = 1;
		changed = 1;
	}
	if (i->integer) {
		/* open integer bounds collapse to the next closed value */
		if (i->openmin) {
			i->min++;
			i->openmin = 0;
		}
		if (i->openmax) {
			i->max--;
			i->openmax = 0;
		}
	} else if (!i->openmin && !i->openmax && i->min == i->max)
		i->integer = 1;
	if (snd_interval_checkempty(i)) {
		snd_interval_none(i);
		return -EINVAL;
	}
	return changed;
}
EXPORT_SYMBOL(snd_interval_refine);

/* collapse the interval to its minimum single value */
static int snd_interval_refine_first(struct snd_interval *i)
{
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->max = i->min;
	i->openmax = i->openmin;
	if (i->openmax)
		i->max++;
	return 1;
}

/* collapse the interval to its maximum single value */
static int snd_interval_refine_last(struct snd_interval *i)
{
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->min = i->max;
	i->openmin = i->openmax;
	if (i->openmin)
		i->min--;
	return 1;
}

/* c = a * b, bounds computed with saturating multiplication */
void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = mul(a->min, b->min);
	c->openmin = (a->openmin || b->openmin);
	c->max = mul(a->max, b->max);
	c->openmax = (a->openmax || b->openmax);
	c->integer = (a->integer && b->integer);
}

/**
 * snd_interval_div - refine the interval value with division
 * @a: dividend
 * @b: divisor
 * @c: quotient
 *
 * c = a / b
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	/* minimum of a/b is min(a)/max(b); maximum is max(a)/min(b) */
	c->min = div32(a->min, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = div32(a->max, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		/* divisor range touches zero: upper bound is unbounded */
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/**
 * snd_interval_muldivk - refine the interval value
 * @a: dividend 1
 * @b: dividend 2
 * @k: divisor (as integer)
 * @c: result
 *
 * c = a * b / k
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
		      unsigned int k, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, b->min, k, &r);
	c->openmin = (r || a->openmin || b->openmin);
	c->max = muldiv32(a->max, b->max, k, &r);
	if (r) {
		c->max++;
		c->openmax = 1;
	} else
		c->openmax = (a->openmax || b->openmax);
	c->integer = 0;
}

/**
 * snd_interval_mulkdiv - refine the interval value
 * @a: dividend 1
 * @k: dividend 2 (as integer)
 * @b: divisor
 * @c: result
 *
 * c = a * k / b
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
		      const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, k, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = muldiv32(a->max, k, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		/* divisor range touches zero: upper bound is unbounded */
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/* ---- */

/**
 * snd_interval_ratnum - refine the interval value
 * @i: interval to refine
 * @rats_count: number of ratnum_t
 * @rats: ratnum_t array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Refines @i against rates expressible as num/den where den is taken
 * from the [den_min, den_max, den_step] range of each ratnum entry.
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
int snd_interval_ratnum(struct snd_interval *i,
			unsigned int rats_count, struct snd_ratnum *rats,
			unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_den;
	int best_diff;
	unsigned int k;
	struct snd_interval t;
	int err;
	unsigned int result_num, result_den;
	int result_diff;

	/* first pass: find num/den closest to (>=) i->min */
	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->min;
		int diff;
		if (q == 0)
			q = 1;
		den = div_up(num, q);
		if (den < rats[k].den_min)
			continue;
		if (den > rats[k].den_max)
			den = rats[k].den_max;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den -= r;
		}
		diff = num - q * den;
		if (diff < 0)
			diff = -diff;
		/* compare diff/den fractions without division */
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	result_num = best_num;
	result_diff = best_diff;
	result_den = best_den;
	/* second pass: find num/den closest to (<=) i->max */
	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->max;
		int diff;
		if (q == 0) {
			i->empty = 1;
			return -EINVAL;
		}
		den = div_down(num, q);
		if (den > rats[k].den_max)
			continue;
		if (den < rats[k].den_min)
			den = rats[k].den_min;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den += rats[k].den_step - r;
		}
		diff = q * den - num;
		if (diff < 0)
			diff = -diff;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		/* report the better of the two candidates */
		if (best_diff * result_den < result_diff * best_den) {
			result_num = best_num;
			result_den = best_den;
		}
		if (nump)
			*nump = result_num;
		if (denp)
			*denp = result_den;
	}
	return err;
}
EXPORT_SYMBOL(snd_interval_ratnum);

/**
 * snd_interval_ratden - refine the interval value
 * @i: interval to refine
 * @rats_count: number of struct ratden
 * @rats: struct ratden array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Like snd_interval_ratnum(), but den is fixed per entry and num ranges
 * over [num_min, num_max, num_step].
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
static int snd_interval_ratden(struct snd_interval *i,
			       unsigned int rats_count, struct snd_ratden *rats,
			       unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_diff, best_den;
	unsigned int k;
	struct snd_interval t;
	int err;

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->min;
		int diff;
		num = mul(q, den);
		if (num > rats[k].num_max)
			continue;
		if (num < rats[k].num_min)
			/* NOTE(review): clamps to num_max, not num_min —
			 * asymmetric with the max-side loop below; looks
			 * long-standing, verify against upstream intent */
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num += rats[k].num_step - r;
		}
		diff = num - q * den;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->max;
		int diff;
		num = mul(q, den);
		if (num < rats[k].num_min)
			continue;
		if (num > rats[k].num_max)
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num -= r;
		}
		diff = q * den - num;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		if (nump)
			*nump = best_num;
		if (denp)
			*denp = best_den;
	}
	return err;
}

/**
 * snd_interval_list - refine the interval value from the list
 * @i: the interval value to refine
 * @count: the number of elements in the list
 * @list: the value list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
int snd_interval_list(struct snd_interval *i, unsigned int count,
		      unsigned int *list, unsigned int mask)
{
	unsigned int k;
	struct snd_interval list_range;

	if (!count) {
		i->empty = 1;
		return -EINVAL;
	}
	/* build the tightest interval covering the admissible list values,
	 * then intersect it with *i */
	snd_interval_any(&list_range);
	list_range.min = UINT_MAX;
	list_range.max = 0;
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		if (!snd_interval_test(i, list[k]))
			continue;
		list_range.min = min(list_range.min, list[k]);
		list_range.max = max(list_range.max, list[k]);
	}
	return snd_interval_refine(i, &list_range);
}
EXPORT_SYMBOL(snd_interval_list);

/* constrain *i to values of the form min + n*step */
static int snd_interval_step(struct snd_interval *i, unsigned int min, unsigned int step)
{
	unsigned int n;
	int changed = 0;
	n = (i->min - min) % step;
	if (n != 0 || i->openmin) {
		i->min += step - n;
		changed = 1;
	}
	n = (i->max - min) % step;
	if (n != 0 || i->openmax) {
		i->max -= n;
		changed = 1;
	}
	if (snd_interval_checkempty(i)) {
		i->empty = 1;
		return -EINVAL;
	}
	return changed;
}

/* Info constraints helpers */

/**
 * snd_pcm_hw_rule_add - add the hw-constraint rule
 * @runtime: the pcm runtime instance
 * @cond: condition bits
 * @var: the variable to evaluate
 * @func: the evaluation function
 * @private: the private data pointer passed to function
 * @dep: the dependent variables
 *
 * The variadic tail lists further dependent variables, terminated by -1.
 *
 * Returns zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
			int var,
			snd_pcm_hw_rule_func_t func, void *private,
			int dep, ...)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_pcm_hw_rule *c;
	unsigned int k;
	va_list args;
	va_start(args, dep);
	if (constrs->rules_num >= constrs->rules_all) {
		/* grow the rule array in chunks of 16 */
		struct snd_pcm_hw_rule *new;
		unsigned int new_rules = constrs->rules_all + 16;
		new = kcalloc(new_rules, sizeof(*c), GFP_KERNEL);
		if (!new) {
			va_end(args);
			return -ENOMEM;
		}
		if (constrs->rules) {
			memcpy(new, constrs->rules,
			       constrs->rules_num * sizeof(*c));
			kfree(constrs->rules);
		}
		constrs->rules = new;
		constrs->rules_all = new_rules;
	}
	c = &constrs->rules[constrs->rules_num];
	c->cond = cond;
	c->func = func;
	c->var = var;
	c->private = private;
	k = 0;
	while (1) {
		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
			va_end(args);
			return -EINVAL;
		}
		c->deps[k++] = dep;
		if (dep < 0)	/* -1 sentinel terminates the dep list */
			break;
		dep = va_arg(args, int);
	}
	constrs->rules_num++;
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_rule_add);

/**
 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
 */
int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
			       u_int32_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	*maskp->bits &= mask;
	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
	if (*maskp->bits == 0)	/* constraint emptied the mask */
		return -EINVAL;
	return 0;
}

/**
 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the 64bit bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
 */
int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 u_int64_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	maskp->bits[0] &= (u_int32_t)mask;
	maskp->bits[1] &= (u_int32_t)(mask >> 32);
	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
	if (! maskp->bits[0] && ! maskp->bits[1])
		return -EINVAL;
	return 0;
}

/**
 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the integer constraint
 *
 * Apply the constraint of integer to an interval parameter.
 */
int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	return snd_interval_setinteger(constrs_interval(constrs, var));
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);

/**
 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the range
 * @min: the minimal value
 * @max: the maximal value
 *
 * Apply the min/max range constraint to an interval parameter.
 */
int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 unsigned int min, unsigned int max)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_interval t;
	t.min = min;
	t.max = max;
	t.openmin = t.openmax = 0;
	t.integer = 0;
	return snd_interval_refine(constrs_interval(constrs, var), &t);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);

/* rule callback: apply a value list constraint (see snd_interval_list) */
static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_list *list = rule->private;
	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
}

/**
 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list constraint
 * @l: list
 *
 * Apply the list of constraints to an interval parameter.
 */
int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       struct snd_pcm_hw_constraint_list *l)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_list, l,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_list);

/* rule callback: refine against num/den rates; records the resulting
 * rate fraction when the rule applies to SNDRV_PCM_HW_PARAM_RATE */
static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_ratnums *r = rule->private;
	unsigned int num = 0, den = 0;
	int err;
	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
				  r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratnums constraint
 * @r: struct snd_ratnums constriants
 */
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  struct snd_pcm_hw_constraint_ratnums *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratnums, r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);

/* rule callback: ratden counterpart of snd_pcm_hw_rule_ratnums */
static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_ratdens *r = rule->private;
	unsigned int num = 0, den = 0;
	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
				      r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratdens constraint
 * @r: struct snd_ratdens constriants
 */
int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  struct snd_pcm_hw_constraint_ratdens *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratdens, r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);

/* rule callback: set params->msbits when SAMPLE_BITS resolves to the
 * width packed into the low 16 bits of rule->private */
static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	unsigned int l = (unsigned long) rule->private;
	int width = l & 0xffff;
	unsigned int msbits = l >> 16;
	struct snd_interval *i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
	if (snd_interval_single(i) && snd_interval_value(i) == width)
		params->msbits = msbits;
	return 0;
}

/**
 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @width: sample bits width
 * @msbits: msbits width
 */
int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 unsigned int width,
				 unsigned int msbits)
{
	/* pack (msbits, width) into the rule's private pointer */
	unsigned long l = (msbits << 16) | width;
	return snd_pcm_hw_rule_add(runtime, cond, -1,
				   snd_pcm_hw_rule_msbits,
				   (void*) l,
				   SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);

/* rule callback: constrain the variable to multiples of the step stored
 * in rule->private */
static int
snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
		     struct snd_pcm_hw_rule *rule)
{
	unsigned long step = (unsigned long) rule->private;
	return snd_interval_step(hw_param_interval(params, rule->var), 0, step);
}

/**
 * snd_pcm_hw_constraint_step - add a hw constraint step rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the step constraint
 * @step: step size
 */
int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       unsigned long step)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_step, (void *) step,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_step);

/* rule callback: restrict the variable to powers of two up to 2^30 */
static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
{
	static unsigned int pow2_sizes[] = {
		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
	};
	return snd_interval_list(hw_param_interval(params, rule->var),
				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
}

/**
 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the power-of-2 constraint
 */
int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_pow2, NULL,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);

/* reset one hw_params field (mask or interval) to "anything allowed"
 * and mark it changed/to-be-refined */
static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_any(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	if (hw_is_interval(var)) {
		snd_interval_any(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	snd_BUG();
}

void _snd_pcm_hw_params_any(struct snd_pcm_hw_params
*params) { unsigned int k; memset(params, 0, sizeof(*params)); for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) _snd_pcm_hw_param_any(params, k); for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) _snd_pcm_hw_param_any(params, k); params->info = ~0U; } EXPORT_SYMBOL(_snd_pcm_hw_params_any); /** * snd_pcm_hw_param_value - return @params field @var value * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or %NULL * * Return the value for field @var if it's fixed in configuration space * defined by @params. Return -%EINVAL otherwise. */ int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { if (hw_is_mask(var)) { const struct snd_mask *mask = hw_param_mask_c(params, var); if (!snd_mask_single(mask)) return -EINVAL; if (dir) *dir = 0; return snd_mask_value(mask); } if (hw_is_interval(var)) { const struct snd_interval *i = hw_param_interval_c(params, var); if (!snd_interval_single(i)) return -EINVAL; if (dir) *dir = i->openmin; return snd_interval_value(i); } return -EINVAL; } EXPORT_SYMBOL(snd_pcm_hw_param_value); void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { if (hw_is_mask(var)) { snd_mask_none(hw_param_mask(params, var)); params->cmask |= 1 << var; params->rmask |= 1 << var; } else if (hw_is_interval(var)) { snd_interval_none(hw_param_interval(params, var)); params->cmask |= 1 << var; params->rmask |= 1 << var; } else { snd_BUG(); } } EXPORT_SYMBOL(_snd_pcm_hw_param_setempty); static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { int changed; if (hw_is_mask(var)) changed = snd_mask_refine_first(hw_param_mask(params, var)); else if (hw_is_interval(var)) changed = snd_interval_refine_first(hw_param_interval(params, var)); else return -EINVAL; if (changed) { params->cmask |= 1 << var; params->rmask |= 1 << var; } 
return changed; } /** * snd_pcm_hw_param_first - refine config space and return minimum value * @pcm: PCM instance * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or %NULL * * Inside configuration space defined by @params remove from @var all * values > minimum. Reduce configuration space accordingly. * Return the minimum. */ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { int changed = _snd_pcm_hw_param_first(params, var); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (snd_BUG_ON(err < 0)) return err; } return snd_pcm_hw_param_value(params, var, dir); } EXPORT_SYMBOL(snd_pcm_hw_param_first); static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { int changed; if (hw_is_mask(var)) changed = snd_mask_refine_last(hw_param_mask(params, var)); else if (hw_is_interval(var)) changed = snd_interval_refine_last(hw_param_interval(params, var)); else return -EINVAL; if (changed) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } /** * snd_pcm_hw_param_last - refine config space and return maximum value * @pcm: PCM instance * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or %NULL * * Inside configuration space defined by @params remove from @var all * values < maximum. Reduce configuration space accordingly. * Return the maximum. 
*/ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { int changed = _snd_pcm_hw_param_last(params, var); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (snd_BUG_ON(err < 0)) return err; } return snd_pcm_hw_param_value(params, var, dir); } EXPORT_SYMBOL(snd_pcm_hw_param_last); /** * snd_pcm_hw_param_choose - choose a configuration defined by @params * @pcm: PCM instance * @params: the hw_params instance * * Choose one configuration from configuration space defined by @params. * The configuration chosen is that obtained fixing in this order: * first access, first format, first subformat, min channels, * min rate, min period time, max buffer size, min tick time */ int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params) { static int vars[] = { SNDRV_PCM_HW_PARAM_ACCESS, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_SUBFORMAT, SNDRV_PCM_HW_PARAM_CHANNELS, SNDRV_PCM_HW_PARAM_RATE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_TICK_TIME, -1 }; int err, *v; for (v = vars; *v != -1; v++) { if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE) err = snd_pcm_hw_param_first(pcm, params, *v, NULL); else err = snd_pcm_hw_param_last(pcm, params, *v, NULL); if (snd_BUG_ON(err < 0)) return err; } return 0; } static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream, void *arg) { struct snd_pcm_runtime *runtime = substream->runtime; unsigned long flags; snd_pcm_stream_lock_irqsave(substream, flags); if (snd_pcm_running(substream) && snd_pcm_update_hw_ptr(substream) >= 0) runtime->status->hw_ptr %= runtime->buffer_size; else runtime->status->hw_ptr = 0; snd_pcm_stream_unlock_irqrestore(substream, flags); return 0; } static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream, void *arg) { struct snd_pcm_channel_info *info = arg; struct snd_pcm_runtime *runtime = 
substream->runtime; int width; if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) { info->offset = -1; return 0; } width = snd_pcm_format_physical_width(runtime->format); if (width < 0) return width; info->offset = 0; switch (runtime->access) { case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED: case SNDRV_PCM_ACCESS_RW_INTERLEAVED: info->first = info->channel * width; info->step = runtime->channels * width; break; case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED: case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED: { size_t size = runtime->dma_bytes / runtime->channels; info->first = info->channel * size * 8; info->step = width; break; } default: snd_BUG(); break; } return 0; } static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream, void *arg) { struct snd_pcm_hw_params *params = arg; snd_pcm_format_t format; int channels, width; params->fifo_size = substream->runtime->hw.fifo_size; if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) { format = params_format(params); channels = params_channels(params); width = snd_pcm_format_physical_width(format); params->fifo_size /= width * channels; } return 0; } /** * snd_pcm_lib_ioctl - a generic PCM ioctl callback * @substream: the pcm substream instance * @cmd: ioctl command * @arg: ioctl argument * * Processes the generic ioctl commands for PCM. * Can be passed as the ioctl callback for PCM ops. * * Returns zero if successful, or a negative error code on failure. 
*/ int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg) { switch (cmd) { case SNDRV_PCM_IOCTL1_INFO: return 0; case SNDRV_PCM_IOCTL1_RESET: return snd_pcm_lib_ioctl_reset(substream, arg); case SNDRV_PCM_IOCTL1_CHANNEL_INFO: return snd_pcm_lib_ioctl_channel_info(substream, arg); case SNDRV_PCM_IOCTL1_FIFO_SIZE: return snd_pcm_lib_ioctl_fifo_size(substream, arg); } return -ENXIO; } EXPORT_SYMBOL(snd_pcm_lib_ioctl); /** * snd_pcm_period_elapsed - update the pcm status for the next period * @substream: the pcm substream instance * * This function is called from the interrupt handler when the * PCM has processed the period size. It will update the current * pointer, wake up sleepers, etc. * * Even if more than one periods have elapsed since the last call, you * have to call this only once. */ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; unsigned long flags; if (PCM_RUNTIME_CHECK(substream)) return; runtime = substream->runtime; if (runtime->transfer_ack_begin) runtime->transfer_ack_begin(substream); snd_pcm_stream_lock_irqsave(substream, flags); if (!snd_pcm_running(substream) || snd_pcm_update_hw_ptr0(substream, 1) < 0) goto _end; if (substream->timer_running) snd_timer_interrupt(substream->timer, 1); _end: snd_pcm_stream_unlock_irqrestore(substream, flags); if (runtime->transfer_ack_end) runtime->transfer_ack_end(substream); kill_fasync(&runtime->fasync, SIGIO, POLL_IN); } EXPORT_SYMBOL(snd_pcm_period_elapsed); /* * Wait until avail_min data becomes available * Returns a negative error code if any error occurs during operation. * The available space is stored on availp. When err = 0 and avail = 0 * on the capture stream, it indicates the stream is in DRAINING state. 
*/ static int wait_for_avail(struct snd_pcm_substream *substream, snd_pcm_uframes_t *availp) { struct snd_pcm_runtime *runtime = substream->runtime; int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; wait_queue_t wait; int err = 0; snd_pcm_uframes_t avail = 0; long wait_time, tout; init_waitqueue_entry(&wait, current); set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&runtime->tsleep, &wait); if (runtime->no_period_wakeup) wait_time = MAX_SCHEDULE_TIMEOUT; else { wait_time = 10; if (runtime->rate) { long t = runtime->period_size * 2 / runtime->rate; wait_time = max(t, wait_time); } wait_time = msecs_to_jiffies(wait_time * 1000); } for (;;) { if (signal_pending(current)) { err = -ERESTARTSYS; break; } /* * We need to check if space became available already * (and thus the wakeup happened already) first to close * the race of space already having become available. * This check must happen after been added to the waitqueue * and having current state be INTERRUPTIBLE. */ if (is_playback) avail = snd_pcm_playback_avail(runtime); else avail = snd_pcm_capture_avail(runtime); if (avail >= runtime->twake) break; snd_pcm_stream_unlock_irq(substream); tout = schedule_timeout(wait_time); snd_pcm_stream_lock_irq(substream); set_current_state(TASK_INTERRUPTIBLE); switch (runtime->status->state) { case SNDRV_PCM_STATE_SUSPENDED: err = -ESTRPIPE; goto _endloop; case SNDRV_PCM_STATE_XRUN: err = -EPIPE; goto _endloop; case SNDRV_PCM_STATE_DRAINING: if (is_playback) err = -EPIPE; else avail = 0; /* indicate draining */ goto _endloop; case SNDRV_PCM_STATE_OPEN: case SNDRV_PCM_STATE_SETUP: case SNDRV_PCM_STATE_DISCONNECTED: err = -EBADFD; goto _endloop; } if (!tout) { snd_printd("%s write error (DMA or IRQ trouble?)\n", is_playback ? 
"playback" : "capture"); err = -EIO; break; } } _endloop: set_current_state(TASK_RUNNING); remove_wait_queue(&runtime->tsleep, &wait); *availp = avail; return err; } static int snd_pcm_lib_write_transfer(struct snd_pcm_substream *substream, unsigned int hwoff, unsigned long data, unsigned int off, snd_pcm_uframes_t frames) { struct snd_pcm_runtime *runtime = substream->runtime; int err; char __user *buf = (char __user *) data + frames_to_bytes(runtime, off); if (substream->ops->copy) { if ((err = substream->ops->copy(substream, -1, hwoff, buf, frames)) < 0) return err; } else { char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff); if (copy_from_user(hwbuf, buf, frames_to_bytes(runtime, frames))) return -EFAULT; } return 0; } typedef int (*transfer_f)(struct snd_pcm_substream *substream, unsigned int hwoff, unsigned long data, unsigned int off, snd_pcm_uframes_t size); static snd_pcm_sframes_t snd_pcm_lib_write1(struct snd_pcm_substream *substream, unsigned long data, snd_pcm_uframes_t size, int nonblock, transfer_f transfer) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t xfer = 0; snd_pcm_uframes_t offset = 0; int err = 0; if (size == 0) return 0; snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PAUSED: break; case SNDRV_PCM_STATE_XRUN: err = -EPIPE; goto _end_unlock; case SNDRV_PCM_STATE_SUSPENDED: err = -ESTRPIPE; goto _end_unlock; default: err = -EBADFD; goto _end_unlock; } runtime->twake = runtime->control->avail_min ? : 1; while (size > 0) { snd_pcm_uframes_t frames, appl_ptr, appl_ofs; snd_pcm_uframes_t avail; snd_pcm_uframes_t cont; if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) snd_pcm_update_hw_ptr(substream); avail = snd_pcm_playback_avail(runtime); if (!avail) { if (nonblock) { err = -EAGAIN; goto _end_unlock; } runtime->twake = min_t(snd_pcm_uframes_t, size, runtime->control->avail_min ? 
: 1); err = wait_for_avail(substream, &avail); if (err < 0) goto _end_unlock; } frames = size > avail ? avail : size; cont = runtime->buffer_size - runtime->control->appl_ptr % runtime->buffer_size; if (frames > cont) frames = cont; if (snd_BUG_ON(!frames)) { runtime->twake = 0; snd_pcm_stream_unlock_irq(substream); return -EINVAL; } appl_ptr = runtime->control->appl_ptr; appl_ofs = appl_ptr % runtime->buffer_size; snd_pcm_stream_unlock_irq(substream); err = transfer(substream, appl_ofs, data, offset, frames); snd_pcm_stream_lock_irq(substream); if (err < 0) goto _end_unlock; switch (runtime->status->state) { case SNDRV_PCM_STATE_XRUN: err = -EPIPE; goto _end_unlock; case SNDRV_PCM_STATE_SUSPENDED: err = -ESTRPIPE; goto _end_unlock; default: break; } appl_ptr += frames; if (appl_ptr >= runtime->boundary) appl_ptr -= runtime->boundary; runtime->control->appl_ptr = appl_ptr; if (substream->ops->ack) substream->ops->ack(substream); offset += frames; size -= frames; xfer += frames; if (runtime->status->state == SNDRV_PCM_STATE_PREPARED && snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) { err = snd_pcm_start(substream); if (err < 0) goto _end_unlock; } } _end_unlock: runtime->twake = 0; if (xfer > 0 && err >= 0) snd_pcm_update_state(substream, runtime); snd_pcm_stream_unlock_irq(substream); return xfer > 0 ? 
(snd_pcm_sframes_t)xfer : err; } /* sanity-check for read/write methods */ static int pcm_sanity_check(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area)) return -EINVAL; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; return 0; } snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream, const void __user *buf, snd_pcm_uframes_t size) { struct snd_pcm_runtime *runtime; int nonblock; int err; err = pcm_sanity_check(substream); if (err < 0) return err; runtime = substream->runtime; nonblock = !!(substream->f_flags & O_NONBLOCK); if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED && runtime->channels > 1) return -EINVAL; return snd_pcm_lib_write1(substream, (unsigned long)buf, size, nonblock, snd_pcm_lib_write_transfer); } EXPORT_SYMBOL(snd_pcm_lib_write); static int snd_pcm_lib_writev_transfer(struct snd_pcm_substream *substream, unsigned int hwoff, unsigned long data, unsigned int off, snd_pcm_uframes_t frames) { struct snd_pcm_runtime *runtime = substream->runtime; int err; void __user **bufs = (void __user **)data; int channels = runtime->channels; int c; if (substream->ops->copy) { if (snd_BUG_ON(!substream->ops->silence)) return -EINVAL; for (c = 0; c < channels; ++c, ++bufs) { if (*bufs == NULL) { if ((err = substream->ops->silence(substream, c, hwoff, frames)) < 0) return err; } else { char __user *buf = *bufs + samples_to_bytes(runtime, off); if ((err = substream->ops->copy(substream, c, hwoff, buf, frames)) < 0) return err; } } } else { /* default transfer behaviour */ size_t dma_csize = runtime->dma_bytes / channels; for (c = 0; c < channels; ++c, ++bufs) { char *hwbuf = runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, hwoff); if (*bufs == NULL) { snd_pcm_format_set_silence(runtime->format, hwbuf, frames); } else { char __user *buf = *bufs + 
samples_to_bytes(runtime, off); if (copy_from_user(hwbuf, buf, samples_to_bytes(runtime, frames))) return -EFAULT; } } } return 0; } snd_pcm_sframes_t snd_pcm_lib_writev(struct snd_pcm_substream *substream, void __user **bufs, snd_pcm_uframes_t frames) { struct snd_pcm_runtime *runtime; int nonblock; int err; err = pcm_sanity_check(substream); if (err < 0) return err; runtime = substream->runtime; nonblock = !!(substream->f_flags & O_NONBLOCK); if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) return -EINVAL; return snd_pcm_lib_write1(substream, (unsigned long)bufs, frames, nonblock, snd_pcm_lib_writev_transfer); } EXPORT_SYMBOL(snd_pcm_lib_writev); static int snd_pcm_lib_read_transfer(struct snd_pcm_substream *substream, unsigned int hwoff, unsigned long data, unsigned int off, snd_pcm_uframes_t frames) { struct snd_pcm_runtime *runtime = substream->runtime; int err; char __user *buf = (char __user *) data + frames_to_bytes(runtime, off); if (substream->ops->copy) { if ((err = substream->ops->copy(substream, -1, hwoff, buf, frames)) < 0) return err; } else { char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff); if (copy_to_user(buf, hwbuf, frames_to_bytes(runtime, frames))) return -EFAULT; } return 0; } static snd_pcm_sframes_t snd_pcm_lib_read1(struct snd_pcm_substream *substream, unsigned long data, snd_pcm_uframes_t size, int nonblock, transfer_f transfer) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t xfer = 0; snd_pcm_uframes_t offset = 0; int err = 0; if (size == 0) return 0; snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_PREPARED: if (size >= runtime->start_threshold) { err = snd_pcm_start(substream); if (err < 0) goto _end_unlock; } break; case SNDRV_PCM_STATE_DRAINING: case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PAUSED: break; case SNDRV_PCM_STATE_XRUN: err = -EPIPE; goto _end_unlock; case SNDRV_PCM_STATE_SUSPENDED: err = -ESTRPIPE; goto _end_unlock; 
default: err = -EBADFD; goto _end_unlock; } runtime->twake = runtime->control->avail_min ? : 1; while (size > 0) { snd_pcm_uframes_t frames, appl_ptr, appl_ofs; snd_pcm_uframes_t avail; snd_pcm_uframes_t cont; if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) snd_pcm_update_hw_ptr(substream); avail = snd_pcm_capture_avail(runtime); if (!avail) { if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) { snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); goto _end_unlock; } if (nonblock) { err = -EAGAIN; goto _end_unlock; } runtime->twake = min_t(snd_pcm_uframes_t, size, runtime->control->avail_min ? : 1); err = wait_for_avail(substream, &avail); if (err < 0) goto _end_unlock; if (!avail) continue; /* draining */ } frames = size > avail ? avail : size; cont = runtime->buffer_size - runtime->control->appl_ptr % runtime->buffer_size; if (frames > cont) frames = cont; if (snd_BUG_ON(!frames)) { runtime->twake = 0; snd_pcm_stream_unlock_irq(substream); return -EINVAL; } appl_ptr = runtime->control->appl_ptr; appl_ofs = appl_ptr % runtime->buffer_size; snd_pcm_stream_unlock_irq(substream); err = transfer(substream, appl_ofs, data, offset, frames); snd_pcm_stream_lock_irq(substream); if (err < 0) goto _end_unlock; switch (runtime->status->state) { case SNDRV_PCM_STATE_XRUN: err = -EPIPE; goto _end_unlock; case SNDRV_PCM_STATE_SUSPENDED: err = -ESTRPIPE; goto _end_unlock; default: break; } appl_ptr += frames; if (appl_ptr >= runtime->boundary) appl_ptr -= runtime->boundary; runtime->control->appl_ptr = appl_ptr; if (substream->ops->ack) substream->ops->ack(substream); offset += frames; size -= frames; xfer += frames; } _end_unlock: runtime->twake = 0; if (xfer > 0 && err >= 0) snd_pcm_update_state(substream, runtime); snd_pcm_stream_unlock_irq(substream); return xfer > 0 ? 
(snd_pcm_sframes_t)xfer : err; } snd_pcm_sframes_t snd_pcm_lib_read(struct snd_pcm_substream *substream, void __user *buf, snd_pcm_uframes_t size) { struct snd_pcm_runtime *runtime; int nonblock; int err; err = pcm_sanity_check(substream); if (err < 0) return err; runtime = substream->runtime; nonblock = !!(substream->f_flags & O_NONBLOCK); if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED) return -EINVAL; return snd_pcm_lib_read1(substream, (unsigned long)buf, size, nonblock, snd_pcm_lib_read_transfer); } EXPORT_SYMBOL(snd_pcm_lib_read); static int snd_pcm_lib_readv_transfer(struct snd_pcm_substream *substream, unsigned int hwoff, unsigned long data, unsigned int off, snd_pcm_uframes_t frames) { struct snd_pcm_runtime *runtime = substream->runtime; int err; void __user **bufs = (void __user **)data; int channels = runtime->channels; int c; if (substream->ops->copy) { for (c = 0; c < channels; ++c, ++bufs) { char __user *buf; if (*bufs == NULL) continue; buf = *bufs + samples_to_bytes(runtime, off); if ((err = substream->ops->copy(substream, c, hwoff, buf, frames)) < 0) return err; } } else { snd_pcm_uframes_t dma_csize = runtime->dma_bytes / channels; for (c = 0; c < channels; ++c, ++bufs) { char *hwbuf; char __user *buf; if (*bufs == NULL) continue; hwbuf = runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, hwoff); buf = *bufs + samples_to_bytes(runtime, off); if (copy_to_user(buf, hwbuf, samples_to_bytes(runtime, frames))) return -EFAULT; } } return 0; } snd_pcm_sframes_t snd_pcm_lib_readv(struct snd_pcm_substream *substream, void __user **bufs, snd_pcm_uframes_t frames) { struct snd_pcm_runtime *runtime; int nonblock; int err; err = pcm_sanity_check(substream); if (err < 0) return err; runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; nonblock = !!(substream->f_flags & O_NONBLOCK); if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) return -EINVAL; return snd_pcm_lib_read1(substream, 
(unsigned long)bufs, frames, nonblock, snd_pcm_lib_readv_transfer); } EXPORT_SYMBOL(snd_pcm_lib_readv);
gpl-2.0
joeisgood99/Z5C-Copyleft-Kernel
net/9p/trans_fd.c
2287
24461
/* * linux/fs/9p/trans_fd.c * * Fd transport layer. Includes deprecated socket layer. * * Copyright (C) 2006 by Russ Cox <rsc@swtch.com> * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net> * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to: * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02111-1301 USA * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/in.h> #include <linux/module.h> #include <linux/net.h> #include <linux/ipv6.h> #include <linux/kthread.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/un.h> #include <linux/uaccess.h> #include <linux/inet.h> #include <linux/idr.h> #include <linux/file.h> #include <linux/parser.h> #include <linux/slab.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include <net/9p/transport.h> #include <linux/syscalls.h> /* killme */ #define P9_PORT 564 #define MAX_SOCK_BUF (64*1024) #define MAXPOLLWADDR 2 /** * struct p9_fd_opts - per-transport options * @rfd: file descriptor for reading (trans=fd) * @wfd: file descriptor for writing (trans=fd) * @port: port to connect to (trans=tcp) * */ struct p9_fd_opts { int rfd; int wfd; u16 port; }; /** * struct p9_trans_fd - transport state * @rd: reference to file to read from * @wr: reference of file to write to * @conn: connection state reference * */ struct p9_trans_fd { struct file *rd; struct 
file *wr; struct p9_conn *conn; }; /* * Option Parsing (code inspired by NFS code) * - a little lazy - parse all fd-transport options */ enum { /* Options that take integer arguments */ Opt_port, Opt_rfdno, Opt_wfdno, Opt_err, }; static const match_table_t tokens = { {Opt_port, "port=%u"}, {Opt_rfdno, "rfdno=%u"}, {Opt_wfdno, "wfdno=%u"}, {Opt_err, NULL}, }; enum { Rworksched = 1, /* read work scheduled or running */ Rpending = 2, /* can read */ Wworksched = 4, /* write work scheduled or running */ Wpending = 8, /* can write */ }; struct p9_poll_wait { struct p9_conn *conn; wait_queue_t wait; wait_queue_head_t *wait_addr; }; /** * struct p9_conn - fd mux connection state information * @mux_list: list link for mux to manage multiple connections (?) * @client: reference to client instance for this connection * @err: error state * @req_list: accounting for requests which have been sent * @unsent_req_list: accounting for requests that haven't been sent * @req: current request being processed (if any) * @tmp_buf: temporary buffer to read in header * @rsize: amount to read for current frame * @rpos: read position in current frame * @rbuf: current read buffer * @wpos: write position for current frame * @wsize: amount of data to write for current frame * @wbuf: current write buffer * @poll_pending_link: pending links to be polled per conn * @poll_wait: array of wait_q's for various worker threads * @pt: poll state * @rq: current read work * @wq: current write work * @wsched: ???? 
* */ struct p9_conn { struct list_head mux_list; struct p9_client *client; int err; struct list_head req_list; struct list_head unsent_req_list; struct p9_req_t *req; char tmp_buf[7]; int rsize; int rpos; char *rbuf; int wpos; int wsize; char *wbuf; struct list_head poll_pending_link; struct p9_poll_wait poll_wait[MAXPOLLWADDR]; poll_table pt; struct work_struct rq; struct work_struct wq; unsigned long wsched; }; static void p9_poll_workfn(struct work_struct *work); static DEFINE_SPINLOCK(p9_poll_lock); static LIST_HEAD(p9_poll_pending_list); static DECLARE_WORK(p9_poll_work, p9_poll_workfn); static void p9_mux_poll_stop(struct p9_conn *m) { unsigned long flags; int i; for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { struct p9_poll_wait *pwait = &m->poll_wait[i]; if (pwait->wait_addr) { remove_wait_queue(pwait->wait_addr, &pwait->wait); pwait->wait_addr = NULL; } } spin_lock_irqsave(&p9_poll_lock, flags); list_del_init(&m->poll_pending_link); spin_unlock_irqrestore(&p9_poll_lock, flags); } /** * p9_conn_cancel - cancel all pending requests with error * @m: mux data * @err: error code * */ static void p9_conn_cancel(struct p9_conn *m, int err) { struct p9_req_t *req, *rtmp; unsigned long flags; LIST_HEAD(cancel_list); p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); spin_lock_irqsave(&m->client->lock, flags); if (m->err) { spin_unlock_irqrestore(&m->client->lock, flags); return; } m->err = err; list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { req->status = REQ_STATUS_ERROR; if (!req->t_err) req->t_err = err; list_move(&req->req_list, &cancel_list); } list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { req->status = REQ_STATUS_ERROR; if (!req->t_err) req->t_err = err; list_move(&req->req_list, &cancel_list); } spin_unlock_irqrestore(&m->client->lock, flags); list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); list_del(&req->req_list); p9_client_cb(m->client, req); } } 
/*
 * p9_fd_poll - poll both underlying files of the transport and merge the
 * masks: read readiness comes from ts->rd, write readiness from ts->wr.
 */
static int p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
	int ret, n;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status == Connected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!ts->rd->f_op || !ts->rd->f_op->poll)
		return -EIO;

	if (!ts->wr->f_op || !ts->wr->f_op->poll)
		return -EIO;

	ret = ts->rd->f_op->poll(ts->rd, pt);
	if (ret < 0)
		return ret;

	if (ts->rd != ts->wr) {
		n = ts->wr->f_op->poll(ts->wr, pt);
		if (n < 0)
			return n;
		/* take POLLIN-ish bits from rd, POLLOUT-ish bits from wr */
		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
	}

	return ret;
}

/**
 * p9_fd_read - read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 * Any failure other than -ERESTARTSYS/-EAGAIN marks the client Disconnected.
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
	int ret;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->rd->f_flags & O_NONBLOCK))
		p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");

	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 * State machine: first read the 7-byte header into tmp_buf, decode size/tag,
 * look up the matching request, then read the payload directly into the
 * request's rc buffer.  Reschedules itself while more data is available.
 */

static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;

	m = container_of(work, struct p9_conn, rq);

	if (m->err < 0)
		return;

	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rbuf) {
		m->rbuf = m->tmp_buf;
		m->rpos = 0;
		m->rsize = 7; /* start by reading header */
	}

	clear_bit(Rpending, &m->wsched);
	p9_debug(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n",
		 m, m->rpos, m->rsize, m->rsize-m->rpos);
	err = p9_fd_read(m->client, m->rbuf + m->rpos,
			 m->rsize - m->rpos);
	p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		goto end_clear;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;

	if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
		u16 tag;
		p9_debug(P9_DEBUG_TRANS, "got new header\n");

		n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
		if (n >= m->client->msize) {
			p9_debug(P9_DEBUG_ERROR,
				 "requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
		p9_debug(P9_DEBUG_TRANS,
			 "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);

		m->req = p9_tag_lookup(m->client, tag);
		if (!m->req || (m->req->status != REQ_STATUS_SENT &&
					m->req->status != REQ_STATUS_FLSH)) {
			p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
				 tag);
			err = -EIO;
			goto error;
		}

		if (m->req->rc == NULL) {
			/* allocate response buffer lazily, header included */
			m->req->rc = kmalloc(sizeof(struct p9_fcall) +
						m->client->msize, GFP_NOFS);
			if (!m->req->rc) {
				m->req = NULL;
				err = -ENOMEM;
				goto error;
			}
		}
		/* switch from tmp_buf to the request's own buffer */
		m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
		memcpy(m->rbuf, m->tmp_buf, m->rsize);
		m->rsize = n;
	}

	/* not an else because some packets (like clunk) have no payload */
	if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
		p9_debug(P9_DEBUG_TRANS, "got new packet\n");
		spin_lock(&m->client->lock);
		if (m->req->status != REQ_STATUS_ERROR)
			m->req->status = REQ_STATUS_RCVD;
		list_del(&m->req->req_list);
		spin_unlock(&m->client->lock);
		p9_client_cb(m->client, m->req);
		/* reset read state for the next frame */
		m->rbuf = NULL;
		m->rpos = 0;
		m->rsize = 0;
		m->req = NULL;
	}

end_clear:
	clear_bit(Rworksched, &m->wsched);

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = p9_fd_poll(m->client, NULL);

		if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
		}
	}

	return;
error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a socket
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
	int ret;
	mm_segment_t oldfs;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if
(!(ts->wr->f_flags & O_NONBLOCK))
		p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");

	oldfs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
	set_fs(oldfs);

	/* transient errors keep the connection; anything else disconnects */
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 * Picks the next unsent request (if the previous frame is finished), moves
 * it to req_list under client->lock, and pushes as many bytes as the fd
 * will take.  Reschedules itself while there is still data to send.
 */

static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req_t *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		/* nothing in flight - dequeue the next unsent request */
		spin_lock(&m->client->lock);
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			spin_unlock(&m->client->lock);
			return;
		}

		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
			       req_list);
		req->status = REQ_STATUS_SENT;
		p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
		list_move_tail(&req->req_list, &m->req_list);

		m->wbuf = req->tc->sdata;
		m->wsize = req->tc->size;
		m->wpos = 0;
		spin_unlock(&m->client->lock);
	}

	p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
		 m, m->wpos, m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
	p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN)
		goto end_clear;

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;	/* frame fully written */

end_clear:
	clear_bit(Wworksched, &m->wsched);

	if (m->wsize || !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = p9_fd_poll(m->client, NULL);

		if ((n & POLLOUT) &&
		   !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);
		}
	}

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

/*
 * p9_pollwake - wait-queue callback: queue the conn on the global pending
 * list and kick the poll work item.  Runs in wake-up context.
 */
static int
p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
{
	struct p9_poll_wait *pwait =
		container_of(wait, struct p9_poll_wait, wait);
	struct p9_conn *m = pwait->conn;
	unsigned long flags;

	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	schedule_work(&p9_poll_work);
	return 1;
}

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by files poll operation to add v9fs-poll task to files wait queue
 */
static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	struct p9_conn *m = container_of(p, struct p9_conn, pt);
	struct p9_poll_wait *pwait = NULL;
	int i;

	/* find a free slot in the fixed-size poll_wait array */
	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (m->poll_wait[i].wait_addr == NULL) {
			pwait = &m->poll_wait[i];
			break;
		}
	}

	if (!pwait) {
		p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	pwait->conn = m;
	pwait->wait_addr = wait_address;
	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
	add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * @client: client instance
 *
 * Note: Creates the polling task if this is the first session.
*/

static struct p9_conn *p9_conn_create(struct p9_client *client)
{
	int n;
	struct p9_conn *m;

	p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&m->mux_list);
	m->client = client;

	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	INIT_LIST_HEAD(&m->poll_pending_link);
	init_poll_funcptr(&m->pt, p9_pollwait);

	/* registers p9_pollwait on the fd's wait queues as a side effect */
	n = p9_fd_poll(client, &m->pt);
	if (n & POLLIN) {
		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	return m;
}

/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = p9_fd_poll(m->client, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
		/* only schedule write work if there is anything to send */
		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
		    !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);
		}
	}
}

/**
 * p9_fd_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully.
 *
 * @client: client instance
 * @req: request to be sent
 *
 */

static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
	int n;
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = ts->conn;

	p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
		 m, current, req->tc, req->tc->id);
	if (m->err < 0)
		return m->err;

	spin_lock(&client->lock);
	req->status = REQ_STATUS_UNSENT;
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&client->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = p9_fd_poll(m->client, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		schedule_work(&m->wq);

	return 0;
}

/*
 * p9_fd_cancel - try to cancel a request.  Returns 0 only if the request
 * was still unsent (and is removed); returns 1 if it is already in flight.
 */
static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
{
	int ret = 1;

	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

	spin_lock(&client->lock);

	if (req->status == REQ_STATUS_UNSENT) {
		list_del(&req->req_list);
		req->status = REQ_STATUS_FLSHD;
		ret = 0;
	} else if (req->status == REQ_STATUS_SENT)
		req->status = REQ_STATUS_FLSH;

	spin_unlock(&client->lock);

	return ret;
}

/**
 * parse_opts - parse mount options into p9_fd_opts structure
 * @params: options string passed from mount
 * @opts: fd transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */

static int parse_opts(char *params, struct p9_fd_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	/* defaults; ~0 means "not supplied" for the fd numbers */
	opts->port = P9_PORT;
	opts->rfd = ~0;
	opts->wfd = ~0;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if (token != Opt_err) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
opts->port = option;
			break;
		case Opt_rfdno:
			opts->rfd = option;
			break;
		case Opt_wfdno:
			opts->wfd = option;
			break;
		default:
			continue;
		}
	}

	kfree(tmp_options);
	return 0;
}

/*
 * p9_fd_open - take references on the caller-supplied read/write fds and
 * attach them to the client as its transport.
 */
static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
	struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
					   GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->rd = fget(rfd);
	ts->wr = fget(wfd);
	if (!ts->rd || !ts->wr) {
		if (ts->rd)
			fput(ts->rd);
		if (ts->wr)
			fput(ts->wr);
		kfree(ts);
		return -EIO;
	}

	client->trans = ts;
	client->status = Connected;

	return 0;
}

/*
 * p9_socket_open - wrap an already-connected socket in a file and set it up
 * as this client's transport.  Takes ownership of @csocket on all paths.
 */
static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
	struct p9_trans_fd *p;
	struct file *file;
	int ret;

	p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	csocket->sk->sk_allocation = GFP_NOIO;
	file = sock_alloc_file(csocket, 0, NULL);
	if (IS_ERR(file)) {
		pr_err("%s (%d): failed to map fd\n",
		       __func__, task_pid_nr(current));
		sock_release(csocket);
		kfree(p);
		return PTR_ERR(file);
	}

	get_file(file);	/* second reference: file is used as both rd and wr */
	p->wr = p->rd = file;
	client->trans = p;
	client->status = Connected;

	p->rd->f_flags |= O_NONBLOCK;

	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		ret = PTR_ERR(p->conn);
		p->conn = NULL;
		kfree(p);
		/* NOTE(review): two puts appear to balance the two file refs
		 * (sock_alloc_file + get_file above); also client->trans is
		 * left pointing at freed memory here - confirm callers never
		 * touch it after this error. */
		sockfd_put(csocket);
		sockfd_put(csocket);
		return ret;
	}
	return 0;
}

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 * @m: mux to destroy
 *
 */

static void p9_conn_destroy(struct p9_conn *m)
{
	p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
		 m, m->mux_list.prev, m->mux_list.next);

	p9_mux_poll_stop(m);
	cancel_work_sync(&m->rq);
	cancel_work_sync(&m->wq);

	p9_conn_cancel(m, -ECONNRESET);

	m->client = NULL;
	kfree(m);
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */

static void p9_fd_close(struct p9_client *client)
{
	struct p9_trans_fd *ts;

	if (!client)
		return;

	ts = client->trans;
	if (!ts)
		return;

	client->status = Disconnected;

	p9_conn_destroy(ts->conn);

	if (ts->rd)
		fput(ts->rd);
	if (ts->wr)
		fput(ts->wr);

	kfree(ts);
}

/*
 * stolen from NFS - maybe should be made a generic function?
 * NOTE(review): %d accepts negative octets and only the upper bound is
 * checked, so e.g. "-1.2.3.4" passes validation.
 */
static inline int valid_ipaddr4(const char *buf)
{
	int rc, count, in[4];

	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
	if (rc != 4)
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] > 255)
			return -EINVAL;
	}
	return 0;
}

/*
 * p9_fd_create_tcp - "tcp" transport: connect a TCP socket to addr:port and
 * hand it to p9_socket_open().
 */
static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_in sin_server;
	struct p9_fd_opts opts;

	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	if (valid_ipaddr4(addr) < 0)
		return -EINVAL;

	csocket = NULL;

	sin_server.sin_family = AF_INET;
	sin_server.sin_addr.s_addr = in_aton(addr);
	sin_server.sin_port = htons(opts.port);
	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
	if (err) {
		pr_err("%s (%d): problem creating socket\n",
		       __func__, task_pid_nr(current));
		return err;
	}

	err = csocket->ops->connect(csocket,
				    (struct sockaddr *)&sin_server,
				    sizeof(struct sockaddr_in), 0);
	if (err < 0) {
		pr_err("%s (%d): problem connecting socket to %s\n",
		       __func__, task_pid_nr(current), addr);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

/*
 * p9_fd_create_unix - "unix" transport: connect an AF_UNIX socket to the
 * given path and hand it to p9_socket_open().
 */
static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_un sun_server;

	csocket = NULL;

	if (strlen(addr) >= UNIX_PATH_MAX) {
		pr_err("%s (%d): address too long: %s\n",
		       __func__, task_pid_nr(current), addr);
		return -ENAMETOOLONG;
	}

	sun_server.sun_family = PF_UNIX;
	strcpy(sun_server.sun_path, addr);
	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
			    SOCK_STREAM, 0, &csocket, 1);
	if (err < 0) {
		pr_err("%s (%d): problem creating socket\n",
		       __func__, task_pid_nr(current));

		return err;
	}
	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
			sizeof(struct sockaddr_un) - 1, 0);
	if (err < 0) {
		pr_err("%s (%d): problem connecting socket: %s: %d\n",
		       __func__,
task_pid_nr(current), addr, err);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

/*
 * p9_fd_create - "fd" transport: use caller-supplied rfdno/wfdno mount
 * options as the read and write file descriptors.
 */
static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_fd_opts opts;
	struct p9_trans_fd *p;

	parse_opts(args, &opts);

	/* both fds are mandatory for proto=fd */
	if (opts.rfd == ~0 || opts.wfd == ~0) {
		pr_err("Insufficient options for proto=fd\n");
		return -ENOPROTOOPT;
	}

	err = p9_fd_open(client, opts.rfd, opts.wfd);
	if (err < 0)
		return err;

	p = (struct p9_trans_fd *) client->trans;
	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		err = PTR_ERR(p->conn);
		p->conn = NULL;
		fput(p->rd);
		fput(p->wr);
		return err;
	}

	return 0;
}

static struct p9_trans_module p9_tcp_trans = {
	.name = "tcp",
	.maxsize = MAX_SOCK_BUF,
	.def = 1,	/* default transport */
	.create = p9_fd_create_tcp,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_unix_trans = {
	.name = "unix",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_unix,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_fd_trans = {
	.name = "fd",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.owner = THIS_MODULE,
};

/**
 * p9_poll_workfn - poll worker
 * @work: work item
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */

static void p9_poll_workfn(struct work_struct *work)
{
	unsigned long flags;

	p9_debug(P9_DEBUG_TRANS, "start %p\n", current);

	spin_lock_irqsave(&p9_poll_lock, flags);
	while (!list_empty(&p9_poll_pending_list)) {
		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
							struct p9_conn,
							poll_pending_link);
		list_del_init(&conn->poll_pending_link);
		spin_unlock_irqrestore(&p9_poll_lock, flags);

		/* poll with the lock dropped; it may schedule rq/wq work */
		p9_poll_mux(conn);

		spin_lock_irqsave(&p9_poll_lock, flags);
	}
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	p9_debug(P9_DEBUG_TRANS, "finish\n");
}

/* register all three fd-based transports with the 9p core */
int p9_trans_fd_init(void)
{
	v9fs_register_trans(&p9_tcp_trans);
	v9fs_register_trans(&p9_unix_trans);
	v9fs_register_trans(&p9_fd_trans);

	return 0;
}

void p9_trans_fd_exit(void)
{
	/* make sure no poll work is still running before unregistering */
	flush_work(&p9_poll_work);
	v9fs_unregister_trans(&p9_tcp_trans);
	v9fs_unregister_trans(&p9_unix_trans);
	v9fs_unregister_trans(&p9_fd_trans);
}
gpl-2.0
MotoG3/android_kernel_motorola_msm8916
drivers/pci/hotplug/cpqphp_ctrl.c
2287
76931
/*
 * Compaq Hot Plug Controller Driver
 *
 * Copyright (C) 1995,2001 Compaq Computer Corporation
 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
 * Copyright (C) 2001 IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <greg@kroah.com>
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/kthread.h>
#include "cpqphp.h"

static u32 configure_new_device(struct controller* ctrl, struct pci_func *func,
			u8 behind_bridge, struct resource_lists *resources);
static int configure_new_function(struct controller* ctrl, struct pci_func *func,
			u8 behind_bridge, struct resource_lists *resources);
static void interrupt_event_handler(struct controller *ctrl);

static struct task_struct *cpqhp_event_thread;
static unsigned long pushbutton_pending;	/* = 0 */

/* delay is in jiffies to wait for */
static void long_delay(int delay)
{
	/*
	 * XXX(hch): if someone is bored please convert all callers
	 * to call msleep_interruptible directly.  They really want
	 * to specify timeouts in natural units and spend a lot of
	 * effort converting them to jiffies..
	 */
	msleep_interruptible(jiffies_to_msecs(delay));
}

/* FIXME: The following line needs to be somewhere else... */
#define WRONG_BUS_FREQUENCY 0x07

/*
 * handle_switch_change - translate slot-switch interrupt bits into events
 * queued on ctrl->event_queue.  Returns the number of events queued.
 */
static u8 handle_switch_change(u8 change, struct controller * ctrl)
{
	int hp_slot;
	u8 rc = 0;
	u16 temp_word;
	struct pci_func *func;
	struct event_info *taskInfo;

	if (!change)
		return 0;

	/* Switch Change */
	dbg("cpqsbd: Switch interrupt received.\n");

	for (hp_slot = 0; hp_slot < 6; hp_slot++) {
		if (change & (0x1L << hp_slot)) {
			/*
			 * this one changed.
			 */
			func = cpqhp_slot_find(ctrl->bus,
				(hp_slot + ctrl->slot_device_offset), 0);

			/* this is the structure that tells the worker thread
			 * what to do
			 */
			taskInfo = &(ctrl->event_queue[ctrl->next_event]);
			ctrl->next_event = (ctrl->next_event + 1) % 10;	/* ring of 10 slots */
			taskInfo->hp_slot = hp_slot;

			rc++;

			/* snapshot the two presence bits for this slot */
			temp_word = ctrl->ctrl_int_comp >> 16;
			func->presence_save = (temp_word >> hp_slot) & 0x01;
			func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;

			if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
				/*
				 * Switch opened
				 */
				func->switch_save = 0;
				taskInfo->event_type = INT_SWITCH_OPEN;
			} else {
				/*
				 * Switch closed
				 */
				func->switch_save = 0x10;
				taskInfo->event_type = INT_SWITCH_CLOSE;
			}
		}
	}

	return rc;
}

/**
 * cpqhp_find_slot - find the struct slot of given device
 * @ctrl: scan slots of this controller
 * @device: the device id to find
 */
static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device)
{
	struct slot *slot = ctrl->slot;

	while (slot && (slot->device != device))
		slot = slot->next;

	return slot;
}

/*
 * handle_presence_change - translate presence/button interrupt bits into
 * events queued on ctrl->event_queue.  Returns the number of events queued.
 */
static u8 handle_presence_change(u16 change, struct controller * ctrl)
{
	int hp_slot;
	u8 rc = 0;
	u8 temp_byte;
	u16 temp_word;
	struct pci_func *func;
	struct event_info *taskInfo;
	struct slot *p_slot;

	if (!change)
		return 0;

	/*
	 * Presence Change
	 */
	dbg("cpqsbd: Presence/Notify input change.\n");
	dbg(" Changed bits are 0x%4.4x\n", change );

	for (hp_slot = 0; hp_slot < 6; hp_slot++) {
		if (change & (0x0101 << hp_slot)) {
			/*
			 * this one changed.
*/
			func = cpqhp_slot_find(ctrl->bus,
				(hp_slot + ctrl->slot_device_offset), 0);

			taskInfo = &(ctrl->event_queue[ctrl->next_event]);
			ctrl->next_event = (ctrl->next_event + 1) % 10;
			taskInfo->hp_slot = hp_slot;

			rc++;

			p_slot = cpqhp_find_slot(ctrl, hp_slot +
				(readb(ctrl->hpc_reg + SLOT_MASK) >> 4));
			if (!p_slot)
				return 0;

			/* If the switch closed, must be a button
			 * If not in button mode, nevermind
			 */
			if (func->switch_save && (ctrl->push_button == 1)) {
				temp_word = ctrl->ctrl_int_comp >> 16;
				temp_byte = (temp_word >> hp_slot) & 0x01;
				temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02;

				if (temp_byte != func->presence_save) {
					/*
					 * button Pressed (doesn't do anything)
					 */
					dbg("hp_slot %d button pressed\n", hp_slot);
					taskInfo->event_type = INT_BUTTON_PRESS;
				} else {
					/*
					 * button Released - TAKE ACTION!!!!
					 */
					dbg("hp_slot %d button released\n", hp_slot);
					taskInfo->event_type = INT_BUTTON_RELEASE;

					/* Cancel if we are still blinking */
					if ((p_slot->state == BLINKINGON_STATE)
					    || (p_slot->state == BLINKINGOFF_STATE)) {
						taskInfo->event_type = INT_BUTTON_CANCEL;
						dbg("hp_slot %d button cancel\n", hp_slot);
					} else if ((p_slot->state == POWERON_STATE)
						   || (p_slot->state == POWEROFF_STATE)) {
						/* info(msg_button_ignore, p_slot->number); */
						taskInfo->event_type = INT_BUTTON_IGNORE;
						dbg("hp_slot %d button ignore\n", hp_slot);
					}
				}
			} else {
				/* Switch is open, assume a presence change
				 * Save the presence state
				 */
				temp_word = ctrl->ctrl_int_comp >> 16;
				func->presence_save = (temp_word >> hp_slot) & 0x01;
				func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;

				if ((!(ctrl->ctrl_int_comp & (0x010000 << hp_slot))) ||
				    (!(ctrl->ctrl_int_comp & (0x01000000 << hp_slot)))) {
					/* Present */
					taskInfo->event_type = INT_PRESENCE_ON;
				} else {
					/* Not Present */
					taskInfo->event_type = INT_PRESENCE_OFF;
				}
			}
		}
	}

	return rc;
}

/*
 * handle_power_fault - translate power-fault interrupt bits into events
 * queued on ctrl->event_queue.  Returns the number of events queued.
 */
static u8 handle_power_fault(u8 change, struct controller * ctrl)
{
	int hp_slot;
	u8 rc = 0;
	struct pci_func *func;
	struct event_info *taskInfo;

	if (!change)
		return 0;

	/*
	 * power fault
	 */

	info("power fault interrupt\n");

	for (hp_slot = 0; hp_slot < 6; hp_slot++) {
		if (change & (0x01 << hp_slot)) {
			/*
			 * this one changed.
			 */
			func = cpqhp_slot_find(ctrl->bus,
				(hp_slot + ctrl->slot_device_offset), 0);

			taskInfo = &(ctrl->event_queue[ctrl->next_event]);
			ctrl->next_event = (ctrl->next_event + 1) % 10;
			taskInfo->hp_slot = hp_slot;

			rc++;

			if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) {
				/*
				 * power fault Cleared
				 */
				func->status = 0x00;
				taskInfo->event_type = INT_POWER_FAULT_CLEAR;
			} else {
				/*
				 * power fault
				 */
				taskInfo->event_type = INT_POWER_FAULT;

				if (ctrl->rev < 4) {
					amber_LED_on (ctrl, hp_slot);
					green_LED_off (ctrl, hp_slot);
					set_SOGO (ctrl);

					/* this is a fatal condition, we want
					 * to crash the machine to protect from
					 * data corruption. simulated_NMI
					 * shouldn't ever return */
					/* FIXME
					simulated_NMI(hp_slot, ctrl); */

					/* The following code causes a software
					 * crash just in case simulated_NMI did
					 * return */
					/*FIXME
					panic(msg_power_fault); */
				} else {
					/* set power fault status for this board */
					func->status = 0xFF;
					info("power fault bit %x set\n", hp_slot);
				}
			}
		}
	}

	return rc;
}

/**
 * sort_by_size - sort nodes on the list by their length, smallest first.
* @head: list to sort */ static int sort_by_size(struct pci_resource **head) { struct pci_resource *current_res; struct pci_resource *next_res; int out_of_order = 1; if (!(*head)) return 1; if (!((*head)->next)) return 0; while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->length > (*head)->next->length)) { out_of_order++; current_res = *head; *head = (*head)->next; current_res->next = (*head)->next; (*head)->next = current_res; } current_res = *head; while (current_res->next && current_res->next->next) { if (current_res->next->length > current_res->next->next->length) { out_of_order++; next_res = current_res->next; current_res->next = current_res->next->next; current_res = current_res->next; next_res->next = current_res->next; current_res->next = next_res; } else current_res = current_res->next; } } /* End of out_of_order loop */ return 0; } /** * sort_by_max_size - sort nodes on the list by their length, largest first. * @head: list to sort */ static int sort_by_max_size(struct pci_resource **head) { struct pci_resource *current_res; struct pci_resource *next_res; int out_of_order = 1; if (!(*head)) return 1; if (!((*head)->next)) return 0; while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->length < (*head)->next->length)) { out_of_order++; current_res = *head; *head = (*head)->next; current_res->next = (*head)->next; (*head)->next = current_res; } current_res = *head; while (current_res->next && current_res->next->next) { if (current_res->next->length < current_res->next->next->length) { out_of_order++; next_res = current_res->next; current_res->next = current_res->next->next; current_res = current_res->next; next_res->next = current_res->next; current_res->next = next_res; } else current_res = current_res->next; } } /* End of out_of_order loop */ return 0; } /** * do_pre_bridge_resource_split - find node of resources that are unused * 
@head: new list head
 * @orig_head: original list head
 * @alignment: max node size (?)
 */
static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **head,
				struct pci_resource **orig_head, u32 alignment)
{
	struct pci_resource *prevnode = NULL;
	struct pci_resource *node;
	struct pci_resource *split_node;
	u32 rc;
	u32 temp_dword;

	dbg("do_pre_bridge_resource_split\n");

	if (!(*head) || !(*orig_head))
		return NULL;

	rc = cpqhp_resource_sort_and_combine(head);
	if (rc)
		return NULL;

	/* Only valid if the first node still starts where the original did
	 * and the bridge has consumed part (but not all) of it. */
	if ((*head)->base != (*orig_head)->base)
		return NULL;
	if ((*head)->length == (*orig_head)->length)
		return NULL;

	/* If we got here, there the bridge requires some of the resource, but
	 * we may be able to split some off of the front */
	node = *head;

	if (node->length & (alignment - 1)) {
		/* Length is not aligned: split off the aligned front chunk
		 * into a new list entry. */
		split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
		if (!split_node)
			return NULL;

		temp_dword = (node->length | (alignment - 1)) + 1 - alignment;

		split_node->base = node->base;
		split_node->length = temp_dword;

		node->length -= temp_dword;
		node->base += split_node->length;

		/* Put the new (front) entry at the head of the list */
		*head = split_node;
		split_node->next = node;
	}

	if (node->length < alignment)
		return NULL;

	/* Now unlink the aligned remainder and hand it to the caller */
	if (*head == node) {
		*head = node->next;
	} else {
		prevnode = *head;
		while (prevnode->next != node)
			prevnode = prevnode->next;
		prevnode->next = node->next;
	}
	node->next = NULL;

	return node;
}

/**
 * do_bridge_resource_split - find one node of resources that aren't in use
 * @head: list head
 * @alignment: max node size (?)
*/
static struct pci_resource *do_bridge_resource_split(struct pci_resource **head,
				u32 alignment)
{
	struct pci_resource *prevnode = NULL;
	struct pci_resource *node;
	u32 rc;
	u32 temp_dword;

	rc = cpqhp_resource_sort_and_combine(head);
	if (rc)
		return NULL;

	/* Walk to the last node, freeing every earlier entry: only the
	 * final (highest-base) node is kept as the candidate. */
	node = *head;
	while (node->next) {
		prevnode = node;
		node = node->next;
		kfree(prevnode);
	}

	if (node->length < alignment)
		goto error;

	if (node->base & (alignment - 1)) {
		/* Base not aligned: round it up and shrink accordingly.
		 * Short circuit if adjusted size is too small. */
		temp_dword = (node->base | (alignment - 1)) + 1;
		if ((node->length - (temp_dword - node->base)) < alignment)
			goto error;

		node->length -= (temp_dword - node->base);
		node->base = temp_dword;
	}

	if (node->length & (alignment - 1))
		/* There's stuff in use after this node */
		goto error;

	return node;
error:
	kfree(node);
	return NULL;
}

/**
 * get_io_resource - find first node of given size not in ISA aliasing window.
 * @head: list to search
 * @size: size of node to find, must be a power of two.
 *
 * Description: This function sorts the resource list by size and then returns
 * the first node of "size" length that is not in the ISA aliasing
 * window. If it finds a node larger than "size" it will split it up.
*/
static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size)
{
	struct pci_resource *prevnode;
	struct pci_resource *node;
	struct pci_resource *split_node;
	u32 temp_dword;

	if (!(*head))
		return NULL;

	if (cpqhp_resource_sort_and_combine(head))
		return NULL;

	if (sort_by_size(head))
		return NULL;

	for (node = *head; node; node = node->next) {
		if (node->length < size)
			continue;

		if (node->base & (size - 1)) {
			/* this one isn't base aligned properly
			 * so we'll make a new entry and split it up */
			temp_dword = (node->base | (size - 1)) + 1;

			/* Short circuit if adjusted size is too small */
			if ((node->length - (temp_dword - node->base)) < size)
				continue;

			split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
			if (!split_node)
				return NULL;

			split_node->base = node->base;
			split_node->length = temp_dword - node->base;
			node->base = temp_dword;
			node->length -= split_node->length;

			/* Put it in the list */
			split_node->next = node->next;
			node->next = split_node;
		} /* End of non-aligned base */

		/* Don't need to check if too small since we already did */
		if (node->length > size) {
			/* this one is longer than we need
			 * so we'll make a new entry and split it up */
			split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
			if (!split_node)
				return NULL;

			split_node->base = node->base + size;
			split_node->length = node->length - size;
			node->length = size;

			/* Put it in the list */
			split_node->next = node->next;
			node->next = split_node;
		} /* End of too big on top end */

		/* For IO make sure it's not in the ISA aliasing space */
		if (node->base & 0x300L)
			continue;

		/* If we got here, then it is the right size
		 * Now take it out of the list and break */
		if (*head == node) {
			*head = node->next;
		} else {
			prevnode = *head;
			while (prevnode->next != node)
				prevnode = prevnode->next;
			prevnode->next = node->next;
		}
		node->next = NULL;
		break;
	}

	return node;
}

/**
 * get_max_resource - get largest node which has at least the given size.
* @head: the list to search the node in * @size: the minimum size of the node to find * * Description: Gets the largest node that is at least "size" big from the * list pointed to by head. It aligns the node on top and bottom * to "size" alignment before returning it. */ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 size) { struct pci_resource *max; struct pci_resource *temp; struct pci_resource *split_node; u32 temp_dword; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_max_size(head)) return NULL; for (max = *head; max; max = max->next) { /* If not big enough we could probably just bail, * instead we'll continue to the next. */ if (max->length < size) continue; if (max->base & (size - 1)) { /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (max->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((max->length - (temp_dword - max->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = max->base; split_node->length = temp_dword - max->base; max->base = temp_dword; max->length -= split_node->length; split_node->next = max->next; max->next = split_node; } if ((max->base + max->length) & (size - 1)) { /* this one isn't end aligned properly at the top * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; temp_dword = ((max->base + max->length) & ~(size - 1)); split_node->base = temp_dword; split_node->length = max->length + max->base - split_node->base; max->length -= split_node->length; split_node->next = max->next; max->next = split_node; } /* Make sure it didn't shrink too much when we aligned it */ if (max->length < size) continue; /* Now take it out of the list */ temp = *head; if (temp == max) { *head = max->next; } else { while (temp && temp->next != max) { temp = temp->next; } temp->next = 
max->next; } max->next = NULL; break; } return max; } /** * get_resource - find resource of given size and split up larger ones. * @head: the list to search for resources * @size: the size limit to use * * Description: This function sorts the resource list by size and then * returns the first node of "size" length. If it finds a node * larger than "size" it will split it up. * * size must be a power of two. */ static struct pci_resource *get_resource(struct pci_resource **head, u32 size) { struct pci_resource *prevnode; struct pci_resource *node; struct pci_resource *split_node; u32 temp_dword; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_size(head)) return NULL; for (node = *head; node; node = node->next) { dbg("%s: req_size =%x node=%p, base=%x, length=%x\n", __func__, size, node, node->base, node->length); if (node->length < size) continue; if (node->base & (size - 1)) { dbg("%s: not aligned\n", __func__); /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (node->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((node->length - (temp_dword - node->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base; split_node->length = temp_dword - node->base; node->base = temp_dword; node->length -= split_node->length; split_node->next = node->next; node->next = split_node; } /* End of non-aligned base */ /* Don't need to check if too small since we already did */ if (node->length > size) { dbg("%s: too big\n", __func__); /* this one is longer than we need * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base + size; split_node->length = node->length - size; node->length = size; /* Put it in the list */ split_node->next = node->next; node->next = split_node; } /* End of too big on top end 
*/ dbg("%s: got one!!!\n", __func__); /* If we got here, then it is the right size * Now take it out of the list */ if (*head == node) { *head = node->next; } else { prevnode = *head; while (prevnode->next != node) prevnode = prevnode->next; prevnode->next = node->next; } node->next = NULL; break; } return node; } /** * cpqhp_resource_sort_and_combine - sort nodes by base addresses and clean up * @head: the list to sort and clean up * * Description: Sorts all of the nodes in the list in ascending order by * their base addresses. Also does garbage collection by * combining adjacent nodes. * * Returns %0 if success. */ int cpqhp_resource_sort_and_combine(struct pci_resource **head) { struct pci_resource *node1; struct pci_resource *node2; int out_of_order = 1; dbg("%s: head = %p, *head = %p\n", __func__, head, *head); if (!(*head)) return 1; dbg("*head->next = %p\n",(*head)->next); if (!(*head)->next) return 0; /* only one item on the list, already sorted! */ dbg("*head->base = 0x%x\n",(*head)->base); dbg("*head->next->base = 0x%x\n",(*head)->next->base); while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->base > (*head)->next->base)) { node1 = *head; (*head) = (*head)->next; node1->next = (*head)->next; (*head)->next = node1; out_of_order++; } node1 = (*head); while (node1->next && node1->next->next) { if (node1->next->base > node1->next->next->base) { out_of_order++; node2 = node1->next; node1->next = node1->next->next; node1 = node1->next; node2->next = node1->next; node1->next = node2; } else node1 = node1->next; } } /* End of out_of_order loop */ node1 = *head; while (node1 && node1->next) { if ((node1->base + node1->length) == node1->next->base) { /* Combine */ dbg("8..\n"); node1->length += node1->next->length; node2 = node1->next; node1->next = node1->next->next; kfree(node2); } else node1 = node1->next; } return 0; } irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data) { struct controller *ctrl = 
data; u8 schedule_flag = 0; u8 reset; u16 misc; u32 Diff; u32 temp_dword; misc = readw(ctrl->hpc_reg + MISC); /* * Check to see if it was our interrupt */ if (!(misc & 0x000C)) { return IRQ_NONE; } if (misc & 0x0004) { /* * Serial Output interrupt Pending */ /* Clear the interrupt */ misc |= 0x0004; writew(misc, ctrl->hpc_reg + MISC); /* Read to clear posted writes */ misc = readw(ctrl->hpc_reg + MISC); dbg ("%s - waking up\n", __func__); wake_up_interruptible(&ctrl->queue); } if (misc & 0x0008) { /* General-interrupt-input interrupt Pending */ Diff = readl(ctrl->hpc_reg + INT_INPUT_CLEAR) ^ ctrl->ctrl_int_comp; ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); /* Clear the interrupt */ writel(Diff, ctrl->hpc_reg + INT_INPUT_CLEAR); /* Read it back to clear any posted writes */ temp_dword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); if (!Diff) /* Clear all interrupts */ writel(0xFFFFFFFF, ctrl->hpc_reg + INT_INPUT_CLEAR); schedule_flag += handle_switch_change((u8)(Diff & 0xFFL), ctrl); schedule_flag += handle_presence_change((u16)((Diff & 0xFFFF0000L) >> 16), ctrl); schedule_flag += handle_power_fault((u8)((Diff & 0xFF00L) >> 8), ctrl); } reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE); if (reset & 0x40) { /* Bus reset has completed */ reset &= 0xCF; writeb(reset, ctrl->hpc_reg + RESET_FREQ_MODE); reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE); wake_up_interruptible(&ctrl->queue); } if (schedule_flag) { wake_up_process(cpqhp_event_thread); dbg("Waking even thread"); } return IRQ_HANDLED; } /** * cpqhp_slot_create - Creates a node and adds it to the proper bus. * @busnumber: bus where new node is to be located * * Returns pointer to the new node or %NULL if unsuccessful. 
*/ struct pci_func *cpqhp_slot_create(u8 busnumber) { struct pci_func *new_slot; struct pci_func *next; new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL); if (new_slot == NULL) return new_slot; new_slot->next = NULL; new_slot->configured = 1; if (cpqhp_slot_list[busnumber] == NULL) { cpqhp_slot_list[busnumber] = new_slot; } else { next = cpqhp_slot_list[busnumber]; while (next->next != NULL) next = next->next; next->next = new_slot; } return new_slot; } /** * slot_remove - Removes a node from the linked list of slots. * @old_slot: slot to remove * * Returns %0 if successful, !0 otherwise. */ static int slot_remove(struct pci_func * old_slot) { struct pci_func *next; if (old_slot == NULL) return 1; next = cpqhp_slot_list[old_slot->bus]; if (next == NULL) return 1; if (next == old_slot) { cpqhp_slot_list[old_slot->bus] = old_slot->next; cpqhp_destroy_board_resources(old_slot); kfree(old_slot); return 0; } while ((next->next != old_slot) && (next->next != NULL)) next = next->next; if (next->next == old_slot) { next->next = old_slot->next; cpqhp_destroy_board_resources(old_slot); kfree(old_slot); return 0; } else return 2; } /** * bridge_slot_remove - Removes a node from the linked list of slots. * @bridge: bridge to remove * * Returns %0 if successful, !0 otherwise. 
*/ static int bridge_slot_remove(struct pci_func *bridge) { u8 subordinateBus, secondaryBus; u8 tempBus; struct pci_func *next; secondaryBus = (bridge->config_space[0x06] >> 8) & 0xFF; subordinateBus = (bridge->config_space[0x06] >> 16) & 0xFF; for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) { next = cpqhp_slot_list[tempBus]; while (!slot_remove(next)) next = cpqhp_slot_list[tempBus]; } next = cpqhp_slot_list[bridge->bus]; if (next == NULL) return 1; if (next == bridge) { cpqhp_slot_list[bridge->bus] = bridge->next; goto out; } while ((next->next != bridge) && (next->next != NULL)) next = next->next; if (next->next != bridge) return 2; next->next = bridge->next; out: kfree(bridge); return 0; } /** * cpqhp_slot_find - Looks for a node by bus, and device, multiple functions accessed * @bus: bus to find * @device: device to find * @index: is %0 for first function found, %1 for the second... * * Returns pointer to the node if successful, %NULL otherwise. */ struct pci_func *cpqhp_slot_find(u8 bus, u8 device, u8 index) { int found = -1; struct pci_func *func; func = cpqhp_slot_list[bus]; if ((func == NULL) || ((func->device == device) && (index == 0))) return func; if (func->device == device) found++; while (func->next != NULL) { func = func->next; if (func->device == device) found++; if (found == index) return func; } return NULL; } /* DJZ: I don't think is_bridge will work as is. * FIXME */ static int is_bridge(struct pci_func * func) { /* Check the header type */ if (((func->config_space[0x03] >> 16) & 0xFF) == 0x01) return 1; else return 0; } /** * set_controller_speed - set the frequency and/or mode of a specific controller segment. * @ctrl: controller to change frequency/mode for. * @adapter_speed: the speed of the adapter we want to match. * @hp_slot: the slot number where the adapter is installed. * * Returns %0 if we successfully change frequency and/or mode to match the * adapter speed. 
*/ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) { struct slot *slot; struct pci_bus *bus = ctrl->pci_bus; u8 reg; u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); u16 reg16; u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); if (bus->cur_bus_speed == adapter_speed) return 0; /* We don't allow freq/mode changes if we find another adapter running * in another slot on this controller */ for(slot = ctrl->slot; slot; slot = slot->next) { if (slot->device == (hp_slot + ctrl->slot_device_offset)) continue; if (!slot->hotplug_slot || !slot->hotplug_slot->info) continue; if (slot->hotplug_slot->info->adapter_status == 0) continue; /* If another adapter is running on the same segment but at a * lower speed/mode, we allow the new adapter to function at * this rate if supported */ if (bus->cur_bus_speed < adapter_speed) return 0; return 1; } /* If the controller doesn't support freq/mode changes and the * controller is running at a higher mode, we bail */ if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability)) return 1; /* But we allow the adapter to run at a lower rate if possible */ if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability)) return 0; /* We try to set the max speed supported by both the adapter and * controller */ if (bus->max_bus_speed < adapter_speed) { if (bus->cur_bus_speed == bus->max_bus_speed) return 0; adapter_speed = bus->max_bus_speed; } writel(0x0L, ctrl->hpc_reg + LED_CONTROL); writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE); set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); if (adapter_speed != PCI_SPEED_133MHz_PCIX) reg = 0xF5; else reg = 0xF4; pci_write_config_byte(ctrl->pci_dev, 0x41, reg); reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ); reg16 &= ~0x000F; switch(adapter_speed) { case(PCI_SPEED_133MHz_PCIX): reg = 0x75; reg16 |= 0xB; break; case(PCI_SPEED_100MHz_PCIX): reg = 0x74; reg16 |= 0xA; break; case(PCI_SPEED_66MHz_PCIX): reg = 0x73; reg16 |= 0x9; break; case(PCI_SPEED_66MHz): 
reg = 0x73; reg16 |= 0x1; break; default: /* 33MHz PCI 2.2 */ reg = 0x71; break; } reg16 |= 0xB << 12; writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ); mdelay(5); /* Reenable interrupts */ writel(0, ctrl->hpc_reg + INT_MASK); pci_write_config_byte(ctrl->pci_dev, 0x41, reg); /* Restart state machine */ reg = ~0xF; pci_read_config_byte(ctrl->pci_dev, 0x43, &reg); pci_write_config_byte(ctrl->pci_dev, 0x43, reg); /* Only if mode change...*/ if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); mdelay(1100); /* Restore LED/Slot state */ writel(leds, ctrl->hpc_reg + LED_CONTROL); writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE); set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); bus->cur_bus_speed = adapter_speed; slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); info("Successfully changed frequency/mode for adapter in slot %d\n", slot->number); return 0; } /* the following routines constitute the bulk of the * hotplug controller logic */ /** * board_replaced - Called after a board has been replaced in the system. * @func: PCI device/function information * @ctrl: hotplug controller * * This is only used if we don't have resources for hot add. * Turns power on for the board. * Checks to see if board is the same. * If board is same, reconfigures it. * If board isn't same, turns it back off. */ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) { struct pci_bus *bus = ctrl->pci_bus; u8 hp_slot; u8 temp_byte; u8 adapter_speed; u32 rc = 0; hp_slot = func->device - ctrl->slot_device_offset; /* * The switch is open. 
*/ if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) rc = INTERLOCK_OPEN; /* * The board is already on */ else if (is_slot_enabled (ctrl, hp_slot)) rc = CARD_FUNCTIONING; else { mutex_lock(&ctrl->crit_sect); /* turn on board without attaching to the bus */ enable_slot_power (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); /* Change bits in slot power register to force another shift out * NOTE: this is to work around the timer bug */ temp_byte = readb(ctrl->hpc_reg + SLOT_POWER); writeb(0x00, ctrl->hpc_reg + SLOT_POWER); writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); adapter_speed = get_adapter_speed(ctrl, hp_slot); if (bus->cur_bus_speed != adapter_speed) if (set_controller_speed(ctrl, adapter_speed, hp_slot)) rc = WRONG_BUS_FREQUENCY; /* turn off board without attaching to the bus */ disable_slot_power (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); if (rc) return rc; mutex_lock(&ctrl->crit_sect); slot_enable (ctrl, hp_slot); green_LED_blink (ctrl, hp_slot); amber_LED_off (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); /* Wait for ~1 second because of hot plug spec */ long_delay(1*HZ); /* Check for a power fault */ if (func->status == 0xFF) { /* power fault occurred, but it was benign */ rc = POWER_FAILURE; func->status = 0; } else rc = cpqhp_valid_replace(ctrl, func); if (!rc) { /* It must be the same board */ rc = cpqhp_configure_board(ctrl, func); /* If configuration fails, turn it off * Get slot won't work for devices behind * bridges, but in this case it will always be * called for the "base" bus/dev/func of an * adapter. 
*/ mutex_lock(&ctrl->crit_sect); amber_LED_on (ctrl, hp_slot); green_LED_off (ctrl, hp_slot); slot_disable (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); if (rc) return rc; else return 1; } else { /* Something is wrong * Get slot won't work for devices behind bridges, but * in this case it will always be called for the "base" * bus/dev/func of an adapter. */ mutex_lock(&ctrl->crit_sect); amber_LED_on (ctrl, hp_slot); green_LED_off (ctrl, hp_slot); slot_disable (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); } } return rc; } /** * board_added - Called after a board has been added to the system. * @func: PCI device/function info * @ctrl: hotplug controller * * Turns power on for the board. * Configures board. */ static u32 board_added(struct pci_func *func, struct controller *ctrl) { u8 hp_slot; u8 temp_byte; u8 adapter_speed; int index; u32 temp_register = 0xFFFFFFFF; u32 rc = 0; struct pci_func *new_slot = NULL; struct pci_bus *bus = ctrl->pci_bus; struct slot *p_slot; struct resource_lists res_lists; hp_slot = func->device - ctrl->slot_device_offset; dbg("%s: func->device, slot_offset, hp_slot = %d, %d ,%d\n", __func__, func->device, ctrl->slot_device_offset, hp_slot); mutex_lock(&ctrl->crit_sect); /* turn on board without attaching to the bus */ enable_slot_power(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); /* Change bits in slot power register to force another shift out * NOTE: this is to work around the timer bug */ temp_byte = readb(ctrl->hpc_reg + SLOT_POWER); writeb(0x00, ctrl->hpc_reg + SLOT_POWER); writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); adapter_speed = get_adapter_speed(ctrl, hp_slot); if (bus->cur_bus_speed != adapter_speed) if (set_controller_speed(ctrl, adapter_speed, hp_slot)) rc 
= WRONG_BUS_FREQUENCY; /* turn off board without attaching to the bus */ disable_slot_power (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); if (rc) return rc; p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); /* turn on board and blink green LED */ dbg("%s: before down\n", __func__); mutex_lock(&ctrl->crit_sect); dbg("%s: after down\n", __func__); dbg("%s: before slot_enable\n", __func__); slot_enable (ctrl, hp_slot); dbg("%s: before green_LED_blink\n", __func__); green_LED_blink (ctrl, hp_slot); dbg("%s: before amber_LED_blink\n", __func__); amber_LED_off (ctrl, hp_slot); dbg("%s: before set_SOGO\n", __func__); set_SOGO(ctrl); /* Wait for SOBS to be unset */ dbg("%s: before wait_for_ctrl_irq\n", __func__); wait_for_ctrl_irq (ctrl); dbg("%s: after wait_for_ctrl_irq\n", __func__); dbg("%s: before up\n", __func__); mutex_unlock(&ctrl->crit_sect); dbg("%s: after up\n", __func__); /* Wait for ~1 second because of hot plug spec */ dbg("%s: before long_delay\n", __func__); long_delay(1*HZ); dbg("%s: after long_delay\n", __func__); dbg("%s: func status = %x\n", __func__, func->status); /* Check for a power fault */ if (func->status == 0xFF) { /* power fault occurred, but it was benign */ temp_register = 0xFFFFFFFF; dbg("%s: temp register set to %x by power fault\n", __func__, temp_register); rc = POWER_FAILURE; func->status = 0; } else { /* Get vendor/device ID u32 */ ctrl->pci_bus->number = func->bus; rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, func->function), PCI_VENDOR_ID, &temp_register); dbg("%s: pci_read_config_dword returns %d\n", __func__, rc); dbg("%s: temp_register is %x\n", __func__, temp_register); if (rc != 0) { /* Something's wrong here */ temp_register = 0xFFFFFFFF; dbg("%s: temp register set to %x by error\n", __func__, temp_register); } /* Preset return code. It will be changed later if things go okay. 
*/ rc = NO_ADAPTER_PRESENT; } /* All F's is an empty slot or an invalid board */ if (temp_register != 0xFFFFFFFF) { res_lists.io_head = ctrl->io_head; res_lists.mem_head = ctrl->mem_head; res_lists.p_mem_head = ctrl->p_mem_head; res_lists.bus_head = ctrl->bus_head; res_lists.irqs = NULL; rc = configure_new_device(ctrl, func, 0, &res_lists); dbg("%s: back from configure_new_device\n", __func__); ctrl->io_head = res_lists.io_head; ctrl->mem_head = res_lists.mem_head; ctrl->p_mem_head = res_lists.p_mem_head; ctrl->bus_head = res_lists.bus_head; cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); cpqhp_resource_sort_and_combine(&(ctrl->io_head)); cpqhp_resource_sort_and_combine(&(ctrl->bus_head)); if (rc) { mutex_lock(&ctrl->crit_sect); amber_LED_on (ctrl, hp_slot); green_LED_off (ctrl, hp_slot); slot_disable (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); return rc; } else { cpqhp_save_slot_config(ctrl, func); } func->status = 0; func->switch_save = 0x10; func->is_a_board = 0x01; /* next, we will instantiate the linux pci_dev structures (with * appropriate driver notification, if already present) */ dbg("%s: configure linux pci_dev structure\n", __func__); index = 0; do { new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++); if (new_slot && !new_slot->pci_dev) cpqhp_configure_device(ctrl, new_slot); } while (new_slot); mutex_lock(&ctrl->crit_sect); green_LED_on (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); } else { mutex_lock(&ctrl->crit_sect); amber_LED_on (ctrl, hp_slot); green_LED_off (ctrl, hp_slot); slot_disable (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); return rc; } return 0; } /** * remove_board - Turns off slot and LEDs * @func: PCI device/function info * @replace_flag: 
whether replacing or adding a new device * @ctrl: target controller */ static u32 remove_board(struct pci_func * func, u32 replace_flag, struct controller * ctrl) { int index; u8 skip = 0; u8 device; u8 hp_slot; u8 temp_byte; u32 rc; struct resource_lists res_lists; struct pci_func *temp_func; if (cpqhp_unconfigure_device(func)) return 1; device = func->device; hp_slot = func->device - ctrl->slot_device_offset; dbg("In %s, hp_slot = %d\n", __func__, hp_slot); /* When we get here, it is safe to change base address registers. * We will attempt to save the base address register lengths */ if (replace_flag || !ctrl->add_support) rc = cpqhp_save_base_addr_length(ctrl, func); else if (!func->bus_head && !func->mem_head && !func->p_mem_head && !func->io_head) { /* Here we check to see if we've saved any of the board's * resources already. If so, we'll skip the attempt to * determine what's being used. */ index = 0; temp_func = cpqhp_slot_find(func->bus, func->device, index++); while (temp_func) { if (temp_func->bus_head || temp_func->mem_head || temp_func->p_mem_head || temp_func->io_head) { skip = 1; break; } temp_func = cpqhp_slot_find(temp_func->bus, temp_func->device, index++); } if (!skip) rc = cpqhp_save_used_resources(ctrl, func); } /* Change status to shutdown */ if (func->is_a_board) func->status = 0x01; func->configured = 0; mutex_lock(&ctrl->crit_sect); green_LED_off (ctrl, hp_slot); slot_disable (ctrl, hp_slot); set_SOGO(ctrl); /* turn off SERR for slot */ temp_byte = readb(ctrl->hpc_reg + SLOT_SERR); temp_byte &= ~(0x01 << hp_slot); writeb(temp_byte, ctrl->hpc_reg + SLOT_SERR); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); if (!replace_flag && ctrl->add_support) { while (func) { res_lists.io_head = ctrl->io_head; res_lists.mem_head = ctrl->mem_head; res_lists.p_mem_head = ctrl->p_mem_head; res_lists.bus_head = ctrl->bus_head; cpqhp_return_board_resources(func, &res_lists); ctrl->io_head = res_lists.io_head; 
ctrl->mem_head = res_lists.mem_head; ctrl->p_mem_head = res_lists.p_mem_head; ctrl->bus_head = res_lists.bus_head; cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); cpqhp_resource_sort_and_combine(&(ctrl->io_head)); cpqhp_resource_sort_and_combine(&(ctrl->bus_head)); if (is_bridge(func)) { bridge_slot_remove(func); } else slot_remove(func); func = cpqhp_slot_find(ctrl->bus, device, 0); } /* Setup slot structure with entry for empty slot */ func = cpqhp_slot_create(ctrl->bus); if (func == NULL) return 1; func->bus = ctrl->bus; func->device = device; func->function = 0; func->configured = 0; func->switch_save = 0x10; func->is_a_board = 0; func->p_task_event = NULL; } return 0; } static void pushbutton_helper_thread(unsigned long data) { pushbutton_pending = data; wake_up_process(cpqhp_event_thread); } /* this is the main worker thread */ static int event_thread(void* data) { struct controller *ctrl; while (1) { dbg("!!!!event_thread sleeping\n"); set_current_state(TASK_INTERRUPTIBLE); schedule(); if (kthread_should_stop()) break; /* Do stuff here */ if (pushbutton_pending) cpqhp_pushbutton_thread(pushbutton_pending); else for (ctrl = cpqhp_ctrl_list; ctrl; ctrl=ctrl->next) interrupt_event_handler(ctrl); } dbg("event_thread signals exit\n"); return 0; } int cpqhp_event_start_thread(void) { cpqhp_event_thread = kthread_run(event_thread, NULL, "phpd_event"); if (IS_ERR(cpqhp_event_thread)) { err ("Can't start up our event thread\n"); return PTR_ERR(cpqhp_event_thread); } return 0; } void cpqhp_event_stop_thread(void) { kthread_stop(cpqhp_event_thread); } static int update_slot_info(struct controller *ctrl, struct slot *slot) { struct hotplug_slot_info *info; int result; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->power_status = get_slot_enabled(ctrl, slot); info->attention_status = cpq_get_attention_status(ctrl, slot); info->latch_status = cpq_get_latch_status(ctrl, slot); 
info->adapter_status = get_presence_status(ctrl, slot); result = pci_hp_change_slot_info(slot->hotplug_slot, info); kfree (info); return result; } static void interrupt_event_handler(struct controller *ctrl) { int loop = 0; int change = 1; struct pci_func *func; u8 hp_slot; struct slot *p_slot; while (change) { change = 0; for (loop = 0; loop < 10; loop++) { /* dbg("loop %d\n", loop); */ if (ctrl->event_queue[loop].event_type != 0) { hp_slot = ctrl->event_queue[loop].hp_slot; func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); if (!func) return; p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); if (!p_slot) return; dbg("hp_slot %d, func %p, p_slot %p\n", hp_slot, func, p_slot); if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) { dbg("button pressed\n"); } else if (ctrl->event_queue[loop].event_type == INT_BUTTON_CANCEL) { dbg("button cancel\n"); del_timer(&p_slot->task_event); mutex_lock(&ctrl->crit_sect); if (p_slot->state == BLINKINGOFF_STATE) { /* slot is on */ dbg("turn on green LED\n"); green_LED_on (ctrl, hp_slot); } else if (p_slot->state == BLINKINGON_STATE) { /* slot is off */ dbg("turn off green LED\n"); green_LED_off (ctrl, hp_slot); } info(msg_button_cancel, p_slot->number); p_slot->state = STATIC_STATE; amber_LED_off (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); } /*** button Released (No action on press...) 
*/ else if (ctrl->event_queue[loop].event_type == INT_BUTTON_RELEASE) { dbg("button release\n"); if (is_slot_enabled (ctrl, hp_slot)) { dbg("slot is on\n"); p_slot->state = BLINKINGOFF_STATE; info(msg_button_off, p_slot->number); } else { dbg("slot is off\n"); p_slot->state = BLINKINGON_STATE; info(msg_button_on, p_slot->number); } mutex_lock(&ctrl->crit_sect); dbg("blink green LED and turn off amber\n"); amber_LED_off (ctrl, hp_slot); green_LED_blink (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); mutex_unlock(&ctrl->crit_sect); init_timer(&p_slot->task_event); p_slot->hp_slot = hp_slot; p_slot->ctrl = ctrl; /* p_slot->physical_slot = physical_slot; */ p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */ p_slot->task_event.function = pushbutton_helper_thread; p_slot->task_event.data = (u32) p_slot; dbg("add_timer p_slot = %p\n", p_slot); add_timer(&p_slot->task_event); } /***********POWER FAULT */ else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) { dbg("power fault\n"); } else { /* refresh notification */ update_slot_info(ctrl, p_slot); } ctrl->event_queue[loop].event_type = 0; change = 1; } } /* End of FOR loop */ } return; } /** * cpqhp_pushbutton_thread - handle pushbutton events * @slot: target slot (struct) * * Scheduled procedure to handle blocking stuff for the pushbuttons. * Handles all pending events and exits. */ void cpqhp_pushbutton_thread(unsigned long slot) { u8 hp_slot; u8 device; struct pci_func *func; struct slot *p_slot = (struct slot *) slot; struct controller *ctrl = (struct controller *) p_slot->ctrl; pushbutton_pending = 0; hp_slot = p_slot->hp_slot; device = p_slot->device; if (is_slot_enabled(ctrl, hp_slot)) { p_slot->state = POWEROFF_STATE; /* power Down board */ func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0); dbg("In power_down_board, func = %p, ctrl = %p\n", func, ctrl); if (!func) { dbg("Error! 
func NULL in %s\n", __func__); return ; } if (cpqhp_process_SS(ctrl, func) != 0) { amber_LED_on(ctrl, hp_slot); green_LED_on(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); } p_slot->state = STATIC_STATE; } else { p_slot->state = POWERON_STATE; /* slot is off */ func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0); dbg("In add_board, func = %p, ctrl = %p\n", func, ctrl); if (!func) { dbg("Error! func NULL in %s\n", __func__); return ; } if (ctrl != NULL) { if (cpqhp_process_SI(ctrl, func) != 0) { amber_LED_on(ctrl, hp_slot); green_LED_off(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); } } p_slot->state = STATIC_STATE; } return; } int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func) { u8 device, hp_slot; u16 temp_word; u32 tempdword; int rc; struct slot* p_slot; int physical_slot = 0; tempdword = 0; device = func->device; hp_slot = device - ctrl->slot_device_offset; p_slot = cpqhp_find_slot(ctrl, device); if (p_slot) physical_slot = p_slot->number; /* Check to see if the interlock is closed */ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); if (tempdword & (0x01 << hp_slot)) { return 1; } if (func->is_a_board) { rc = board_replaced(func, ctrl); } else { /* add board */ slot_remove(func); func = cpqhp_slot_create(ctrl->bus); if (func == NULL) return 1; func->bus = ctrl->bus; func->device = device; func->function = 0; func->configured = 0; func->is_a_board = 1; /* We have to save the presence info for these slots */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { func->switch_save = 0; } else { func->switch_save = 0x10; } rc = board_added(func, ctrl); if (rc) { if (is_bridge(func)) { bridge_slot_remove(func); } else slot_remove(func); /* Setup slot structure with entry for empty slot */ func = 
cpqhp_slot_create(ctrl->bus); if (func == NULL) return 1; func->bus = ctrl->bus; func->device = device; func->function = 0; func->configured = 0; func->is_a_board = 0; /* We have to save the presence info for these slots */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { func->switch_save = 0; } else { func->switch_save = 0x10; } } } if (rc) { dbg("%s: rc = %d\n", __func__, rc); } if (p_slot) update_slot_info(ctrl, p_slot); return rc; } int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func) { u8 device, class_code, header_type, BCR; u8 index = 0; u8 replace_flag; u32 rc = 0; unsigned int devfn; struct slot* p_slot; struct pci_bus *pci_bus = ctrl->pci_bus; int physical_slot=0; device = func->device; func = cpqhp_slot_find(ctrl->bus, device, index++); p_slot = cpqhp_find_slot(ctrl, device); if (p_slot) { physical_slot = p_slot->number; } /* Make sure there are no video controllers here */ while (func && !rc) { pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); /* Check the Class Code */ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); if (rc) return rc; if (class_code == PCI_BASE_CLASS_DISPLAY) { /* Display/Video adapter (not supported) */ rc = REMOVE_NOT_SUPPORTED; } else { /* See if it's a bridge */ rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); if (rc) return rc; /* If it's a bridge, check the VGA Enable bit */ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_BRIDGE_CONTROL, &BCR); if (rc) return rc; /* If the VGA Enable bit is set, remove isn't * supported */ if (BCR & PCI_BRIDGE_CTL_VGA) rc = REMOVE_NOT_SUPPORTED; } } func = cpqhp_slot_find(ctrl->bus, device, index++); } func = cpqhp_slot_find(ctrl->bus, device, 0); if ((func != NULL) && !rc) { /* FIXME: Replace flag 
should be passed into process_SS */ replace_flag = !(ctrl->add_support); rc = remove_board(func, replace_flag, ctrl); } else if (!rc) { rc = 1; } if (p_slot) update_slot_info(ctrl, p_slot); return rc; } /** * switch_leds - switch the leds, go from one site to the other. * @ctrl: controller to use * @num_of_slots: number of slots to use * @work_LED: LED control value * @direction: 1 to start from the left side, 0 to start right. */ static void switch_leds(struct controller *ctrl, const int num_of_slots, u32 *work_LED, const int direction) { int loop; for (loop = 0; loop < num_of_slots; loop++) { if (direction) *work_LED = *work_LED >> 1; else *work_LED = *work_LED << 1; writel(*work_LED, ctrl->hpc_reg + LED_CONTROL); set_SOGO(ctrl); /* Wait for SOGO interrupt */ wait_for_ctrl_irq(ctrl); /* Get ready for next iteration */ long_delay((2*HZ)/10); } } /** * cpqhp_hardware_test - runs hardware tests * @ctrl: target controller * @test_num: the number written to the "test" file in sysfs. * * For hot plug ctrl folks to play with. */ int cpqhp_hardware_test(struct controller *ctrl, int test_num) { u32 save_LED; u32 work_LED; int loop; int num_of_slots; num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f; switch (test_num) { case 1: /* Do stuff here! 
*/ /* Do that funky LED thing */ /* so we can restore them later */ save_LED = readl(ctrl->hpc_reg + LED_CONTROL); work_LED = 0x01010101; switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); work_LED = 0x01010000; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); work_LED = 0x00000101; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); work_LED = 0x01010000; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); for (loop = 0; loop < num_of_slots; loop++) { set_SOGO(ctrl); /* Wait for SOGO interrupt */ wait_for_ctrl_irq (ctrl); /* Get ready for next iteration */ long_delay((3*HZ)/10); work_LED = work_LED >> 16; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); set_SOGO(ctrl); /* Wait for SOGO interrupt */ wait_for_ctrl_irq (ctrl); /* Get ready for next iteration */ long_delay((3*HZ)/10); work_LED = work_LED << 16; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); work_LED = work_LED << 1; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); } /* put it back the way it was */ writel(save_LED, ctrl->hpc_reg + LED_CONTROL); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); break; case 2: /* Do other stuff here! */ break; case 3: /* and more... */ break; } return 0; } /** * configure_new_device - Configures the PCI header information of one board. * @ctrl: pointer to controller structure * @func: pointer to function structure * @behind_bridge: 1 if this is a recursive call, 0 if not * @resources: pointer to set of resource lists * * Returns 0 if success. 
*/ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func, u8 behind_bridge, struct resource_lists * resources) { u8 temp_byte, function, max_functions, stop_it; int rc; u32 ID; struct pci_func *new_slot; int index; new_slot = func; dbg("%s\n", __func__); /* Check for Multi-function device */ ctrl->pci_bus->number = func->bus; rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(func->device, func->function), 0x0E, &temp_byte); if (rc) { dbg("%s: rc = %d\n", __func__, rc); return rc; } if (temp_byte & 0x80) /* Multi-function device */ max_functions = 8; else max_functions = 1; function = 0; do { rc = configure_new_function(ctrl, new_slot, behind_bridge, resources); if (rc) { dbg("configure_new_function failed %d\n",rc); index = 0; while (new_slot) { new_slot = cpqhp_slot_find(new_slot->bus, new_slot->device, index++); if (new_slot) cpqhp_return_board_resources(new_slot, resources); } return rc; } function++; stop_it = 0; /* The following loop skips to the next present function * and creates a board structure */ while ((function < max_functions) && (!stop_it)) { pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID); if (ID == 0xFFFFFFFF) { function++; } else { /* Setup slot structure. 
*/ new_slot = cpqhp_slot_create(func->bus); if (new_slot == NULL) return 1; new_slot->bus = func->bus; new_slot->device = func->device; new_slot->function = function; new_slot->is_a_board = 1; new_slot->status = 0; stop_it++; } } } while (function < max_functions); dbg("returning from configure_new_device\n"); return 0; } /* * Configuration logic that involves the hotplug data structures and * their bookkeeping */ /** * configure_new_function - Configures the PCI header information of one device * @ctrl: pointer to controller structure * @func: pointer to function structure * @behind_bridge: 1 if this is a recursive call, 0 if not * @resources: pointer to set of resource lists * * Calls itself recursively for bridged devices. * Returns 0 if success. */ static int configure_new_function(struct controller *ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources) { int cloop; u8 IRQ = 0; u8 temp_byte; u8 device; u8 class_code; u16 command; u16 temp_word; u32 temp_dword; u32 rc; u32 temp_register; u32 base; u32 ID; unsigned int devfn; struct pci_resource *mem_node; struct pci_resource *p_mem_node; struct pci_resource *io_node; struct pci_resource *bus_node; struct pci_resource *hold_mem_node; struct pci_resource *hold_p_mem_node; struct pci_resource *hold_IO_node; struct pci_resource *hold_bus_node; struct irq_mapping irqs; struct pci_func *new_slot; struct pci_bus *pci_bus; struct resource_lists temp_resources; pci_bus = ctrl->pci_bus; pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); /* Check for Bridge */ rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &temp_byte); if (rc) return rc; if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* set Primary bus */ dbg("set Primary bus = %d\n", func->bus); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus); if (rc) return rc; /* find range of busses to use */ dbg("find ranges of buses to use\n"); bus_node = 
get_max_resource(&(resources->bus_head), 1); /* If we don't have any busses to allocate, we can't continue */ if (!bus_node) return -ENOMEM; /* set Secondary bus */ temp_byte = bus_node->base; dbg("set Secondary bus = %d\n", bus_node->base); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, temp_byte); if (rc) return rc; /* set subordinate bus */ temp_byte = bus_node->base + bus_node->length - 1; dbg("set subordinate bus = %d\n", bus_node->base + bus_node->length - 1); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte); if (rc) return rc; /* set subordinate Latency Timer and base Latency Timer */ temp_byte = 0x40; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SEC_LATENCY_TIMER, temp_byte); if (rc) return rc; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte); if (rc) return rc; /* set Cache Line size */ temp_byte = 0x08; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte); if (rc) return rc; /* Setup the IO, memory, and prefetchable windows */ io_node = get_max_resource(&(resources->io_head), 0x1000); if (!io_node) return -ENOMEM; mem_node = get_max_resource(&(resources->mem_head), 0x100000); if (!mem_node) return -ENOMEM; p_mem_node = get_max_resource(&(resources->p_mem_head), 0x100000); if (!p_mem_node) return -ENOMEM; dbg("Setup the IO, memory, and prefetchable windows\n"); dbg("io_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", io_node->base, io_node->length, io_node->next); dbg("mem_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", mem_node->base, mem_node->length, mem_node->next); dbg("p_mem_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", p_mem_node->base, p_mem_node->length, p_mem_node->next); /* set up the IRQ info */ if (!resources->irqs) { irqs.barber_pole = 0; irqs.interrupt[0] = 0; irqs.interrupt[1] = 0; irqs.interrupt[2] = 0; irqs.interrupt[3] = 0; irqs.valid_INT = 0; } else { irqs.barber_pole = resources->irqs->barber_pole; irqs.interrupt[0] = 
resources->irqs->interrupt[0]; irqs.interrupt[1] = resources->irqs->interrupt[1]; irqs.interrupt[2] = resources->irqs->interrupt[2]; irqs.interrupt[3] = resources->irqs->interrupt[3]; irqs.valid_INT = resources->irqs->valid_INT; } /* set up resource lists that are now aligned on top and bottom * for anything behind the bridge. */ temp_resources.bus_head = bus_node; temp_resources.io_head = io_node; temp_resources.mem_head = mem_node; temp_resources.p_mem_head = p_mem_node; temp_resources.irqs = &irqs; /* Make copies of the nodes we are going to pass down so that * if there is a problem,we can just use these to free resources */ hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL); hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL); hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL); hold_p_mem_node = kmalloc(sizeof(*hold_p_mem_node), GFP_KERNEL); if (!hold_bus_node || !hold_IO_node || !hold_mem_node || !hold_p_mem_node) { kfree(hold_bus_node); kfree(hold_IO_node); kfree(hold_mem_node); kfree(hold_p_mem_node); return 1; } memcpy(hold_bus_node, bus_node, sizeof(struct pci_resource)); bus_node->base += 1; bus_node->length -= 1; bus_node->next = NULL; /* If we have IO resources copy them and fill in the bridge's * IO range registers */ memcpy(hold_IO_node, io_node, sizeof(struct pci_resource)); io_node->next = NULL; /* set IO base and Limit registers */ temp_byte = io_node->base >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte); temp_byte = (io_node->base + io_node->length - 1) >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte); /* Copy the memory resources and fill in the bridge's memory * range registers. 
*/ memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource)); mem_node->next = NULL; /* set Mem base and Limit registers */ temp_word = mem_node->base >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word); temp_word = (mem_node->base + mem_node->length - 1) >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource)); p_mem_node->next = NULL; /* set Pre Mem base and Limit registers */ temp_word = p_mem_node->base >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word); temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); /* Adjust this to compensate for extra adjustment in first loop */ irqs.barber_pole--; rc = 0; /* Here we actually find the devices and configure them */ for (device = 0; (device <= 0x1F) && !rc; device++) { irqs.barber_pole = (irqs.barber_pole + 1) & 0x03; ID = 0xFFFFFFFF; pci_bus->number = hold_bus_node->base; pci_bus_read_config_dword (pci_bus, PCI_DEVFN(device, 0), 0x00, &ID); pci_bus->number = func->bus; if (ID != 0xFFFFFFFF) { /* device present */ /* Setup slot structure. */ new_slot = cpqhp_slot_create(hold_bus_node->base); if (new_slot == NULL) { rc = -ENOMEM; continue; } new_slot->bus = hold_bus_node->base; new_slot->device = device; new_slot->function = 0; new_slot->is_a_board = 1; new_slot->status = 0; rc = configure_new_device(ctrl, new_slot, 1, &temp_resources); dbg("configure_new_device rc=0x%x\n",rc); } /* End of IF (device in slot?) 
*/ } /* End of FOR loop */ if (rc) goto free_and_out; /* save the interrupt routing information */ if (resources->irqs) { resources->irqs->interrupt[0] = irqs.interrupt[0]; resources->irqs->interrupt[1] = irqs.interrupt[1]; resources->irqs->interrupt[2] = irqs.interrupt[2]; resources->irqs->interrupt[3] = irqs.interrupt[3]; resources->irqs->valid_INT = irqs.valid_INT; } else if (!behind_bridge) { /* We need to hook up the interrupts here */ for (cloop = 0; cloop < 4; cloop++) { if (irqs.valid_INT & (0x01 << cloop)) { rc = cpqhp_set_irq(func->bus, func->device, cloop + 1, irqs.interrupt[cloop]); if (rc) goto free_and_out; } } /* end of for loop */ } /* Return unused bus resources * First use the temporary node to store information for * the board */ if (bus_node && temp_resources.bus_head) { hold_bus_node->length = bus_node->base - hold_bus_node->base; hold_bus_node->next = func->bus_head; func->bus_head = hold_bus_node; temp_byte = temp_resources.bus_head->base - 1; /* set subordinate bus */ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte); if (temp_resources.bus_head->length == 0) { kfree(temp_resources.bus_head); temp_resources.bus_head = NULL; } else { return_resource(&(resources->bus_head), temp_resources.bus_head); } } /* If we have IO space available and there is some left, * return the unused portion */ if (hold_IO_node && temp_resources.io_head) { io_node = do_pre_bridge_resource_split(&(temp_resources.io_head), &hold_IO_node, 0x1000); /* Check if we were able to split something off */ if (io_node) { hold_IO_node->base = io_node->base + io_node->length; temp_byte = (hold_IO_node->base) >> 8; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_IO_BASE, temp_byte); return_resource(&(resources->io_head), io_node); } io_node = do_bridge_resource_split(&(temp_resources.io_head), 0x1000); /* Check if we were able to split something off */ if (io_node) { /* First use the temporary node to store * information for the board */ 
hold_IO_node->length = io_node->base - hold_IO_node->base; /* If we used any, add it to the board's list */ if (hold_IO_node->length) { hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; temp_byte = (io_node->base - 1) >> 8; rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_IO_LIMIT, temp_byte); return_resource(&(resources->io_head), io_node); } else { /* it doesn't need any IO */ temp_word = 0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_IO_LIMIT, temp_word); return_resource(&(resources->io_head), io_node); kfree(hold_IO_node); } } else { /* it used most of the range */ hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; } } else if (hold_IO_node) { /* it used the whole range */ hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; } /* If we have memory space available and there is some left, * return the unused portion */ if (hold_mem_node && temp_resources.mem_head) { mem_node = do_pre_bridge_resource_split(&(temp_resources. mem_head), &hold_mem_node, 0x100000); /* Check if we were able to split something off */ if (mem_node) { hold_mem_node->base = mem_node->base + mem_node->length; temp_word = (hold_mem_node->base) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_BASE, temp_word); return_resource(&(resources->mem_head), mem_node); } mem_node = do_bridge_resource_split(&(temp_resources.mem_head), 0x100000); /* Check if we were able to split something off */ if (mem_node) { /* First use the temporary node to store * information for the board */ hold_mem_node->length = mem_node->base - hold_mem_node->base; if (hold_mem_node->length) { hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; /* configure end address */ temp_word = (mem_node->base - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); /* Return unused resources to the pool */ return_resource(&(resources->mem_head), mem_node); } else { /* it doesn't need any Mem */ temp_word = 
0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); return_resource(&(resources->mem_head), mem_node); kfree(hold_mem_node); } } else { /* it used most of the range */ hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; } } else if (hold_mem_node) { /* it used the whole range */ hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; } /* If we have prefetchable memory space available and there * is some left at the end, return the unused portion */ if (temp_resources.p_mem_head) { p_mem_node = do_pre_bridge_resource_split(&(temp_resources.p_mem_head), &hold_p_mem_node, 0x100000); /* Check if we were able to split something off */ if (p_mem_node) { hold_p_mem_node->base = p_mem_node->base + p_mem_node->length; temp_word = (hold_p_mem_node->base) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); } p_mem_node = do_bridge_resource_split(&(temp_resources.p_mem_head), 0x100000); /* Check if we were able to split something off */ if (p_mem_node) { /* First use the temporary node to store * information for the board */ hold_p_mem_node->length = p_mem_node->base - hold_p_mem_node->base; /* If we used any, add it to the board's list */ if (hold_p_mem_node->length) { hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; temp_word = (p_mem_node->base - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); } else { /* it doesn't need any PMem */ temp_word = 0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); kfree(hold_p_mem_node); } } else { /* it used the most of the range */ hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; } } else if (hold_p_mem_node) { /* it used the whole range 
*/ hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; } /* We should be configuring an IRQ and the bridge's base address * registers if it needs them. Although we have never seen such * a device */ /* enable card */ command = 0x0157; /* = PCI_COMMAND_IO | * PCI_COMMAND_MEMORY | * PCI_COMMAND_MASTER | * PCI_COMMAND_INVALIDATE | * PCI_COMMAND_PARITY | * PCI_COMMAND_SERR */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_COMMAND, command); /* set Bridge Control Register */ command = 0x07; /* = PCI_BRIDGE_CTL_PARITY | * PCI_BRIDGE_CTL_SERR | * PCI_BRIDGE_CTL_NO_ISA */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_BRIDGE_CONTROL, command); } else if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_NORMAL) { /* Standard device */ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); if (class_code == PCI_BASE_CLASS_DISPLAY) { /* Display (video) adapter (not supported) */ return DEVICE_TYPE_NOT_SUPPORTED; } /* Figure out IO and memory needs */ for (cloop = 0x10; cloop <= 0x24; cloop += 4) { temp_register = 0xFFFFFFFF; dbg("CND: bus=%d, devfn=%d, offset=%d\n", pci_bus->number, devfn, cloop); rc = pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); rc = pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp_register); dbg("CND: base = 0x%x\n", temp_register); if (temp_register) { /* If this register is implemented */ if ((temp_register & 0x03L) == 0x01) { /* Map IO */ /* set base = amount of IO space */ base = temp_register & 0xFFFFFFFC; base = ~base + 1; dbg("CND: length = 0x%x\n", base); io_node = get_io_resource(&(resources->io_head), base); dbg("Got io_node start = %8.8x, length = %8.8x next (%p)\n", io_node->base, io_node->length, io_node->next); dbg("func (%p) io_head (%p)\n", func, func->io_head); /* allocate the resource to the board */ if (io_node) { base = io_node->base; io_node->next = func->io_head; func->io_head = io_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x08) { /* Map 
prefetchable memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); p_mem_node = get_resource(&(resources->p_mem_head), base); /* allocate the resource to the board */ if (p_mem_node) { base = p_mem_node->base; p_mem_node->next = func->p_mem_head; func->p_mem_head = p_mem_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x00) { /* Map memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); mem_node = get_resource(&(resources->mem_head), base); /* allocate the resource to the board */ if (mem_node) { base = mem_node->base; mem_node->next = func->mem_head; func->mem_head = mem_node; } else return -ENOMEM; } else { /* Reserved bits or requesting space below 1M */ return NOT_ENOUGH_RESOURCES; } rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base); /* Check for 64-bit base */ if ((temp_register & 0x07L) == 0x04) { cloop += 4; /* Upper 32 bits of address always zero * on today's systems */ /* FIXME this is probably not true on * Alpha and ia64??? 
*/ base = 0; rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base); } } } /* End of base register loop */ if (cpqhp_legacy_mode) { /* Figure out which interrupt pin this function uses */ rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_INTERRUPT_PIN, &temp_byte); /* If this function needs an interrupt and we are behind * a bridge and the pin is tied to something that's * alread mapped, set this one the same */ if (temp_byte && resources->irqs && (resources->irqs->valid_INT & (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { /* We have to share with something already set up */ IRQ = resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03]; } else { /* Program IRQ based on card type */ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); if (class_code == PCI_BASE_CLASS_STORAGE) IRQ = cpqhp_disk_irq; else IRQ = cpqhp_nic_irq; } /* IRQ Line */ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_INTERRUPT_LINE, IRQ); } if (!behind_bridge) { rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ); if (rc) return 1; } else { /* TBD - this code may also belong in the other clause * of this If statement */ resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03] = IRQ; resources->irqs->valid_INT |= 0x01 << (temp_byte + resources->irqs->barber_pole - 1) & 0x03; } /* Latency Timer */ temp_byte = 0x40; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte); /* Cache Line size */ temp_byte = 0x08; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte); /* disable ROM base Address */ temp_dword = 0x00L; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_ROM_ADDRESS, temp_dword); /* enable card */ temp_word = 0x0157; /* = PCI_COMMAND_IO | * PCI_COMMAND_MEMORY | * PCI_COMMAND_MASTER | * PCI_COMMAND_INVALIDATE | * PCI_COMMAND_PARITY | * PCI_COMMAND_SERR */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_COMMAND, temp_word); } else { /* 
End of Not-A-Bridge else */ /* It's some strange type of PCI adapter (Cardbus?) */ return DEVICE_TYPE_NOT_SUPPORTED; } func->configured = 1; return 0; free_and_out: cpqhp_destroy_resource_list (&temp_resources); return_resource(&(resources-> bus_head), hold_bus_node); return_resource(&(resources-> io_head), hold_IO_node); return_resource(&(resources-> mem_head), hold_mem_node); return_resource(&(resources-> p_mem_head), hold_p_mem_node); return rc; }
gpl-2.0
sleshepic/G920T_OI1_kernel
fs/ubifs/lprops.c
2543
36765
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file implements the functions that access LEB properties and their * categories. LEBs are categorized based on the needs of UBIFS, and the * categories are stored as either heaps or lists to provide a fast way of * finding a LEB in a particular category. For example, UBIFS may need to find * an empty LEB for the journal, or a very dirty LEB for garbage collection. */ #include "ubifs.h" /** * get_heap_comp_val - get the LEB properties value for heap comparisons. * @lprops: LEB properties * @cat: LEB category */ static int get_heap_comp_val(struct ubifs_lprops *lprops, int cat) { switch (cat) { case LPROPS_FREE: return lprops->free; case LPROPS_DIRTY_IDX: return lprops->free + lprops->dirty; default: return lprops->dirty; } } /** * move_up_lpt_heap - move a new heap entry up as far as possible. * @c: UBIFS file-system description object * @heap: LEB category heap * @lprops: LEB properties to move * @cat: LEB category * * New entries to a heap are added at the bottom and then moved up until the * parent's value is greater. In the case of LPT's category heaps, the value * is either the amount of free space or the amount of dirty space, depending * on the category. 
*/ static void move_up_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, struct ubifs_lprops *lprops, int cat) { int val1, val2, hpos; hpos = lprops->hpos; if (!hpos) return; /* Already top of the heap */ val1 = get_heap_comp_val(lprops, cat); /* Compare to parent and, if greater, move up the heap */ do { int ppos = (hpos - 1) / 2; val2 = get_heap_comp_val(heap->arr[ppos], cat); if (val2 >= val1) return; /* Greater than parent so move up */ heap->arr[ppos]->hpos = hpos; heap->arr[hpos] = heap->arr[ppos]; heap->arr[ppos] = lprops; lprops->hpos = ppos; hpos = ppos; } while (hpos); } /** * adjust_lpt_heap - move a changed heap entry up or down the heap. * @c: UBIFS file-system description object * @heap: LEB category heap * @lprops: LEB properties to move * @hpos: heap position of @lprops * @cat: LEB category * * Changed entries in a heap are moved up or down until the parent's value is * greater. In the case of LPT's category heaps, the value is either the amount * of free space or the amount of dirty space, depending on the category. 
*/ static void adjust_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, struct ubifs_lprops *lprops, int hpos, int cat) { int val1, val2, val3, cpos; val1 = get_heap_comp_val(lprops, cat); /* Compare to parent and, if greater than parent, move up the heap */ if (hpos) { int ppos = (hpos - 1) / 2; val2 = get_heap_comp_val(heap->arr[ppos], cat); if (val1 > val2) { /* Greater than parent so move up */ while (1) { heap->arr[ppos]->hpos = hpos; heap->arr[hpos] = heap->arr[ppos]; heap->arr[ppos] = lprops; lprops->hpos = ppos; hpos = ppos; if (!hpos) return; ppos = (hpos - 1) / 2; val2 = get_heap_comp_val(heap->arr[ppos], cat); if (val1 <= val2) return; /* Still greater than parent so keep going */ } } } /* Not greater than parent, so compare to children */ while (1) { /* Compare to left child */ cpos = hpos * 2 + 1; if (cpos >= heap->cnt) return; val2 = get_heap_comp_val(heap->arr[cpos], cat); if (val1 < val2) { /* Less than left child, so promote biggest child */ if (cpos + 1 < heap->cnt) { val3 = get_heap_comp_val(heap->arr[cpos + 1], cat); if (val3 > val2) cpos += 1; /* Right child is bigger */ } heap->arr[cpos]->hpos = hpos; heap->arr[hpos] = heap->arr[cpos]; heap->arr[cpos] = lprops; lprops->hpos = cpos; hpos = cpos; continue; } /* Compare to right child */ cpos += 1; if (cpos >= heap->cnt) return; val3 = get_heap_comp_val(heap->arr[cpos], cat); if (val1 < val3) { /* Less than right child, so promote right child */ heap->arr[cpos]->hpos = hpos; heap->arr[hpos] = heap->arr[cpos]; heap->arr[cpos] = lprops; lprops->hpos = cpos; hpos = cpos; continue; } return; } } /** * add_to_lpt_heap - add LEB properties to a LEB category heap. * @c: UBIFS file-system description object * @lprops: LEB properties to add * @cat: LEB category * * This function returns %1 if @lprops is added to the heap for LEB category * @cat, otherwise %0 is returned because the heap is full. 
*/ static int add_to_lpt_heap(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat) { struct ubifs_lpt_heap *heap = &c->lpt_heap[cat - 1]; if (heap->cnt >= heap->max_cnt) { const int b = LPT_HEAP_SZ / 2 - 1; int cpos, val1, val2; /* Compare to some other LEB on the bottom of heap */ /* Pick a position kind of randomly */ cpos = (((size_t)lprops >> 4) & b) + b; ubifs_assert(cpos >= b); ubifs_assert(cpos < LPT_HEAP_SZ); ubifs_assert(cpos < heap->cnt); val1 = get_heap_comp_val(lprops, cat); val2 = get_heap_comp_val(heap->arr[cpos], cat); if (val1 > val2) { struct ubifs_lprops *lp; lp = heap->arr[cpos]; lp->flags &= ~LPROPS_CAT_MASK; lp->flags |= LPROPS_UNCAT; list_add(&lp->list, &c->uncat_list); lprops->hpos = cpos; heap->arr[cpos] = lprops; move_up_lpt_heap(c, heap, lprops, cat); dbg_check_heap(c, heap, cat, lprops->hpos); return 1; /* Added to heap */ } dbg_check_heap(c, heap, cat, -1); return 0; /* Not added to heap */ } else { lprops->hpos = heap->cnt++; heap->arr[lprops->hpos] = lprops; move_up_lpt_heap(c, heap, lprops, cat); dbg_check_heap(c, heap, cat, lprops->hpos); return 1; /* Added to heap */ } } /** * remove_from_lpt_heap - remove LEB properties from a LEB category heap. * @c: UBIFS file-system description object * @lprops: LEB properties to remove * @cat: LEB category */ static void remove_from_lpt_heap(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat) { struct ubifs_lpt_heap *heap; int hpos = lprops->hpos; heap = &c->lpt_heap[cat - 1]; ubifs_assert(hpos >= 0 && hpos < heap->cnt); ubifs_assert(heap->arr[hpos] == lprops); heap->cnt -= 1; if (hpos < heap->cnt) { heap->arr[hpos] = heap->arr[heap->cnt]; heap->arr[hpos]->hpos = hpos; adjust_lpt_heap(c, heap, heap->arr[hpos], hpos, cat); } dbg_check_heap(c, heap, cat, -1); } /** * lpt_heap_replace - replace lprops in a category heap. 
* @c: UBIFS file-system description object * @old_lprops: LEB properties to replace * @new_lprops: LEB properties with which to replace * @cat: LEB category * * During commit it is sometimes necessary to copy a pnode (see dirty_cow_pnode) * and the lprops that the pnode contains. When that happens, references in * the category heaps to those lprops must be updated to point to the new * lprops. This function does that. */ static void lpt_heap_replace(struct ubifs_info *c, struct ubifs_lprops *old_lprops, struct ubifs_lprops *new_lprops, int cat) { struct ubifs_lpt_heap *heap; int hpos = new_lprops->hpos; heap = &c->lpt_heap[cat - 1]; heap->arr[hpos] = new_lprops; } /** * ubifs_add_to_cat - add LEB properties to a category list or heap. * @c: UBIFS file-system description object * @lprops: LEB properties to add * @cat: LEB category to which to add * * LEB properties are categorized to enable fast find operations. */ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat) { switch (cat) { case LPROPS_DIRTY: case LPROPS_DIRTY_IDX: case LPROPS_FREE: if (add_to_lpt_heap(c, lprops, cat)) break; /* No more room on heap so make it un-categorized */ cat = LPROPS_UNCAT; /* Fall through */ case LPROPS_UNCAT: list_add(&lprops->list, &c->uncat_list); break; case LPROPS_EMPTY: list_add(&lprops->list, &c->empty_list); break; case LPROPS_FREEABLE: list_add(&lprops->list, &c->freeable_list); c->freeable_cnt += 1; break; case LPROPS_FRDI_IDX: list_add(&lprops->list, &c->frdi_idx_list); break; default: ubifs_assert(0); } lprops->flags &= ~LPROPS_CAT_MASK; lprops->flags |= cat; c->in_a_category_cnt += 1; ubifs_assert(c->in_a_category_cnt <= c->main_lebs); } /** * ubifs_remove_from_cat - remove LEB properties from a category list or heap. * @c: UBIFS file-system description object * @lprops: LEB properties to remove * @cat: LEB category from which to remove * * LEB properties are categorized to enable fast find operations. 
*/ static void ubifs_remove_from_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat) { switch (cat) { case LPROPS_DIRTY: case LPROPS_DIRTY_IDX: case LPROPS_FREE: remove_from_lpt_heap(c, lprops, cat); break; case LPROPS_FREEABLE: c->freeable_cnt -= 1; ubifs_assert(c->freeable_cnt >= 0); /* Fall through */ case LPROPS_UNCAT: case LPROPS_EMPTY: case LPROPS_FRDI_IDX: ubifs_assert(!list_empty(&lprops->list)); list_del(&lprops->list); break; default: ubifs_assert(0); } c->in_a_category_cnt -= 1; ubifs_assert(c->in_a_category_cnt >= 0); } /** * ubifs_replace_cat - replace lprops in a category list or heap. * @c: UBIFS file-system description object * @old_lprops: LEB properties to replace * @new_lprops: LEB properties with which to replace * * During commit it is sometimes necessary to copy a pnode (see dirty_cow_pnode) * and the lprops that the pnode contains. When that happens, references in * category lists and heaps must be replaced. This function does that. */ void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops, struct ubifs_lprops *new_lprops) { int cat; cat = new_lprops->flags & LPROPS_CAT_MASK; switch (cat) { case LPROPS_DIRTY: case LPROPS_DIRTY_IDX: case LPROPS_FREE: lpt_heap_replace(c, old_lprops, new_lprops, cat); break; case LPROPS_UNCAT: case LPROPS_EMPTY: case LPROPS_FREEABLE: case LPROPS_FRDI_IDX: list_replace(&old_lprops->list, &new_lprops->list); break; default: ubifs_assert(0); } } /** * ubifs_ensure_cat - ensure LEB properties are categorized. * @c: UBIFS file-system description object * @lprops: LEB properties * * A LEB may have fallen off of the bottom of a heap, and ended up as * un-categorized even though it has enough space for us now. If that is the * case this function will put the LEB back onto a heap. 
*/ void ubifs_ensure_cat(struct ubifs_info *c, struct ubifs_lprops *lprops) { int cat = lprops->flags & LPROPS_CAT_MASK; if (cat != LPROPS_UNCAT) return; cat = ubifs_categorize_lprops(c, lprops); if (cat == LPROPS_UNCAT) return; ubifs_remove_from_cat(c, lprops, LPROPS_UNCAT); ubifs_add_to_cat(c, lprops, cat); } /** * ubifs_categorize_lprops - categorize LEB properties. * @c: UBIFS file-system description object * @lprops: LEB properties to categorize * * LEB properties are categorized to enable fast find operations. This function * returns the LEB category to which the LEB properties belong. Note however * that if the LEB category is stored as a heap and the heap is full, the * LEB properties may have their category changed to %LPROPS_UNCAT. */ int ubifs_categorize_lprops(const struct ubifs_info *c, const struct ubifs_lprops *lprops) { if (lprops->flags & LPROPS_TAKEN) return LPROPS_UNCAT; if (lprops->free == c->leb_size) { ubifs_assert(!(lprops->flags & LPROPS_INDEX)); return LPROPS_EMPTY; } if (lprops->free + lprops->dirty == c->leb_size) { if (lprops->flags & LPROPS_INDEX) return LPROPS_FRDI_IDX; else return LPROPS_FREEABLE; } if (lprops->flags & LPROPS_INDEX) { if (lprops->dirty + lprops->free >= c->min_idx_node_sz) return LPROPS_DIRTY_IDX; } else { if (lprops->dirty >= c->dead_wm && lprops->dirty > lprops->free) return LPROPS_DIRTY; if (lprops->free > 0) return LPROPS_FREE; } return LPROPS_UNCAT; } /** * change_category - change LEB properties category. * @c: UBIFS file-system description object * @lprops: LEB properties to re-categorize * * LEB properties are categorized to enable fast find operations. When the LEB * properties change they must be re-categorized. 
*/ static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops) { int old_cat = lprops->flags & LPROPS_CAT_MASK; int new_cat = ubifs_categorize_lprops(c, lprops); if (old_cat == new_cat) { struct ubifs_lpt_heap *heap; /* lprops on a heap now must be moved up or down */ if (new_cat < 1 || new_cat > LPROPS_HEAP_CNT) return; /* Not on a heap */ heap = &c->lpt_heap[new_cat - 1]; adjust_lpt_heap(c, heap, lprops, lprops->hpos, new_cat); } else { ubifs_remove_from_cat(c, lprops, old_cat); ubifs_add_to_cat(c, lprops, new_cat); } } /** * ubifs_calc_dark - calculate LEB dark space size. * @c: the UBIFS file-system description object * @spc: amount of free and dirty space in the LEB * * This function calculates and returns amount of dark space in an LEB which * has @spc bytes of free and dirty space. * * UBIFS is trying to account the space which might not be usable, and this * space is called "dark space". For example, if an LEB has only %512 free * bytes, it is dark space, because it cannot fit a large data node. */ int ubifs_calc_dark(const struct ubifs_info *c, int spc) { ubifs_assert(!(spc & 7)); if (spc < c->dark_wm) return spc; /* * If we have slightly more space then the dark space watermark, we can * anyway safely assume it we'll be able to write a node of the * smallest size there. */ if (spc - c->dark_wm < MIN_WRITE_SZ) return spc - MIN_WRITE_SZ; return c->dark_wm; } /** * is_lprops_dirty - determine if LEB properties are dirty. * @c: the UBIFS file-system description object * @lprops: LEB properties to test */ static int is_lprops_dirty(struct ubifs_info *c, struct ubifs_lprops *lprops) { struct ubifs_pnode *pnode; int pos; pos = (lprops->lnum - c->main_first) & (UBIFS_LPT_FANOUT - 1); pnode = (struct ubifs_pnode *)container_of(lprops - pos, struct ubifs_pnode, lprops[0]); return !test_bit(COW_CNODE, &pnode->flags) && test_bit(DIRTY_CNODE, &pnode->flags); } /** * ubifs_change_lp - change LEB properties. 
* @c: the UBIFS file-system description object * @lp: LEB properties to change * @free: new free space amount * @dirty: new dirty space amount * @flags: new flags * @idx_gc_cnt: change to the count of @idx_gc list * * This function changes LEB properties (@free, @dirty or @flag). However, the * property which has the %LPROPS_NC value is not changed. Returns a pointer to * the updated LEB properties on success and a negative error code on failure. * * Note, the LEB properties may have had to be copied (due to COW) and * consequently the pointer returned may not be the same as the pointer * passed. */ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, const struct ubifs_lprops *lp, int free, int dirty, int flags, int idx_gc_cnt) { /* * This is the only function that is allowed to change lprops, so we * discard the "const" qualifier. */ struct ubifs_lprops *lprops = (struct ubifs_lprops *)lp; dbg_lp("LEB %d, free %d, dirty %d, flags %d", lprops->lnum, free, dirty, flags); ubifs_assert(mutex_is_locked(&c->lp_mutex)); ubifs_assert(c->lst.empty_lebs >= 0 && c->lst.empty_lebs <= c->main_lebs); ubifs_assert(c->freeable_cnt >= 0); ubifs_assert(c->freeable_cnt <= c->main_lebs); ubifs_assert(c->lst.taken_empty_lebs >= 0); ubifs_assert(c->lst.taken_empty_lebs <= c->lst.empty_lebs); ubifs_assert(!(c->lst.total_free & 7) && !(c->lst.total_dirty & 7)); ubifs_assert(!(c->lst.total_dead & 7) && !(c->lst.total_dark & 7)); ubifs_assert(!(c->lst.total_used & 7)); ubifs_assert(free == LPROPS_NC || free >= 0); ubifs_assert(dirty == LPROPS_NC || dirty >= 0); if (!is_lprops_dirty(c, lprops)) { lprops = ubifs_lpt_lookup_dirty(c, lprops->lnum); if (IS_ERR(lprops)) return lprops; } else ubifs_assert(lprops == ubifs_lpt_lookup_dirty(c, lprops->lnum)); ubifs_assert(!(lprops->free & 7) && !(lprops->dirty & 7)); spin_lock(&c->space_lock); if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size) c->lst.taken_empty_lebs -= 1; if (!(lprops->flags & LPROPS_INDEX)) { int 
old_spc; old_spc = lprops->free + lprops->dirty; if (old_spc < c->dead_wm) c->lst.total_dead -= old_spc; else c->lst.total_dark -= ubifs_calc_dark(c, old_spc); c->lst.total_used -= c->leb_size - old_spc; } if (free != LPROPS_NC) { free = ALIGN(free, 8); c->lst.total_free += free - lprops->free; /* Increase or decrease empty LEBs counter if needed */ if (free == c->leb_size) { if (lprops->free != c->leb_size) c->lst.empty_lebs += 1; } else if (lprops->free == c->leb_size) c->lst.empty_lebs -= 1; lprops->free = free; } if (dirty != LPROPS_NC) { dirty = ALIGN(dirty, 8); c->lst.total_dirty += dirty - lprops->dirty; lprops->dirty = dirty; } if (flags != LPROPS_NC) { /* Take care about indexing LEBs counter if needed */ if ((lprops->flags & LPROPS_INDEX)) { if (!(flags & LPROPS_INDEX)) c->lst.idx_lebs -= 1; } else if (flags & LPROPS_INDEX) c->lst.idx_lebs += 1; lprops->flags = flags; } if (!(lprops->flags & LPROPS_INDEX)) { int new_spc; new_spc = lprops->free + lprops->dirty; if (new_spc < c->dead_wm) c->lst.total_dead += new_spc; else c->lst.total_dark += ubifs_calc_dark(c, new_spc); c->lst.total_used += c->leb_size - new_spc; } if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size) c->lst.taken_empty_lebs += 1; change_category(c, lprops); c->idx_gc_cnt += idx_gc_cnt; spin_unlock(&c->space_lock); return lprops; } /** * ubifs_get_lp_stats - get lprops statistics. * @c: UBIFS file-system description object * @st: return statistics */ void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst) { spin_lock(&c->space_lock); memcpy(lst, &c->lst, sizeof(struct ubifs_lp_stats)); spin_unlock(&c->space_lock); } /** * ubifs_change_one_lp - change LEB properties. 
* @c: the UBIFS file-system description object * @lnum: LEB to change properties for * @free: amount of free space * @dirty: amount of dirty space * @flags_set: flags to set * @flags_clean: flags to clean * @idx_gc_cnt: change to the count of idx_gc list * * This function changes properties of LEB @lnum. It is a helper wrapper over * 'ubifs_change_lp()' which hides lprops get/release. The arguments are the * same as in case of 'ubifs_change_lp()'. Returns zero in case of success and * a negative error code in case of failure. */ int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, int flags_set, int flags_clean, int idx_gc_cnt) { int err = 0, flags; const struct ubifs_lprops *lp; ubifs_get_lprops(c); lp = ubifs_lpt_lookup_dirty(c, lnum); if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out; } flags = (lp->flags | flags_set) & ~flags_clean; lp = ubifs_change_lp(c, lp, free, dirty, flags, idx_gc_cnt); if (IS_ERR(lp)) err = PTR_ERR(lp); out: ubifs_release_lprops(c); if (err) ubifs_err("cannot change properties of LEB %d, error %d", lnum, err); return err; } /** * ubifs_update_one_lp - update LEB properties. * @c: the UBIFS file-system description object * @lnum: LEB to change properties for * @free: amount of free space * @dirty: amount of dirty space to add * @flags_set: flags to set * @flags_clean: flags to clean * * This function is the same as 'ubifs_change_one_lp()' but @dirty is added to * current dirty space, not substitutes it. 
*/ int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, int flags_set, int flags_clean) { int err = 0, flags; const struct ubifs_lprops *lp; ubifs_get_lprops(c); lp = ubifs_lpt_lookup_dirty(c, lnum); if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out; } flags = (lp->flags | flags_set) & ~flags_clean; lp = ubifs_change_lp(c, lp, free, lp->dirty + dirty, flags, 0); if (IS_ERR(lp)) err = PTR_ERR(lp); out: ubifs_release_lprops(c); if (err) ubifs_err("cannot update properties of LEB %d, error %d", lnum, err); return err; } /** * ubifs_read_one_lp - read LEB properties. * @c: the UBIFS file-system description object * @lnum: LEB to read properties for * @lp: where to store read properties * * This helper function reads properties of a LEB @lnum and stores them in @lp. * Returns zero in case of success and a negative error code in case of * failure. */ int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp) { int err = 0; const struct ubifs_lprops *lpp; ubifs_get_lprops(c); lpp = ubifs_lpt_lookup(c, lnum); if (IS_ERR(lpp)) { err = PTR_ERR(lpp); ubifs_err("cannot read properties of LEB %d, error %d", lnum, err); goto out; } memcpy(lp, lpp, sizeof(struct ubifs_lprops)); out: ubifs_release_lprops(c); return err; } /** * ubifs_fast_find_free - try to find a LEB with free space quickly. * @c: the UBIFS file-system description object * * This function returns LEB properties for a LEB with free space or %NULL if * the function is unable to find a LEB quickly. */ const struct ubifs_lprops *ubifs_fast_find_free(struct ubifs_info *c) { struct ubifs_lprops *lprops; struct ubifs_lpt_heap *heap; ubifs_assert(mutex_is_locked(&c->lp_mutex)); heap = &c->lpt_heap[LPROPS_FREE - 1]; if (heap->cnt == 0) return NULL; lprops = heap->arr[0]; ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); ubifs_assert(!(lprops->flags & LPROPS_INDEX)); return lprops; } /** * ubifs_fast_find_empty - try to find an empty LEB quickly. 
* @c: the UBIFS file-system description object * * This function returns LEB properties for an empty LEB or %NULL if the * function is unable to find an empty LEB quickly. */ const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c) { struct ubifs_lprops *lprops; ubifs_assert(mutex_is_locked(&c->lp_mutex)); if (list_empty(&c->empty_list)) return NULL; lprops = list_entry(c->empty_list.next, struct ubifs_lprops, list); ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); ubifs_assert(!(lprops->flags & LPROPS_INDEX)); ubifs_assert(lprops->free == c->leb_size); return lprops; } /** * ubifs_fast_find_freeable - try to find a freeable LEB quickly. * @c: the UBIFS file-system description object * * This function returns LEB properties for a freeable LEB or %NULL if the * function is unable to find a freeable LEB quickly. */ const struct ubifs_lprops *ubifs_fast_find_freeable(struct ubifs_info *c) { struct ubifs_lprops *lprops; ubifs_assert(mutex_is_locked(&c->lp_mutex)); if (list_empty(&c->freeable_list)) return NULL; lprops = list_entry(c->freeable_list.next, struct ubifs_lprops, list); ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); ubifs_assert(!(lprops->flags & LPROPS_INDEX)); ubifs_assert(lprops->free + lprops->dirty == c->leb_size); ubifs_assert(c->freeable_cnt > 0); return lprops; } /** * ubifs_fast_find_frdi_idx - try to find a freeable index LEB quickly. * @c: the UBIFS file-system description object * * This function returns LEB properties for a freeable index LEB or %NULL if the * function is unable to find a freeable index LEB quickly. 
*/ const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c) { struct ubifs_lprops *lprops; ubifs_assert(mutex_is_locked(&c->lp_mutex)); if (list_empty(&c->frdi_idx_list)) return NULL; lprops = list_entry(c->frdi_idx_list.next, struct ubifs_lprops, list); ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); ubifs_assert((lprops->flags & LPROPS_INDEX)); ubifs_assert(lprops->free + lprops->dirty == c->leb_size); return lprops; } /* * Everything below is related to debugging. */ /** * dbg_check_cats - check category heaps and lists. * @c: UBIFS file-system description object * * This function returns %0 on success and a negative error code on failure. */ int dbg_check_cats(struct ubifs_info *c) { struct ubifs_lprops *lprops; struct list_head *pos; int i, cat; if (!dbg_is_chk_gen(c) && !dbg_is_chk_lprops(c)) return 0; list_for_each_entry(lprops, &c->empty_list, list) { if (lprops->free != c->leb_size) { ubifs_err("non-empty LEB %d on empty list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { ubifs_err("taken LEB %d on empty list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } } i = 0; list_for_each_entry(lprops, &c->freeable_list, list) { if (lprops->free + lprops->dirty != c->leb_size) { ubifs_err("non-freeable LEB %d on freeable list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { ubifs_err("taken LEB %d on freeable list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } i += 1; } if (i != c->freeable_cnt) { ubifs_err("freeable list count %d expected %d", i, c->freeable_cnt); return -EINVAL; } i = 0; list_for_each(pos, &c->idx_gc) i += 1; if (i != c->idx_gc_cnt) { ubifs_err("idx_gc list count %d expected %d", i, c->idx_gc_cnt); return -EINVAL; } 
list_for_each_entry(lprops, &c->frdi_idx_list, list) { if (lprops->free + lprops->dirty != c->leb_size) { ubifs_err("non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { ubifs_err("taken LEB %d on frdi_idx list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } if (!(lprops->flags & LPROPS_INDEX)) { ubifs_err("non-index LEB %d on frdi_idx list (free %d dirty %d flags %d)", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); return -EINVAL; } } for (cat = 1; cat <= LPROPS_HEAP_CNT; cat++) { struct ubifs_lpt_heap *heap = &c->lpt_heap[cat - 1]; for (i = 0; i < heap->cnt; i++) { lprops = heap->arr[i]; if (!lprops) { ubifs_err("null ptr in LPT heap cat %d", cat); return -EINVAL; } if (lprops->hpos != i) { ubifs_err("bad ptr in LPT heap cat %d", cat); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { ubifs_err("taken LEB in LPT heap cat %d", cat); return -EINVAL; } } } return 0; } void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, int add_pos) { int i = 0, j, err = 0; if (!dbg_is_chk_gen(c) && !dbg_is_chk_lprops(c)) return; for (i = 0; i < heap->cnt; i++) { struct ubifs_lprops *lprops = heap->arr[i]; struct ubifs_lprops *lp; if (i != add_pos) if ((lprops->flags & LPROPS_CAT_MASK) != cat) { err = 1; goto out; } if (lprops->hpos != i) { err = 2; goto out; } lp = ubifs_lpt_lookup(c, lprops->lnum); if (IS_ERR(lp)) { err = 3; goto out; } if (lprops != lp) { ubifs_err("lprops %zx lp %zx lprops->lnum %d lp->lnum %d", (size_t)lprops, (size_t)lp, lprops->lnum, lp->lnum); err = 4; goto out; } for (j = 0; j < i; j++) { lp = heap->arr[j]; if (lp == lprops) { err = 5; goto out; } if (lp->lnum == lprops->lnum) { err = 6; goto out; } } } out: if (err) { ubifs_err("failed cat %d hpos %d err %d", cat, i, err); dump_stack(); ubifs_dump_heap(c, heap, cat); } } /** * 
scan_check_cb - scan callback. * @c: the UBIFS file-system description object * @lp: LEB properties to scan * @in_tree: whether the LEB properties are in main memory * @lst: lprops statistics to update * * This function returns a code that indicates whether the scan should continue * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree * in main memory (%LPT_SCAN_ADD), or whether the scan should stop * (%LPT_SCAN_STOP). */ static int scan_check_cb(struct ubifs_info *c, const struct ubifs_lprops *lp, int in_tree, struct ubifs_lp_stats *lst) { struct ubifs_scan_leb *sleb; struct ubifs_scan_node *snod; int cat, lnum = lp->lnum, is_idx = 0, used = 0, free, dirty, ret; void *buf = NULL; cat = lp->flags & LPROPS_CAT_MASK; if (cat != LPROPS_UNCAT) { cat = ubifs_categorize_lprops(c, lp); if (cat != (lp->flags & LPROPS_CAT_MASK)) { ubifs_err("bad LEB category %d expected %d", (lp->flags & LPROPS_CAT_MASK), cat); return -EINVAL; } } /* Check lp is on its category list (if it has one) */ if (in_tree) { struct list_head *list = NULL; switch (cat) { case LPROPS_EMPTY: list = &c->empty_list; break; case LPROPS_FREEABLE: list = &c->freeable_list; break; case LPROPS_FRDI_IDX: list = &c->frdi_idx_list; break; case LPROPS_UNCAT: list = &c->uncat_list; break; } if (list) { struct ubifs_lprops *lprops; int found = 0; list_for_each_entry(lprops, list, list) { if (lprops == lp) { found = 1; break; } } if (!found) { ubifs_err("bad LPT list (category %d)", cat); return -EINVAL; } } } /* Check lp is on its category heap (if it has one) */ if (in_tree && cat > 0 && cat <= LPROPS_HEAP_CNT) { struct ubifs_lpt_heap *heap = &c->lpt_heap[cat - 1]; if ((lp->hpos != -1 && heap->arr[lp->hpos]->lnum != lnum) || lp != heap->arr[lp->hpos]) { ubifs_err("bad LPT heap (category %d)", cat); return -EINVAL; } } buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); if (!buf) return -ENOMEM; /* * After an unclean unmount, empty and freeable LEBs * may contain garbage - do not scan 
them. */ if (lp->free == c->leb_size) { lst->empty_lebs += 1; lst->total_free += c->leb_size; lst->total_dark += ubifs_calc_dark(c, c->leb_size); return LPT_SCAN_CONTINUE; } if (lp->free + lp->dirty == c->leb_size && !(lp->flags & LPROPS_INDEX)) { lst->total_free += lp->free; lst->total_dirty += lp->dirty; lst->total_dark += ubifs_calc_dark(c, c->leb_size); return LPT_SCAN_CONTINUE; } sleb = ubifs_scan(c, lnum, 0, buf, 0); if (IS_ERR(sleb)) { ret = PTR_ERR(sleb); if (ret == -EUCLEAN) { ubifs_dump_lprops(c); ubifs_dump_budg(c, &c->bi); } goto out; } is_idx = -1; list_for_each_entry(snod, &sleb->nodes, list) { int found, level = 0; cond_resched(); if (is_idx == -1) is_idx = (snod->type == UBIFS_IDX_NODE) ? 1 : 0; if (is_idx && snod->type != UBIFS_IDX_NODE) { ubifs_err("indexing node in data LEB %d:%d", lnum, snod->offs); goto out_destroy; } if (snod->type == UBIFS_IDX_NODE) { struct ubifs_idx_node *idx = snod->node; key_read(c, ubifs_idx_key(c, idx), &snod->key); level = le16_to_cpu(idx->level); } found = ubifs_tnc_has_node(c, &snod->key, level, lnum, snod->offs, is_idx); if (found) { if (found < 0) goto out_destroy; used += ALIGN(snod->len, 8); } } free = c->leb_size - sleb->endpt; dirty = sleb->endpt - used; if (free > c->leb_size || free < 0 || dirty > c->leb_size || dirty < 0) { ubifs_err("bad calculated accounting for LEB %d: free %d, dirty %d", lnum, free, dirty); goto out_destroy; } if (lp->free + lp->dirty == c->leb_size && free + dirty == c->leb_size) if ((is_idx && !(lp->flags & LPROPS_INDEX)) || (!is_idx && free == c->leb_size) || lp->free == c->leb_size) { /* * Empty or freeable LEBs could contain index * nodes from an uncompleted commit due to an * unclean unmount. Or they could be empty for * the same reason. Or it may simply not have been * unmapped. 
*/ free = lp->free; dirty = lp->dirty; is_idx = 0; } if (is_idx && lp->free + lp->dirty == free + dirty && lnum != c->ihead_lnum) { /* * After an unclean unmount, an index LEB could have a different * amount of free space than the value recorded by lprops. That * is because the in-the-gaps method may use free space or * create free space (as a side-effect of using ubi_leb_change * and not writing the whole LEB). The incorrect free space * value is not a problem because the index is only ever * allocated empty LEBs, so there will never be an attempt to * write to the free space at the end of an index LEB - except * by the in-the-gaps method for which it is not a problem. */ free = lp->free; dirty = lp->dirty; } if (lp->free != free || lp->dirty != dirty) goto out_print; if (is_idx && !(lp->flags & LPROPS_INDEX)) { if (free == c->leb_size) /* Free but not unmapped LEB, it's fine */ is_idx = 0; else { ubifs_err("indexing node without indexing flag"); goto out_print; } } if (!is_idx && (lp->flags & LPROPS_INDEX)) { ubifs_err("data node with indexing flag"); goto out_print; } if (free == c->leb_size) lst->empty_lebs += 1; if (is_idx) lst->idx_lebs += 1; if (!(lp->flags & LPROPS_INDEX)) lst->total_used += c->leb_size - free - dirty; lst->total_free += free; lst->total_dirty += dirty; if (!(lp->flags & LPROPS_INDEX)) { int spc = free + dirty; if (spc < c->dead_wm) lst->total_dead += spc; else lst->total_dark += ubifs_calc_dark(c, spc); } ubifs_scan_destroy(sleb); vfree(buf); return LPT_SCAN_CONTINUE; out_print: ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d", lnum, lp->free, lp->dirty, lp->flags, free, dirty); ubifs_dump_leb(c, lnum); out_destroy: ubifs_scan_destroy(sleb); ret = -EINVAL; out: vfree(buf); return ret; } /** * dbg_check_lprops - check all LEB properties. * @c: UBIFS file-system description object * * This function checks all LEB properties and makes sure they are all correct. 
* It returns zero if everything is fine, %-EINVAL if there is an inconsistency * and other negative error codes in case of other errors. This function is * called while the file system is locked (because of commit start), so no * additional locking is required. Note that locking the LPT mutex would cause * a circular lock dependency with the TNC mutex. */ int dbg_check_lprops(struct ubifs_info *c) { int i, err; struct ubifs_lp_stats lst; if (!dbg_is_chk_lprops(c)) return 0; /* * As we are going to scan the media, the write buffers have to be * synchronized. */ for (i = 0; i < c->jhead_cnt; i++) { err = ubifs_wbuf_sync(&c->jheads[i].wbuf); if (err) return err; } memset(&lst, 0, sizeof(struct ubifs_lp_stats)); err = ubifs_lpt_scan_nolock(c, c->main_first, c->leb_cnt - 1, (ubifs_lpt_scan_callback)scan_check_cb, &lst); if (err && err != -ENOSPC) goto out; if (lst.empty_lebs != c->lst.empty_lebs || lst.idx_lebs != c->lst.idx_lebs || lst.total_free != c->lst.total_free || lst.total_dirty != c->lst.total_dirty || lst.total_used != c->lst.total_used) { ubifs_err("bad overall accounting"); ubifs_err("calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld", lst.empty_lebs, lst.idx_lebs, lst.total_free, lst.total_dirty, lst.total_used); ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld", c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free, c->lst.total_dirty, c->lst.total_used); err = -EINVAL; goto out; } if (lst.total_dead != c->lst.total_dead || lst.total_dark != c->lst.total_dark) { ubifs_err("bad dead/dark space accounting"); ubifs_err("calculated: total_dead %lld, total_dark %lld", lst.total_dead, lst.total_dark); ubifs_err("read from lprops: total_dead %lld, total_dark %lld", c->lst.total_dead, c->lst.total_dark); err = -EINVAL; goto out; } err = dbg_check_cats(c); out: return err; }
gpl-2.0
ffosilva/kernel
arch/powerpc/platforms/83xx/mpc836x_rdk.c
4079
1598
/* * MPC8360E-RDK board file. * * Copyright (c) 2006 Freescale Semiconductor, Inc. * Copyright (c) 2007-2008 MontaVista Software, Inc. * * Author: Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/of_platform.h> #include <linux/io.h> #include <asm/prom.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <asm/qe.h> #include <asm/qe_ic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" machine_device_initcall(mpc836x_rdk, mpc83xx_declare_of_platform_devices); static void __init mpc836x_rdk_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("mpc836x_rdk_setup_arch()", 0); mpc83xx_setup_pci(); #ifdef CONFIG_QUICC_ENGINE qe_reset(); #endif } /* * Called very early, MMU is off, device-tree isn't unflattened. */ static int __init mpc836x_rdk_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,mpc8360rdk"); } define_machine(mpc836x_rdk) { .name = "MPC836x RDK", .probe = mpc836x_rdk_probe, .setup_arch = mpc836x_rdk_setup_arch, .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
Arc-Team/android_kernel_htc_evita
drivers/hwmon/w83627hf.c
4847
56094
/* * w83627hf.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring * Copyright (c) 1998 - 2003 Frodo Looijaard <frodol@dds.nl>, * Philip Edelbrock <phil@netroedge.com>, * and Mark Studebaker <mdsxyz123@yahoo.com> * Ported to 2.6 by Bernhard C. Schrenk <clemy@clemy.org> * Copyright (c) 2007 Jean Delvare <khali@linux-fr.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Supports following chips: * * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA * w83627hf 9 3 2 3 0x20 0x5ca3 no yes(LPC) * w83627thf 7 3 3 3 0x90 0x5ca3 no yes(LPC) * w83637hf 7 3 3 3 0x80 0x5ca3 no yes(LPC) * w83687thf 7 3 3 3 0x90 0x5ca3 no yes(LPC) * w83697hf 8 2 2 2 0x60 0x5ca3 no yes(LPC) * * For other winbond chips, and for i2c support in the above chips, * use w83781d.c. * * Note: automatic ("cruise") fan control for 697, 637 & 627thf not * supported yet. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/ioport.h> #include <linux/acpi.h> #include <linux/io.h> #include "lm75.h" static struct platform_device *pdev; #define DRVNAME "w83627hf" enum chips { w83627hf, w83627thf, w83697hf, w83637hf, w83687thf }; struct w83627hf_sio_data { enum chips type; int sioaddr; }; static u8 force_i2c = 0x1f; module_param(force_i2c, byte, 0); MODULE_PARM_DESC(force_i2c, "Initialize the i2c address of the sensors"); static bool init = 1; module_param(init, bool, 0); MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization"); static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); /* modified from kernel/include/traps.c */ #define DEV 0x07 /* Register: Logical device select */ /* logical device numbers for superio_select (below) */ #define W83627HF_LD_FDC 0x00 #define W83627HF_LD_PRT 0x01 #define W83627HF_LD_UART1 0x02 #define W83627HF_LD_UART2 0x03 #define W83627HF_LD_KBC 0x05 #define W83627HF_LD_CIR 0x06 /* w83627hf only */ #define W83627HF_LD_GAME 0x07 #define W83627HF_LD_MIDI 0x07 #define W83627HF_LD_GPIO1 0x07 #define W83627HF_LD_GPIO5 0x07 /* w83627thf only */ #define W83627HF_LD_GPIO2 0x08 #define W83627HF_LD_GPIO3 0x09 #define W83627HF_LD_GPIO4 0x09 /* w83627thf only */ #define W83627HF_LD_ACPI 0x0a #define W83627HF_LD_HWM 0x0b #define DEVID 0x20 /* Register: Device ID */ #define W83627THF_GPIO5_EN 0x30 /* w83627thf only */ #define W83627THF_GPIO5_IOSR 0xf3 /* w83627thf only */ #define W83627THF_GPIO5_DR 0xf4 /* w83627thf only */ #define W83687THF_VID_EN 0x29 /* w83687thf only */ #define W83687THF_VID_CFG 0xF0 /* w83687thf only */ #define W83687THF_VID_DATA 0xF1 /* 
w83687thf only */ static inline void superio_outb(struct w83627hf_sio_data *sio, int reg, int val) { outb(reg, sio->sioaddr); outb(val, sio->sioaddr + 1); } static inline int superio_inb(struct w83627hf_sio_data *sio, int reg) { outb(reg, sio->sioaddr); return inb(sio->sioaddr + 1); } static inline void superio_select(struct w83627hf_sio_data *sio, int ld) { outb(DEV, sio->sioaddr); outb(ld, sio->sioaddr + 1); } static inline void superio_enter(struct w83627hf_sio_data *sio) { outb(0x87, sio->sioaddr); outb(0x87, sio->sioaddr); } static inline void superio_exit(struct w83627hf_sio_data *sio) { outb(0xAA, sio->sioaddr); } #define W627_DEVID 0x52 #define W627THF_DEVID 0x82 #define W697_DEVID 0x60 #define W637_DEVID 0x70 #define W687THF_DEVID 0x85 #define WINB_ACT_REG 0x30 #define WINB_BASE_REG 0x60 /* Constants specified below */ /* Alignment of the base address */ #define WINB_ALIGNMENT ~7 /* Offset & size of I/O region we are interested in */ #define WINB_REGION_OFFSET 5 #define WINB_REGION_SIZE 2 /* Where are the sensors address/data registers relative to the region offset */ #define W83781D_ADDR_REG_OFFSET 0 #define W83781D_DATA_REG_OFFSET 1 /* The W83781D registers */ /* The W83782D registers for nr=7,8 are in bank 5 */ #define W83781D_REG_IN_MAX(nr) ((nr < 7) ? (0x2b + (nr) * 2) : \ (0x554 + (((nr) - 7) * 2))) #define W83781D_REG_IN_MIN(nr) ((nr < 7) ? (0x2c + (nr) * 2) : \ (0x555 + (((nr) - 7) * 2))) #define W83781D_REG_IN(nr) ((nr < 7) ? 
(0x20 + (nr)) : \ (0x550 + (nr) - 7)) /* nr:0-2 for fans:1-3 */ #define W83627HF_REG_FAN_MIN(nr) (0x3b + (nr)) #define W83627HF_REG_FAN(nr) (0x28 + (nr)) #define W83627HF_REG_TEMP2_CONFIG 0x152 #define W83627HF_REG_TEMP3_CONFIG 0x252 /* these are zero-based, unlike config constants above */ static const u16 w83627hf_reg_temp[] = { 0x27, 0x150, 0x250 }; static const u16 w83627hf_reg_temp_hyst[] = { 0x3A, 0x153, 0x253 }; static const u16 w83627hf_reg_temp_over[] = { 0x39, 0x155, 0x255 }; #define W83781D_REG_BANK 0x4E #define W83781D_REG_CONFIG 0x40 #define W83781D_REG_ALARM1 0x459 #define W83781D_REG_ALARM2 0x45A #define W83781D_REG_ALARM3 0x45B #define W83781D_REG_BEEP_CONFIG 0x4D #define W83781D_REG_BEEP_INTS1 0x56 #define W83781D_REG_BEEP_INTS2 0x57 #define W83781D_REG_BEEP_INTS3 0x453 #define W83781D_REG_VID_FANDIV 0x47 #define W83781D_REG_CHIPID 0x49 #define W83781D_REG_WCHIPID 0x58 #define W83781D_REG_CHIPMAN 0x4F #define W83781D_REG_PIN 0x4B #define W83781D_REG_VBAT 0x5D #define W83627HF_REG_PWM1 0x5A #define W83627HF_REG_PWM2 0x5B static const u8 W83627THF_REG_PWM_ENABLE[] = { 0x04, /* FAN 1 mode */ 0x04, /* FAN 2 mode */ 0x12, /* FAN AUX mode */ }; static const u8 W83627THF_PWM_ENABLE_SHIFT[] = { 2, 4, 1 }; #define W83627THF_REG_PWM1 0x01 /* 697HF/637HF/687THF too */ #define W83627THF_REG_PWM2 0x03 /* 697HF/637HF/687THF too */ #define W83627THF_REG_PWM3 0x11 /* 637HF/687THF too */ #define W83627THF_REG_VRM_OVT_CFG 0x18 /* 637HF/687THF too */ static const u8 regpwm_627hf[] = { W83627HF_REG_PWM1, W83627HF_REG_PWM2 }; static const u8 regpwm[] = { W83627THF_REG_PWM1, W83627THF_REG_PWM2, W83627THF_REG_PWM3 }; #define W836X7HF_REG_PWM(type, nr) (((type) == w83627hf) ? 
\ regpwm_627hf[nr] : regpwm[nr]) #define W83627HF_REG_PWM_FREQ 0x5C /* Only for the 627HF */ #define W83637HF_REG_PWM_FREQ1 0x00 /* 697HF/687THF too */ #define W83637HF_REG_PWM_FREQ2 0x02 /* 697HF/687THF too */ #define W83637HF_REG_PWM_FREQ3 0x10 /* 687THF too */ static const u8 W83637HF_REG_PWM_FREQ[] = { W83637HF_REG_PWM_FREQ1, W83637HF_REG_PWM_FREQ2, W83637HF_REG_PWM_FREQ3 }; #define W83627HF_BASE_PWM_FREQ 46870 #define W83781D_REG_I2C_ADDR 0x48 #define W83781D_REG_I2C_SUBADDR 0x4A /* Sensor selection */ #define W83781D_REG_SCFG1 0x5D static const u8 BIT_SCFG1[] = { 0x02, 0x04, 0x08 }; #define W83781D_REG_SCFG2 0x59 static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 }; #define W83781D_DEFAULT_BETA 3435 /* * Conversions. Limit checking is only done on the TO_REG * variants. Note that you should be a bit careful with which arguments * these macros are called: arguments may be evaluated more than once. * Fixing this is just not worth it. */ #define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8)/16),0,255)) #define IN_FROM_REG(val) ((val) * 16) static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm == 0) return 255; rpm = SENSORS_LIMIT(rpm, 1, 1000000); return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); } #define TEMP_MIN (-128000) #define TEMP_MAX ( 127000) /* * TEMP: 0.001C/bit (-128C to +127C) * REG: 1C/bit, two's complement */ static u8 TEMP_TO_REG(long temp) { int ntemp = SENSORS_LIMIT(temp, TEMP_MIN, TEMP_MAX); ntemp += (ntemp<0 ? 
-500 : 500); return (u8)(ntemp / 1000); } static int TEMP_FROM_REG(u8 reg) { return (s8)reg * 1000; } #define FAN_FROM_REG(val,div) ((val)==0?-1:(val)==255?0:1350000/((val)*(div))) #define PWM_TO_REG(val) (SENSORS_LIMIT((val),0,255)) static inline unsigned long pwm_freq_from_reg_627hf(u8 reg) { unsigned long freq; freq = W83627HF_BASE_PWM_FREQ >> reg; return freq; } static inline u8 pwm_freq_to_reg_627hf(unsigned long val) { u8 i; /* * Only 5 dividers (1 2 4 8 16) * Search for the nearest available frequency */ for (i = 0; i < 4; i++) { if (val > (((W83627HF_BASE_PWM_FREQ >> i) + (W83627HF_BASE_PWM_FREQ >> (i+1))) / 2)) break; } return i; } static inline unsigned long pwm_freq_from_reg(u8 reg) { /* Clock bit 8 -> 180 kHz or 24 MHz */ unsigned long clock = (reg & 0x80) ? 180000UL : 24000000UL; reg &= 0x7f; /* This should not happen but anyway... */ if (reg == 0) reg++; return clock / (reg << 8); } static inline u8 pwm_freq_to_reg(unsigned long val) { /* Minimum divider value is 0x01 and maximum is 0x7F */ if (val >= 93750) /* The highest we can do */ return 0x01; if (val >= 720) /* Use 24 MHz clock */ return 24000000UL / (val << 8); if (val < 6) /* The lowest we can do */ return 0xFF; else /* Use 180 kHz clock */ return 0x80 | (180000UL / (val << 8)); } #define BEEP_MASK_FROM_REG(val) ((val) & 0xff7fff) #define BEEP_MASK_TO_REG(val) ((val) & 0xff7fff) #define DIV_FROM_REG(val) (1 << (val)) static inline u8 DIV_TO_REG(long val) { int i; val = SENSORS_LIMIT(val, 1, 128) >> 1; for (i = 0; i < 7; i++) { if (val == 0) break; val >>= 1; } return (u8)i; } /* * For each registered chip, we need to keep some data in memory. * The structure is dynamically allocated. 
*/ struct w83627hf_data { unsigned short addr; const char *name; struct device *hwmon_dev; struct mutex lock; enum chips type; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[9]; /* Register value */ u8 in_max[9]; /* Register value */ u8 in_min[9]; /* Register value */ u8 fan[3]; /* Register value */ u8 fan_min[3]; /* Register value */ u16 temp[3]; /* Register value */ u16 temp_max[3]; /* Register value */ u16 temp_max_hyst[3]; /* Register value */ u8 fan_div[3]; /* Register encoding, shifted right */ u8 vid; /* Register encoding, combined */ u32 alarms; /* Register encoding, combined */ u32 beep_mask; /* Register encoding, combined */ u8 pwm[3]; /* Register value */ u8 pwm_enable[3]; /* 1 = manual * 2 = thermal cruise (also called SmartFan I) * 3 = fan speed cruise */ u8 pwm_freq[3]; /* Register value */ u16 sens[3]; /* 1 = pentium diode; 2 = 3904 diode; * 4 = thermistor */ u8 vrm; u8 vrm_ovt; /* Register value, 627THF/637HF/687THF only */ }; static int w83627hf_probe(struct platform_device *pdev); static int __devexit w83627hf_remove(struct platform_device *pdev); static int w83627hf_read_value(struct w83627hf_data *data, u16 reg); static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value); static void w83627hf_update_fan_div(struct w83627hf_data *data); static struct w83627hf_data *w83627hf_update_device(struct device *dev); static void w83627hf_init_device(struct platform_device *pdev); static struct platform_driver w83627hf_driver = { .driver = { .owner = THIS_MODULE, .name = DRVNAME, }, .probe = w83627hf_probe, .remove = __devexit_p(w83627hf_remove), }; static ssize_t show_in_input(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in[nr])); } static ssize_t show_in_min(struct device *dev, 
struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_min[nr])); } static ssize_t show_in_max(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_max[nr])); } static ssize_t store_in_min(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[nr] = IN_TO_REG(val); w83627hf_write_value(data, W83781D_REG_IN_MIN(nr), data->in_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t store_in_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[nr] = IN_TO_REG(val); w83627hf_write_value(data, W83781D_REG_IN_MAX(nr), data->in_max[nr]); mutex_unlock(&data->update_lock); return count; } #define sysfs_vin_decl(offset) \ static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \ show_in_input, NULL, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO|S_IWUSR, \ show_in_min, store_in_min, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO|S_IWUSR, \ show_in_max, store_in_max, offset); sysfs_vin_decl(1); sysfs_vin_decl(2); sysfs_vin_decl(3); sysfs_vin_decl(4); sysfs_vin_decl(5); sysfs_vin_decl(6); sysfs_vin_decl(7); sysfs_vin_decl(8); /* use a different set of functions for in0 */ static ssize_t show_in_0(struct w83627hf_data *data, char 
*buf, u8 reg) { long in0; if ((data->vrm_ovt & 0x01) && (w83627thf == data->type || w83637hf == data->type || w83687thf == data->type)) /* use VRM9 calculation */ in0 = (long)((reg * 488 + 70000 + 50) / 100); else /* use VRM8 (standard) calculation */ in0 = (long)IN_FROM_REG(reg); return sprintf(buf,"%ld\n", in0); } static ssize_t show_regs_in_0(struct device *dev, struct device_attribute *attr, char *buf) { struct w83627hf_data *data = w83627hf_update_device(dev); return show_in_0(data, buf, data->in[0]); } static ssize_t show_regs_in_min0(struct device *dev, struct device_attribute *attr, char *buf) { struct w83627hf_data *data = w83627hf_update_device(dev); return show_in_0(data, buf, data->in_min[0]); } static ssize_t show_regs_in_max0(struct device *dev, struct device_attribute *attr, char *buf) { struct w83627hf_data *data = w83627hf_update_device(dev); return show_in_0(data, buf, data->in_max[0]); } static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); if ((data->vrm_ovt & 0x01) && (w83627thf == data->type || w83637hf == data->type || w83687thf == data->type)) /* use VRM9 calculation */ data->in_min[0] = SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0, 255); else /* use VRM8 (standard) calculation */ data->in_min[0] = IN_TO_REG(val); w83627hf_write_value(data, W83781D_REG_IN_MIN(0), data->in_min[0]); mutex_unlock(&data->update_lock); return count; } static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); if ((data->vrm_ovt & 0x01) && (w83627thf == data->type || w83637hf == data->type || w83687thf == data->type)) 
/* use VRM9 calculation */ data->in_max[0] = SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0, 255); else /* use VRM8 (standard) calculation */ data->in_max[0] = IN_TO_REG(val); w83627hf_write_value(data, W83781D_REG_IN_MAX(0), data->in_max[0]); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(in0_input, S_IRUGO, show_regs_in_0, NULL); static DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR, show_regs_in_min0, store_regs_in_min0); static DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR, show_regs_in_max0, store_regs_in_max0); static ssize_t show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan[nr], (long)DIV_FROM_REG(data->fan_div[nr]))); } static ssize_t show_fan_min(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan_min[nr], (long)DIV_FROM_REG(data->fan_div[nr]))); } static ssize_t store_fan_min(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); w83627hf_write_value(data, W83627HF_REG_FAN_MIN(nr), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } #define sysfs_fan_decl(offset) \ static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \ show_fan_input, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \ show_fan_min, store_fan_min, offset - 1); sysfs_fan_decl(1); sysfs_fan_decl(2); sysfs_fan_decl(3); static ssize_t show_temp(struct device *dev, struct 
device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); u16 tmp = data->temp[nr]; return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp) : (long) TEMP_FROM_REG(tmp)); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); u16 tmp = data->temp_max[nr]; return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp) : (long) TEMP_FROM_REG(tmp)); } static ssize_t show_temp_max_hyst(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); u16 tmp = data->temp_max_hyst[nr]; return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp) : (long) TEMP_FROM_REG(tmp)); } static ssize_t store_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); u16 tmp; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val); mutex_lock(&data->update_lock); data->temp_max[nr] = tmp; w83627hf_write_value(data, w83627hf_reg_temp_over[nr], tmp); mutex_unlock(&data->update_lock); return count; } static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); u16 tmp; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; tmp = (nr) ? 
LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val); mutex_lock(&data->update_lock); data->temp_max_hyst[nr] = tmp; w83627hf_write_value(data, w83627hf_reg_temp_hyst[nr], tmp); mutex_unlock(&data->update_lock); return count; } #define sysfs_temp_decl(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ show_temp, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO|S_IWUSR, \ show_temp_max, store_temp_max, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO|S_IWUSR, \ show_temp_max_hyst, store_temp_max_hyst, offset - 1); sysfs_temp_decl(1); sysfs_temp_decl(2); sysfs_temp_decl(3); static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm)); } static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL); static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct w83627hf_data *data = dev_get_drvdata(dev); return sprintf(buf, "%ld\n", (long) data->vrm); } static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; data->vrm = val; return count; } static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg); static ssize_t show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long) data->alarms); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL); static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { struct w83627hf_data *data = w83627hf_update_device(dev); int bitnr = to_sensor_dev_attr(attr)->index; return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1); } static 
SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9); static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10); static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16); static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7); static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13); static ssize_t show_beep_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long)BEEP_MASK_FROM_REG(data->beep_mask)); } static ssize_t store_beep_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); /* preserve beep enable */ data->beep_mask = (data->beep_mask & 0x8000) | BEEP_MASK_TO_REG(val); w83627hf_write_value(data, W83781D_REG_BEEP_INTS1, data->beep_mask & 0xff); w83627hf_write_value(data, W83781D_REG_BEEP_INTS3, ((data->beep_mask) >> 16) & 0xff); w83627hf_write_value(data, W83781D_REG_BEEP_INTS2, (data->beep_mask >> 8) & 0xff); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR, show_beep_mask, 
store_beep_mask); static ssize_t show_beep(struct device *dev, struct device_attribute *attr, char *buf) { struct w83627hf_data *data = w83627hf_update_device(dev); int bitnr = to_sensor_dev_attr(attr)->index; return sprintf(buf, "%u\n", (data->beep_mask >> bitnr) & 1); } static ssize_t store_beep(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627hf_data *data = dev_get_drvdata(dev); int bitnr = to_sensor_dev_attr(attr)->index; u8 reg; unsigned long bit; int err; err = kstrtoul(buf, 10, &bit); if (err) return err; if (bit & ~1) return -EINVAL; mutex_lock(&data->update_lock); if (bit) data->beep_mask |= (1 << bitnr); else data->beep_mask &= ~(1 << bitnr); if (bitnr < 8) { reg = w83627hf_read_value(data, W83781D_REG_BEEP_INTS1); if (bit) reg |= (1 << bitnr); else reg &= ~(1 << bitnr); w83627hf_write_value(data, W83781D_REG_BEEP_INTS1, reg); } else if (bitnr < 16) { reg = w83627hf_read_value(data, W83781D_REG_BEEP_INTS2); if (bit) reg |= (1 << (bitnr - 8)); else reg &= ~(1 << (bitnr - 8)); w83627hf_write_value(data, W83781D_REG_BEEP_INTS2, reg); } else { reg = w83627hf_read_value(data, W83781D_REG_BEEP_INTS3); if (bit) reg |= (1 << (bitnr - 16)); else reg &= ~(1 << (bitnr - 16)); w83627hf_write_value(data, W83781D_REG_BEEP_INTS3, reg); } mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 0); static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 1); static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 2); static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 3); static SENSOR_DEVICE_ATTR(in4_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 8); static SENSOR_DEVICE_ATTR(in5_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 9); static SENSOR_DEVICE_ATTR(in6_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 10); static SENSOR_DEVICE_ATTR(in7_beep, S_IRUGO | S_IWUSR, show_beep, 
store_beep, 16); static SENSOR_DEVICE_ATTR(in8_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 17); static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 6); static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 7); static SENSOR_DEVICE_ATTR(fan3_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 11); static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 4); static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 5); static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 13); static SENSOR_DEVICE_ATTR(beep_enable, S_IRUGO | S_IWUSR, show_beep, store_beep, 15); static ssize_t show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long) DIV_FROM_REG(data->fan_div[nr])); } /* * Note: we save and restore the fan minimum here, because its value is * determined in part by the fan divisor. This follows the principle of * least surprise; the user doesn't expect the fan minimum to change just * because the divisor changed. */ static ssize_t store_fan_div(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long min; u8 reg; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); /* Save fan_min */ min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); data->fan_div[nr] = DIV_TO_REG(val); reg = (w83627hf_read_value(data, nr==2 ? W83781D_REG_PIN : W83781D_REG_VID_FANDIV) & (nr==0 ? 0xcf : 0x3f)) | ((data->fan_div[nr] & 0x03) << (nr==0 ? 4 : 6)); w83627hf_write_value(data, nr==2 ? 
W83781D_REG_PIN : W83781D_REG_VID_FANDIV, reg); reg = (w83627hf_read_value(data, W83781D_REG_VBAT) & ~(1 << (5 + nr))) | ((data->fan_div[nr] & 0x04) << (3 + nr)); w83627hf_write_value(data, W83781D_REG_VBAT, reg); /* Restore fan_min */ data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); w83627hf_write_value(data, W83627HF_REG_FAN_MIN(nr), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO|S_IWUSR, show_fan_div, store_fan_div, 0); static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO|S_IWUSR, show_fan_div, store_fan_div, 1); static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO|S_IWUSR, show_fan_div, store_fan_div, 2); static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long) data->pwm[nr]); } static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); if (data->type == w83627thf) { /* bits 0-3 are reserved in 627THF */ data->pwm[nr] = PWM_TO_REG(val) & 0xf0; w83627hf_write_value(data, W836X7HF_REG_PWM(data->type, nr), data->pwm[nr] | (w83627hf_read_value(data, W836X7HF_REG_PWM(data->type, nr)) & 0x0f)); } else { data->pwm[nr] = PWM_TO_REG(val); w83627hf_write_value(data, W836X7HF_REG_PWM(data->type, nr), data->pwm[nr]); } mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0); static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 1); static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 2); static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *devattr, char 
*buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%d\n", data->pwm_enable[nr]); } static ssize_t store_pwm_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); u8 reg; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; if (!val || val > 3) /* modes 1, 2 and 3 are supported */ return -EINVAL; mutex_lock(&data->update_lock); data->pwm_enable[nr] = val; reg = w83627hf_read_value(data, W83627THF_REG_PWM_ENABLE[nr]); reg &= ~(0x03 << W83627THF_PWM_ENABLE_SHIFT[nr]); reg |= (val - 1) << W83627THF_PWM_ENABLE_SHIFT[nr]; w83627hf_write_value(data, W83627THF_REG_PWM_ENABLE[nr], reg); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR, show_pwm_enable, store_pwm_enable, 0); static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO|S_IWUSR, show_pwm_enable, store_pwm_enable, 1); static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable, store_pwm_enable, 2); static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); if (data->type == w83627hf) return sprintf(buf, "%ld\n", pwm_freq_from_reg_627hf(data->pwm_freq[nr])); else return sprintf(buf, "%ld\n", pwm_freq_from_reg(data->pwm_freq[nr])); } static ssize_t store_pwm_freq(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); static const u8 mask[]={0xF8, 0x8F}; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); if (data->type == w83627hf) { data->pwm_freq[nr] = pwm_freq_to_reg_627hf(val); 
w83627hf_write_value(data, W83627HF_REG_PWM_FREQ, (data->pwm_freq[nr] << (nr*4)) | (w83627hf_read_value(data, W83627HF_REG_PWM_FREQ) & mask[nr])); } else { data->pwm_freq[nr] = pwm_freq_to_reg(val); w83627hf_write_value(data, W83637HF_REG_PWM_FREQ[nr], data->pwm_freq[nr]); } mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO|S_IWUSR, show_pwm_freq, store_pwm_freq, 0); static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO|S_IWUSR, show_pwm_freq, store_pwm_freq, 1); static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO|S_IWUSR, show_pwm_freq, store_pwm_freq, 2); static ssize_t show_temp_type(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long) data->sens[nr]); } static ssize_t store_temp_type(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long val; u32 tmp; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); switch (val) { case 1: /* PII/Celeron diode */ tmp = w83627hf_read_value(data, W83781D_REG_SCFG1); w83627hf_write_value(data, W83781D_REG_SCFG1, tmp | BIT_SCFG1[nr]); tmp = w83627hf_read_value(data, W83781D_REG_SCFG2); w83627hf_write_value(data, W83781D_REG_SCFG2, tmp | BIT_SCFG2[nr]); data->sens[nr] = val; break; case 2: /* 3904 */ tmp = w83627hf_read_value(data, W83781D_REG_SCFG1); w83627hf_write_value(data, W83781D_REG_SCFG1, tmp | BIT_SCFG1[nr]); tmp = w83627hf_read_value(data, W83781D_REG_SCFG2); w83627hf_write_value(data, W83781D_REG_SCFG2, tmp & ~BIT_SCFG2[nr]); data->sens[nr] = val; break; case W83781D_DEFAULT_BETA: dev_warn(dev, "Sensor type %d is deprecated, please use 4 " "instead\n", W83781D_DEFAULT_BETA); /* fall through */ case 4: /* thermistor */ tmp = w83627hf_read_value(data, 
W83781D_REG_SCFG1); w83627hf_write_value(data, W83781D_REG_SCFG1, tmp & ~BIT_SCFG1[nr]); data->sens[nr] = val; break; default: dev_err(dev, "Invalid sensor type %ld; must be 1, 2, or 4\n", (long) val); break; } mutex_unlock(&data->update_lock); return count; } #define sysfs_temp_type(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_type, S_IRUGO | S_IWUSR, \ show_temp_type, store_temp_type, offset - 1); sysfs_temp_type(1); sysfs_temp_type(2); sysfs_temp_type(3); static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct w83627hf_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static int __init w83627hf_find(int sioaddr, unsigned short *addr, struct w83627hf_sio_data *sio_data) { int err = -ENODEV; u16 val; static const __initdata char *names[] = { "W83627HF", "W83627THF", "W83697HF", "W83637HF", "W83687THF", }; sio_data->sioaddr = sioaddr; superio_enter(sio_data); val = force_id ? 
force_id : superio_inb(sio_data, DEVID); switch (val) { case W627_DEVID: sio_data->type = w83627hf; break; case W627THF_DEVID: sio_data->type = w83627thf; break; case W697_DEVID: sio_data->type = w83697hf; break; case W637_DEVID: sio_data->type = w83637hf; break; case W687THF_DEVID: sio_data->type = w83687thf; break; case 0xff: /* No device at all */ goto exit; default: pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%02x)\n", val); goto exit; } superio_select(sio_data, W83627HF_LD_HWM); val = (superio_inb(sio_data, WINB_BASE_REG) << 8) | superio_inb(sio_data, WINB_BASE_REG + 1); *addr = val & WINB_ALIGNMENT; if (*addr == 0) { pr_warn("Base address not set, skipping\n"); goto exit; } val = superio_inb(sio_data, WINB_ACT_REG); if (!(val & 0x01)) { pr_warn("Enabling HWM logical device\n"); superio_outb(sio_data, WINB_ACT_REG, val | 0x01); } err = 0; pr_info(DRVNAME ": Found %s chip at %#x\n", names[sio_data->type], *addr); exit: superio_exit(sio_data); return err; } #define VIN_UNIT_ATTRS(_X_) \ &sensor_dev_attr_in##_X_##_input.dev_attr.attr, \ &sensor_dev_attr_in##_X_##_min.dev_attr.attr, \ &sensor_dev_attr_in##_X_##_max.dev_attr.attr, \ &sensor_dev_attr_in##_X_##_alarm.dev_attr.attr, \ &sensor_dev_attr_in##_X_##_beep.dev_attr.attr #define FAN_UNIT_ATTRS(_X_) \ &sensor_dev_attr_fan##_X_##_input.dev_attr.attr, \ &sensor_dev_attr_fan##_X_##_min.dev_attr.attr, \ &sensor_dev_attr_fan##_X_##_div.dev_attr.attr, \ &sensor_dev_attr_fan##_X_##_alarm.dev_attr.attr, \ &sensor_dev_attr_fan##_X_##_beep.dev_attr.attr #define TEMP_UNIT_ATTRS(_X_) \ &sensor_dev_attr_temp##_X_##_input.dev_attr.attr, \ &sensor_dev_attr_temp##_X_##_max.dev_attr.attr, \ &sensor_dev_attr_temp##_X_##_max_hyst.dev_attr.attr, \ &sensor_dev_attr_temp##_X_##_type.dev_attr.attr, \ &sensor_dev_attr_temp##_X_##_alarm.dev_attr.attr, \ &sensor_dev_attr_temp##_X_##_beep.dev_attr.attr static struct attribute *w83627hf_attributes[] = { &dev_attr_in0_input.attr, &dev_attr_in0_min.attr, &dev_attr_in0_max.attr, 
&sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in0_beep.dev_attr.attr, VIN_UNIT_ATTRS(2), VIN_UNIT_ATTRS(3), VIN_UNIT_ATTRS(4), VIN_UNIT_ATTRS(7), VIN_UNIT_ATTRS(8), FAN_UNIT_ATTRS(1), FAN_UNIT_ATTRS(2), TEMP_UNIT_ATTRS(1), TEMP_UNIT_ATTRS(2), &dev_attr_alarms.attr, &sensor_dev_attr_beep_enable.dev_attr.attr, &dev_attr_beep_mask.attr, &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm2.dev_attr.attr, &dev_attr_name.attr, NULL }; static const struct attribute_group w83627hf_group = { .attrs = w83627hf_attributes, }; static struct attribute *w83627hf_attributes_opt[] = { VIN_UNIT_ATTRS(1), VIN_UNIT_ATTRS(5), VIN_UNIT_ATTRS(6), FAN_UNIT_ATTRS(3), TEMP_UNIT_ATTRS(3), &sensor_dev_attr_pwm3.dev_attr.attr, &sensor_dev_attr_pwm1_freq.dev_attr.attr, &sensor_dev_attr_pwm2_freq.dev_attr.attr, &sensor_dev_attr_pwm3_freq.dev_attr.attr, &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm2_enable.dev_attr.attr, &sensor_dev_attr_pwm3_enable.dev_attr.attr, NULL }; static const struct attribute_group w83627hf_group_opt = { .attrs = w83627hf_attributes_opt, }; static int __devinit w83627hf_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct w83627hf_sio_data *sio_data = dev->platform_data; struct w83627hf_data *data; struct resource *res; int err, i; static const char *names[] = { "w83627hf", "w83627thf", "w83697hf", "w83637hf", "w83687thf", }; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start, WINB_REGION_SIZE, DRVNAME)) { dev_err(dev, "Failed to request region 0x%lx-0x%lx\n", (unsigned long)res->start, (unsigned long)(res->start + WINB_REGION_SIZE - 1)); err = -EBUSY; goto ERROR0; } data = kzalloc(sizeof(struct w83627hf_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto ERROR1; } data->addr = res->start; data->type = sio_data->type; data->name = names[sio_data->type]; mutex_init(&data->lock); mutex_init(&data->update_lock); platform_set_drvdata(pdev, data); /* Initialize the chip */ 
w83627hf_init_device(pdev); /* A few vars need to be filled upon startup */ for (i = 0; i <= 2; i++) data->fan_min[i] = w83627hf_read_value( data, W83627HF_REG_FAN_MIN(i)); w83627hf_update_fan_div(data); /* Register common device attributes */ err = sysfs_create_group(&dev->kobj, &w83627hf_group); if (err) goto ERROR3; /* Register chip-specific device attributes */ if (data->type == w83627hf || data->type == w83697hf) if ((err = device_create_file(dev, &sensor_dev_attr_in5_input.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in5_min.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in5_max.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in5_alarm.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in5_beep.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in6_input.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in6_min.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in6_max.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in6_alarm.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in6_beep.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_pwm1_freq.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_pwm2_freq.dev_attr))) goto ERROR4; if (data->type != w83697hf) if ((err = device_create_file(dev, &sensor_dev_attr_in1_input.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in1_min.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in1_max.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in1_alarm.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in1_beep.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_fan3_input.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_fan3_min.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_fan3_div.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_fan3_alarm.dev_attr)) || (err = device_create_file(dev, 
&sensor_dev_attr_fan3_beep.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_input.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_max.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_max_hyst.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_alarm.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_beep.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_type.dev_attr))) goto ERROR4; if (data->type != w83697hf && data->vid != 0xff) { /* Convert VID to voltage based on VRM */ data->vrm = vid_which_vrm(); if ((err = device_create_file(dev, &dev_attr_cpu0_vid)) || (err = device_create_file(dev, &dev_attr_vrm))) goto ERROR4; } if (data->type == w83627thf || data->type == w83637hf || data->type == w83687thf) { err = device_create_file(dev, &sensor_dev_attr_pwm3.dev_attr); if (err) goto ERROR4; } if (data->type == w83637hf || data->type == w83687thf) if ((err = device_create_file(dev, &sensor_dev_attr_pwm1_freq.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_pwm2_freq.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_pwm3_freq.dev_attr))) goto ERROR4; if (data->type != w83627hf) if ((err = device_create_file(dev, &sensor_dev_attr_pwm1_enable.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_pwm2_enable.dev_attr))) goto ERROR4; if (data->type == w83627thf || data->type == w83637hf || data->type == w83687thf) { err = device_create_file(dev, &sensor_dev_attr_pwm3_enable.dev_attr); if (err) goto ERROR4; } data->hwmon_dev = hwmon_device_register(dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto ERROR4; } return 0; ERROR4: sysfs_remove_group(&dev->kobj, &w83627hf_group); sysfs_remove_group(&dev->kobj, &w83627hf_group_opt); ERROR3: platform_set_drvdata(pdev, NULL); kfree(data); ERROR1: release_region(res->start, WINB_REGION_SIZE); ERROR0: return err; } static int __devexit w83627hf_remove(struct platform_device 
*pdev) { struct w83627hf_data *data = platform_get_drvdata(pdev); struct resource *res; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group); sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group_opt); platform_set_drvdata(pdev, NULL); kfree(data); res = platform_get_resource(pdev, IORESOURCE_IO, 0); release_region(res->start, WINB_REGION_SIZE); return 0; } /* Registers 0x50-0x5f are banked */ static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg) { if ((reg & 0x00f0) == 0x50) { outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET); outb_p(reg >> 8, data->addr + W83781D_DATA_REG_OFFSET); } } /* Not strictly necessary, but play it safe for now */ static inline void w83627hf_reset_bank(struct w83627hf_data *data, u16 reg) { if (reg & 0xff00) { outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET); outb_p(0, data->addr + W83781D_DATA_REG_OFFSET); } } static int w83627hf_read_value(struct w83627hf_data *data, u16 reg) { int res, word_sized; mutex_lock(&data->lock); word_sized = (((reg & 0xff00) == 0x100) || ((reg & 0xff00) == 0x200)) && (((reg & 0x00ff) == 0x50) || ((reg & 0x00ff) == 0x53) || ((reg & 0x00ff) == 0x55)); w83627hf_set_bank(data, reg); outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET); res = inb_p(data->addr + W83781D_DATA_REG_OFFSET); if (word_sized) { outb_p((reg & 0xff) + 1, data->addr + W83781D_ADDR_REG_OFFSET); res = (res << 8) + inb_p(data->addr + W83781D_DATA_REG_OFFSET); } w83627hf_reset_bank(data, reg); mutex_unlock(&data->lock); return res; } static int __devinit w83627thf_read_gpio5(struct platform_device *pdev) { struct w83627hf_sio_data *sio_data = pdev->dev.platform_data; int res = 0xff, sel; superio_enter(sio_data); superio_select(sio_data, W83627HF_LD_GPIO5); /* Make sure these GPIO pins are enabled */ if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) { dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n"); goto exit; } /* * Make sure the pins 
are configured for input * There must be at least five (VRM 9), and possibly 6 (VRM 10) */ sel = superio_inb(sio_data, W83627THF_GPIO5_IOSR) & 0x3f; if ((sel & 0x1f) != 0x1f) { dev_dbg(&pdev->dev, "GPIO5 not configured for VID " "function\n"); goto exit; } dev_info(&pdev->dev, "Reading VID from GPIO5\n"); res = superio_inb(sio_data, W83627THF_GPIO5_DR) & sel; exit: superio_exit(sio_data); return res; } static int __devinit w83687thf_read_vid(struct platform_device *pdev) { struct w83627hf_sio_data *sio_data = pdev->dev.platform_data; int res = 0xff; superio_enter(sio_data); superio_select(sio_data, W83627HF_LD_HWM); /* Make sure these GPIO pins are enabled */ if (!(superio_inb(sio_data, W83687THF_VID_EN) & (1 << 2))) { dev_dbg(&pdev->dev, "VID disabled, no VID function\n"); goto exit; } /* Make sure the pins are configured for input */ if (!(superio_inb(sio_data, W83687THF_VID_CFG) & (1 << 4))) { dev_dbg(&pdev->dev, "VID configured as output, " "no VID function\n"); goto exit; } res = superio_inb(sio_data, W83687THF_VID_DATA) & 0x3f; exit: superio_exit(sio_data); return res; } static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value) { int word_sized; mutex_lock(&data->lock); word_sized = (((reg & 0xff00) == 0x100) || ((reg & 0xff00) == 0x200)) && (((reg & 0x00ff) == 0x53) || ((reg & 0x00ff) == 0x55)); w83627hf_set_bank(data, reg); outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET); if (word_sized) { outb_p(value >> 8, data->addr + W83781D_DATA_REG_OFFSET); outb_p((reg & 0xff) + 1, data->addr + W83781D_ADDR_REG_OFFSET); } outb_p(value & 0xff, data->addr + W83781D_DATA_REG_OFFSET); w83627hf_reset_bank(data, reg); mutex_unlock(&data->lock); return 0; } static void __devinit w83627hf_init_device(struct platform_device *pdev) { struct w83627hf_data *data = platform_get_drvdata(pdev); int i; enum chips type = data->type; u8 tmp; /* Minimize conflicts with other winbond i2c-only clients... */ /* disable i2c subclients... 
how to disable main i2c client?? */ /* force i2c address to relatively uncommon address */ w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89); w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c); /* Read VID only once */ if (type == w83627hf || type == w83637hf) { int lo = w83627hf_read_value(data, W83781D_REG_VID_FANDIV); int hi = w83627hf_read_value(data, W83781D_REG_CHIPID); data->vid = (lo & 0x0f) | ((hi & 0x01) << 4); } else if (type == w83627thf) { data->vid = w83627thf_read_gpio5(pdev); } else if (type == w83687thf) { data->vid = w83687thf_read_vid(pdev); } /* Read VRM & OVT Config only once */ if (type == w83627thf || type == w83637hf || type == w83687thf) { data->vrm_ovt = w83627hf_read_value(data, W83627THF_REG_VRM_OVT_CFG); } tmp = w83627hf_read_value(data, W83781D_REG_SCFG1); for (i = 1; i <= 3; i++) { if (!(tmp & BIT_SCFG1[i - 1])) { data->sens[i - 1] = 4; } else { if (w83627hf_read_value (data, W83781D_REG_SCFG2) & BIT_SCFG2[i - 1]) data->sens[i - 1] = 1; else data->sens[i - 1] = 2; } if ((type == w83697hf) && (i == 2)) break; } if(init) { /* Enable temp2 */ tmp = w83627hf_read_value(data, W83627HF_REG_TEMP2_CONFIG); if (tmp & 0x01) { dev_warn(&pdev->dev, "Enabling temp2, readings " "might not make sense\n"); w83627hf_write_value(data, W83627HF_REG_TEMP2_CONFIG, tmp & 0xfe); } /* Enable temp3 */ if (type != w83697hf) { tmp = w83627hf_read_value(data, W83627HF_REG_TEMP3_CONFIG); if (tmp & 0x01) { dev_warn(&pdev->dev, "Enabling temp3, " "readings might not make sense\n"); w83627hf_write_value(data, W83627HF_REG_TEMP3_CONFIG, tmp & 0xfe); } } } /* Start monitoring */ w83627hf_write_value(data, W83781D_REG_CONFIG, (w83627hf_read_value(data, W83781D_REG_CONFIG) & 0xf7) | 0x01); /* Enable VBAT monitoring if needed */ tmp = w83627hf_read_value(data, W83781D_REG_VBAT); if (!(tmp & 0x01)) w83627hf_write_value(data, W83781D_REG_VBAT, tmp | 0x01); } static void w83627hf_update_fan_div(struct w83627hf_data *data) { int reg; reg = 
w83627hf_read_value(data, W83781D_REG_VID_FANDIV); data->fan_div[0] = (reg >> 4) & 0x03; data->fan_div[1] = (reg >> 6) & 0x03; if (data->type != w83697hf) { data->fan_div[2] = (w83627hf_read_value(data, W83781D_REG_PIN) >> 6) & 0x03; } reg = w83627hf_read_value(data, W83781D_REG_VBAT); data->fan_div[0] |= (reg >> 3) & 0x04; data->fan_div[1] |= (reg >> 4) & 0x04; if (data->type != w83697hf) data->fan_div[2] |= (reg >> 5) & 0x04; } static struct w83627hf_data *w83627hf_update_device(struct device *dev) { struct w83627hf_data *data = dev_get_drvdata(dev); int i, num_temps = (data->type == w83697hf) ? 2 : 3; int num_pwms = (data->type == w83697hf) ? 2 : 3; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { for (i = 0; i <= 8; i++) { /* skip missing sensors */ if (((data->type == w83697hf) && (i == 1)) || ((data->type != w83627hf && data->type != w83697hf) && (i == 5 || i == 6))) continue; data->in[i] = w83627hf_read_value(data, W83781D_REG_IN(i)); data->in_min[i] = w83627hf_read_value(data, W83781D_REG_IN_MIN(i)); data->in_max[i] = w83627hf_read_value(data, W83781D_REG_IN_MAX(i)); } for (i = 0; i <= 2; i++) { data->fan[i] = w83627hf_read_value(data, W83627HF_REG_FAN(i)); data->fan_min[i] = w83627hf_read_value(data, W83627HF_REG_FAN_MIN(i)); } for (i = 0; i <= 2; i++) { u8 tmp = w83627hf_read_value(data, W836X7HF_REG_PWM(data->type, i)); /* bits 0-3 are reserved in 627THF */ if (data->type == w83627thf) tmp &= 0xf0; data->pwm[i] = tmp; if (i == 1 && (data->type == w83627hf || data->type == w83697hf)) break; } if (data->type == w83627hf) { u8 tmp = w83627hf_read_value(data, W83627HF_REG_PWM_FREQ); data->pwm_freq[0] = tmp & 0x07; data->pwm_freq[1] = (tmp >> 4) & 0x07; } else if (data->type != w83627thf) { for (i = 1; i <= 3; i++) { data->pwm_freq[i - 1] = w83627hf_read_value(data, W83637HF_REG_PWM_FREQ[i - 1]); if (i == 2 && (data->type == w83697hf)) break; } } if (data->type != w83627hf) { for (i = 0; i < 
num_pwms; i++) { u8 tmp = w83627hf_read_value(data, W83627THF_REG_PWM_ENABLE[i]); data->pwm_enable[i] = ((tmp >> W83627THF_PWM_ENABLE_SHIFT[i]) & 0x03) + 1; } } for (i = 0; i < num_temps; i++) { data->temp[i] = w83627hf_read_value( data, w83627hf_reg_temp[i]); data->temp_max[i] = w83627hf_read_value( data, w83627hf_reg_temp_over[i]); data->temp_max_hyst[i] = w83627hf_read_value( data, w83627hf_reg_temp_hyst[i]); } w83627hf_update_fan_div(data); data->alarms = w83627hf_read_value(data, W83781D_REG_ALARM1) | (w83627hf_read_value(data, W83781D_REG_ALARM2) << 8) | (w83627hf_read_value(data, W83781D_REG_ALARM3) << 16); i = w83627hf_read_value(data, W83781D_REG_BEEP_INTS2); data->beep_mask = (i << 8) | w83627hf_read_value(data, W83781D_REG_BEEP_INTS1) | w83627hf_read_value(data, W83781D_REG_BEEP_INTS3) << 16; data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static int __init w83627hf_device_add(unsigned short address, const struct w83627hf_sio_data *sio_data) { struct resource res = { .start = address + WINB_REGION_OFFSET, .end = address + WINB_REGION_OFFSET + WINB_REGION_SIZE - 1, .name = DRVNAME, .flags = IORESOURCE_IO, }; int err; err = acpi_check_resource_conflict(&res); if (err) goto exit; pdev = platform_device_alloc(DRVNAME, address); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit; } err = platform_device_add_resources(pdev, &res, 1); if (err) { pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add_data(pdev, sio_data, sizeof(struct w83627hf_sio_data)); if (err) { pr_err("Platform data allocation failed\n"); goto exit_device_put; } err = platform_device_add(pdev); if (err) { pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } return 0; exit_device_put: platform_device_put(pdev); exit: return err; } static int __init sensors_w83627hf_init(void) { int err; unsigned short address; struct w83627hf_sio_data sio_data; if 
(w83627hf_find(0x2e, &address, &sio_data) && w83627hf_find(0x4e, &address, &sio_data)) return -ENODEV; err = platform_driver_register(&w83627hf_driver); if (err) goto exit; /* Sets global pdev as a side effect */ err = w83627hf_device_add(address, &sio_data); if (err) goto exit_driver; return 0; exit_driver: platform_driver_unregister(&w83627hf_driver); exit: return err; } static void __exit sensors_w83627hf_exit(void) { platform_device_unregister(pdev); platform_driver_unregister(&w83627hf_driver); } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, " "Philip Edelbrock <phil@netroedge.com>, " "and Mark Studebaker <mdsxyz123@yahoo.com>"); MODULE_DESCRIPTION("W83627HF driver"); MODULE_LICENSE("GPL"); module_init(sensors_w83627hf_init); module_exit(sensors_w83627hf_exit);
gpl-2.0
AD5GB/wicked_kernel_lge_hammerhead
arch/cris/arch-v10/drivers/gpio.c
6895
22473
/* * Etrax general port I/O device * * Copyright (c) 1999-2007 Axis Communications AB * * Authors: Bjorn Wesen (initial version) * Ola Knutsson (LED handling) * Johan Adolfsson (read/set directions, write, port G) */ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/etraxgpio.h> #include <arch/svinto.h> #include <asm/io.h> #include <asm/irq.h> #include <arch/io_interface_mux.h> #define GPIO_MAJOR 120 /* experimental MAJOR number */ #define D(x) #if 0 static int dp_cnt; #define DP(x) do { dp_cnt++; if (dp_cnt % 1000 == 0) x; }while(0) #else #define DP(x) #endif static char gpio_name[] = "etrax gpio"; #if 0 static wait_queue_head_t *gpio_wq; #endif static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static ssize_t gpio_write(struct file *file, const char __user *buf, size_t count, loff_t *off); static int gpio_open(struct inode *inode, struct file *filp); static int gpio_release(struct inode *inode, struct file *filp); static unsigned int gpio_poll(struct file *filp, struct poll_table_struct *wait); /* private data per open() of this driver */ struct gpio_private { struct gpio_private *next; /* These fields are for PA and PB only */ volatile unsigned char *port, *shadow; volatile unsigned char *dir, *dir_shadow; unsigned char changeable_dir; unsigned char changeable_bits; unsigned char clk_mask; unsigned char data_mask; unsigned char write_msb; unsigned char pad1, pad2, pad3; /* These fields are generic */ unsigned long highalarm, lowalarm; wait_queue_head_t alarm_wq; int minor; }; /* linked list of alarms to check for */ static struct gpio_private *alarmlist; static int gpio_some_alarms; /* Set if someone uses alarm */ static unsigned long gpio_pa_irq_enabled_mask; static DEFINE_SPINLOCK(gpio_lock); /* 
Protect directions etc */ /* Port A and B use 8 bit access, but Port G is 32 bit */ #define NUM_PORTS (GPIO_MINOR_B+1) static volatile unsigned char *ports[NUM_PORTS] = { R_PORT_PA_DATA, R_PORT_PB_DATA, }; static volatile unsigned char *shads[NUM_PORTS] = { &port_pa_data_shadow, &port_pb_data_shadow }; /* What direction bits that are user changeable 1=changeable*/ #ifndef CONFIG_ETRAX_PA_CHANGEABLE_DIR #define CONFIG_ETRAX_PA_CHANGEABLE_DIR 0x00 #endif #ifndef CONFIG_ETRAX_PB_CHANGEABLE_DIR #define CONFIG_ETRAX_PB_CHANGEABLE_DIR 0x00 #endif #ifndef CONFIG_ETRAX_PA_CHANGEABLE_BITS #define CONFIG_ETRAX_PA_CHANGEABLE_BITS 0xFF #endif #ifndef CONFIG_ETRAX_PB_CHANGEABLE_BITS #define CONFIG_ETRAX_PB_CHANGEABLE_BITS 0xFF #endif static unsigned char changeable_dir[NUM_PORTS] = { CONFIG_ETRAX_PA_CHANGEABLE_DIR, CONFIG_ETRAX_PB_CHANGEABLE_DIR }; static unsigned char changeable_bits[NUM_PORTS] = { CONFIG_ETRAX_PA_CHANGEABLE_BITS, CONFIG_ETRAX_PB_CHANGEABLE_BITS }; static volatile unsigned char *dir[NUM_PORTS] = { R_PORT_PA_DIR, R_PORT_PB_DIR }; static volatile unsigned char *dir_shadow[NUM_PORTS] = { &port_pa_dir_shadow, &port_pb_dir_shadow }; /* All bits in port g that can change dir. */ static const unsigned long int changeable_dir_g_mask = 0x01FFFF01; /* Port G is 32 bit, handle it special, some bits are both inputs and outputs at the same time, only some of the bits can change direction and some of them in groups of 8 bit. 
*/ static unsigned long changeable_dir_g; static unsigned long dir_g_in_bits; static unsigned long dir_g_out_bits; static unsigned long dir_g_shadow; /* 1=output */ #define USE_PORTS(priv) ((priv)->minor <= GPIO_MINOR_B) static unsigned int gpio_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; struct gpio_private *priv = file->private_data; unsigned long data; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); poll_wait(file, &priv->alarm_wq, wait); if (priv->minor == GPIO_MINOR_A) { unsigned long tmp; data = *R_PORT_PA_DATA; /* PA has support for high level interrupt - * lets activate for those low and with highalarm set */ tmp = ~data & priv->highalarm & 0xFF; tmp = (tmp << R_IRQ_MASK1_SET__pa0__BITNR); gpio_pa_irq_enabled_mask |= tmp; *R_IRQ_MASK1_SET = tmp; } else if (priv->minor == GPIO_MINOR_B) data = *R_PORT_PB_DATA; else if (priv->minor == GPIO_MINOR_G) data = *R_PORT_G_DATA; else { mask = 0; goto out; } if ((data & priv->highalarm) || (~data & priv->lowalarm)) { mask = POLLIN|POLLRDNORM; } out: spin_unlock_irqrestore(&gpio_lock, flags); DP(printk("gpio_poll ready: mask 0x%08X\n", mask)); return mask; } int etrax_gpio_wake_up_check(void) { struct gpio_private *priv; unsigned long data = 0; int ret = 0; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); priv = alarmlist; while (priv) { if (USE_PORTS(priv)) data = *priv->port; else if (priv->minor == GPIO_MINOR_G) data = *R_PORT_G_DATA; if ((data & priv->highalarm) || (~data & priv->lowalarm)) { DP(printk("etrax_gpio_wake_up_check %i\n",priv->minor)); wake_up_interruptible(&priv->alarm_wq); ret = 1; } priv = priv->next; } spin_unlock_irqrestore(&gpio_lock, flags); return ret; } static irqreturn_t gpio_poll_timer_interrupt(int irq, void *dev_id) { if (gpio_some_alarms) { etrax_gpio_wake_up_check(); return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t gpio_interrupt(int irq, void *dev_id) { unsigned long tmp; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); /* 
Find what PA interrupts are active */ tmp = (*R_IRQ_READ1); /* Find those that we have enabled */ tmp &= gpio_pa_irq_enabled_mask; /* Clear them.. */ *R_IRQ_MASK1_CLR = tmp; gpio_pa_irq_enabled_mask &= ~tmp; spin_unlock_irqrestore(&gpio_lock, flags); if (gpio_some_alarms) return IRQ_RETVAL(etrax_gpio_wake_up_check()); return IRQ_NONE; } static void gpio_write_bit(struct gpio_private *priv, unsigned char data, int bit) { *priv->port = *priv->shadow &= ~(priv->clk_mask); if (data & 1 << bit) *priv->port = *priv->shadow |= priv->data_mask; else *priv->port = *priv->shadow &= ~(priv->data_mask); /* For FPGA: min 5.0ns (DCC) before CCLK high */ *priv->port = *priv->shadow |= priv->clk_mask; } static void gpio_write_byte(struct gpio_private *priv, unsigned char data) { int i; if (priv->write_msb) for (i = 7; i >= 0; i--) gpio_write_bit(priv, data, i); else for (i = 0; i <= 7; i++) gpio_write_bit(priv, data, i); } static ssize_t gpio_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { struct gpio_private *priv = file->private_data; unsigned long flags; ssize_t retval = count; if (priv->minor != GPIO_MINOR_A && priv->minor != GPIO_MINOR_B) return -EFAULT; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; spin_lock_irqsave(&gpio_lock, flags); /* It must have been configured using the IO_CFG_WRITE_MODE */ /* Perhaps a better error code? 
*/ if (priv->clk_mask == 0 || priv->data_mask == 0) { retval = -EPERM; goto out; } D(printk(KERN_DEBUG "gpio_write: %02X to data 0x%02X " "clk 0x%02X msb: %i\n", count, priv->data_mask, priv->clk_mask, priv->write_msb)); while (count--) gpio_write_byte(priv, *buf++); out: spin_unlock_irqrestore(&gpio_lock, flags); return retval; } static int gpio_open(struct inode *inode, struct file *filp) { struct gpio_private *priv; int p = iminor(inode); unsigned long flags; if (p > GPIO_MINOR_LAST) return -EINVAL; priv = kzalloc(sizeof(struct gpio_private), GFP_KERNEL); if (!priv) return -ENOMEM; priv->minor = p; /* initialize the io/alarm struct */ if (USE_PORTS(priv)) { /* A and B */ priv->port = ports[p]; priv->shadow = shads[p]; priv->dir = dir[p]; priv->dir_shadow = dir_shadow[p]; priv->changeable_dir = changeable_dir[p]; priv->changeable_bits = changeable_bits[p]; } else { priv->port = NULL; priv->shadow = NULL; priv->dir = NULL; priv->dir_shadow = NULL; priv->changeable_dir = 0; priv->changeable_bits = 0; } priv->highalarm = 0; priv->lowalarm = 0; priv->clk_mask = 0; priv->data_mask = 0; init_waitqueue_head(&priv->alarm_wq); filp->private_data = priv; /* link it into our alarmlist */ spin_lock_irqsave(&gpio_lock, flags); priv->next = alarmlist; alarmlist = priv; spin_unlock_irqrestore(&gpio_lock, flags); return 0; } static int gpio_release(struct inode *inode, struct file *filp) { struct gpio_private *p; struct gpio_private *todel; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); p = alarmlist; todel = filp->private_data; /* unlink from alarmlist and free the private structure */ if (p == todel) { alarmlist = todel->next; } else { while (p->next != todel) p = p->next; p->next = todel->next; } kfree(todel); /* Check if there are still any alarms set */ p = alarmlist; while (p) { if (p->highalarm | p->lowalarm) { gpio_some_alarms = 1; goto out; } p = p->next; } gpio_some_alarms = 0; out: spin_unlock_irqrestore(&gpio_lock, flags); return 0; } /* Main device API. 
ioctl's to read/set/clear bits, as well as to * set alarms to wait for using a subsequent select(). */ unsigned long inline setget_input(struct gpio_private *priv, unsigned long arg) { /* Set direction 0=unchanged 1=input, * return mask with 1=input */ if (USE_PORTS(priv)) { *priv->dir = *priv->dir_shadow &= ~((unsigned char)arg & priv->changeable_dir); return ~(*priv->dir_shadow) & 0xFF; /* Only 8 bits */ } if (priv->minor != GPIO_MINOR_G) return 0; /* We must fiddle with R_GEN_CONFIG to change dir */ if (((arg & dir_g_in_bits) != arg) && (arg & changeable_dir_g)) { arg &= changeable_dir_g; /* Clear bits in genconfig to set to input */ if (arg & (1<<0)) { genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, g0dir); dir_g_in_bits |= (1<<0); dir_g_out_bits &= ~(1<<0); } if ((arg & 0x0000FF00) == 0x0000FF00) { genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, g8_15dir); dir_g_in_bits |= 0x0000FF00; dir_g_out_bits &= ~0x0000FF00; } if ((arg & 0x00FF0000) == 0x00FF0000) { genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, g16_23dir); dir_g_in_bits |= 0x00FF0000; dir_g_out_bits &= ~0x00FF0000; } if (arg & (1<<24)) { genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, g24dir); dir_g_in_bits |= (1<<24); dir_g_out_bits &= ~(1<<24); } D(printk(KERN_DEBUG "gpio: SETINPUT on port G set " "genconfig to 0x%08lX " "in_bits: 0x%08lX " "out_bits: 0x%08lX\n", (unsigned long)genconfig_shadow, dir_g_in_bits, dir_g_out_bits)); *R_GEN_CONFIG = genconfig_shadow; /* Must be a >120 ns delay before writing this again */ } return dir_g_in_bits; } /* setget_input */ unsigned long inline setget_output(struct gpio_private *priv, unsigned long arg) { if (USE_PORTS(priv)) { *priv->dir = *priv->dir_shadow |= ((unsigned char)arg & priv->changeable_dir); return *priv->dir_shadow; } if (priv->minor != GPIO_MINOR_G) return 0; /* We must fiddle with R_GEN_CONFIG to change dir */ if (((arg & dir_g_out_bits) != arg) && (arg & changeable_dir_g)) { /* Set bits in genconfig to set to output */ if (arg & (1<<0)) { genconfig_shadow |= 
IO_MASK(R_GEN_CONFIG, g0dir); dir_g_out_bits |= (1<<0); dir_g_in_bits &= ~(1<<0); } if ((arg & 0x0000FF00) == 0x0000FF00) { genconfig_shadow |= IO_MASK(R_GEN_CONFIG, g8_15dir); dir_g_out_bits |= 0x0000FF00; dir_g_in_bits &= ~0x0000FF00; } if ((arg & 0x00FF0000) == 0x00FF0000) { genconfig_shadow |= IO_MASK(R_GEN_CONFIG, g16_23dir); dir_g_out_bits |= 0x00FF0000; dir_g_in_bits &= ~0x00FF0000; } if (arg & (1<<24)) { genconfig_shadow |= IO_MASK(R_GEN_CONFIG, g24dir); dir_g_out_bits |= (1<<24); dir_g_in_bits &= ~(1<<24); } D(printk(KERN_INFO "gpio: SETOUTPUT on port G set " "genconfig to 0x%08lX " "in_bits: 0x%08lX " "out_bits: 0x%08lX\n", (unsigned long)genconfig_shadow, dir_g_in_bits, dir_g_out_bits)); *R_GEN_CONFIG = genconfig_shadow; /* Must be a >120 ns delay before writing this again */ } return dir_g_out_bits & 0x7FFFFFFF; } /* setget_output */ static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg); static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned long flags; unsigned long val; int ret = 0; struct gpio_private *priv = file->private_data; if (_IOC_TYPE(cmd) != ETRAXGPIO_IOCTYPE) return -EINVAL; switch (_IOC_NR(cmd)) { case IO_READBITS: /* Use IO_READ_INBITS and IO_READ_OUTBITS instead */ // read the port spin_lock_irqsave(&gpio_lock, flags); if (USE_PORTS(priv)) { ret = *priv->port; } else if (priv->minor == GPIO_MINOR_G) { ret = (*R_PORT_G_DATA) & 0x7FFFFFFF; } spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_SETBITS: // set changeable bits with a 1 in arg spin_lock_irqsave(&gpio_lock, flags); if (USE_PORTS(priv)) { *priv->port = *priv->shadow |= ((unsigned char)arg & priv->changeable_bits); } else if (priv->minor == GPIO_MINOR_G) { *R_PORT_G_DATA = port_g_data_shadow |= (arg & dir_g_out_bits); } spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_CLRBITS: // clear changeable bits with a 1 in arg spin_lock_irqsave(&gpio_lock, flags); if (USE_PORTS(priv)) { *priv->port = *priv->shadow &= ~((unsigned 
char)arg & priv->changeable_bits); } else if (priv->minor == GPIO_MINOR_G) { *R_PORT_G_DATA = port_g_data_shadow &= ~((unsigned long)arg & dir_g_out_bits); } spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_HIGHALARM: // set alarm when bits with 1 in arg go high spin_lock_irqsave(&gpio_lock, flags); priv->highalarm |= arg; gpio_some_alarms = 1; spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_LOWALARM: // set alarm when bits with 1 in arg go low spin_lock_irqsave(&gpio_lock, flags); priv->lowalarm |= arg; gpio_some_alarms = 1; spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_CLRALARM: /* clear alarm for bits with 1 in arg */ spin_lock_irqsave(&gpio_lock, flags); priv->highalarm &= ~arg; priv->lowalarm &= ~arg; { /* Must update gpio_some_alarms */ struct gpio_private *p = alarmlist; int some_alarms; p = alarmlist; some_alarms = 0; while (p) { if (p->highalarm | p->lowalarm) { some_alarms = 1; break; } p = p->next; } gpio_some_alarms = some_alarms; } spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_READDIR: /* Use IO_SETGET_INPUT/OUTPUT instead! */ /* Read direction 0=input 1=output */ spin_lock_irqsave(&gpio_lock, flags); if (USE_PORTS(priv)) { ret = *priv->dir_shadow; } else if (priv->minor == GPIO_MINOR_G) { /* Note: Some bits are both in and out, * Those that are dual is set here as well. */ ret = (dir_g_shadow | dir_g_out_bits) & 0x7FFFFFFF; } spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_SETINPUT: /* Use IO_SETGET_INPUT instead! */ /* Set direction 0=unchanged 1=input, * return mask with 1=input */ spin_lock_irqsave(&gpio_lock, flags); ret = setget_input(priv, arg) & 0x7FFFFFFF; spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_SETOUTPUT: /* Use IO_SETGET_OUTPUT instead! 
*/ /* Set direction 0=unchanged 1=output, * return mask with 1=output */ spin_lock_irqsave(&gpio_lock, flags); ret = setget_output(priv, arg) & 0x7FFFFFFF; spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_SHUTDOWN: spin_lock_irqsave(&gpio_lock, flags); SOFT_SHUTDOWN(); spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_GET_PWR_BT: spin_lock_irqsave(&gpio_lock, flags); #if defined (CONFIG_ETRAX_SOFT_SHUTDOWN) ret = (*R_PORT_G_DATA & ( 1 << CONFIG_ETRAX_POWERBUTTON_BIT)); #else ret = 0; #endif spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_CFG_WRITE_MODE: spin_lock_irqsave(&gpio_lock, flags); priv->clk_mask = arg & 0xFF; priv->data_mask = (arg >> 8) & 0xFF; priv->write_msb = (arg >> 16) & 0x01; /* Check if we're allowed to change the bits and * the direction is correct */ if (!((priv->clk_mask & priv->changeable_bits) && (priv->data_mask & priv->changeable_bits) && (priv->clk_mask & *priv->dir_shadow) && (priv->data_mask & *priv->dir_shadow))) { priv->clk_mask = 0; priv->data_mask = 0; ret = -EPERM; } spin_unlock_irqrestore(&gpio_lock, flags); break; case IO_READ_INBITS: /* *arg is result of reading the input pins */ spin_lock_irqsave(&gpio_lock, flags); if (USE_PORTS(priv)) { val = *priv->port; } else if (priv->minor == GPIO_MINOR_G) { val = *R_PORT_G_DATA; } spin_unlock_irqrestore(&gpio_lock, flags); if (copy_to_user((void __user *)arg, &val, sizeof(val))) ret = -EFAULT; break; case IO_READ_OUTBITS: /* *arg is result of reading the output shadow */ spin_lock_irqsave(&gpio_lock, flags); if (USE_PORTS(priv)) { val = *priv->shadow; } else if (priv->minor == GPIO_MINOR_G) { val = port_g_data_shadow; } spin_unlock_irqrestore(&gpio_lock, flags); if (copy_to_user((void __user *)arg, &val, sizeof(val))) ret = -EFAULT; break; case IO_SETGET_INPUT: /* bits set in *arg is set to input, * *arg updated with current input pins. 
*/ if (copy_from_user(&val, (void __user *)arg, sizeof(val))) { ret = -EFAULT; break; } spin_lock_irqsave(&gpio_lock, flags); val = setget_input(priv, val); spin_unlock_irqrestore(&gpio_lock, flags); if (copy_to_user((void __user *)arg, &val, sizeof(val))) ret = -EFAULT; break; case IO_SETGET_OUTPUT: /* bits set in *arg is set to output, * *arg updated with current output pins. */ if (copy_from_user(&val, (void __user *)arg, sizeof(val))) { ret = -EFAULT; break; } spin_lock_irqsave(&gpio_lock, flags); val = setget_output(priv, val); spin_unlock_irqrestore(&gpio_lock, flags); if (copy_to_user((void __user *)arg, &val, sizeof(val))) ret = -EFAULT; break; default: spin_lock_irqsave(&gpio_lock, flags); if (priv->minor == GPIO_MINOR_LEDS) ret = gpio_leds_ioctl(cmd, arg); else ret = -EINVAL; spin_unlock_irqrestore(&gpio_lock, flags); } /* switch */ return ret; } static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg) { unsigned char green; unsigned char red; switch (_IOC_NR(cmd)) { case IO_LEDACTIVE_SET: green = ((unsigned char)arg) & 1; red = (((unsigned char)arg) >> 1) & 1; CRIS_LED_ACTIVE_SET_G(green); CRIS_LED_ACTIVE_SET_R(red); break; case IO_LED_SETBIT: CRIS_LED_BIT_SET(arg); break; case IO_LED_CLRBIT: CRIS_LED_BIT_CLR(arg); break; default: return -EINVAL; } /* switch */ return 0; } static const struct file_operations gpio_fops = { .owner = THIS_MODULE, .poll = gpio_poll, .unlocked_ioctl = gpio_ioctl, .write = gpio_write, .open = gpio_open, .release = gpio_release, .llseek = noop_llseek, }; static void ioif_watcher(const unsigned int gpio_in_available, const unsigned int gpio_out_available, const unsigned char pa_available, const unsigned char pb_available) { unsigned long int flags; D(printk(KERN_DEBUG "gpio.c: ioif_watcher called\n")); D(printk(KERN_DEBUG "gpio.c: G in: 0x%08x G out: 0x%08x " "PA: 0x%02x PB: 0x%02x\n", gpio_in_available, gpio_out_available, pa_available, pb_available)); spin_lock_irqsave(&gpio_lock, flags); dir_g_in_bits = 
gpio_in_available; dir_g_out_bits = gpio_out_available; /* Initialise the dir_g_shadow etc. depending on genconfig */ /* 0=input 1=output */ if (genconfig_shadow & IO_STATE(R_GEN_CONFIG, g0dir, out)) dir_g_shadow |= (1 << 0); if (genconfig_shadow & IO_STATE(R_GEN_CONFIG, g8_15dir, out)) dir_g_shadow |= 0x0000FF00; if (genconfig_shadow & IO_STATE(R_GEN_CONFIG, g16_23dir, out)) dir_g_shadow |= 0x00FF0000; if (genconfig_shadow & IO_STATE(R_GEN_CONFIG, g24dir, out)) dir_g_shadow |= (1 << 24); changeable_dir_g = changeable_dir_g_mask; changeable_dir_g &= dir_g_out_bits; changeable_dir_g &= dir_g_in_bits; /* Correct the bits that can change direction */ dir_g_out_bits &= ~changeable_dir_g; dir_g_out_bits |= dir_g_shadow; dir_g_in_bits &= ~changeable_dir_g; dir_g_in_bits |= (~dir_g_shadow & changeable_dir_g); spin_unlock_irqrestore(&gpio_lock, flags); printk(KERN_INFO "GPIO port G: in_bits: 0x%08lX out_bits: 0x%08lX " "val: %08lX\n", dir_g_in_bits, dir_g_out_bits, (unsigned long)*R_PORT_G_DATA); printk(KERN_INFO "GPIO port G: dir: %08lX changeable: %08lX\n", dir_g_shadow, changeable_dir_g); } /* main driver initialization routine, called from mem.c */ static int __init gpio_init(void) { int res; #if defined (CONFIG_ETRAX_CSP0_LEDS) int i; #endif res = register_chrdev(GPIO_MAJOR, gpio_name, &gpio_fops); if (res < 0) { printk(KERN_ERR "gpio: couldn't get a major number.\n"); return res; } /* Clear all leds */ #if defined (CONFIG_ETRAX_CSP0_LEDS) || defined (CONFIG_ETRAX_PA_LEDS) || defined (CONFIG_ETRAX_PB_LEDS) CRIS_LED_NETWORK_SET(0); CRIS_LED_ACTIVE_SET(0); CRIS_LED_DISK_READ(0); CRIS_LED_DISK_WRITE(0); #if defined (CONFIG_ETRAX_CSP0_LEDS) for (i = 0; i < 32; i++) CRIS_LED_BIT_SET(i); #endif #endif /* The I/O interface allocation watcher will be called when * registering it. 
*/ if (cris_io_interface_register_watcher(ioif_watcher)){ printk(KERN_WARNING "gpio_init: Failed to install IO " "if allocator watcher\n"); } printk(KERN_INFO "ETRAX 100LX GPIO driver v2.5, (c) 2001-2008 " "Axis Communications AB\n"); /* We call etrax_gpio_wake_up_check() from timer interrupt and * from cpu_idle() in kernel/process.c * The check in cpu_idle() reduces latency from ~15 ms to ~6 ms * in some tests. */ res = request_irq(TIMER0_IRQ_NBR, gpio_poll_timer_interrupt, IRQF_SHARED | IRQF_DISABLED, "gpio poll", gpio_name); if (res) { printk(KERN_CRIT "err: timer0 irq for gpio\n"); return res; } res = request_irq(PA_IRQ_NBR, gpio_interrupt, IRQF_SHARED | IRQF_DISABLED, "gpio PA", gpio_name); if (res) printk(KERN_CRIT "err: PA irq for gpio\n"); return res; } /* this makes sure that gpio_init is called during kernel boot */ module_init(gpio_init);
gpl-2.0
voidz777/android_kernel_htc_shooterk
arch/mips/pci/fixup-emma2rh.c
9199
3017
/* * Copyright (C) NEC Electronics Corporation 2004-2006 * * This file is based on the arch/mips/ddb5xxx/ddb5477/pci.c * * Copyright 2001 MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <asm/bootinfo.h> #include <asm/emma/emma2rh.h> #define EMMA2RH_PCI_HOST_SLOT 0x09 #define EMMA2RH_USB_SLOT 0x03 #define PCI_DEVICE_ID_NEC_EMMA2RH 0x014b /* EMMA2RH PCI Host */ /* * we fix up irqs based on the slot number. * The first entry is at AD:11. * Fortunately this works because, although we have two pci buses, * they all have different slot numbers (except for rockhopper slot 20 * which is handled below). 
* */ #define MAX_SLOT_NUM 10 static unsigned char irq_map[][5] __initdata = { [3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC, MARKEINS_PCI_IRQ_INTD, 0,}, [4] = {0, MARKEINS_PCI_IRQ_INTA, 0, 0, 0,}, [5] = {0, 0, 0, 0, 0,}, [6] = {0, MARKEINS_PCI_IRQ_INTC, MARKEINS_PCI_IRQ_INTD, MARKEINS_PCI_IRQ_INTA, MARKEINS_PCI_IRQ_INTB,}, }; static void __devinit nec_usb_controller_fixup(struct pci_dev *dev) { if (PCI_SLOT(dev->devfn) == EMMA2RH_USB_SLOT) /* on board USB controller configuration */ pci_write_config_dword(dev, 0xe4, 1 << 5); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, nec_usb_controller_fixup); /* * Prevent the PCI layer from seeing the resources allocated to this device * if it is the host bridge by marking it as such. These resources are of * no consequence to the PCI layer (they are handled elsewhere). */ static void __devinit emma2rh_pci_host_fixup(struct pci_dev *dev) { int i; if (PCI_SLOT(dev->devfn) == EMMA2RH_PCI_HOST_SLOT) { dev->class &= 0xff; dev->class |= PCI_CLASS_BRIDGE_HOST << 8; for (i = 0; i < PCI_NUM_RESOURCES; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; dev->resource[i].flags = 0; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_EMMA2RH, emma2rh_pci_host_fixup); int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_map[slot][pin]; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; }
gpl-2.0
mifl/android_kernel_qcom_msm8x60
arch/arm/mach-iop33x/uart.c
9711
2290
/*
 * arch/arm/mach-iop33x/uart.c
 *
 * Author: Dave Jiang (dave.jiang@intel.com)
 * Copyright (C) 2004 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
#include <linux/io.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mach/map.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <asm/hardware/iop3xx.h>
#include <asm/mach/arch.h>

/* UART reference clock in Hz; fed to the 8250 driver as .uartclk. */
#define IOP33X_UART_XTAL 33334000

/* 8250 port description for UART0; the empty trailing entry terminates
 * the list for the serial8250 platform driver. */
static struct plat_serial8250_port iop33x_uart0_data[] = {
	{
		.membase	= (char *)IOP33X_UART0_VIRT,
		.mapbase	= IOP33X_UART0_PHYS,
		.irq		= IRQ_IOP33X_UART0,
		.uartclk	= IOP33X_UART_XTAL,
		.regshift	= 2,	/* registers are 4 bytes apart */
		.iotype		= UPIO_MEM,
		.flags		= UPF_SKIP_TEST,
	},
	{ },
};

/* MMIO window and IRQ consumed by UART0. */
static struct resource iop33x_uart0_resources[] = {
	[0] = {
		.start	= IOP33X_UART0_PHYS,
		.end	= IOP33X_UART0_PHYS + 0x3f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_IOP33X_UART0,
		.end	= IRQ_IOP33X_UART0,
		.flags	= IORESOURCE_IRQ,
	},
};

/* Platform device registered elsewhere by board code (non-static). */
struct platform_device iop33x_uart0_device = {
	.name		= "serial8250",
	.id		= PLAT8250_DEV_PLATFORM,
	.dev		= {
		.platform_data		= iop33x_uart0_data,
	},
	.num_resources	= 2,
	.resource	= iop33x_uart0_resources,
};

/* MMIO window and IRQ consumed by UART1. */
static struct resource iop33x_uart1_resources[] = {
	[0] = {
		.start	= IOP33X_UART1_PHYS,
		.end	= IOP33X_UART1_PHYS + 0x3f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_IOP33X_UART1,
		.end	= IRQ_IOP33X_UART1,
		.flags	= IORESOURCE_IRQ,
	},
};

/* 8250 port description for UART1; same layout as UART0. */
static struct plat_serial8250_port iop33x_uart1_data[] = {
	{
		.membase	= (char *)IOP33X_UART1_VIRT,
		.mapbase	= IOP33X_UART1_PHYS,
		.irq		= IRQ_IOP33X_UART1,
		.uartclk	= IOP33X_UART_XTAL,
		.regshift	= 2,
		.iotype		= UPIO_MEM,
		.flags		= UPF_SKIP_TEST,
	},
	{ },
};

/* Second serial8250 platform device instance (distinct .id). */
struct platform_device iop33x_uart1_device = {
	.name		= "serial8250",
	.id		= PLAT8250_DEV_PLATFORM1,
	.dev		= {
		.platform_data		= iop33x_uart1_data,
	},
	.num_resources	= 2,
	.resource	= iop33x_uart1_resources,
};
gpl-2.0
emxys1/imx6rex-sato-sdk-linux-3.10.17
arch/m68k/platform/coldfire/cache.c
12015
1286
/***************************************************************************/ /* * cache.c -- general ColdFire Cache maintenance code * * Copyright (C) 2010, Greg Ungerer (gerg@snapgear.com) */ /***************************************************************************/ #include <linux/kernel.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> /***************************************************************************/ #ifdef CACHE_PUSH /***************************************************************************/ /* * Use cpushl to push all dirty cache lines back to memory. * Older versions of GAS don't seem to know how to generate the * ColdFire cpushl instruction... Oh well, bit stuff it for now. */ void mcf_cache_push(void) { __asm__ __volatile__ ( "clrl %%d0\n\t" "1:\n\t" "movel %%d0,%%a0\n\t" "2:\n\t" ".word 0xf468\n\t" "addl %0,%%a0\n\t" "cmpl %1,%%a0\n\t" "blt 2b\n\t" "addql #1,%%d0\n\t" "cmpil %2,%%d0\n\t" "bne 1b\n\t" : /* No output */ : "i" (CACHE_LINE_SIZE), "i" (DCACHE_SIZE / CACHE_WAYS), "i" (CACHE_WAYS) : "d0", "a0" ); } /***************************************************************************/ #endif /* CACHE_PUSH */ /***************************************************************************/
gpl-2.0
CyanideL/android_kernel_oneplus_msm8974
arch/arm/mach-w90x900/nuc910.c
13039
1298
/*
 * linux/arch/arm/mach-w90x900/nuc910.c
 *
 * Based on linux/arch/arm/plat-s3c24xx/s3c244x.c by Ben Dooks
 *
 * Copyright (c) 2009 Nuvoton corporation.
 *
 * Wan ZongShun <mcuos.com@gmail.com>
 *
 * NUC910 cpu support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation;version 2 of the License.
 *
 */

#include <linux/platform_device.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include "cpu.h"
#include "clock.h"

/* define specific CPU platform device */
/* NUC910-specific platform devices registered in addition to the common
 * NUC900 set: touchscreen, RTC, LCD controller and keypad interface. */
static struct platform_device *nuc910_dev[] __initdata = {
	&nuc900_device_ts,
	&nuc900_device_rtc,
	&nuc900_device_lcd,
	&nuc900_device_kpi,
};

/* define specific CPU platform io map */
/* Static virtual I/O mappings for NUC910 peripherals; each entry is
 * generated by the IODESC_ENT() helper macro. */
static struct map_desc nuc910evb_iodesc[] __initdata = {
	IODESC_ENT(USBEHCIHOST),
	IODESC_ENT(USBOHCIHOST),
	IODESC_ENT(KPI),
	IODESC_ENT(USBDEV),
	IODESC_ENT(ADC),
};

/*Init NUC910 evb io*/
/* Install the NUC910 mapping table via the shared NUC900 map code. */
void __init nuc910_map_io(void)
{
	nuc900_map_io(nuc910evb_iodesc, ARRAY_SIZE(nuc910evb_iodesc));
}

/*Init NUC910 clock*/
/* Thin wrapper: clock setup is entirely shared with the NUC900 core. */
void __init nuc910_init_clocks(void)
{
	nuc900_init_clocks();
}

/*Init NUC910 board info*/
/* Register the NUC910-specific platform devices listed above. */
void __init nuc910_board_init(void)
{
	nuc900_board_init(nuc910_dev, ARRAY_SIZE(nuc910_dev));
}
gpl-2.0
Ca1ne/Enoch316
arch/arm/mach-w90x900/nuc910.c
13039
1298
/*
 * linux/arch/arm/mach-w90x900/nuc910.c
 *
 * Based on linux/arch/arm/plat-s3c24xx/s3c244x.c by Ben Dooks
 *
 * Copyright (c) 2009 Nuvoton corporation.
 *
 * Wan ZongShun <mcuos.com@gmail.com>
 *
 * NUC910 cpu support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation;version 2 of the License.
 *
 */

#include <linux/platform_device.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include "cpu.h"
#include "clock.h"

/* define specific CPU platform device */
/* NUC910-specific platform devices registered in addition to the common
 * NUC900 set: touchscreen, RTC, LCD controller and keypad interface. */
static struct platform_device *nuc910_dev[] __initdata = {
	&nuc900_device_ts,
	&nuc900_device_rtc,
	&nuc900_device_lcd,
	&nuc900_device_kpi,
};

/* define specific CPU platform io map */
/* Static virtual I/O mappings for NUC910 peripherals; each entry is
 * generated by the IODESC_ENT() helper macro. */
static struct map_desc nuc910evb_iodesc[] __initdata = {
	IODESC_ENT(USBEHCIHOST),
	IODESC_ENT(USBOHCIHOST),
	IODESC_ENT(KPI),
	IODESC_ENT(USBDEV),
	IODESC_ENT(ADC),
};

/*Init NUC910 evb io*/
/* Install the NUC910 mapping table via the shared NUC900 map code. */
void __init nuc910_map_io(void)
{
	nuc900_map_io(nuc910evb_iodesc, ARRAY_SIZE(nuc910evb_iodesc));
}

/*Init NUC910 clock*/
/* Thin wrapper: clock setup is entirely shared with the NUC900 core. */
void __init nuc910_init_clocks(void)
{
	nuc900_init_clocks();
}

/*Init NUC910 board info*/
/* Register the NUC910-specific platform devices listed above. */
void __init nuc910_board_init(void)
{
	nuc900_board_init(nuc910_dev, ARRAY_SIZE(nuc910_dev));
}
gpl-2.0
cornus/linux-samsung
arch/powerpc/boot/cuboot-katmai.c
14063
1294
/*
 * Old U-boot compatibility for Katmai
 *
 * Author: Hugh Blemings <hugh@au.ibm.com>
 *
 * Copyright 2007 Hugh Blemings, IBM Corporation.
 * Based on cuboot-ebony.c which is:
 * Copyright 2007 David Gibson, IBM Corporation.
 * Based on cuboot-83xx.c, which is:
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "reg.h"
#include "dcr.h"
#include "4xx.h"
#include "44x.h"
#include "cuboot.h"

#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"

/* Board-info block copied from the old U-Boot by CUBOOT_INIT(). */
static bd_t bd;

BSS_STACK(4096);

/* Device-tree fixups run before the kernel is entered: fill in clocks,
 * memory size, MAC address and EBC ranges from U-Boot's board info. */
static void katmai_fixups(void)
{
	/* Board system clock input, Hz (fixed on Katmai). */
	unsigned long sysclk = 33333000;

	/* 440SP Clock logic is all but identical to 440GX
	 * so we just use that code for now at least
	 */
	ibm440spe_fixup_clocks(sysclk, 6 * 1843200, 0);
	ibm440spe_fixup_memsize();
	dt_fixup_mac_address(0, bd.bi_enetaddr);
	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
}

/* Bootwrapper entry point; r3-r7 are the register values handed over by
 * the old U-Boot (consumed by CUBOOT_INIT()). */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	platform_ops.fixups = katmai_fixups;
	fdt_init(_dtb_start);
	serial_console_init();
}
gpl-2.0
ultrasystem/system
drivers/gpu/drm/i915/intel_fbdev.c
240
9590
/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/vga_switcheroo.h>

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* fbdev ops: everything except drawing is delegated to the generic DRM
 * fb helper; drawing uses the cfb_* software routines. */
static struct fb_ops intelfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};

/* Allocate a GEM backing object (stolen memory first, then regular),
 * pin it into the GTT and initialize ifbdev->ifb around it.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released via the goto unwind chain. */
static int intelfb_alloc(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct drm_device *dev = helper->dev;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* Pitch rounded up to 64 bytes, size to a whole page. */
	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_object_create_stolen(dev, size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
out:
	return ret;
}

/* fb_probe callback: build the fb_info for the fbdev emulation, reusing
 * a BIOS-provided framebuffer when one exists, otherwise allocating a
 * fresh one via intelfb_alloc().  Runs under dev->struct_mutex. */
static int intelfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct intel_framebuffer *intel_fb = &ifbdev->ifb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	int size, ret;

	mutex_lock(&dev->struct_mutex);

	if (!intel_fb->obj) {
		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
		ret = intelfb_alloc(helper, sizes);
		if (ret)
			goto out_unlock;
	} else {
		DRM_DEBUG_KMS("re-using BIOS fb\n");
		sizes->fb_width = intel_fb->base.width;
		sizes->fb_height = intel_fb->base.height;
	}

	obj = intel_fb->obj;
	size = obj->base.size;

	info = framebuffer_alloc(0, &dev->pdev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = helper;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;

	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
	info->fix.smem_len = size;

	/* Map the framebuffer through the CPU-visible GTT aperture. */
	info->screen_base =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

	/* This driver doesn't need a VT switch to restore the mode on resume */
	info->skip_vt_switch = true;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* If the object is shmemfs backed, it will have given us zeroed pages.
	 * If the object is stolen however, it will be full of whatever
	 * garbage was left in there.
	 */
	if (ifbdev->ifb.obj->stolen)
		memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
		      fb->width, fb->height,
		      i915_gem_obj_ggtt_offset(obj), obj);

	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/** Sets the color ramps on behalf of RandR */
static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
				    u16 blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* LUT stores 8-bit values; drop the low byte of each 16-bit ramp. */
	intel_crtc->lut_r[regno] = red >> 8;
	intel_crtc->lut_g[regno] = green >> 8;
	intel_crtc->lut_b[regno] = blue >> 8;
}

/* Inverse of intel_crtc_fb_gamma_set: expand 8-bit LUT back to 16 bits. */
static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
				    u16 *blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	*red = intel_crtc->lut_r[regno] << 8;
	*green = intel_crtc->lut_g[regno] << 8;
	*blue = intel_crtc->lut_b[regno] << 8;
}

static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
	.gamma_set = intel_crtc_fb_gamma_set,
	.gamma_get = intel_crtc_fb_gamma_get,
	.fb_probe = intelfb_create,
};

/* Tear down the fbdev: unregister and free the fb_info first (if one was
 * created), then release the helper and the intel framebuffer. */
static void intel_fbdev_destroy(struct drm_device *dev,
				struct intel_fbdev *ifbdev)
{
	if (ifbdev->helper.fbdev) {
		struct fb_info *info = ifbdev->helper.fbdev;

		unregister_framebuffer(info);
		iounmap(info->screen_base);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	drm_fb_helper_fini(&ifbdev->helper);

	drm_framebuffer_unregister_private(&ifbdev->ifb.base);
	intel_framebuffer_fini(&ifbdev->ifb);
}

/* Allocate and register the fbdev emulation for this device.
 * Returns 0 on success or a negative errno. */
int intel_fbdev_init(struct drm_device *dev)
{
	struct intel_fbdev *ifbdev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
	if (!ifbdev)
		return -ENOMEM;

	dev_priv->fbdev = ifbdev;
	ifbdev->helper.funcs = &intel_fb_helper_funcs;

	ret = drm_fb_helper_init(dev, &ifbdev->helper,
				 INTEL_INFO(dev)->num_pipes,
				 4);
	if (ret) {
		kfree(ifbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);

	return 0;
}

void intel_fbdev_initial_config(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Due to peculiar init order wrt to hpd handling this is separate. */
	drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
}

/* Undo intel_fbdev_init(); safe to call when init never ran. */
void intel_fbdev_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (!dev_priv->fbdev)
		return;

	intel_fbdev_destroy(dev, dev_priv->fbdev);
	kfree(dev_priv->fbdev);
	dev_priv->fbdev = NULL;
}

/* Suspend/resume hook for the fbdev console (state is an FBINFO_STATE_*). */
void intel_fbdev_set_suspend(struct drm_device *dev, int state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev = dev_priv->fbdev;
	struct fb_info *info;

	if (!ifbdev)
		return;

	info = ifbdev->helper.fbdev;

	/* On resume from hibernation: If the object is shmemfs backed, it has
	 * been restored from swap. If the object is stolen however, it will be
	 * full of whatever garbage was left in there.
	 */
	if (state == FBINFO_STATE_RUNNING && ifbdev->ifb.obj->stolen)
		memset_io(info->screen_base, 0, info->screen_size);

	fb_set_suspend(info, state);
}

void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}

/* Force the fbdev mode back onto the CRTCs (e.g. after a client exits). */
void intel_fbdev_restore_mode(struct drm_device *dev)
{
	int ret;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	drm_modeset_lock_all(dev);

	ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
	if (ret)
		DRM_DEBUG("failed to restore crtc mode\n");

	drm_modeset_unlock_all(dev);
}
gpl-2.0
TheTypoMaster/linux_kernel_2.6.32.67
arch/arm/mach-davinci/sram.c
496
1722
/* * mach-davinci/sram.c - DaVinci simple SRAM allocator * * Copyright (C) 2009 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/genalloc.h> #include <mach/common.h> #include <mach/memory.h> #include <mach/sram.h> static struct gen_pool *sram_pool; void *sram_alloc(size_t len, dma_addr_t *dma) { unsigned long vaddr; dma_addr_t dma_base = davinci_soc_info.sram_dma; if (dma) *dma = 0; if (!sram_pool || (dma && !dma_base)) return NULL; vaddr = gen_pool_alloc(sram_pool, len); if (!vaddr) return NULL; if (dma) *dma = dma_base + (vaddr - SRAM_VIRT); return (void *)vaddr; } EXPORT_SYMBOL(sram_alloc); void sram_free(void *addr, size_t len) { gen_pool_free(sram_pool, (unsigned long) addr, len); } EXPORT_SYMBOL(sram_free); /* * REVISIT This supports CPU and DMA access to/from SRAM, but it * doesn't (yet?) support some other notable uses of SRAM: as TCM * for data and/or instructions; and holding code needed to enter * and exit suspend states (while DRAM can't be used). */ static int __init sram_init(void) { unsigned len = davinci_soc_info.sram_len; int status = 0; if (len) { len = min_t(unsigned, len, SRAM_SIZE); sram_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1); if (!sram_pool) status = -ENOMEM; } if (sram_pool) status = gen_pool_add(sram_pool, SRAM_VIRT, len, -1); WARN_ON(status < 0); return status; } core_initcall(sram_init);
gpl-2.0
gao-feng/auditns
drivers/mtd/nand/fsl_elbc_nand.c
1008
28706
/* Freescale Enhanced Local Bus Controller NAND driver * * Copyright © 2006-2007, 2010 Freescale Semiconductor * * Authors: Nick Spence <nick.spence@freescale.com>, * Scott Wood <scottwood@freescale.com> * Jack Lan <jack.lan@freescale.com> * Roy Zang <tie-fei.zang@freescale.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #include <asm/fsl_lbc.h> #define MAX_BANKS 8 #define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */ #define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */ /* mtd information per set */ struct fsl_elbc_mtd { struct mtd_info mtd; struct nand_chip chip; struct fsl_lbc_ctrl *ctrl; struct device *dev; int bank; /* Chip select bank number */ u8 __iomem *vbase; /* Chip select base virtual address */ int page_size; /* NAND page size (0=512, 1=2048) */ unsigned int fmr; /* FCM Flash Mode Register value */ }; /* Freescale eLBC FCM controller information */ struct 
fsl_elbc_fcm_ctrl { struct nand_hw_control controller; struct fsl_elbc_mtd *chips[MAX_BANKS]; u8 __iomem *addr; /* Address of assigned FCM buffer */ unsigned int page; /* Last page written to / read from */ unsigned int read_bytes; /* Number of bytes read during command */ unsigned int column; /* Saved column from SEQIN */ unsigned int index; /* Pointer to next byte to 'read' */ unsigned int status; /* status read from LTESR after last op */ unsigned int mdr; /* UPM/FCM Data Register value */ unsigned int use_mdr; /* Non zero if the MDR is to be set */ unsigned int oob; /* Non zero if operating on OOB data */ unsigned int counter; /* counter for the initializations */ unsigned int max_bitflips; /* Saved during READ0 cmd */ }; /* These map to the positions used by the FCM hardware ECC generator */ /* Small Page FLASH with FMR[ECCM] = 0 */ static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = { .eccbytes = 3, .eccpos = {6, 7, 8}, .oobfree = { {0, 5}, {9, 7} }, }; /* Small Page FLASH with FMR[ECCM] = 1 */ static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = { .eccbytes = 3, .eccpos = {8, 9, 10}, .oobfree = { {0, 5}, {6, 2}, {11, 5} }, }; /* Large Page FLASH with FMR[ECCM] = 0 */ static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = { .eccbytes = 12, .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56}, .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} }, }; /* Large Page FLASH with FMR[ECCM] = 1 */ static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = { .eccbytes = 12, .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58}, .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} }, }; /* * ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt, * interfere with ECC positions, that's why we implement our own descriptors. * OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0. 
*/ static u8 bbt_pattern[] = {'B', 'b', 't', '0' }; static u8 mirror_pattern[] = {'1', 't', 'b', 'B' }; static struct nand_bbt_descr bbt_main_descr = { .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | NAND_BBT_2BIT | NAND_BBT_VERSION, .offs = 11, .len = 4, .veroffs = 15, .maxblocks = 4, .pattern = bbt_pattern, }; static struct nand_bbt_descr bbt_mirror_descr = { .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | NAND_BBT_2BIT | NAND_BBT_VERSION, .offs = 11, .len = 4, .veroffs = 15, .maxblocks = 4, .pattern = mirror_pattern, }; /*=================================*/ /* * Set up the FCM hardware block and page address fields, and the fcm * structure addr field to point to the correct FCM buffer in memory */ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) { struct nand_chip *chip = mtd->priv; struct fsl_elbc_mtd *priv = chip->priv; struct fsl_lbc_ctrl *ctrl = priv->ctrl; struct fsl_lbc_regs __iomem *lbc = ctrl->regs; struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand; int buf_num; elbc_fcm_ctrl->page = page_addr; if (priv->page_size) { /* * large page size chip : FPAR[PI] save the lowest 6 bits, * FBAR[BLK] save the other bits. */ out_be32(&lbc->fbar, page_addr >> 6); out_be32(&lbc->fpar, ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) | (oob ? FPAR_LP_MS : 0) | column); buf_num = (page_addr & 1) << 2; } else { /* * small page size chip : FPAR[PI] save the lowest 5 bits, * FBAR[BLK] save the other bits. */ out_be32(&lbc->fbar, page_addr >> 5); out_be32(&lbc->fpar, ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) | (oob ? FPAR_SP_MS : 0) | column); buf_num = page_addr & 7; } elbc_fcm_ctrl->addr = priv->vbase + buf_num * 1024; elbc_fcm_ctrl->index = column; /* for OOB data point to the second half of the buffer */ if (oob) elbc_fcm_ctrl->index += priv->page_size ? 
2048 : 512; dev_vdbg(priv->dev, "set_addr: bank=%d, " "elbc_fcm_ctrl->addr=0x%p (0x%p), " "index %x, pes %d ps %d\n", buf_num, elbc_fcm_ctrl->addr, priv->vbase, elbc_fcm_ctrl->index, chip->phys_erase_shift, chip->page_shift); } /* * execute FCM command and wait for it to complete */ static int fsl_elbc_run_command(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct fsl_elbc_mtd *priv = chip->priv; struct fsl_lbc_ctrl *ctrl = priv->ctrl; struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand; struct fsl_lbc_regs __iomem *lbc = ctrl->regs; /* Setup the FMR[OP] to execute without write protection */ out_be32(&lbc->fmr, priv->fmr | 3); if (elbc_fcm_ctrl->use_mdr) out_be32(&lbc->mdr, elbc_fcm_ctrl->mdr); dev_vdbg(priv->dev, "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n", in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr)); dev_vdbg(priv->dev, "fsl_elbc_run_command: fbar=%08x fpar=%08x " "fbcr=%08x bank=%d\n", in_be32(&lbc->fbar), in_be32(&lbc->fpar), in_be32(&lbc->fbcr), priv->bank); ctrl->irq_status = 0; /* execute special operation */ out_be32(&lbc->lsor, priv->bank); /* wait for FCM complete flag or timeout */ wait_event_timeout(ctrl->irq_wait, ctrl->irq_status, FCM_TIMEOUT_MSECS * HZ/1000); elbc_fcm_ctrl->status = ctrl->irq_status; /* store mdr value in case it was needed */ if (elbc_fcm_ctrl->use_mdr) elbc_fcm_ctrl->mdr = in_be32(&lbc->mdr); elbc_fcm_ctrl->use_mdr = 0; if (elbc_fcm_ctrl->status != LTESR_CC) { dev_info(priv->dev, "command failed: fir %x fcr %x status %x mdr %x\n", in_be32(&lbc->fir), in_be32(&lbc->fcr), elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr); return -EIO; } if (chip->ecc.mode != NAND_ECC_HW) return 0; elbc_fcm_ctrl->max_bitflips = 0; if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) { uint32_t lteccr = in_be32(&lbc->lteccr); /* * if command was a full page read and the ELBC * has the LTECCR register, then bits 12-15 (ppc order) of * LTECCR indicates which 512 byte sub-pages had fixed errors. 
* bits 28-31 are uncorrectable errors, marked elsewhere. * for small page nand only 1 bit is used. * if the ELBC doesn't have the lteccr register it reads 0 * FIXME: 4 bits can be corrected on NANDs with 2k pages, so * count the number of sub-pages with bitflips and update * ecc_stats.corrected accordingly. */ if (lteccr & 0x000F000F) out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */ if (lteccr & 0x000F0000) { mtd->ecc_stats.corrected++; elbc_fcm_ctrl->max_bitflips = 1; } } return 0; } static void fsl_elbc_do_read(struct nand_chip *chip, int oob) { struct fsl_elbc_mtd *priv = chip->priv; struct fsl_lbc_ctrl *ctrl = priv->ctrl; struct fsl_lbc_regs __iomem *lbc = ctrl->regs; if (priv->page_size) { out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_CA << FIR_OP1_SHIFT) | (FIR_OP_PA << FIR_OP2_SHIFT) | (FIR_OP_CM1 << FIR_OP3_SHIFT) | (FIR_OP_RBW << FIR_OP4_SHIFT)); out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) | (NAND_CMD_READSTART << FCR_CMD1_SHIFT)); } else { out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_CA << FIR_OP1_SHIFT) | (FIR_OP_PA << FIR_OP2_SHIFT) | (FIR_OP_RBW << FIR_OP3_SHIFT)); if (oob) out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT); else out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT); } } /* cmdfunc send commands to the FCM */ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command, int column, int page_addr) { struct nand_chip *chip = mtd->priv; struct fsl_elbc_mtd *priv = chip->priv; struct fsl_lbc_ctrl *ctrl = priv->ctrl; struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand; struct fsl_lbc_regs __iomem *lbc = ctrl->regs; elbc_fcm_ctrl->use_mdr = 0; /* clear the read buffer */ elbc_fcm_ctrl->read_bytes = 0; if (command != NAND_CMD_PAGEPROG) elbc_fcm_ctrl->index = 0; switch (command) { /* READ0 and READ1 read the entire buffer to use hardware ECC. 
*/ case NAND_CMD_READ1: column += 256; /* fall-through */ case NAND_CMD_READ0: dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:" " 0x%x, column: 0x%x.\n", page_addr, column); out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */ set_addr(mtd, 0, page_addr, 0); elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize; elbc_fcm_ctrl->index += column; fsl_elbc_do_read(chip, 0); fsl_elbc_run_command(mtd); return; /* READOOB reads only the OOB because no ECC is performed. */ case NAND_CMD_READOOB: dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:" " 0x%x, column: 0x%x.\n", page_addr, column); out_be32(&lbc->fbcr, mtd->oobsize - column); set_addr(mtd, column, page_addr, 1); elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize; fsl_elbc_do_read(chip, 1); fsl_elbc_run_command(mtd); return; case NAND_CMD_READID: case NAND_CMD_PARAM: dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command); out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_UA << FIR_OP1_SHIFT) | (FIR_OP_RBW << FIR_OP2_SHIFT)); out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT); /* * although currently it's 8 bytes for READID, we always read * the maximum 256 bytes(for PARAM) */ out_be32(&lbc->fbcr, 256); elbc_fcm_ctrl->read_bytes = 256; elbc_fcm_ctrl->use_mdr = 1; elbc_fcm_ctrl->mdr = column; set_addr(mtd, 0, 0, 0); fsl_elbc_run_command(mtd); return; /* ERASE1 stores the block and page address */ case NAND_CMD_ERASE1: dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, " "page_addr: 0x%x.\n", page_addr); set_addr(mtd, 0, page_addr, 0); return; /* ERASE2 uses the block and page address from ERASE1 */ case NAND_CMD_ERASE2: dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n"); out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_PA << FIR_OP1_SHIFT) | (FIR_OP_CM2 << FIR_OP2_SHIFT) | (FIR_OP_CW1 << FIR_OP3_SHIFT) | (FIR_OP_RS << FIR_OP4_SHIFT)); out_be32(&lbc->fcr, (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) | (NAND_CMD_STATUS << 
FCR_CMD1_SHIFT) | (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT)); out_be32(&lbc->fbcr, 0); elbc_fcm_ctrl->read_bytes = 0; elbc_fcm_ctrl->use_mdr = 1; fsl_elbc_run_command(mtd); return; /* SEQIN sets up the addr buffer and all registers except the length */ case NAND_CMD_SEQIN: { __be32 fcr; dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, " "page_addr: 0x%x, column: 0x%x.\n", page_addr, column); elbc_fcm_ctrl->column = column; elbc_fcm_ctrl->use_mdr = 1; if (column >= mtd->writesize) { /* OOB area */ column -= mtd->writesize; elbc_fcm_ctrl->oob = 1; } else { WARN_ON(column != 0); elbc_fcm_ctrl->oob = 0; } fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) | (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) | (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT); if (priv->page_size) { out_be32(&lbc->fir, (FIR_OP_CM2 << FIR_OP0_SHIFT) | (FIR_OP_CA << FIR_OP1_SHIFT) | (FIR_OP_PA << FIR_OP2_SHIFT) | (FIR_OP_WB << FIR_OP3_SHIFT) | (FIR_OP_CM3 << FIR_OP4_SHIFT) | (FIR_OP_CW1 << FIR_OP5_SHIFT) | (FIR_OP_RS << FIR_OP6_SHIFT)); } else { out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_CM2 << FIR_OP1_SHIFT) | (FIR_OP_CA << FIR_OP2_SHIFT) | (FIR_OP_PA << FIR_OP3_SHIFT) | (FIR_OP_WB << FIR_OP4_SHIFT) | (FIR_OP_CM3 << FIR_OP5_SHIFT) | (FIR_OP_CW1 << FIR_OP6_SHIFT) | (FIR_OP_RS << FIR_OP7_SHIFT)); if (elbc_fcm_ctrl->oob) /* OOB area --> READOOB */ fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT; else /* First 256 bytes --> READ0 */ fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; } out_be32(&lbc->fcr, fcr); set_addr(mtd, column, page_addr, elbc_fcm_ctrl->oob); return; } /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ case NAND_CMD_PAGEPROG: { dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " "writing %d bytes.\n", elbc_fcm_ctrl->index); /* if the write did not start at 0 or is not a full page * then set the exact length, otherwise use a full page * write so the HW generates the ECC. 
*/ if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 || elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) out_be32(&lbc->fbcr, elbc_fcm_ctrl->index - elbc_fcm_ctrl->column); else out_be32(&lbc->fbcr, 0); fsl_elbc_run_command(mtd); return; } /* CMD_STATUS must read the status byte while CEB is active */ /* Note - it does not wait for the ready line */ case NAND_CMD_STATUS: out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_RBW << FIR_OP1_SHIFT)); out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT); out_be32(&lbc->fbcr, 1); set_addr(mtd, 0, 0, 0); elbc_fcm_ctrl->read_bytes = 1; fsl_elbc_run_command(mtd); /* The chip always seems to report that it is * write-protected, even when it is not. */ setbits8(elbc_fcm_ctrl->addr, NAND_STATUS_WP); return; /* RESET without waiting for the ready line */ case NAND_CMD_RESET: dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n"); out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT); out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT); fsl_elbc_run_command(mtd); return; default: dev_err(priv->dev, "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n", command); } } static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip) { /* The hardware does not seem to support multiple * chips per bank. 
*/ } /* * Write buf to the FCM Controller Data Buffer */ static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len) { struct nand_chip *chip = mtd->priv; struct fsl_elbc_mtd *priv = chip->priv; struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; unsigned int bufsize = mtd->writesize + mtd->oobsize; if (len <= 0) { dev_err(priv->dev, "write_buf of %d bytes", len); elbc_fcm_ctrl->status = 0; return; } if ((unsigned int)len > bufsize - elbc_fcm_ctrl->index) { dev_err(priv->dev, "write_buf beyond end of buffer " "(%d requested, %u available)\n", len, bufsize - elbc_fcm_ctrl->index); len = bufsize - elbc_fcm_ctrl->index; } memcpy_toio(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], buf, len); /* * This is workaround for the weird elbc hangs during nand write, * Scott Wood says: "...perhaps difference in how long it takes a * write to make it through the localbus compared to a write to IMMR * is causing problems, and sync isn't helping for some reason." * Reading back the last byte helps though. */ in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index] + len - 1); elbc_fcm_ctrl->index += len; } /* * read a byte from either the FCM hardware buffer if it has any data left * otherwise issue a command to read a single byte. */ static u8 fsl_elbc_read_byte(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct fsl_elbc_mtd *priv = chip->priv; struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; /* If there are still bytes in the FCM, then use the next byte. 
*/ if (elbc_fcm_ctrl->index < elbc_fcm_ctrl->read_bytes) return in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index++]); dev_err(priv->dev, "read_byte beyond end of buffer\n"); return ERR_BYTE; } /* * Read from the FCM Controller Data Buffer */ static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len) { struct nand_chip *chip = mtd->priv; struct fsl_elbc_mtd *priv = chip->priv; struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; int avail; if (len < 0) return; avail = min((unsigned int)len, elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index); memcpy_fromio(buf, &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], avail); elbc_fcm_ctrl->index += avail; if (len > avail) dev_err(priv->dev, "read_buf beyond end of buffer " "(%d requested, %d available)\n", len, avail); } /* This function is called after Program and Erase Operations to * check for success or failure. */ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip) { struct fsl_elbc_mtd *priv = chip->priv; struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; if (elbc_fcm_ctrl->status != LTESR_CC) return NAND_STATUS_FAIL; /* The chip always seems to report that it is * write-protected, even when it is not. 
*/ return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP; } static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct fsl_elbc_mtd *priv = chip->priv; struct fsl_lbc_ctrl *ctrl = priv->ctrl; struct fsl_lbc_regs __iomem *lbc = ctrl->regs; unsigned int al; /* calculate FMR Address Length field */ al = 0; if (chip->pagemask & 0xffff0000) al++; if (chip->pagemask & 0xff000000) al++; priv->fmr |= al << FMR_AL_SHIFT; dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n", chip->numchips); dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n", chip->chipsize); dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n", chip->pagemask); dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_delay = %d\n", chip->chip_delay); dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n", chip->badblockpos); dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n", chip->chip_shift); dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n", chip->page_shift); dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n", chip->phys_erase_shift); dev_dbg(priv->dev, "fsl_elbc_init: nand->ecclayout = %p\n", chip->ecclayout); dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n", chip->ecc.mode); dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n", chip->ecc.steps); dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n", chip->ecc.bytes); dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n", chip->ecc.total); dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.layout = %p\n", chip->ecc.layout); dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags); dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size); dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n", mtd->erasesize); dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n", mtd->writesize); dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n", mtd->oobsize); /* adjust Option Register and ECC to match Flash page size */ if 
(mtd->writesize == 512) { priv->page_size = 0; clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS); } else if (mtd->writesize == 2048) { priv->page_size = 1; setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS); /* adjust ecc setup if needed */ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == BR_DECC_CHK_GEN) { chip->ecc.size = 512; chip->ecc.layout = (priv->fmr & FMR_ECCM) ? &fsl_elbc_oob_lp_eccm1 : &fsl_elbc_oob_lp_eccm0; } } else { dev_err(priv->dev, "fsl_elbc_init: page size %d is not supported\n", mtd->writesize); return -1; } return 0; } static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct fsl_elbc_mtd *priv = chip->priv; struct fsl_lbc_ctrl *ctrl = priv->ctrl; struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand; fsl_elbc_read_buf(mtd, buf, mtd->writesize); if (oob_required) fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize); if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL) mtd->ecc_stats.failed++; return elbc_fcm_ctrl->max_bitflips; } /* ECC will be calculated automatically, and errors will be detected in * waitfunc. 
*/ static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required) { fsl_elbc_write_buf(mtd, buf, mtd->writesize); fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) { struct fsl_lbc_ctrl *ctrl = priv->ctrl; struct fsl_lbc_regs __iomem *lbc = ctrl->regs; struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand; struct nand_chip *chip = &priv->chip; dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank); /* Fill in fsl_elbc_mtd structure */ priv->mtd.priv = chip; priv->mtd.owner = THIS_MODULE; /* set timeout to maximum */ priv->fmr = 15 << FMR_CWTO_SHIFT; if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS) priv->fmr |= FMR_ECCM; /* fill in nand_chip structure */ /* set up function call table */ chip->read_byte = fsl_elbc_read_byte; chip->write_buf = fsl_elbc_write_buf; chip->read_buf = fsl_elbc_read_buf; chip->select_chip = fsl_elbc_select_chip; chip->cmdfunc = fsl_elbc_cmdfunc; chip->waitfunc = fsl_elbc_wait; chip->bbt_td = &bbt_main_descr; chip->bbt_md = &bbt_mirror_descr; /* set up nand options */ chip->bbt_options = NAND_BBT_USE_FLASH; chip->controller = &elbc_fcm_ctrl->controller; chip->priv = priv; chip->ecc.read_page = fsl_elbc_read_page; chip->ecc.write_page = fsl_elbc_write_page; /* If CS Base Register selects full hardware ECC then use it */ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == BR_DECC_CHK_GEN) { chip->ecc.mode = NAND_ECC_HW; /* put in small page settings and adjust later if needed */ chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 
&fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0; chip->ecc.size = 512; chip->ecc.bytes = 3; chip->ecc.strength = 1; } else { /* otherwise fall back to default software ECC */ chip->ecc.mode = NAND_ECC_SOFT; } return 0; } static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv) { struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; nand_release(&priv->mtd); kfree(priv->mtd.name); if (priv->vbase) iounmap(priv->vbase); elbc_fcm_ctrl->chips[priv->bank] = NULL; kfree(priv); return 0; } static DEFINE_MUTEX(fsl_elbc_nand_mutex); static int fsl_elbc_nand_probe(struct platform_device *pdev) { struct fsl_lbc_regs __iomem *lbc; struct fsl_elbc_mtd *priv; struct resource res; struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl; static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "ofpart", NULL }; int ret; int bank; struct device *dev; struct device_node *node = pdev->dev.of_node; struct mtd_part_parser_data ppdata; ppdata.of_node = pdev->dev.of_node; if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) return -ENODEV; lbc = fsl_lbc_ctrl_dev->regs; dev = fsl_lbc_ctrl_dev->dev; /* get, allocate and map the memory resource */ ret = of_address_to_resource(node, 0, &res); if (ret) { dev_err(dev, "failed to get resource\n"); return ret; } /* find which chip select it is connected to */ for (bank = 0; bank < MAX_BANKS; bank++) if ((in_be32(&lbc->bank[bank].br) & BR_V) && (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM && (in_be32(&lbc->bank[bank].br) & in_be32(&lbc->bank[bank].or) & BR_BA) == fsl_lbc_addr(res.start)) break; if (bank >= MAX_BANKS) { dev_err(dev, "address did not match any chip selects\n"); return -ENODEV; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; mutex_lock(&fsl_elbc_nand_mutex); if (!fsl_lbc_ctrl_dev->nand) { elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL); if (!elbc_fcm_ctrl) { dev_err(dev, "failed to allocate memory\n"); mutex_unlock(&fsl_elbc_nand_mutex); ret = -ENOMEM; goto err; } 
elbc_fcm_ctrl->counter++; spin_lock_init(&elbc_fcm_ctrl->controller.lock); init_waitqueue_head(&elbc_fcm_ctrl->controller.wq); fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl; } else { elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand; } mutex_unlock(&fsl_elbc_nand_mutex); elbc_fcm_ctrl->chips[bank] = priv; priv->bank = bank; priv->ctrl = fsl_lbc_ctrl_dev; priv->dev = &pdev->dev; dev_set_drvdata(priv->dev, priv); priv->vbase = ioremap(res.start, resource_size(&res)); if (!priv->vbase) { dev_err(dev, "failed to map chip region\n"); ret = -ENOMEM; goto err; } priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start); if (!priv->mtd.name) { ret = -ENOMEM; goto err; } ret = fsl_elbc_chip_init(priv); if (ret) goto err; ret = nand_scan_ident(&priv->mtd, 1, NULL); if (ret) goto err; ret = fsl_elbc_chip_init_tail(&priv->mtd); if (ret) goto err; ret = nand_scan_tail(&priv->mtd); if (ret) goto err; /* First look for RedBoot table or partitions on the command * line, these take precedence over device tree information */ mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata, NULL, 0); printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n", (unsigned long long)res.start, priv->bank); return 0; err: fsl_elbc_chip_remove(priv); return ret; } static int fsl_elbc_nand_remove(struct platform_device *pdev) { struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand; struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev); fsl_elbc_chip_remove(priv); mutex_lock(&fsl_elbc_nand_mutex); elbc_fcm_ctrl->counter--; if (!elbc_fcm_ctrl->counter) { fsl_lbc_ctrl_dev->nand = NULL; kfree(elbc_fcm_ctrl); } mutex_unlock(&fsl_elbc_nand_mutex); return 0; } static const struct of_device_id fsl_elbc_nand_match[] = { { .compatible = "fsl,elbc-fcm-nand", }, {} }; static struct platform_driver fsl_elbc_nand_driver = { .driver = { .name = "fsl,elbc-fcm-nand", .owner = THIS_MODULE, .of_match_table = fsl_elbc_nand_match, }, .probe = fsl_elbc_nand_probe, .remove = fsl_elbc_nand_remove, }; 
module_platform_driver(fsl_elbc_nand_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Freescale"); MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
gpl-2.0
varunchitre15/thunderzap_sprout
drivers/net/wireless/mwifiex/sta_tx.c
2544
6234
/*
 * Marvell Wireless LAN device driver: station TX data handling
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"

/*
 * This function fills the TxPD for tx packets.
 *
 * The Tx buffer received by this function should already have the
 * header space allocated for TxPD (skb_push below asserts on headroom
 * via BUG_ON).
 *
 * This function inserts the TxPD in between interface header and actual
 * data and adjusts the buffer pointers accordingly.
 *
 * The following TxPD fields are set by this function, as required -
 *      - BSS number
 *      - Tx packet length and offset
 *      - Priority
 *      - Packet delay
 *      - Priority specific Tx control
 *      - Flags
 *
 * Returns skb->data, i.e. the start of the (possibly padded) TxPD; on a
 * zero-length packet the skb is left untouched and tx_info->status_code
 * is set to -1 to flag the error to the caller.
 */
void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
			       struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct txpd *local_tx_pd;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
	u8 pad;
	u16 pkt_type, pkt_offset;

	/* Reject empty frames; signal failure through the skb control block */
	if (!skb->len) {
		dev_err(adapter->dev, "Tx: bad packet length: %d\n",
			skb->len);
		tx_info->status_code = -1;
		return skb->data;
	}

	pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;

	/* If skb->data is not aligned, add padding so the TxPD that is
	 * pushed in front of the payload starts on a 4-byte boundary.
	 * (The "- NULL" idiom converts the pointer to an offset.) */
	pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;

	/* Caller must have reserved headroom for TxPD + interface header */
	BUG_ON(skb_headroom(skb) < (sizeof(*local_tx_pd)
				    + INTF_HEADER_LEN + pad));
	skb_push(skb, sizeof(*local_tx_pd) + pad);

	local_tx_pd = (struct txpd *) skb->data;
	memset(local_tx_pd, 0, sizeof(struct txpd));
	local_tx_pd->bss_num = priv->bss_num;
	local_tx_pd->bss_type = priv->bss_type;
	/* Payload length excludes the TxPD and alignment padding */
	local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len -
						       (sizeof(struct txpd)
							+ pad)));

	local_tx_pd->priority = (u8) skb->priority;
	local_tx_pd->pkt_delay_2ms =
				mwifiex_wmm_compute_drv_pkt_delay(priv, skb);

	if (local_tx_pd->priority <
	    ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
		/*
		 * Set the priority specific tx_control field, setting of 0 will
		 * cause the default value to be used later in this function
		 */
		local_tx_pd->tx_control =
			cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[local_tx_pd->
								   priority]);

	/* In power-save (PPS/UAPSD) mode, tag the last queued packet so
	 * firmware knows it may doze afterwards */
	if (adapter->pps_uapsd_mode) {
		if (mwifiex_check_last_packet_indication(priv)) {
			adapter->tx_lock_flag = true;
			local_tx_pd->flags =
				MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
		}
	}

	/* Offset of actual data */
	pkt_offset = sizeof(struct txpd) + pad;
	if (pkt_type == PKT_TYPE_MGMT) {
		/* Set the packet type and add header for management frame */
		local_tx_pd->tx_pkt_type = cpu_to_le16(pkt_type);
		pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
	}

	local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset);

	/* make space for INTF_HEADER_LEN */
	skb_push(skb, INTF_HEADER_LEN);

	if (!local_tx_pd->tx_control)
		/* TxCtrl set by user or default */
		local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

	return skb->data;
}

/*
 * This function tells firmware to send a NULL data packet.
 *
 * The function creates a NULL data packet with TxPD and sends to the
 * firmware for transmission, with highest priority setting.
 */
/*
 * Build and transmit a NULL data frame (TxPD only, no payload) used for
 * power-save signalling.
 *
 * @priv:  per-interface private state (must be associated)
 * @flags: TxPD flags to set, e.g. the last-packet indication
 *
 * Returns the host_to_card() result: 0 on synchronous success,
 * -EINPROGRESS if the transfer was queued, -EBUSY / -1 on failure.
 * The skb is consumed here on every outcome except -EINPROGRESS,
 * where the lower layer owns it.
 */
int
mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct txpd *local_tx_pd;
/* sizeof(struct txpd) + Interface specific header */
#define NULL_PACKET_HDR 64
	u32 data_len = NULL_PACKET_HDR;
	struct sk_buff *skb;
	int ret;
	struct mwifiex_txinfo *tx_info = NULL;

	/* Nothing to do if the device is gone, we are not associated,
	 * or a data transfer is already in flight */
	if (adapter->surprise_removed)
		return -1;

	if (!priv->media_connected)
		return -1;

	if (adapter->data_sent)
		return -1;

	skb = dev_alloc_skb(data_len);
	if (!skb)
		return -1;

	tx_info = MWIFIEX_SKB_TXCB(skb);
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;
	skb_reserve(skb, sizeof(struct txpd) + INTF_HEADER_LEN);
	skb_push(skb, sizeof(struct txpd));

	/* NOTE(review): the TxPD is not zeroed here (dev_alloc_skb does
	 * not clear data); only the fields below are set explicitly --
	 * confirm the remaining txpd fields are don't-care for firmware. */
	local_tx_pd = (struct txpd *) skb->data;
	local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
	local_tx_pd->flags = flags;
	local_tx_pd->priority = WMM_HIGHEST_PRIORITY;
	local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
	local_tx_pd->bss_num = priv->bss_num;
	local_tx_pd->bss_type = priv->bss_type;

	/* USB has a dedicated data endpoint; other interfaces prepend
	 * the interface header and send a typed transfer */
	if (adapter->iface_type == MWIFIEX_USB) {
		ret = adapter->if_ops.host_to_card(adapter,
						   MWIFIEX_USB_EP_DATA,
						   skb, NULL);
	} else {
		skb_push(skb, INTF_HEADER_LEN);
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, NULL);
	}
	switch (ret) {
	case -EBUSY:
		adapter->data_sent = true;
		/* Fall through FAILURE handling */
	case -1:
		dev_kfree_skb_any(skb);
		dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
			__func__, ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		break;
	case 0:
		/* Sent synchronously; hold off further TX until firmware
		 * confirms (tx_lock_flag) */
		dev_kfree_skb_any(skb);
		dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n",
			__func__);
		adapter->tx_lock_flag = true;
		break;
	case -EINPROGRESS:
		/* Lower layer owns the skb and will complete it later */
		break;
	default:
		break;
	}

	return ret;
}

/*
 * This function checks if we need to send last packet indication.
 */
/*
 * Decide whether the frame about to be sent should carry the
 * "last packet" power-save indication.
 *
 * Returns true only when periodic power save is active
 * (sleep_period.period non-zero), all WMM TX queues are empty, and no
 * command is sent, current, or pending; otherwise returns false and
 * arms delay_null_pkt so the NULL-packet indication is deferred.
 */
u8
mwifiex_check_last_packet_indication(struct mwifiex_private *priv)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	u8 ret = false;

	/* Power save not in use -- never indicate */
	if (!adapter->sleep_period.period)
		return ret;
	if (mwifiex_wmm_lists_empty(adapter))
			ret = true;

	if (ret && !adapter->cmd_sent && !adapter->curr_cmd &&
	    !is_command_pending(adapter)) {
		adapter->delay_null_pkt = false;
		ret = true;
	} else {
		/* Command traffic outstanding (or queues non-empty):
		 * postpone the NULL packet instead */
		ret = false;
		adapter->delay_null_pkt = true;
	}

	return ret;
}
gpl-2.0
mipltd/imx6merlin-linux-3.14.28
arch/arm/mach-imx/devices/platform-mxc_rnga.c
2800
1326
/*
 * Copyright (C) 2010 Pengutronix
 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */
#include "../hardware.h"
#include "devices-common.h"

/* Per-SoC description of the RNGA block: just its register base address */
struct imx_mxc_rnga_data {
	resource_size_t iobase;
};

/* Expand to an initializer using the SoC's <soc>_RNGA_BASE_ADDR constant */
#define imx_mxc_rnga_data_entry_single(soc)				\
	{								\
		.iobase = soc ## _RNGA_BASE_ADDR,			\
	}

#ifdef CONFIG_SOC_IMX31
static const struct imx_mxc_rnga_data imx31_mxc_rnga_data __initconst =
	imx_mxc_rnga_data_entry_single(MX31);
#endif /* ifdef CONFIG_SOC_IMX31 */

/*
 * Register the "mxc_rnga" platform device with a single 16K MMIO
 * resource at data->iobase.  Returns the device or an ERR_PTR.
 */
static struct platform_device *__init imx_add_mxc_rnga(
		const struct imx_mxc_rnga_data *data)
{
	struct resource res[] = {
		{
			.start = data->iobase,
			.end = data->iobase + SZ_16K - 1,
			.flags = IORESOURCE_MEM,
		},
	};
	return imx_add_platform_device("mxc_rnga", -1,
			res, ARRAY_SIZE(res), NULL, 0);
}

/*
 * arch_initcall: add the RNGA device on SoCs that have one (i.MX31
 * when enabled); on other SoCs registration is skipped and the
 * resulting -ENODEV is translated to a non-zero return.
 */
static int __init imxXX_add_mxc_rnga(void)
{
	struct platform_device *ret;

#if defined(CONFIG_SOC_IMX31)
	if (cpu_is_mx31())
		ret = imx_add_mxc_rnga(&imx31_mxc_rnga_data);
	else
#endif /* if defined(CONFIG_SOC_IMX31) */
		ret = ERR_PTR(-ENODEV);

	if (IS_ERR(ret))
		return PTR_ERR(ret);
	return 0;
}
arch_initcall(imxXX_add_mxc_rnga);
gpl-2.0
gokulnatha/GT-I9505
fs/befs/linuxvfs.c
4592
24746
/* * linux/fs/befs/linuxvfs.c * * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/stat.h> #include <linux/nls.h> #include <linux/buffer_head.h> #include <linux/vfs.h> #include <linux/parser.h> #include <linux/namei.h> #include "befs.h" #include "btree.h" #include "inode.h" #include "datastream.h" #include "super.h" #include "io.h" MODULE_DESCRIPTION("BeOS File System (BeFS) driver"); MODULE_AUTHOR("Will Dyson"); MODULE_LICENSE("GPL"); /* The units the vfs expects inode->i_blocks to be in */ #define VFS_BLOCK_SIZE 512 static int befs_readdir(struct file *, void *, filldir_t); static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int); static int befs_readpage(struct file *file, struct page *page); static sector_t befs_bmap(struct address_space *mapping, sector_t block); static struct dentry *befs_lookup(struct inode *, struct dentry *, struct nameidata *); static struct inode *befs_iget(struct super_block *, unsigned long); static struct inode *befs_alloc_inode(struct super_block *sb); static void befs_destroy_inode(struct inode *inode); static int befs_init_inodecache(void); static void befs_destroy_inodecache(void); static void *befs_follow_link(struct dentry *, struct nameidata *); static void befs_put_link(struct dentry *, struct nameidata *, void *); static int befs_utf2nls(struct super_block *sb, const char *in, int in_len, char **out, int *out_len); static int befs_nls2utf(struct super_block *sb, const char *in, int in_len, char **out, int *out_len); static void befs_put_super(struct super_block *); static int befs_remount(struct super_block *, int *, char *); static int befs_statfs(struct dentry *, struct kstatfs *); static int parse_options(char *, befs_mount_options *); static const struct super_operations befs_sops = { .alloc_inode = befs_alloc_inode, /* allocate a new inode */ .destroy_inode = befs_destroy_inode, 
/* deallocate an inode */ .put_super = befs_put_super, /* uninit super */ .statfs = befs_statfs, /* statfs */ .remount_fs = befs_remount, .show_options = generic_show_options, }; /* slab cache for befs_inode_info objects */ static struct kmem_cache *befs_inode_cachep; static const struct file_operations befs_dir_operations = { .read = generic_read_dir, .readdir = befs_readdir, .llseek = generic_file_llseek, }; static const struct inode_operations befs_dir_inode_operations = { .lookup = befs_lookup, }; static const struct address_space_operations befs_aops = { .readpage = befs_readpage, .bmap = befs_bmap, }; static const struct inode_operations befs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = befs_follow_link, .put_link = befs_put_link, }; /* * Called by generic_file_read() to read a page of data * * In turn, simply calls a generic block read function and * passes it the address of befs_get_block, for mapping file * positions to disk blocks. */ static int befs_readpage(struct file *file, struct page *page) { return block_read_full_page(page, befs_get_block); } static sector_t befs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, befs_get_block); } /* * Generic function to map a file position (block) to a * disk offset (passed back in bh_result). * * Used by many higher level functions. * * Calls befs_fblock2brun() in datastream.c to do the real work. 
* * -WD 10-26-01 */ static int befs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { struct super_block *sb = inode->i_sb; befs_data_stream *ds = &BEFS_I(inode)->i_data.ds; befs_block_run run = BAD_IADDR; int res = 0; ulong disk_off; befs_debug(sb, "---> befs_get_block() for inode %lu, block %ld", inode->i_ino, block); if (block < 0) { befs_error(sb, "befs_get_block() was asked for a block " "number less than zero: block %ld in inode %lu", block, inode->i_ino); return -EIO; } if (create) { befs_error(sb, "befs_get_block() was asked to write to " "block %ld in inode %lu", block, inode->i_ino); return -EPERM; } res = befs_fblock2brun(sb, ds, block, &run); if (res != BEFS_OK) { befs_error(sb, "<--- befs_get_block() for inode %lu, block " "%ld ERROR", inode->i_ino, block); return -EFBIG; } disk_off = (ulong) iaddr2blockno(sb, &run); map_bh(bh_result, inode->i_sb, disk_off); befs_debug(sb, "<--- befs_get_block() for inode %lu, block %ld, " "disk address %lu", inode->i_ino, block, disk_off); return 0; } static struct dentry * befs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode = NULL; struct super_block *sb = dir->i_sb; befs_data_stream *ds = &BEFS_I(dir)->i_data.ds; befs_off_t offset; int ret; int utfnamelen; char *utfname; const char *name = dentry->d_name.name; befs_debug(sb, "---> befs_lookup() " "name %s inode %ld", dentry->d_name.name, dir->i_ino); /* Convert to UTF-8 */ if (BEFS_SB(sb)->nls) { ret = befs_nls2utf(sb, name, strlen(name), &utfname, &utfnamelen); if (ret < 0) { befs_debug(sb, "<--- befs_lookup() ERROR"); return ERR_PTR(ret); } ret = befs_btree_find(sb, ds, utfname, &offset); kfree(utfname); } else { ret = befs_btree_find(sb, ds, dentry->d_name.name, &offset); } if (ret == BEFS_BT_NOT_FOUND) { befs_debug(sb, "<--- befs_lookup() %s not found", dentry->d_name.name); return ERR_PTR(-ENOENT); } else if (ret != BEFS_OK || offset == 0) { befs_warning(sb, "<--- 
befs_lookup() Error"); return ERR_PTR(-ENODATA); } inode = befs_iget(dir->i_sb, (ino_t) offset); if (IS_ERR(inode)) return ERR_CAST(inode); d_add(dentry, inode); befs_debug(sb, "<--- befs_lookup()"); return NULL; } static int befs_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; befs_data_stream *ds = &BEFS_I(inode)->i_data.ds; befs_off_t value; int result; size_t keysize; unsigned char d_type; char keybuf[BEFS_NAME_LEN + 1]; char *nlsname; int nlsnamelen; const char *dirname = filp->f_path.dentry->d_name.name; befs_debug(sb, "---> befs_readdir() " "name %s, inode %ld, filp->f_pos %Ld", dirname, inode->i_ino, filp->f_pos); result = befs_btree_read(sb, ds, filp->f_pos, BEFS_NAME_LEN + 1, keybuf, &keysize, &value); if (result == BEFS_ERR) { befs_debug(sb, "<--- befs_readdir() ERROR"); befs_error(sb, "IO error reading %s (inode %lu)", dirname, inode->i_ino); return -EIO; } else if (result == BEFS_BT_END) { befs_debug(sb, "<--- befs_readdir() END"); return 0; } else if (result == BEFS_BT_EMPTY) { befs_debug(sb, "<--- befs_readdir() Empty directory"); return 0; } d_type = DT_UNKNOWN; /* Convert to NLS */ if (BEFS_SB(sb)->nls) { result = befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen); if (result < 0) { befs_debug(sb, "<--- befs_readdir() ERROR"); return result; } result = filldir(dirent, nlsname, nlsnamelen, filp->f_pos, (ino_t) value, d_type); kfree(nlsname); } else { result = filldir(dirent, keybuf, keysize, filp->f_pos, (ino_t) value, d_type); } filp->f_pos++; befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos); return 0; } static struct inode * befs_alloc_inode(struct super_block *sb) { struct befs_inode_info *bi; bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep, GFP_KERNEL); if (!bi) return NULL; return &bi->vfs_inode; } static void befs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct 
inode, i_rcu); kmem_cache_free(befs_inode_cachep, BEFS_I(inode)); } static void befs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, befs_i_callback); } static void init_once(void *foo) { struct befs_inode_info *bi = (struct befs_inode_info *) foo; inode_init_once(&bi->vfs_inode); } static struct inode *befs_iget(struct super_block *sb, unsigned long ino) { struct buffer_head *bh = NULL; befs_inode *raw_inode = NULL; befs_sb_info *befs_sb = BEFS_SB(sb); befs_inode_info *befs_ino = NULL; struct inode *inode; long ret = -EIO; befs_debug(sb, "---> befs_read_inode() " "inode = %lu", ino); inode = iget_locked(sb, ino); if (IS_ERR(inode)) return inode; if (!(inode->i_state & I_NEW)) return inode; befs_ino = BEFS_I(inode); /* convert from vfs's inode number to befs's inode number */ befs_ino->i_inode_num = blockno2iaddr(sb, inode->i_ino); befs_debug(sb, " real inode number [%u, %hu, %hu]", befs_ino->i_inode_num.allocation_group, befs_ino->i_inode_num.start, befs_ino->i_inode_num.len); bh = befs_bread(sb, inode->i_ino); if (!bh) { befs_error(sb, "unable to read inode block - " "inode = %lu", inode->i_ino); goto unacquire_none; } raw_inode = (befs_inode *) bh->b_data; befs_dump_inode(sb, raw_inode); if (befs_check_inode(sb, raw_inode, inode->i_ino) != BEFS_OK) { befs_error(sb, "Bad inode: %lu", inode->i_ino); goto unacquire_bh; } inode->i_mode = (umode_t) fs32_to_cpu(sb, raw_inode->mode); /* * set uid and gid. But since current BeOS is single user OS, so * you can change by "uid" or "gid" options. */ inode->i_uid = befs_sb->mount_opts.use_uid ? befs_sb->mount_opts.uid : (uid_t) fs32_to_cpu(sb, raw_inode->uid); inode->i_gid = befs_sb->mount_opts.use_gid ? befs_sb->mount_opts.gid : (gid_t) fs32_to_cpu(sb, raw_inode->gid); set_nlink(inode, 1); /* * BEFS's time is 64 bits, but current VFS is 32 bits... * BEFS don't have access time. Nor inode change time. VFS * doesn't have creation time. 
* Also, the lower 16 bits of the last_modified_time and * create_time are just a counter to help ensure uniqueness * for indexing purposes. (PFD, page 54) */ inode->i_mtime.tv_sec = fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16; inode->i_mtime.tv_nsec = 0; /* lower 16 bits are not a time */ inode->i_ctime = inode->i_mtime; inode->i_atime = inode->i_mtime; befs_ino->i_inode_num = fsrun_to_cpu(sb, raw_inode->inode_num); befs_ino->i_parent = fsrun_to_cpu(sb, raw_inode->parent); befs_ino->i_attribute = fsrun_to_cpu(sb, raw_inode->attributes); befs_ino->i_flags = fs32_to_cpu(sb, raw_inode->flags); if (S_ISLNK(inode->i_mode) && !(befs_ino->i_flags & BEFS_LONG_SYMLINK)){ inode->i_size = 0; inode->i_blocks = befs_sb->block_size / VFS_BLOCK_SIZE; strncpy(befs_ino->i_data.symlink, raw_inode->data.symlink, BEFS_SYMLINK_LEN - 1); befs_ino->i_data.symlink[BEFS_SYMLINK_LEN - 1] = '\0'; } else { int num_blks; befs_ino->i_data.ds = fsds_to_cpu(sb, &raw_inode->data.datastream); num_blks = befs_count_blocks(sb, &befs_ino->i_data.ds); inode->i_blocks = num_blks * (befs_sb->block_size / VFS_BLOCK_SIZE); inode->i_size = befs_ino->i_data.ds.size; } inode->i_mapping->a_ops = &befs_aops; if (S_ISREG(inode->i_mode)) { inode->i_fop = &generic_ro_fops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &befs_dir_inode_operations; inode->i_fop = &befs_dir_operations; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &befs_symlink_inode_operations; } else { befs_error(sb, "Inode %lu is not a regular file, " "directory or symlink. THAT IS WRONG! BeFS has no " "on disk special files", inode->i_ino); goto unacquire_bh; } brelse(bh); befs_debug(sb, "<--- befs_read_inode()"); unlock_new_inode(inode); return inode; unacquire_bh: brelse(bh); unacquire_none: iget_failed(inode); befs_debug(sb, "<--- befs_read_inode() - Bad inode"); return ERR_PTR(ret); } /* Initialize the inode cache. Called at fs setup. * * Taken from NFS implementation by Al Viro. 
 */
/*
 * Create the slab cache for struct befs_inode_info.
 *
 * Returns 0 on success, -ENOMEM if the cache could not be created.
 */
static int
befs_init_inodecache(void)
{
	befs_inode_cachep = kmem_cache_create("befs_inode_cache",
					      sizeof (struct befs_inode_info),
					      0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
					      init_once);
	if (befs_inode_cachep == NULL) {
		printk(KERN_ERR "befs_init_inodecache: "
		       "Couldn't initialize inode slabcache\n");
		return -ENOMEM;
	}
	return 0;
}

/* Called at fs teardown.
 *
 * Taken from NFS implementation by Al Viro.
 */
static void
befs_destroy_inodecache(void)
{
	kmem_cache_destroy(befs_inode_cachep);
}

/*
 * The inode of symbolic link is different to data stream.
 * The data stream become link name. Unless the LONG_SYMLINK
 * flag is set.
 */
/*
 * ->follow_link: resolve the symlink target and hand it to the VFS.
 *
 * Short symlinks live inline in the inode; long symlinks are read from
 * the data stream into a kmalloc'd buffer that befs_put_link() frees.
 * On failure an ERR_PTR is stored via nd_set_link() so the VFS sees the
 * error; this function itself always returns NULL (no cookie).
 */
static void *
befs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
	char *link;

	if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
		struct super_block *sb = dentry->d_sb;
		befs_data_stream *data = &befs_ino->i_data.ds;
		befs_off_t len = data->size;

		if (len == 0) {
			/* a zero-length stream cannot hold even the NUL */
			befs_error(sb, "Long symlink with illegal length");
			link = ERR_PTR(-EIO);
		} else {
			befs_debug(sb, "Follow long symlink");

			link = kmalloc(len, GFP_NOFS);
			if (!link) {
				link = ERR_PTR(-ENOMEM);
			} else if (befs_read_lsymlink(sb, data,
						      link, len) != len) {
				/* short read: discard partial buffer */
				kfree(link);
				befs_error(sb, "Failed to read entire long symlink");
				link = ERR_PTR(-EIO);
			} else {
				/* force termination of the on-disk string */
				link[len - 1] = '\0';
			}
		}
	} else {
		/* short symlink: target stored inline in the inode */
		link = befs_ino->i_data.symlink;
	}

	nd_set_link(nd, link);
	return NULL;
}

/*
 * ->put_link: release what follow_link handed out.  Only long symlinks
 * allocated a buffer; inline targets belong to the inode and stay put.
 */
static void
befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
	befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);

	if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
		char *link = nd_get_link(nd);
		if (!IS_ERR(link))
			kfree(link);
	}
}

/*
 * UTF-8 to NLS charset convert routine
 *
 * Changed 8/10/01 by Will Dyson.
Now use uni2char() / char2uni() rather than
 * the nls tables directly
 */
/*
 * Convert the UTF-8 string @in (length @in_len bytes) into the mount's
 * NLS character set.  The output buffer is kmalloc'd here and returned
 * through *@out (caller frees with kfree()); *@out_len receives its
 * length in bytes.
 *
 * Returns the number of output bytes on success, -EINVAL if no NLS
 * table is loaded, -ENOMEM on allocation failure, or -EILSEQ on an
 * unconvertible character.
 */
static int
befs_utf2nls(struct super_block *sb, const char *in,
	     int in_len, char **out, int *out_len)
{
	struct nls_table *nls = BEFS_SB(sb)->nls;
	int i, o;		/* i: input byte offset, o: output byte offset */
	unicode_t uni;
	int unilen, utflen;
	char *result;
	/* The utf8->nls conversion won't make the final nls string bigger
	 * than the utf one, but if the string is pure ascii they'll have the
	 * same width and an extra char is needed to save the additional \0 */
	int maxlen = in_len + 1;

	befs_debug(sb, "---> utf2nls()");

	if (!nls) {
		befs_error(sb, "befs_utf2nls called with no NLS table loaded");
		return -EINVAL;
	}

	*out = result = kmalloc(maxlen, GFP_NOFS);
	if (!*out) {
		befs_error(sb, "befs_utf2nls() cannot allocate memory");
		*out_len = 0;
		return -ENOMEM;
	}

	for (i = o = 0; i < in_len; i += utflen, o += unilen) {

		/* convert from UTF-8 to Unicode */
		utflen = utf8_to_utf32(&in[i], in_len - i, &uni);
		if (utflen < 0)
			goto conv_err;

		/* convert from Unicode to nls */
		if (uni > MAX_WCHAR_T)
			goto conv_err;
		/* in_len - o never exceeds the space left in result[] */
		unilen = nls->uni2char(uni, &result[o], in_len - o);
		if (unilen < 0)
			goto conv_err;
	}
	result[o] = '\0';
	*out_len = o;

	befs_debug(sb, "<--- utf2nls()");

	return o;

      conv_err:
	befs_error(sb, "Name using character set %s contains a character that "
		   "cannot be converted to unicode.", nls->charset);
	befs_debug(sb, "<--- utf2nls()");
	kfree(result);
	return -EILSEQ;
}

/**
 * befs_nls2utf - Convert NLS string to utf8 encoding
 * @sb: Superblock
 * @src: Input string buffer in NLS format
 * @srclen: Length of input string in bytes
 * @dest: The output string in UTF-8 format
 * @destlen: Length of the output buffer
 *
 * Converts input string @src, which is in the format of the loaded NLS map,
 * into a utf8 string.
 *
 * The destination string @dest is allocated by this function and the caller is
 * responsible for freeing it with kfree()
 *
 * On return, *@destlen is the length of @dest in bytes.
* * On success, the return value is the number of utf8 characters written to * the output buffer @dest. * * On Failure, a negative number coresponding to the error code is returned. */ static int befs_nls2utf(struct super_block *sb, const char *in, int in_len, char **out, int *out_len) { struct nls_table *nls = BEFS_SB(sb)->nls; int i, o; wchar_t uni; int unilen, utflen; char *result; /* There're nls characters that will translate to 3-chars-wide UTF-8 * characters, a additional byte is needed to save the final \0 * in special cases */ int maxlen = (3 * in_len) + 1; befs_debug(sb, "---> nls2utf()\n"); if (!nls) { befs_error(sb, "befs_nls2utf called with no NLS table loaded."); return -EINVAL; } *out = result = kmalloc(maxlen, GFP_NOFS); if (!*out) { befs_error(sb, "befs_nls2utf() cannot allocate memory"); *out_len = 0; return -ENOMEM; } for (i = o = 0; i < in_len; i += unilen, o += utflen) { /* convert from nls to unicode */ unilen = nls->char2uni(&in[i], in_len - i, &uni); if (unilen < 0) goto conv_err; /* convert from unicode to UTF-8 */ utflen = utf32_to_utf8(uni, &result[o], 3); if (utflen <= 0) goto conv_err; } result[o] = '\0'; *out_len = o; befs_debug(sb, "<--- nls2utf()"); return i; conv_err: befs_error(sb, "Name using charecter set %s contains a charecter that " "cannot be converted to unicode.", nls->charset); befs_debug(sb, "<--- nls2utf()"); kfree(result); return -EILSEQ; } /** * Use the * */ enum { Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err, }; static const match_table_t befs_tokens = { {Opt_uid, "uid=%d"}, {Opt_gid, "gid=%d"}, {Opt_charset, "iocharset=%s"}, {Opt_debug, "debug"}, {Opt_err, NULL} }; static int parse_options(char *options, befs_mount_options * opts) { char *p; substring_t args[MAX_OPT_ARGS]; int option; /* Initialize options */ opts->uid = 0; opts->gid = 0; opts->use_uid = 0; opts->use_gid = 0; opts->iocharset = NULL; opts->debug = 0; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) 
continue; token = match_token(p, befs_tokens, args); switch (token) { case Opt_uid: if (match_int(&args[0], &option)) return 0; if (option < 0) { printk(KERN_ERR "BeFS: Invalid uid %d, " "using default\n", option); break; } opts->uid = option; opts->use_uid = 1; break; case Opt_gid: if (match_int(&args[0], &option)) return 0; if (option < 0) { printk(KERN_ERR "BeFS: Invalid gid %d, " "using default\n", option); break; } opts->gid = option; opts->use_gid = 1; break; case Opt_charset: kfree(opts->iocharset); opts->iocharset = match_strdup(&args[0]); if (!opts->iocharset) { printk(KERN_ERR "BeFS: allocation failure for " "iocharset string\n"); return 0; } break; case Opt_debug: opts->debug = 1; break; default: printk(KERN_ERR "BeFS: Unrecognized mount option \"%s\" " "or missing value\n", p); return 0; } } return 1; } /* This function has the responsibiltiy of getting the * filesystem ready for unmounting. * Basically, we free everything that we allocated in * befs_read_inode */ static void befs_put_super(struct super_block *sb) { kfree(BEFS_SB(sb)->mount_opts.iocharset); BEFS_SB(sb)->mount_opts.iocharset = NULL; unload_nls(BEFS_SB(sb)->nls); kfree(sb->s_fs_info); sb->s_fs_info = NULL; } /* Allocate private field of the superblock, fill it. * * Finish filling the public superblock fields * Make the root directory * Load a set of NLS translations if needed. */ static int befs_fill_super(struct super_block *sb, void *data, int silent) { struct buffer_head *bh; befs_sb_info *befs_sb; befs_super_block *disk_sb; struct inode *root; long ret = -EINVAL; const unsigned long sb_block = 0; const off_t x86_sb_off = 512; save_mount_options(sb, data); sb->s_fs_info = kmalloc(sizeof (*befs_sb), GFP_KERNEL); if (sb->s_fs_info == NULL) { printk(KERN_ERR "BeFS(%s): Unable to allocate memory for private " "portion of superblock. 
Bailing.\n", sb->s_id); goto unacquire_none; } befs_sb = BEFS_SB(sb); memset(befs_sb, 0, sizeof(befs_sb_info)); if (!parse_options((char *) data, &befs_sb->mount_opts)) { befs_error(sb, "cannot parse mount options"); goto unacquire_priv_sbp; } befs_debug(sb, "---> befs_fill_super()"); #ifndef CONFIG_BEFS_RW if (!(sb->s_flags & MS_RDONLY)) { befs_warning(sb, "No write support. Marking filesystem read-only"); sb->s_flags |= MS_RDONLY; } #endif /* CONFIG_BEFS_RW */ /* * Set dummy blocksize to read super block. * Will be set to real fs blocksize later. * * Linux 2.4.10 and later refuse to read blocks smaller than * the hardsect size for the device. But we also need to read at * least 1k to get the second 512 bytes of the volume. * -WD 10-26-01 */ sb_min_blocksize(sb, 1024); if (!(bh = sb_bread(sb, sb_block))) { befs_error(sb, "unable to read superblock"); goto unacquire_priv_sbp; } /* account for offset of super block on x86 */ disk_sb = (befs_super_block *) bh->b_data; if ((disk_sb->magic1 == BEFS_SUPER_MAGIC1_LE) || (disk_sb->magic1 == BEFS_SUPER_MAGIC1_BE)) { befs_debug(sb, "Using PPC superblock location"); } else { befs_debug(sb, "Using x86 superblock location"); disk_sb = (befs_super_block *) ((void *) bh->b_data + x86_sb_off); } if (befs_load_sb(sb, disk_sb) != BEFS_OK) goto unacquire_bh; befs_dump_super_block(sb, disk_sb); brelse(bh); if (befs_check_sb(sb) != BEFS_OK) goto unacquire_priv_sbp; if( befs_sb->num_blocks > ~((sector_t)0) ) { befs_error(sb, "blocks count: %Lu " "is larger than the host can use", befs_sb->num_blocks); goto unacquire_priv_sbp; } /* * set up enough so that it can read an inode * Fill in kernel superblock fields from private sb */ sb->s_magic = BEFS_SUPER_MAGIC; /* Set real blocksize of fs */ sb_set_blocksize(sb, (ulong) befs_sb->block_size); sb->s_op = &befs_sops; root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir))); if (IS_ERR(root)) { ret = PTR_ERR(root); goto unacquire_priv_sbp; } sb->s_root = d_make_root(root); if 
(!sb->s_root) { befs_error(sb, "get root inode failed"); goto unacquire_priv_sbp; } /* load nls library */ if (befs_sb->mount_opts.iocharset) { befs_debug(sb, "Loading nls: %s", befs_sb->mount_opts.iocharset); befs_sb->nls = load_nls(befs_sb->mount_opts.iocharset); if (!befs_sb->nls) { befs_warning(sb, "Cannot load nls %s" " loading default nls", befs_sb->mount_opts.iocharset); befs_sb->nls = load_nls_default(); } /* load default nls if none is specified in mount options */ } else { befs_debug(sb, "Loading default nls"); befs_sb->nls = load_nls_default(); } return 0; /*****************/ unacquire_bh: brelse(bh); unacquire_priv_sbp: kfree(befs_sb->mount_opts.iocharset); kfree(sb->s_fs_info); unacquire_none: sb->s_fs_info = NULL; return ret; } static int befs_remount(struct super_block *sb, int *flags, char *data) { if (!(*flags & MS_RDONLY)) return -EINVAL; return 0; } static int befs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); befs_debug(sb, "---> befs_statfs()"); buf->f_type = BEFS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = BEFS_SB(sb)->num_blocks; buf->f_bfree = BEFS_SB(sb)->num_blocks - BEFS_SB(sb)->used_blocks; buf->f_bavail = buf->f_bfree; buf->f_files = 0; /* UNKNOWN */ buf->f_ffree = 0; /* UNKNOWN */ buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); buf->f_namelen = BEFS_NAME_LEN; befs_debug(sb, "<--- befs_statfs()"); return 0; } static struct dentry * befs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, befs_fill_super); } static struct file_system_type befs_fs_type = { .owner = THIS_MODULE, .name = "befs", .mount = befs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static int __init init_befs_fs(void) { int err; printk(KERN_INFO "BeFS version: %s\n", BEFS_VERSION); err = befs_init_inodecache(); if (err) goto 
unacquire_none; err = register_filesystem(&befs_fs_type); if (err) goto unacquire_inodecache; return 0; unacquire_inodecache: befs_destroy_inodecache(); unacquire_none: return err; } static void __exit exit_befs_fs(void) { befs_destroy_inodecache(); unregister_filesystem(&befs_fs_type); } /* Macros that typecheck the init and exit functions, ensures that they are called at init and cleanup, and eliminates warnings about unused functions. */ module_init(init_befs_fs) module_exit(exit_befs_fs)
gpl-2.0
donkeykang/donkeyk
drivers/net/wimax/i2400m/control.c
5104
43889
/* * Intel Wireless WiMAX Connection 2400m * Miscellaneous control functions for managing the device * * * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * * Intel Corporation <linux-wimax@intel.com> * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * - Initial implementation * * This is a collection of functions used to control the device (plus * a few helpers). 
* * There are utilities for handling TLV buffers, hooks on the device's * reports to act on device changes of state [i2400m_report_hook()], * on acks to commands [i2400m_msg_ack_hook()], a helper for sending * commands to the device and blocking until a reply arrives * [i2400m_msg_to_dev()], a few high level commands for manipulating * the device state, powersving mode and configuration plus the * routines to setup the device once communication is stablished with * it [i2400m_dev_initialize()]. * * ROADMAP * * i2400m_dev_initialize() Called by i2400m_dev_start() * i2400m_set_init_config() * i2400m_cmd_get_state() * i2400m_dev_shutdown() Called by i2400m_dev_stop() * i2400m_reset() * * i2400m_{cmd,get,set}_*() * i2400m_msg_to_dev() * i2400m_msg_check_status() * * i2400m_report_hook() Called on reception of an event * i2400m_report_state_hook() * i2400m_tlv_buffer_walk() * i2400m_tlv_match() * i2400m_report_tlv_system_state() * i2400m_report_tlv_rf_switches_status() * i2400m_report_tlv_media_status() * i2400m_cmd_enter_powersave() * * i2400m_msg_ack_hook() Called on reception of a reply to a * command, get or set */ #include <stdarg.h> #include "i2400m.h" #include <linux/kernel.h> #include <linux/slab.h> #include <linux/wimax/i2400m.h> #include <linux/export.h> #include <linux/moduleparam.h> #define D_SUBMODULE control #include "debug-levels.h" static int i2400m_idle_mode_disabled;/* 0 (idle mode enabled) by default */ module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644); MODULE_PARM_DESC(idle_mode_disabled, "If true, the device will not enable idle mode negotiation " "with the base station (when connected) to save power."); /* 0 (power saving enabled) by default */ static int i2400m_power_save_disabled; module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644); MODULE_PARM_DESC(power_save_disabled, "If true, the driver will not tell the device to enter " "power saving mode when it reports it is ready for it. 
" "False by default (so the device is told to do power " "saving)."); static int i2400m_passive_mode; /* 0 (passive mode disabled) by default */ module_param_named(passive_mode, i2400m_passive_mode, int, 0644); MODULE_PARM_DESC(passive_mode, "If true, the driver will not do any device setup " "and leave it up to user space, who must be properly " "setup."); /* * Return if a TLV is of a give type and size * * @tlv_hdr: pointer to the TLV * @tlv_type: type of the TLV we are looking for * @tlv_size: expected size of the TLV we are looking for (if -1, * don't check the size). This includes the header * Returns: 0 if the TLV matches * < 0 if it doesn't match at all * > 0 total TLV + payload size, if the type matches, but not * the size */ static ssize_t i2400m_tlv_match(const struct i2400m_tlv_hdr *tlv, enum i2400m_tlv tlv_type, ssize_t tlv_size) { if (le16_to_cpu(tlv->type) != tlv_type) /* Not our type? skip */ return -1; if (tlv_size != -1 && le16_to_cpu(tlv->length) + sizeof(*tlv) != tlv_size) { size_t size = le16_to_cpu(tlv->length) + sizeof(*tlv); printk(KERN_WARNING "W: tlv type 0x%x mismatched because of " "size (got %zu vs %zu expected)\n", tlv_type, size, tlv_size); return size; } return 0; } /* * Given a buffer of TLVs, iterate over them * * @i2400m: device instance * @tlv_buf: pointer to the beginning of the TLV buffer * @buf_size: buffer size in bytes * @tlv_pos: seek position; this is assumed to be a pointer returned * by i2400m_tlv_buffer_walk() [and thus, validated]. The * TLV returned will be the one following this one. * * Usage: * * tlv_itr = NULL; * while (tlv_itr = i2400m_tlv_buffer_walk(i2400m, buf, size, tlv_itr)) { * ... * // Do stuff with tlv_itr, DON'T MODIFY IT * ... 
* } */ static const struct i2400m_tlv_hdr *i2400m_tlv_buffer_walk( struct i2400m *i2400m, const void *tlv_buf, size_t buf_size, const struct i2400m_tlv_hdr *tlv_pos) { struct device *dev = i2400m_dev(i2400m); const struct i2400m_tlv_hdr *tlv_top = tlv_buf + buf_size; size_t offset, length, avail_size; unsigned type; if (tlv_pos == NULL) /* Take the first one? */ tlv_pos = tlv_buf; else /* Nope, the next one */ tlv_pos = (void *) tlv_pos + le16_to_cpu(tlv_pos->length) + sizeof(*tlv_pos); if (tlv_pos == tlv_top) { /* buffer done */ tlv_pos = NULL; goto error_beyond_end; } if (tlv_pos > tlv_top) { tlv_pos = NULL; WARN_ON(1); goto error_beyond_end; } offset = (void *) tlv_pos - (void *) tlv_buf; avail_size = buf_size - offset; if (avail_size < sizeof(*tlv_pos)) { dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], tlv @%zu: " "short header\n", tlv_buf, buf_size, offset); goto error_short_header; } type = le16_to_cpu(tlv_pos->type); length = le16_to_cpu(tlv_pos->length); if (avail_size < sizeof(*tlv_pos) + length) { dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], " "tlv type 0x%04x @%zu: " "short data (%zu bytes vs %zu needed)\n", tlv_buf, buf_size, type, offset, avail_size, sizeof(*tlv_pos) + length); goto error_short_header; } error_short_header: error_beyond_end: return tlv_pos; } /* * Find a TLV in a buffer of sequential TLVs * * @i2400m: device descriptor * @tlv_hdr: pointer to the first TLV in the sequence * @size: size of the buffer in bytes; all TLVs are assumed to fit * fully in the buffer (otherwise we'll complain). * @tlv_type: type of the TLV we are looking for * @tlv_size: expected size of the TLV we are looking for (if -1, * don't check the size). This includes the header * * Returns: NULL if the TLV is not found, otherwise a pointer to * it. If the sizes don't match, an error is printed and NULL * returned. 
*/ static const struct i2400m_tlv_hdr *i2400m_tlv_find( struct i2400m *i2400m, const struct i2400m_tlv_hdr *tlv_hdr, size_t size, enum i2400m_tlv tlv_type, ssize_t tlv_size) { ssize_t match; struct device *dev = i2400m_dev(i2400m); const struct i2400m_tlv_hdr *tlv = NULL; while ((tlv = i2400m_tlv_buffer_walk(i2400m, tlv_hdr, size, tlv))) { match = i2400m_tlv_match(tlv, tlv_type, tlv_size); if (match == 0) /* found it :) */ break; if (match > 0) dev_warn(dev, "TLV type 0x%04x found with size " "mismatch (%zu vs %zu needed)\n", tlv_type, match, tlv_size); } return tlv; } static const struct { char *msg; int errno; } ms_to_errno[I2400M_MS_MAX] = { [I2400M_MS_DONE_OK] = { "", 0 }, [I2400M_MS_DONE_IN_PROGRESS] = { "", 0 }, [I2400M_MS_INVALID_OP] = { "invalid opcode", -ENOSYS }, [I2400M_MS_BAD_STATE] = { "invalid state", -EILSEQ }, [I2400M_MS_ILLEGAL_VALUE] = { "illegal value", -EINVAL }, [I2400M_MS_MISSING_PARAMS] = { "missing parameters", -ENOMSG }, [I2400M_MS_VERSION_ERROR] = { "bad version", -EIO }, [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO }, [I2400M_MS_BUSY] = { "busy", -EBUSY }, [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ }, [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ }, [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO }, [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO }, [I2400M_MS_NO_RF] = { "no RF", -EIO }, [I2400M_MS_NOT_READY_FOR_POWERSAVE] = { "not ready for powersave", -EACCES }, [I2400M_MS_THERMAL_CRITICAL] = { "thermal critical", -EL3HLT }, }; /* * i2400m_msg_check_status - translate a message's status code * * @i2400m: device descriptor * @l3l4_hdr: message header * @strbuf: buffer to place a formatted error message (unless NULL). * @strbuf_size: max amount of available space; larger messages will * be truncated. * * Returns: errno code corresponding to the status code in @l3l4_hdr * and a message in @strbuf describing the error. 
*/ int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr, char *strbuf, size_t strbuf_size) { int result; enum i2400m_ms status = le16_to_cpu(l3l4_hdr->status); const char *str; if (status == 0) return 0; if (status >= ARRAY_SIZE(ms_to_errno)) { str = "unknown status code"; result = -EBADR; } else { str = ms_to_errno[status].msg; result = ms_to_errno[status].errno; } if (strbuf) snprintf(strbuf, strbuf_size, "%s (%d)", str, status); return result; } /* * Act on a TLV System State reported by the device * * @i2400m: device descriptor * @ss: validated System State TLV */ static void i2400m_report_tlv_system_state(struct i2400m *i2400m, const struct i2400m_tlv_system_state *ss) { struct device *dev = i2400m_dev(i2400m); struct wimax_dev *wimax_dev = &i2400m->wimax_dev; enum i2400m_system_state i2400m_state = le32_to_cpu(ss->state); d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state); if (i2400m->state != i2400m_state) { i2400m->state = i2400m_state; wake_up_all(&i2400m->state_wq); } switch (i2400m_state) { case I2400M_SS_UNINITIALIZED: case I2400M_SS_INIT: case I2400M_SS_CONFIG: case I2400M_SS_PRODUCTION: wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED); break; case I2400M_SS_RF_OFF: case I2400M_SS_RF_SHUTDOWN: wimax_state_change(wimax_dev, WIMAX_ST_RADIO_OFF); break; case I2400M_SS_READY: case I2400M_SS_STANDBY: case I2400M_SS_SLEEPACTIVE: wimax_state_change(wimax_dev, WIMAX_ST_READY); break; case I2400M_SS_CONNECTING: case I2400M_SS_WIMAX_CONNECTED: wimax_state_change(wimax_dev, WIMAX_ST_READY); break; case I2400M_SS_SCAN: case I2400M_SS_OUT_OF_ZONE: wimax_state_change(wimax_dev, WIMAX_ST_SCANNING); break; case I2400M_SS_IDLE: d_printf(1, dev, "entering BS-negotiated idle mode\n"); case I2400M_SS_DISCONNECTING: case I2400M_SS_DATA_PATH_CONNECTED: wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED); break; default: /* Huh? just in case, shut it down */ dev_err(dev, "HW BUG? 
unknown state %u: shutting down\n", i2400m_state); i2400m_reset(i2400m, I2400M_RT_WARM); break; } d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n", i2400m, ss, i2400m_state); } /* * Parse and act on a TLV Media Status sent by the device * * @i2400m: device descriptor * @ms: validated Media Status TLV * * This will set the carrier up on down based on the device's link * report. This is done asides of what the WiMAX stack does based on * the device's state as sometimes we need to do a link-renew (the BS * wants us to renew a DHCP lease, for example). * * In fact, doc says that every time we get a link-up, we should do a * DHCP negotiation... */ static void i2400m_report_tlv_media_status(struct i2400m *i2400m, const struct i2400m_tlv_media_status *ms) { struct device *dev = i2400m_dev(i2400m); struct wimax_dev *wimax_dev = &i2400m->wimax_dev; struct net_device *net_dev = wimax_dev->net_dev; enum i2400m_media_status status = le32_to_cpu(ms->media_status); d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status); switch (status) { case I2400M_MEDIA_STATUS_LINK_UP: netif_carrier_on(net_dev); break; case I2400M_MEDIA_STATUS_LINK_DOWN: netif_carrier_off(net_dev); break; /* * This is the network telling us we need to retrain the DHCP * lease -- so far, we are trusting the WiMAX Network Service * in user space to pick this up and poke the DHCP client. */ case I2400M_MEDIA_STATUS_LINK_RENEW: netif_carrier_on(net_dev); break; default: dev_err(dev, "HW BUG? unknown media status %u\n", status); } d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n", i2400m, ms, status); } /* * Process a TLV from a 'state report' * * @i2400m: device descriptor * @tlv: pointer to the TLV header; it has been already validated for * consistent size. * @tag: for error messages * * Act on the TLVs from a 'state report'. 
 */
/*
 * Dispatch one validated TLV from a 'state report'.
 *
 * @i2400m: device descriptor
 * @tlv: pointer to the TLV header; it has been already validated for
 *     consistent size.
 * @tag: label used in debug messages to identify the caller's context.
 *
 * A TLV may match at most one of the three known types (system state,
 * RF status, media status); unknown types are silently ignored.
 */
static void
i2400m_report_state_parse_tlv(struct i2400m *i2400m,
			      const struct i2400m_tlv_hdr *tlv,
			      const char *tag)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_tlv_media_status *ms;
	const struct i2400m_tlv_system_state *ss;
	const struct i2400m_tlv_rf_switches_status *rfss;

	/* i2400m_tlv_match() returns 0 only on an exact type+size match */
	if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE, sizeof(*ss))) {
		ss = container_of(tlv, typeof(*ss), hdr);
		d_printf(2, dev, "%s: system state TLV "
			 "found (0x%04x), state 0x%08x\n",
			 tag, I2400M_TLV_SYSTEM_STATE,
			 le32_to_cpu(ss->state));
		i2400m_report_state_hook ? 0 : 0;
		i2400m_report_tlv_system_state(i2400m, ss);
	}
	if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS, sizeof(*rfss))) {
		rfss = container_of(tlv, typeof(*rfss), hdr);
		d_printf(2, dev, "%s: RF status TLV "
			 "found (0x%04x), sw 0x%02x hw 0x%02x\n",
			 tag, I2400M_TLV_RF_STATUS,
			 le32_to_cpu(rfss->sw_rf_switch),
			 le32_to_cpu(rfss->hw_rf_switch));
		i2400m_report_tlv_rf_switches_status(i2400m, rfss);
	}
	if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS, sizeof(*ms))) {
		ms = container_of(tlv, typeof(*ms), hdr);
		d_printf(2, dev, "%s: Media Status TLV: %u\n",
			 tag, le32_to_cpu(ms->media_status));
		i2400m_report_tlv_media_status(i2400m, ms);
	}
}

/*
 * Parse a 'state report' and extract information
 *
 * @i2400m: device descriptor
 * @l3l4_hdr: pointer to message; it has been already validated for
 *     consistent size.
 * @size: size of the message (header + payload). The header length
 *     declaration is assumed to be congruent with @size (as in
 *     sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
 *
 * Walk over the TLVs in a report state and act on them.
*/ static void i2400m_report_state_hook(struct i2400m *i2400m, const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size, const char *tag) { struct device *dev = i2400m_dev(i2400m); const struct i2400m_tlv_hdr *tlv; size_t tlv_size = le16_to_cpu(l3l4_hdr->length); d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n", i2400m, l3l4_hdr, size, tag); tlv = NULL; while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl, tlv_size, tlv))) i2400m_report_state_parse_tlv(i2400m, tlv, tag); d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n", i2400m, l3l4_hdr, size, tag); } /* * i2400m_report_hook - (maybe) act on a report * * @i2400m: device descriptor * @l3l4_hdr: pointer to message; it has been already validated for * consistent size. * @size: size of the message (header + payload). The header length * declaration is assumed to be congruent with @size (as in * sizeof(*l3l4_hdr) + l3l4_hdr->length == size) * * Extract information we might need (like carrien on/off) from a * device report. */ void i2400m_report_hook(struct i2400m *i2400m, const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size) { struct device *dev = i2400m_dev(i2400m); unsigned msg_type; d_fnstart(3, dev, "(i2400m %p l3l4_hdr %p size %zu)\n", i2400m, l3l4_hdr, size); /* Chew on the message, we might need some information from * here */ msg_type = le16_to_cpu(l3l4_hdr->type); switch (msg_type) { case I2400M_MT_REPORT_STATE: /* carrier detection... */ i2400m_report_state_hook(i2400m, l3l4_hdr, size, "REPORT STATE"); break; /* If the device is ready for power save, then ask it to do * it. 
*/ case I2400M_MT_REPORT_POWERSAVE_READY: /* zzzzz */ if (l3l4_hdr->status == cpu_to_le16(I2400M_MS_DONE_OK)) { if (i2400m_power_save_disabled) d_printf(1, dev, "ready for powersave, " "not requesting (disabled by module " "parameter)\n"); else { d_printf(1, dev, "ready for powersave, " "requesting\n"); i2400m_cmd_enter_powersave(i2400m); } } break; } d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n", i2400m, l3l4_hdr, size); } /* * i2400m_msg_ack_hook - process cmd/set/get ack for internal status * * @i2400m: device descriptor * @l3l4_hdr: pointer to message; it has been already validated for * consistent size. * @size: size of the message * * Extract information we might need from acks to commands and act on * it. This is akin to i2400m_report_hook(). Note most of this * processing should be done in the function that calls the * command. This is here for some cases where it can't happen... */ static void i2400m_msg_ack_hook(struct i2400m *i2400m, const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size) { int result; struct device *dev = i2400m_dev(i2400m); unsigned ack_type, ack_status; char strerr[32]; /* Chew on the message, we might need some information from * here */ ack_type = le16_to_cpu(l3l4_hdr->type); ack_status = le16_to_cpu(l3l4_hdr->status); switch (ack_type) { case I2400M_MT_CMD_ENTER_POWERSAVE: /* This is just left here for the sake of example, as * the processing is done somewhere else. */ if (0) { result = i2400m_msg_check_status( l3l4_hdr, strerr, sizeof(strerr)); if (result >= 0) d_printf(1, dev, "ready for power save: %zd\n", size); } break; } } /* * i2400m_msg_size_check() - verify message size and header are congruent * * It is ok if the total message size is larger than the expected * size, as there can be padding. 
*/ int i2400m_msg_size_check(struct i2400m *i2400m, const struct i2400m_l3l4_hdr *l3l4_hdr, size_t msg_size) { int result; struct device *dev = i2400m_dev(i2400m); size_t expected_size; d_fnstart(4, dev, "(i2400m %p l3l4_hdr %p msg_size %zu)\n", i2400m, l3l4_hdr, msg_size); if (msg_size < sizeof(*l3l4_hdr)) { dev_err(dev, "bad size for message header " "(expected at least %zu, got %zu)\n", (size_t) sizeof(*l3l4_hdr), msg_size); result = -EIO; goto error_hdr_size; } expected_size = le16_to_cpu(l3l4_hdr->length) + sizeof(*l3l4_hdr); if (msg_size < expected_size) { dev_err(dev, "bad size for message code 0x%04x (expected %zu, " "got %zu)\n", le16_to_cpu(l3l4_hdr->type), expected_size, msg_size); result = -EIO; } else result = 0; error_hdr_size: d_fnend(4, dev, "(i2400m %p l3l4_hdr %p msg_size %zu) = %d\n", i2400m, l3l4_hdr, msg_size, result); return result; } /* * Cancel a wait for a command ACK * * @i2400m: device descriptor * @code: [negative] errno code to cancel with (don't use * -EINPROGRESS) * * If there is an ack already filled out, free it. */ void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code) { struct sk_buff *ack_skb; unsigned long flags; spin_lock_irqsave(&i2400m->rx_lock, flags); ack_skb = i2400m->ack_skb; if (ack_skb && !IS_ERR(ack_skb)) kfree_skb(ack_skb); i2400m->ack_skb = ERR_PTR(code); spin_unlock_irqrestore(&i2400m->rx_lock, flags); } /** * i2400m_msg_to_dev - Send a control message to the device and get a response * * @i2400m: device descriptor * * @msg_skb: an skb * * * @buf: pointer to the buffer containing the message to be sent; it * has to start with a &struct i2400M_l3l4_hdr and then * followed by the payload. Once this function returns, the * buffer can be reused. * * @buf_len: buffer size * * Returns: * * Pointer to skb containing the ack message. You need to check the * pointer with IS_ERR(), as it might be an error code. 
Error codes * could happen because: * * - the message wasn't formatted correctly * - couldn't send the message * - failed waiting for a response * - the ack message wasn't formatted correctly * * The returned skb has been allocated with wimax_msg_to_user_alloc(), * it contains the response in a netlink attribute and is ready to be * passed up to user space with wimax_msg_to_user_send(). To access * the payload and its length, use wimax_msg_{data,len}() on the skb. * * The skb has to be freed with kfree_skb() once done. * * Description: * * This function delivers a message/command to the device and waits * for an ack to be received. The format is described in * linux/wimax/i2400m.h. In summary, a command/get/set is followed by an * ack. * * This function will not check the ack status, that's left up to the * caller. Once done with the ack skb, it has to be kfree_skb()ed. * * The i2400m handles only one message at the same time, thus we need * the mutex to exclude other players. * * We write the message and then wait for an answer to come back. The * RX path intercepts control messages and handles them in * i2400m_rx_ctl(). Reports (notifications) are (maybe) processed * locally and then forwarded (as needed) to user space on the WiMAX * stack message pipe. Acks are saved and passed back to us through an * skb in i2400m->ack_skb which is ready to be given to generic * netlink if need be. 
*/ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m, const void *buf, size_t buf_len) { int result; struct device *dev = i2400m_dev(i2400m); const struct i2400m_l3l4_hdr *msg_l3l4_hdr; struct sk_buff *ack_skb; const struct i2400m_l3l4_hdr *ack_l3l4_hdr; size_t ack_len; int ack_timeout; unsigned msg_type; unsigned long flags; d_fnstart(3, dev, "(i2400m %p buf %p len %zu)\n", i2400m, buf, buf_len); rmb(); /* Make sure we see what i2400m_dev_reset_handle() */ if (i2400m->boot_mode) return ERR_PTR(-EL3RST); msg_l3l4_hdr = buf; /* Check msg & payload consistency */ result = i2400m_msg_size_check(i2400m, msg_l3l4_hdr, buf_len); if (result < 0) goto error_bad_msg; msg_type = le16_to_cpu(msg_l3l4_hdr->type); d_printf(1, dev, "CMD/GET/SET 0x%04x %zu bytes\n", msg_type, buf_len); d_dump(2, dev, buf, buf_len); /* Setup the completion, ack_skb ("we are waiting") and send * the message to the device */ mutex_lock(&i2400m->msg_mutex); spin_lock_irqsave(&i2400m->rx_lock, flags); i2400m->ack_skb = ERR_PTR(-EINPROGRESS); spin_unlock_irqrestore(&i2400m->rx_lock, flags); init_completion(&i2400m->msg_completion); result = i2400m_tx(i2400m, buf, buf_len, I2400M_PT_CTRL); if (result < 0) { dev_err(dev, "can't send message 0x%04x: %d\n", le16_to_cpu(msg_l3l4_hdr->type), result); goto error_tx; } /* Some commands take longer to execute because of crypto ops, * so we give them some more leeway on timeout */ switch (msg_type) { case I2400M_MT_GET_TLS_OPERATION_RESULT: case I2400M_MT_CMD_SEND_EAP_RESPONSE: ack_timeout = 5 * HZ; break; default: ack_timeout = HZ; } if (unlikely(i2400m->trace_msg_from_user)) wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL); /* The RX path in rx.c will put any response for this message * in i2400m->ack_skb and wake us up. If we cancel the wait, * we need to change the value of i2400m->ack_skb to something * not -EINPROGRESS so RX knows there is no one waiting. 
*/ result = wait_for_completion_interruptible_timeout( &i2400m->msg_completion, ack_timeout); if (result == 0) { dev_err(dev, "timeout waiting for reply to message 0x%04x\n", msg_type); result = -ETIMEDOUT; i2400m_msg_to_dev_cancel_wait(i2400m, result); goto error_wait_for_completion; } else if (result < 0) { dev_err(dev, "error waiting for reply to message 0x%04x: %d\n", msg_type, result); i2400m_msg_to_dev_cancel_wait(i2400m, result); goto error_wait_for_completion; } /* Pull out the ack data from i2400m->ack_skb -- see if it is * an error and act accordingly */ spin_lock_irqsave(&i2400m->rx_lock, flags); ack_skb = i2400m->ack_skb; if (IS_ERR(ack_skb)) result = PTR_ERR(ack_skb); else result = 0; i2400m->ack_skb = NULL; spin_unlock_irqrestore(&i2400m->rx_lock, flags); if (result < 0) goto error_ack_status; ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len); /* Check the ack and deliver it if it is ok */ if (unlikely(i2400m->trace_msg_from_user)) wimax_msg(&i2400m->wimax_dev, "echo", ack_l3l4_hdr, ack_len, GFP_KERNEL); result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len); if (result < 0) { dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n", msg_type, result); goto error_bad_ack_len; } if (msg_type != le16_to_cpu(ack_l3l4_hdr->type)) { dev_err(dev, "HW BUG? 
bad reply 0x%04x to message 0x%04x\n", le16_to_cpu(ack_l3l4_hdr->type), msg_type); result = -EIO; goto error_bad_ack_type; } i2400m_msg_ack_hook(i2400m, ack_l3l4_hdr, ack_len); mutex_unlock(&i2400m->msg_mutex); d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %p\n", i2400m, buf, buf_len, ack_skb); return ack_skb; error_bad_ack_type: error_bad_ack_len: kfree_skb(ack_skb); error_ack_status: error_wait_for_completion: error_tx: mutex_unlock(&i2400m->msg_mutex); error_bad_msg: d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %d\n", i2400m, buf, buf_len, result); return ERR_PTR(result); } /* * Definitions for the Enter Power Save command * * The Enter Power Save command requests the device to go into power * saving mode. The device will ack or nak the command depending on it * being ready for it. If it acks, we tell the USB subsystem to * * As well, the device might request to go into power saving mode by * sending a report (REPORT_POWERSAVE_READY), in which case, we issue * this command. The hookups in the RX coder allow */ enum { I2400M_WAKEUP_ENABLED = 0x01, I2400M_WAKEUP_DISABLED = 0x02, I2400M_TLV_TYPE_WAKEUP_MODE = 144, }; struct i2400m_cmd_enter_power_save { struct i2400m_l3l4_hdr hdr; struct i2400m_tlv_hdr tlv; __le32 val; } __packed; /* * Request entering power save * * This command is (mainly) executed when the device indicates that it * is ready to go into powersave mode via a REPORT_POWERSAVE_READY. 
*/ int i2400m_cmd_enter_powersave(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_cmd_enter_power_save *cmd; char strerr[32]; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_ENTER_POWERSAVE); cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr)); cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); cmd->tlv.type = cpu_to_le16(I2400M_TLV_TYPE_WAKEUP_MODE); cmd->tlv.length = cpu_to_le16(sizeof(cmd->val)); cmd->val = cpu_to_le32(I2400M_WAKEUP_ENABLED); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); result = PTR_ERR(ack_skb); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'Enter power save' command: %d\n", result); goto error_msg_to_dev; } result = i2400m_msg_check_status(wimax_msg_data(ack_skb), strerr, sizeof(strerr)); if (result == -EACCES) d_printf(1, dev, "Cannot enter power save mode\n"); else if (result < 0) dev_err(dev, "'Enter power save' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_CMD_ENTER_POWERSAVE, result, strerr); else d_printf(1, dev, "device ready to power save\n"); kfree_skb(ack_skb); error_msg_to_dev: kfree(cmd); error_alloc: return result; } EXPORT_SYMBOL_GPL(i2400m_cmd_enter_powersave); /* * Definitions for getting device information */ enum { I2400M_TLV_DETAILED_DEVICE_INFO = 140 }; /** * i2400m_get_device_info - Query the device for detailed device information * * @i2400m: device descriptor * * Returns: an skb whose skb->data points to a 'struct * i2400m_tlv_detailed_device_info'. When done, kfree_skb() it. The * skb is *guaranteed* to contain the whole TLV data structure. * * On error, IS_ERR(skb) is true and ERR_PTR(skb) is the error * code. 
*/ struct sk_buff *i2400m_get_device_info(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_l3l4_hdr *cmd; const struct i2400m_l3l4_hdr *ack; size_t ack_len; const struct i2400m_tlv_hdr *tlv; const struct i2400m_tlv_detailed_device_info *ddi; char strerr[32]; ack_skb = ERR_PTR(-ENOMEM); cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->type = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO); cmd->length = 0; cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'get device info' command: %ld\n", PTR_ERR(ack_skb)); goto error_msg_to_dev; } ack = wimax_msg_data_len(ack_skb, &ack_len); result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); if (result < 0) { dev_err(dev, "'get device info' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_GET_DEVICE_INFO, result, strerr); goto error_cmd_failed; } tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack), I2400M_TLV_DETAILED_DEVICE_INFO, sizeof(*ddi)); if (tlv == NULL) { dev_err(dev, "GET DEVICE INFO: " "detailed device info TLV not found (0x%04x)\n", I2400M_TLV_DETAILED_DEVICE_INFO); result = -EIO; goto error_no_tlv; } skb_pull(ack_skb, (void *) tlv - (void *) ack_skb->data); error_msg_to_dev: kfree(cmd); error_alloc: return ack_skb; error_no_tlv: error_cmd_failed: kfree_skb(ack_skb); kfree(cmd); return ERR_PTR(result); } /* Firmware interface versions we support */ enum { I2400M_HDIv_MAJOR = 9, I2400M_HDIv_MINOR = 1, I2400M_HDIv_MINOR_2 = 2, }; /** * i2400m_firmware_check - check firmware versions are compatible with * the driver * * @i2400m: device descriptor * * Returns: 0 if ok, < 0 errno code an error and a message in the * kernel log. * * Long function, but quite simple; first chunk launches the command * and double checks the reply for the right TLV. Then we process the * TLV (where the meat is). 
* * Once we process the TLV that gives us the firmware's interface * version, we encode it and save it in i2400m->fw_version for future * reference. */ int i2400m_firmware_check(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_l3l4_hdr *cmd; const struct i2400m_l3l4_hdr *ack; size_t ack_len; const struct i2400m_tlv_hdr *tlv; const struct i2400m_tlv_l4_message_versions *l4mv; char strerr[32]; unsigned major, minor, branch; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->type = cpu_to_le16(I2400M_MT_GET_LM_VERSION); cmd->length = 0; cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); if (IS_ERR(ack_skb)) { result = PTR_ERR(ack_skb); dev_err(dev, "Failed to issue 'get lm version' command: %-d\n", result); goto error_msg_to_dev; } ack = wimax_msg_data_len(ack_skb, &ack_len); result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); if (result < 0) { dev_err(dev, "'get lm version' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_GET_LM_VERSION, result, strerr); goto error_cmd_failed; } tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack), I2400M_TLV_L4_MESSAGE_VERSIONS, sizeof(*l4mv)); if (tlv == NULL) { dev_err(dev, "get lm version: TLV not found (0x%04x)\n", I2400M_TLV_L4_MESSAGE_VERSIONS); result = -EIO; goto error_no_tlv; } l4mv = container_of(tlv, typeof(*l4mv), hdr); major = le16_to_cpu(l4mv->major); minor = le16_to_cpu(l4mv->minor); branch = le16_to_cpu(l4mv->branch); result = -EINVAL; if (major != I2400M_HDIv_MAJOR) { dev_err(dev, "unsupported major fw version " "%u.%u.%u\n", major, minor, branch); goto error_bad_major; } result = 0; if (minor < I2400M_HDIv_MINOR_2 && minor > I2400M_HDIv_MINOR) dev_warn(dev, "untested minor fw version %u.%u.%u\n", major, minor, branch); /* Yes, we ignore the branch -- we don't have to track it */ i2400m->fw_version = major << 16 | minor; 
dev_info(dev, "firmware interface version %u.%u.%u\n", major, minor, branch); error_bad_major: error_no_tlv: error_cmd_failed: kfree_skb(ack_skb); error_msg_to_dev: kfree(cmd); error_alloc: return result; } /* * Send an DoExitIdle command to the device to ask it to go out of * basestation-idle mode. * * @i2400m: device descriptor * * This starts a renegotiation with the basestation that might involve * another crypto handshake with user space. * * Returns: 0 if ok, < 0 errno code on error. */ int i2400m_cmd_exit_idle(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_l3l4_hdr *cmd; char strerr[32]; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->type = cpu_to_le16(I2400M_MT_CMD_EXIT_IDLE); cmd->length = 0; cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); result = PTR_ERR(ack_skb); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'exit idle' command: %d\n", result); goto error_msg_to_dev; } result = i2400m_msg_check_status(wimax_msg_data(ack_skb), strerr, sizeof(strerr)); kfree_skb(ack_skb); error_msg_to_dev: kfree(cmd); error_alloc: return result; } /* * Query the device for its state, update the WiMAX stack's idea of it * * @i2400m: device descriptor * * Returns: 0 if ok, < 0 errno code on error. * * Executes a 'Get State' command and parses the returned * TLVs. * * Because this is almost identical to a 'Report State', we use * i2400m_report_state_hook() to parse the answer. This will set the * carrier state, as well as the RF Kill switches state. 
*/ static int i2400m_cmd_get_state(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_l3l4_hdr *cmd; const struct i2400m_l3l4_hdr *ack; size_t ack_len; char strerr[32]; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->type = cpu_to_le16(I2400M_MT_GET_STATE); cmd->length = 0; cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'get state' command: %ld\n", PTR_ERR(ack_skb)); result = PTR_ERR(ack_skb); goto error_msg_to_dev; } ack = wimax_msg_data_len(ack_skb, &ack_len); result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); if (result < 0) { dev_err(dev, "'get state' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_GET_STATE, result, strerr); goto error_cmd_failed; } i2400m_report_state_hook(i2400m, ack, ack_len - sizeof(*ack), "GET STATE"); result = 0; kfree_skb(ack_skb); error_cmd_failed: error_msg_to_dev: kfree(cmd); error_alloc: return result; } /** * Set basic configuration settings * * @i2400m: device descriptor * @args: array of pointers to the TLV headers to send for * configuration (each followed by its payload). * TLV headers and payloads must be properly initialized, with the * right endianess (LE). * @arg_size: number of pointers in the @args array */ static int i2400m_set_init_config(struct i2400m *i2400m, const struct i2400m_tlv_hdr **arg, size_t args) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_l3l4_hdr *cmd; char strerr[32]; unsigned argc, argsize, tlv_size; const struct i2400m_tlv_hdr *tlv_hdr; void *buf, *itr; d_fnstart(3, dev, "(i2400m %p arg %p args %zu)\n", i2400m, arg, args); result = 0; if (args == 0) goto none; /* Compute the size of all the TLVs, so we can alloc a * contiguous command block to copy them. 
*/ argsize = 0; for (argc = 0; argc < args; argc++) { tlv_hdr = arg[argc]; argsize += sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length); } WARN_ON(argc >= 9); /* As per hw spec */ /* Alloc the space for the command and TLVs*/ result = -ENOMEM; buf = kzalloc(sizeof(*cmd) + argsize, GFP_KERNEL); if (buf == NULL) goto error_alloc; cmd = buf; cmd->type = cpu_to_le16(I2400M_MT_SET_INIT_CONFIG); cmd->length = cpu_to_le16(argsize); cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); /* Copy the TLVs */ itr = buf + sizeof(*cmd); for (argc = 0; argc < args; argc++) { tlv_hdr = arg[argc]; tlv_size = sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length); memcpy(itr, tlv_hdr, tlv_size); itr += tlv_size; } /* Send the message! */ ack_skb = i2400m_msg_to_dev(i2400m, buf, sizeof(*cmd) + argsize); result = PTR_ERR(ack_skb); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'init config' command: %d\n", result); goto error_msg_to_dev; } result = i2400m_msg_check_status(wimax_msg_data(ack_skb), strerr, sizeof(strerr)); if (result < 0) dev_err(dev, "'init config' (0x%04x) command failed: %d - %s\n", I2400M_MT_SET_INIT_CONFIG, result, strerr); kfree_skb(ack_skb); error_msg_to_dev: kfree(buf); error_alloc: none: d_fnend(3, dev, "(i2400m %p arg %p args %zu) = %d\n", i2400m, arg, args, result); return result; } /** * i2400m_set_idle_timeout - Set the device's idle mode timeout * * @i2400m: i2400m device descriptor * * @msecs: milliseconds for the timeout to enter idle mode. Between * 100 to 300000 (5m); 0 to disable. In increments of 100. * * After this @msecs of the link being idle (no data being sent or * received), the device will negotiate with the basestation entering * idle mode for saving power. The connection is maintained, but * getting out of it (done in tx.c) will require some negotiation, * possible crypto re-handshake and a possible DHCP re-lease. * * Only available if fw_version >= 0x00090002. * * Returns: 0 if ok, < 0 errno code on error. 
*/ int i2400m_set_idle_timeout(struct i2400m *i2400m, unsigned msecs) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct { struct i2400m_l3l4_hdr hdr; struct i2400m_tlv_config_idle_timeout cit; } *cmd; const struct i2400m_l3l4_hdr *ack; size_t ack_len; char strerr[32]; result = -ENOSYS; if (i2400m_le_v1_3(i2400m)) goto error_alloc; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->hdr.type = cpu_to_le16(I2400M_MT_GET_STATE); cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr)); cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); cmd->cit.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT); cmd->cit.hdr.length = cpu_to_le16(sizeof(cmd->cit.timeout)); cmd->cit.timeout = cpu_to_le32(msecs); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'set idle timeout' command: " "%ld\n", PTR_ERR(ack_skb)); result = PTR_ERR(ack_skb); goto error_msg_to_dev; } ack = wimax_msg_data_len(ack_skb, &ack_len); result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); if (result < 0) { dev_err(dev, "'set idle timeout' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_GET_STATE, result, strerr); goto error_cmd_failed; } result = 0; kfree_skb(ack_skb); error_cmd_failed: error_msg_to_dev: kfree(cmd); error_alloc: return result; } /** * i2400m_dev_initialize - Initialize the device once communications are ready * * @i2400m: device descriptor * * Returns: 0 if ok, < 0 errno code on error. * * Configures the device to work the way we like it. * * At the point of this call, the device is registered with the WiMAX * and netdev stacks, firmware is uploaded and we can talk to the * device normally. 
*/ int i2400m_dev_initialize(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct i2400m_tlv_config_idle_parameters idle_params; struct i2400m_tlv_config_idle_timeout idle_timeout; struct i2400m_tlv_config_d2h_data_format df; struct i2400m_tlv_config_dl_host_reorder dlhr; const struct i2400m_tlv_hdr *args[9]; unsigned argc = 0; d_fnstart(3, dev, "(i2400m %p)\n", i2400m); if (i2400m_passive_mode) goto out_passive; /* Disable idle mode? (enabled by default) */ if (i2400m_idle_mode_disabled) { if (i2400m_le_v1_3(i2400m)) { idle_params.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_IDLE_PARAMETERS); idle_params.hdr.length = cpu_to_le16( sizeof(idle_params) - sizeof(idle_params.hdr)); idle_params.idle_timeout = 0; idle_params.idle_paging_interval = 0; args[argc++] = &idle_params.hdr; } else { idle_timeout.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT); idle_timeout.hdr.length = cpu_to_le16( sizeof(idle_timeout) - sizeof(idle_timeout.hdr)); idle_timeout.timeout = 0; args[argc++] = &idle_timeout.hdr; } } if (i2400m_ge_v1_4(i2400m)) { /* Enable extended RX data format? */ df.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_D2H_DATA_FORMAT); df.hdr.length = cpu_to_le16( sizeof(df) - sizeof(df.hdr)); df.format = 1; args[argc++] = &df.hdr; /* Enable RX data reordering? * (switch flipped in rx.c:i2400m_rx_setup() after fw upload) */ if (i2400m->rx_reorder) { dlhr.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_DL_HOST_REORDER); dlhr.hdr.length = cpu_to_le16( sizeof(dlhr) - sizeof(dlhr.hdr)); dlhr.reorder = 1; args[argc++] = &dlhr.hdr; } } result = i2400m_set_init_config(i2400m, args, argc); if (result < 0) goto error; out_passive: /* * Update state: Here it just calls a get state; parsing the * result (System State TLV and RF Status TLV [done in the rx * path hooks]) will set the hardware and software RF-Kill * status. 
*/ result = i2400m_cmd_get_state(i2400m); error: if (result < 0) dev_err(dev, "failed to initialize the device: %d\n", result); d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); return result; } /** * i2400m_dev_shutdown - Shutdown a running device * * @i2400m: device descriptor * * Release resources acquired during the running of the device; in * theory, should also tell the device to go to sleep, switch off the * radio, all that, but at this point, in most cases (driver * disconnection, reset handling) we can't even talk to the device. */ void i2400m_dev_shutdown(struct i2400m *i2400m) { struct device *dev = i2400m_dev(i2400m); d_fnstart(3, dev, "(i2400m %p)\n", i2400m); d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); }
gpl-2.0
AOSParadox/kernel_msm
drivers/infiniband/hw/ipath/ipath_verbs.c
5360
63414
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <rdma/ib_mad.h> #include <rdma/ib_user_verbs.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/utsname.h> #include <linux/rculist.h> #include "ipath_kernel.h" #include "ipath_verbs.h" #include "ipath_common.h" static unsigned int ib_ipath_qp_table_size = 251; module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); MODULE_PARM_DESC(qp_table_size, "QP table size"); unsigned int ib_ipath_lkey_table_size = 12; module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint, S_IRUGO); MODULE_PARM_DESC(lkey_table_size, "LKEY table size in bits (2^n, 1 <= n <= 23)"); static unsigned int ib_ipath_max_pds = 0xFFFF; module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_pds, "Maximum number of protection domains to support"); static unsigned int ib_ipath_max_ahs = 0xFFFF; module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); unsigned int ib_ipath_max_cqes = 0x2FFFF; module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_cqes, "Maximum number of completion queue entries to support"); unsigned int ib_ipath_max_cqs = 0x1FFFF; module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); unsigned int ib_ipath_max_qp_wrs = 0x3FFF; module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); unsigned int ib_ipath_max_qps = 16384; module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); unsigned int ib_ipath_max_sges = 0x60; module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); unsigned int 
ib_ipath_max_mcast_grps = 16384; module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_mcast_grps, "Maximum number of multicast groups to support"); unsigned int ib_ipath_max_mcast_qp_attached = 16; module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_mcast_qp_attached, "Maximum number of attached QPs to support"); unsigned int ib_ipath_max_srqs = 1024; module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); unsigned int ib_ipath_max_srq_sges = 128; module_param_named(max_srq_sges, ib_ipath_max_srq_sges, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); unsigned int ib_ipath_max_srq_wrs = 0x1FFFF; module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); static unsigned int ib_ipath_disable_sma; module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(disable_sma, "Disable the SMA"); /* * Note that it is OK to post send work requests in the SQE and ERR * states; ipath_do_send() will process them and generate error * completions as per IB 1.2 C10-96. 
*/ const int ib_ipath_state_ops[IB_QPS_ERR + 1] = { [IB_QPS_RESET] = 0, [IB_QPS_INIT] = IPATH_POST_RECV_OK, [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK, [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK | IPATH_PROCESS_NEXT_SEND_OK, [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK, [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | IPATH_POST_SEND_OK | IPATH_FLUSH_SEND, [IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV | IPATH_POST_SEND_OK | IPATH_FLUSH_SEND, }; struct ipath_ucontext { struct ib_ucontext ibucontext; }; static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext *ibucontext) { return container_of(ibucontext, struct ipath_ucontext, ibucontext); } /* * Translate ib_wr_opcode into ib_wc_opcode. */ const enum ib_wc_opcode ib_ipath_wc_opcode[] = { [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, [IB_WR_SEND] = IB_WC_SEND, [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD }; /* * System image GUID. 
*/
static __be64 sys_image_guid;

/**
 * ipath_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 *
 * Copies @length bytes from @data into the memory described by the
 * scatter/gather state, advancing @ss as it goes.
 */
void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        while (length) {
                /* Copy no more than remains in the current SGE. */
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                memcpy(sge->vaddr, data, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        /* Finished this SGE; advance to the next one. */
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        /* Crossed an MR segment boundary; step to the
                         * next segment (and map, if necessary). */
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                data += len;
                length -= len;
        }
}

/**
 * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 *
 * Advances @ss by @length bytes without copying anything.
 */
void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the ipath_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
*/
static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
{
        /* Work on local copies so the caller's state is untouched. */
        struct ipath_sge *sg_list = ss->sg_list;
        struct ipath_sge sge = ss->sge;
        u8 num_sge = ss->num_sge;
        u32 ndesc = 1;  /* count the header */

        while (length) {
                u32 len = sge.length;

                if (len > length)
                        len = length;
                if (len > sge.sge_length)
                        len = sge.sge_length;
                BUG_ON(len == 0);
                /* Unaligned address, or unaligned non-final chunk:
                 * cannot be DMAed directly. */
                if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
                    (len != length && (len & (sizeof(u32) - 1)))) {
                        ndesc = 0;
                        break;
                }
                ndesc++;
                sge.vaddr += len;
                sge.length -= len;
                sge.sge_length -= len;
                if (sge.sge_length == 0) {
                        if (--num_sge)
                                sge = *sg_list++;
                } else if (sge.length == 0 && sge.mr != NULL) {
                        if (++sge.n >= IPATH_SEGSZ) {
                                if (++sge.m >= sge.mr->mapsz)
                                        break;
                                sge.n = 0;
                        }
                        sge.vaddr =
                                sge.mr->map[sge.m]->segs[sge.n].vaddr;
                        sge.length =
                                sge.mr->map[sge.m]->segs[sge.n].length;
                }
                length -= len;
        }
        return ndesc;
}

/*
 * Copy from the SGEs to the data buffer.
 */
static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
                                u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                memcpy(data, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                data += len;
                length -= len;
        }
}

/**
 * ipath_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 *
 * Validates the WR against the QP state/type, copies it into the send
 * work queue, and advances the head index.  Returns 0 on success,
 * -ENETDOWN if the link is down, -ENOMEM if the queue is full, or
 * -EINVAL for a malformed request.  Called with interrupts enabled;
 * takes qp->s_lock.
 */
static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
        struct ipath_swqe *wqe;
        u32 next;
        int i;
        int j;
        int acc;
        int ret;
        unsigned long flags;
        struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;

        spin_lock_irqsave(&qp->s_lock, flags);

        /* SMI packets may be posted even with the link down. */
        if (qp->ibqp.qp_type != IB_QPT_SMI &&
            !(dd->ipath_flags & IPATH_LINKACTIVE)) {
                ret = -ENETDOWN;
                goto bail;
        }

        /* Check that state is OK to post send. */
        if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
                goto bail_inval;

        /* IB spec says that num_sge == 0 is OK. */
        if (wr->num_sge > qp->s_max_sge)
                goto bail_inval;

        /*
         * Don't allow RDMA reads or atomic operations on UC or
         * undefined operations.
         * Make sure buffer is large enough to hold the result for atomics.
         */
        if (qp->ibqp.qp_type == IB_QPT_UC) {
                if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
                        goto bail_inval;
        } else if (qp->ibqp.qp_type == IB_QPT_UD) {
                /* Check UD opcode */
                if (wr->opcode != IB_WR_SEND &&
                    wr->opcode != IB_WR_SEND_WITH_IMM)
                        goto bail_inval;
                /* Check UD destination address PD */
                if (qp->ibqp.pd != wr->wr.ud.ah->pd)
                        goto bail_inval;
        } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
                goto bail_inval;
        else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
                   (wr->num_sge == 0 ||
                    wr->sg_list[0].length < sizeof(u64) ||
                    wr->sg_list[0].addr & (sizeof(u64) - 1)))
                goto bail_inval;
        else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
                goto bail_inval;

        next = qp->s_head + 1;
        if (next >= qp->s_size)
                next = 0;
        if (next == qp->s_last) {
                ret = -ENOMEM;
                goto bail;
        }

        wqe = get_swqe_ptr(qp, qp->s_head);
        wqe->wr = *wr;
        wqe->length = 0;
        if (wr->num_sge) {
                acc = wr->opcode >= IB_WR_RDMA_READ ?
                        IB_ACCESS_LOCAL_WRITE : 0;
                /* Validate each SGE's lkey, dropping zero-length SGEs. */
                for (i = 0, j = 0; i < wr->num_sge; i++) {
                        u32 length = wr->sg_list[i].length;
                        int ok;

                        if (length == 0)
                                continue;
                        ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
                                           &wr->sg_list[i], acc);
                        if (!ok)
                                goto bail_inval;
                        wqe->length += length;
                        j++;
                }
                wqe->wr.num_sge = j;
        }
        if (qp->ibqp.qp_type == IB_QPT_UC ||
            qp->ibqp.qp_type == IB_QPT_RC) {
                if (wqe->length > 0x80000000U)
                        goto bail_inval;
        } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
                goto bail_inval;
        wqe->ssn = qp->s_ssn++;
        qp->s_head = next;

        ret = 0;
        goto bail;

bail_inval:
        ret = -EINVAL;
bail:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}

/**
 * ipath_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                           struct ib_send_wr **bad_wr)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        int err = 0;

        for (; wr; wr = wr->next) {
                err = ipath_post_one_send(qp, wr);
                if (err) {
                        *bad_wr = wr;
                        goto bail;
                }
        }

        /* Try to do the send work in the caller's context. */
        ipath_do_send((unsigned long) qp);

bail:
        return err;
}

/**
 * ipath_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_rwq *wq = qp->r_rq.wq;
        unsigned long flags;
        int ret;

        /* Check that state is OK to post receive.
*/
        if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
                *bad_wr = wr;
                ret = -EINVAL;
                goto bail;
        }

        for (; wr; wr = wr->next) {
                struct ipath_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto bail;
                }

                spin_lock_irqsave(&qp->r_rq.lock, flags);
                next = wq->head + 1;
                if (next >= qp->r_rq.size)
                        next = 0;
                if (next == wq->tail) {
                        /* Receive queue is full. */
                        spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                /* Make sure queue entry is written before the head index. */
                smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}

/**
 * ipath_qp_rcv - processing an incoming packet on a QP
 * @dev: the device the packet came on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void ipath_qp_rcv(struct ipath_ibdev *dev,
                         struct ipath_ib_header *hdr, int has_grh,
                         void *data, u32 tlen, struct ipath_qp *qp)
{
        /* Check for valid receive state.
*/
        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
                dev->n_pkt_drops++;
                return;
        }

        /* Dispatch on QP transport type. */
        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (ib_ipath_disable_sma)
                        break;
                /* FALLTHROUGH */
        case IB_QPT_UD:
                ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
                break;

        case IB_QPT_RC:
                ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
                break;

        case IB_QPT_UC:
                ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
                break;

        default:
                break;
        }
}

/**
 * ipath_ib_rcv - process an incoming packet
 * @arg: the device pointer
 * @rhdr: the header of the packet
 * @data: the packet data
 * @tlen: the packet length
 *
 * This is called from ipath_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
                  u32 tlen)
{
        struct ipath_ib_header *hdr = rhdr;
        struct ipath_other_headers *ohdr;
        struct ipath_qp *qp;
        u32 qp_num;
        int lnh;
        u8 opcode;
        u16 lid;

        if (unlikely(dev == NULL))
                goto bail;

        if (unlikely(tlen < 24)) {      /* LRH+BTH+CRC */
                dev->rcv_errors++;
                goto bail;
        }

        /* Check for a valid destination LID (see ch. 7.11.1). */
        lid = be16_to_cpu(hdr->lrh[1]);
        if (lid < IPATH_MULTICAST_LID_BASE) {
                /* Mask off the LMC bits before comparing to our LID. */
                lid &= ~((1 << dev->dd->ipath_lmc) - 1);
                if (unlikely(lid != dev->dd->ipath_lid)) {
                        dev->rcv_errors++;
                        goto bail;
                }
        }

        /* Check for GRH */
        lnh = be16_to_cpu(hdr->lrh[0]) & 3;
        if (lnh == IPATH_LRH_BTH)
                ohdr = &hdr->u.oth;
        else if (lnh == IPATH_LRH_GRH)
                ohdr = &hdr->u.l.oth;
        else {
                dev->rcv_errors++;
                goto bail;
        }

        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        dev->opstats[opcode].n_bytes += tlen;
        dev->opstats[opcode].n_packets++;

        /* Get the destination QP number.
*/
        qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
        if (qp_num == IPATH_MULTICAST_QPN) {
                struct ipath_mcast *mcast;
                struct ipath_mcast_qp *p;

                /* Multicast packets must carry a GRH. */
                if (lnh != IPATH_LRH_GRH) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
                mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
                if (mcast == NULL) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
                dev->n_multicast_rcv++;
                /* Deliver a copy of the packet to every attached QP. */
                list_for_each_entry_rcu(p, &mcast->qp_list, list)
                        ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
                /*
                 * Notify ipath_multicast_detach() if it is waiting for us
                 * to finish.
                 */
                if (atomic_dec_return(&mcast->refcount) <= 1)
                        wake_up(&mcast->wait);
        } else {
                qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
                if (qp) {
                        dev->n_unicast_rcv++;
                        ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
                                     tlen, qp);
                        /*
                         * Notify ipath_destroy_qp() if it is waiting
                         * for us to finish.
                         */
                        if (atomic_dec_and_test(&qp->refcount))
                                wake_up(&qp->wait);
                } else
                        dev->n_pkt_drops++;
        }

bail:;
}

/**
 * ipath_ib_timer - verbs timer
 * @arg: the device pointer
 *
 * This is called from ipath_do_rcv_timer() at interrupt level to check for
 * QPs which need retransmits and to collect performance numbers.
 */
static void ipath_ib_timer(struct ipath_ibdev *dev)
{
        struct ipath_qp *resend = NULL;
        struct ipath_qp *rnr = NULL;
        struct list_head *last;
        struct ipath_qp *qp;
        unsigned long flags;

        if (dev == NULL)
                return;

        spin_lock_irqsave(&dev->pending_lock, flags);
        /* Start filling the next pending queue. */
        if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
                dev->pending_index = 0;
        /* Save any requests still in the new queue, they have timed out.
*/
        last = &dev->pending[dev->pending_index];
        while (!list_empty(last)) {
                /* Collect timed-out QPs onto a private resend list. */
                qp = list_entry(last->next, struct ipath_qp, timerwait);
                list_del_init(&qp->timerwait);
                qp->timer_next = resend;
                resend = qp;
                atomic_inc(&qp->refcount);
        }
        last = &dev->rnrwait;
        if (!list_empty(last)) {
                qp = list_entry(last->next, struct ipath_qp, timerwait);
                if (--qp->s_rnr_timeout == 0) {
                        /* Collect every QP whose RNR timer has expired. */
                        do {
                                list_del_init(&qp->timerwait);
                                qp->timer_next = rnr;
                                rnr = qp;
                                atomic_inc(&qp->refcount);
                                if (list_empty(last))
                                        break;
                                qp = list_entry(last->next, struct ipath_qp,
                                                timerwait);
                        } while (qp->s_rnr_timeout == 0);
                }
        }
        /*
         * We should only be in the started state if pma_sample_start != 0
         */
        if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
            --dev->pma_sample_start == 0) {
                dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
                ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
                                        &dev->ipath_rword,
                                        &dev->ipath_spkts,
                                        &dev->ipath_rpkts,
                                        &dev->ipath_xmit_wait);
        }
        if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
                if (dev->pma_sample_interval == 0) {
                        u64 ta, tb, tc, td, te;

                        /* Sample window finished; record the deltas. */
                        dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
                        ipath_snapshot_counters(dev->dd, &ta, &tb,
                                                &tc, &td, &te);

                        dev->ipath_sword = ta - dev->ipath_sword;
                        dev->ipath_rword = tb - dev->ipath_rword;
                        dev->ipath_spkts = tc - dev->ipath_spkts;
                        dev->ipath_rpkts = td - dev->ipath_rpkts;
                        dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
                } else
                        dev->pma_sample_interval--;
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        /* XXX What if timer fires again while this is running? */
        while (resend != NULL) {
                qp = resend;
                resend = qp->timer_next;

                spin_lock_irqsave(&qp->s_lock, flags);
                if (qp->s_last != qp->s_tail &&
                    ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
                        dev->n_timeouts++;
                        ipath_restart_rc(qp, qp->s_last_psn + 1);
                }
                spin_unlock_irqrestore(&qp->s_lock, flags);

                /* Notify ipath_destroy_qp() if it is waiting.
*/
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
        while (rnr != NULL) {
                qp = rnr;
                rnr = qp->timer_next;

                spin_lock_irqsave(&qp->s_lock, flags);
                if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
                        ipath_schedule_send(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);

                /* Notify ipath_destroy_qp() if it is waiting. */
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
}

/* Advance the SGE state by @length bytes (no copying). */
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        sge->vaddr += length;
        sge->length -= length;
        sge->sge_length -= length;
        if (sge->sge_length == 0) {
                if (--ss->num_sge)
                        *sge = *ss->sg_list++;
        } else if (sge->length == 0 && sge->mr != NULL) {
                if (++sge->n >= IPATH_SEGSZ) {
                        if (++sge->m >= sge->mr->mapsz)
                                return;
                        sge->n = 0;
                }
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
        }
}

/*
 * Byte-order dependent helpers used by copy_io() to shift partial
 * words into position when assembling aligned 32-bit PIO writes.
 */
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
        data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
        data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
        return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
        data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
        data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
        return data;
}
#endif

/*
 * Copy SGE data into the PIO buffer a 32-bit word at a time, handling
 * unaligned source addresses and partial-word carry-over between SGEs.
 * The final word is held back in 'last' so the caller-visible trigger
 * write can be ordered with ipath_flush_wc() when @flush_wc is set.
 */
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
                    u32 length, unsigned flush_wc)
{
        u32 extra = 0;
        u32 data = 0;
        u32 last;

        while (1) {
                u32 len = ss->sge.length;
                u32 off;

                if (len > length)
                        len = length;
                if (len > ss->sge.sge_length)
                        len = ss->sge.sge_length;
                BUG_ON(len == 0);
                /* If the source address is not aligned, try to align it.
*/
                off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
                if (off) {
                        u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
                                            ~(sizeof(u32) - 1));
                        u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
                        u32 y;

                        y = sizeof(u32) - off;
                        if (len > y)
                                len = y;
                        if (len + extra >= sizeof(u32)) {
                                data |= set_upper_bits(v, extra *
                                                       BITS_PER_BYTE);
                                len = sizeof(u32) - extra;
                                if (len == length) {
                                        last = data;
                                        break;
                                }
                                __raw_writel(data, piobuf);
                                piobuf++;
                                extra = 0;
                                data = 0;
                        } else {
                                /* Clear unused upper bytes */
                                data |= clear_upper_bytes(v, len, extra);
                                if (len == length) {
                                        last = data;
                                        break;
                                }
                                extra += len;
                        }
                } else if (extra) {
                        /* Source address is aligned. */
                        u32 *addr = (u32 *) ss->sge.vaddr;
                        int shift = extra * BITS_PER_BYTE;
                        int ushift = 32 - shift;
                        u32 l = len;

                        while (l >= sizeof(u32)) {
                                u32 v = *addr;

                                data |= set_upper_bits(v, shift);
                                __raw_writel(data, piobuf);
                                data = get_upper_bits(v, ushift);
                                piobuf++;
                                addr++;
                                l -= sizeof(u32);
                        }
                        /*
                         * We still have 'extra' number of bytes leftover.
                         */
                        if (l) {
                                u32 v = *addr;

                                if (l + extra >= sizeof(u32)) {
                                        data |= set_upper_bits(v, shift);
                                        len -= l + extra - sizeof(u32);
                                        if (len == length) {
                                                last = data;
                                                break;
                                        }
                                        __raw_writel(data, piobuf);
                                        piobuf++;
                                        extra = 0;
                                        data = 0;
                                } else {
                                        /* Clear unused upper bytes */
                                        data |= clear_upper_bytes(v, l,
                                                                  extra);
                                        if (len == length) {
                                                last = data;
                                                break;
                                        }
                                        extra += l;
                                }
                        } else if (len == length) {
                                last = data;
                                break;
                        }
                } else if (len == length) {
                        u32 w;

                        /*
                         * Need to round up for the last dword in the
                         * packet.
                         */
                        w = (len + 3) >> 2;
                        __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
                        piobuf += w - 1;
                        last = ((u32 *) ss->sge.vaddr)[w - 1];
                        break;
                } else {
                        u32 w = len >> 2;

                        __iowrite32_copy(piobuf, ss->sge.vaddr, w);
                        piobuf += w;

                        extra = len & (sizeof(u32) - 1);
                        if (extra) {
                                u32 v = ((u32 *) ss->sge.vaddr)[w];

                                /* Clear unused upper bytes */
                                data = clear_upper_bytes(v, extra, 0);
                        }
                }
                update_sge(ss, len);
                length -= len;
        }
        /* Update address before sending packet.
*/
        update_sge(ss, length);
        if (flush_wc) {
                /* must flush early everything before trigger word */
                ipath_flush_wc();
                __raw_writel(last, piobuf);
                /* be sure trigger word is written */
                ipath_flush_wc();
        } else
                __raw_writel(last, piobuf);
}

/*
 * Convert IB rate to delay multiplier.
 */
unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return 8;
        case IB_RATE_5_GBPS:   return 4;
        case IB_RATE_10_GBPS:  return 2;
        case IB_RATE_20_GBPS:  return 1;
        default:               return 0;
        }
}

/*
 * Convert delay multiplier to IB rate
 */
static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
{
        switch (mult) {
        case 8:  return IB_RATE_2_5_GBPS;
        case 4:  return IB_RATE_5_GBPS;
        case 2:  return IB_RATE_10_GBPS;
        case 1:  return IB_RATE_20_GBPS;
        default: return IB_RATE_PORT_CURRENT;
        }
}

/* Take a tx request off the device's free list; NULL if exhausted. */
static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
{
        struct ipath_verbs_txreq *tx = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->pending_lock, flags);
        if (!list_empty(&dev->txreq_free)) {
                struct list_head *l = dev->txreq_free.next;

                list_del(l);
                tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);
        return tx;
}

/* Return a tx request to the device's free list. */
static inline void put_txreq(struct ipath_ibdev *dev,
                             struct ipath_verbs_txreq *tx)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->pending_lock, flags);
        list_add(&tx->txreq.list, &dev->txreq_free);
        spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/*
 * SDMA completion callback: generate the send completion, restart the
 * send engine if it was waiting on this DMA, and release the txreq and
 * QP references taken in ipath_verbs_send_dma().
 */
static void sdma_complete(void *cookie, int status)
{
        struct ipath_verbs_txreq *tx = cookie;
        struct ipath_qp *qp = tx->qp;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        unsigned long flags;
        enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
                IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;

        if (atomic_dec_and_test(&qp->s_dma_busy)) {
                spin_lock_irqsave(&qp->s_lock, flags);
                if (tx->wqe)
                        ipath_send_complete(qp, tx->wqe, ibs);
                if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
                     qp->s_last != qp->s_head) ||
                    (qp->s_flags & IPATH_S_WAIT_DMA))
                        ipath_schedule_send(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);
                wake_up(&qp->wait_dma);
        } else if (tx->wqe) {
                spin_lock_irqsave(&qp->s_lock, flags);
                ipath_send_complete(qp, tx->wqe, ibs);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        }

        if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
                kfree(tx->txreq.map_addr);
        put_txreq(dev, tx);

        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

/* Drop the s_dma_busy count; kick the send engine if it was waiting. */
static void decrement_dma_busy(struct ipath_qp *qp)
{
        unsigned long flags;

        if (atomic_dec_and_test(&qp->s_dma_busy)) {
                spin_lock_irqsave(&qp->s_lock, flags);
                if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
                     qp->s_last != qp->s_head) ||
                    (qp->s_flags & IPATH_S_WAIT_DMA))
                        ipath_schedule_send(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);
                wake_up(&qp->wait_dma);
        }
}

/*
 * Compute the number of clock cycles of delay before sending the next packet.
 * The multipliers reflect the number of clocks for the fastest rate so
 * one tick at 4xDDR is 8 ticks at 1xSDR.
 * If the destination port will take longer to receive a packet than
 * the outgoing link can send it, we need to delay sending the next packet
 * by the difference in time it takes the receiver to receive and the sender
 * to send this packet.
 * Note that this delay is always correct for UC and RC but not always
 * optimal for UD. For UD, the destination HCA can be different for each
 * packet, in which case, we could send packets to a different destination
 * while "waiting" for the delay. The overhead for doing this without
 * HW support is more than just paying the cost of delaying some packets
 * unnecessarily.
 */
static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
{
        return (rcv_mult > snd_mult) ?
                (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}

/*
 * Queue a verbs packet for send DMA.  Resends a previously constructed
 * packet first if one is pending; otherwise builds a new txreq, either
 * DMAing the SGEs directly (when aligned) or bounce-buffering the whole
 * packet.  Takes a QP reference that sdma_complete() releases.
 */
static int ipath_verbs_send_dma(struct ipath_qp *qp,
                                struct ipath_ib_header *hdr, u32 hdrwords,
                                struct ipath_sge_state *ss, u32 len,
                                u32 plen, u32 dwords)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_devdata *dd = dev->dd;
        struct ipath_verbs_txreq *tx;
        u32 *piobuf;
        u32 control;
        u32 ndesc;
        int ret;

        tx = qp->s_tx;
        if (tx) {
                qp->s_tx = NULL;
                /* resend previously constructed packet */
                atomic_inc(&qp->s_dma_busy);
                ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
                if (ret) {
                        qp->s_tx = tx;
                        decrement_dma_busy(qp);
                }
                goto bail;
        }

        tx = get_txreq(dev);
        if (!tx) {
                ret = -EBUSY;
                goto bail;
        }

        /*
         * Get the saved delay count we computed for the previous packet
         * and save the delay count for this packet to be used next time
         * we get here.
         */
        control = qp->s_pkt_delay;
        qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);

        tx->qp = qp;
        atomic_inc(&qp->refcount);
        tx->wqe = qp->s_wqe;
        tx->txreq.callback = sdma_complete;
        tx->txreq.callback_cookie = tx;
        tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
                IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
        if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
                tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;

        /* VL15 packets bypass credit check */
        if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
                control |= 1ULL << 31;
                tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
        }

        if (len) {
                /*
                 * Don't try to DMA if it takes more descriptors than
                 * the queue holds.
*/
                ndesc = ipath_count_sge(ss, len);
                if (ndesc >= dd->ipath_sdma_descq_cnt)
                        ndesc = 0;
        } else
                ndesc = 1;
        if (ndesc) {
                /* Aligned payload: DMA header from the txreq, data
                 * directly from the SGEs. */
                tx->hdr.pbc[0] = cpu_to_le32(plen);
                tx->hdr.pbc[1] = cpu_to_le32(control);
                memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
                tx->txreq.sg_count = ndesc;
                tx->map_len = (hdrwords + 2) << 2;
                tx->txreq.map_addr = &tx->hdr;
                atomic_inc(&qp->s_dma_busy);
                ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
                if (ret) {
                        /* save ss and length in dwords */
                        tx->ss = ss;
                        tx->len = dwords;
                        qp->s_tx = tx;
                        decrement_dma_busy(qp);
                }
                goto bail;
        }

        /* Allocate a buffer and copy the header and payload to it. */
        tx->map_len = (plen + 1) << 2;
        piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
        if (unlikely(piobuf == NULL)) {
                ret = -EBUSY;
                goto err_tx;
        }
        tx->txreq.map_addr = piobuf;
        tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
        tx->txreq.sg_count = 1;

        *piobuf++ = (__force u32) cpu_to_le32(plen);
        *piobuf++ = (__force u32) cpu_to_le32(control);
        memcpy(piobuf, hdr, hdrwords << 2);
        ipath_copy_from_sge(piobuf + hdrwords, ss, len);

        atomic_inc(&qp->s_dma_busy);
        ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
        /*
         * If we couldn't queue the DMA request, save the info
         * and try again later rather than destroying the
         * buffer and undoing the side effects of the copy.
*/
        if (ret) {
                tx->ss = NULL;
                tx->len = 0;
                qp->s_tx = tx;
                decrement_dma_busy(qp);
        }
        dev->n_unaligned++;
        goto bail;

err_tx:
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
        put_txreq(dev, tx);
bail:
        return ret;
}

/*
 * Send a verbs packet by programmed I/O, honoring the write-combining
 * flush requirements of the chip (IPATH_PIO_FLUSH_WC) so the trigger
 * word is always the last thing written.
 */
static int ipath_verbs_send_pio(struct ipath_qp *qp,
                                struct ipath_ib_header *ibhdr, u32 hdrwords,
                                struct ipath_sge_state *ss, u32 len,
                                u32 plen, u32 dwords)
{
        struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
        u32 *hdr = (u32 *) ibhdr;
        u32 __iomem *piobuf;
        unsigned flush_wc;
        u32 control;
        int ret;
        unsigned long flags;

        piobuf = ipath_getpiobuf(dd, plen, NULL);
        if (unlikely(piobuf == NULL)) {
                ret = -EBUSY;
                goto bail;
        }

        /*
         * Get the saved delay count we computed for the previous packet
         * and save the delay count for this packet to be used next time
         * we get here.
         */
        control = qp->s_pkt_delay;
        qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);

        /* VL15 packets bypass credit check */
        if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
                control |= 1ULL << 31;

        /*
         * Write the length to the control qword plus any needed flags.
         * We have to flush after the PBC for correctness on some cpus
         * or WC buffer can be written out of order.
         */
        writeq(((u64) control << 32) | plen, piobuf);
        piobuf += 2;

        flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
        if (len == 0) {
                /*
                 * If there is just the header portion, must flush before
                 * writing last word of header for correctness, and after
                 * the last header word (trigger word).
                 */
                if (flush_wc) {
                        ipath_flush_wc();
                        __iowrite32_copy(piobuf, hdr, hdrwords - 1);
                        ipath_flush_wc();
                        __raw_writel(hdr[hdrwords - 1],
                                     piobuf + hdrwords - 1);
                        ipath_flush_wc();
                } else
                        __iowrite32_copy(piobuf, hdr, hdrwords);
                goto done;
        }

        if (flush_wc)
                ipath_flush_wc();
        __iowrite32_copy(piobuf, hdr, hdrwords);
        piobuf += hdrwords;

        /* The common case is aligned and contained in one segment.
*/
        if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
                   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
                u32 *addr = (u32 *) ss->sge.vaddr;

                /* Update address before sending packet. */
                update_sge(ss, len);
                if (flush_wc) {
                        __iowrite32_copy(piobuf, addr, dwords - 1);
                        /* must flush early everything before trigger word */
                        ipath_flush_wc();
                        __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
                        /* be sure trigger word is written */
                        ipath_flush_wc();
                } else
                        __iowrite32_copy(piobuf, addr, dwords);
                goto done;
        }
        /* Slow path: unaligned or multi-SGE payload. */
        copy_io(piobuf, ss, len, flush_wc);
done:
        if (qp->s_wqe) {
                spin_lock_irqsave(&qp->s_lock, flags);
                ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        }
        ret = 0;
bail:
        return ret;
}

/**
 * ipath_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 */
int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
                     u32 hdrwords, struct ipath_sge_state *ss, u32 len)
{
        struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
        u32 plen;
        int ret;
        u32 dwords = (len + 3) >> 2;

        /*
         * Calculate the send buffer trigger address.
         * The +1 counts for the pbc control dword following the pbc length.
         */
        plen = hdrwords + dwords + 1;

        /*
         * VL15 packets (IB_QPT_SMI) will always use PIO, so we
         * can defer SDMA restart until link goes ACTIVE without
         * worrying about just how we got there.
         */
        if (qp->ibqp.qp_type == IB_QPT_SMI ||
            !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
                ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
                                           plen, dwords);
        else
                ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
                                           plen, dwords);

        return ret;
}

/*
 * Snapshot the raw send/receive word, packet, and xmit-wait counters.
 * Returns -EINVAL if the hardware is not initialized.
 */
int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
                            u64 *rwords, u64 *spkts, u64 *rpkts,
                            u64 *xmit_wait)
{
        int ret;

        if (!(dd->ipath_flags & IPATH_INITTED)) {
                /* no hardware, freeze, etc.
*/
                ret = -EINVAL;
                goto bail;
        }
        *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
        *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
        *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
        *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
        *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

        ret = 0;

bail:
        return ret;
}

/**
 * ipath_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_get_counters(struct ipath_devdata *dd,
                       struct ipath_verbs_counters *cntrs)
{
        struct ipath_cregs const *crp = dd->ipath_cregs;
        int ret;

        if (!(dd->ipath_flags & IPATH_INITTED)) {
                /* no hardware, freeze, etc. */
                ret = -EINVAL;
                goto bail;
        }
        cntrs->symbol_error_counter =
                ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
        cntrs->link_error_recovery_counter =
                ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
        /*
         * The link downed counter counts when the other side downs the
         * connection.  We add in the number of times we downed the link
         * due to local link integrity errors to compensate.
*/
        cntrs->link_downed_counter =
                ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
        /* Aggregate every class of receive error into one counter. */
        cntrs->port_rcv_errors =
                ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
                ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
                ipath_snap_cntr(dd, crp->cr_portovflcnt) +
                ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
                ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
                ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
                ipath_snap_cntr(dd, crp->cr_erricrccnt) +
                ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
                ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
                ipath_snap_cntr(dd, crp->cr_badformatcnt) +
                dd->ipath_rxfc_unsupvl_errs;
        /* Some registers only exist on certain chip revisions; a zero
         * register offset means "not present" and is skipped. */
        if (crp->cr_rxotherlocalphyerrcnt)
                cntrs->port_rcv_errors +=
                        ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
        if (crp->cr_rxvlerrcnt)
                cntrs->port_rcv_errors +=
                        ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
        cntrs->port_rcv_remphys_errors =
                ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
        cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
        cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
        cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
        cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
        cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
        cntrs->local_link_integrity_errors =
                crp->cr_locallinkintegrityerrcnt ?
                ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
                ((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
                 dd->ipath_lli_errs : dd->ipath_lli_errors);
        cntrs->excessive_buffer_overrun_errors =
                crp->cr_excessbufferovflcnt ?
                ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
                dd->ipath_overrun_thresh_errs;
        cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
                ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;

        ret = 0;

bail:
        return ret;
}

/**
 * ipath_ib_piobufavail - callback when a PIO buffer is available
 * @arg: the device pointer
 *
 * This is called from ipath_intr() at interrupt level when a PIO buffer is
 * available after ipath_verbs_send() returned an error that no buffers were
 * available.
 * Return 1 if we consumed all the PIO buffers and we still have
 * QPs waiting for buffers (for now, just restart the send tasklet and
 * return zero).
 */
int ipath_ib_piobufavail(struct ipath_ibdev *dev)
{
        struct list_head *list;
        struct ipath_qp *qplist;
        struct ipath_qp *qp;
        unsigned long flags;

        if (dev == NULL)
                goto bail;

        list = &dev->piowait;
        qplist = NULL;

        /* Move all waiters onto a private list under the lock ... */
        spin_lock_irqsave(&dev->pending_lock, flags);
        while (!list_empty(list)) {
                qp = list_entry(list->next, struct ipath_qp, piowait);
                list_del_init(&qp->piowait);
                qp->pio_next = qplist;
                qplist = qp;
                atomic_inc(&qp->refcount);
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        /* ... then restart each one outside the pending lock. */
        while (qplist != NULL) {
                qp = qplist;
                qplist = qp->pio_next;

                spin_lock_irqsave(&qp->s_lock, flags);
                if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
                        ipath_schedule_send(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);

                /* Notify ipath_destroy_qp() if it is waiting. */
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }

bail:
        return 0;
}

/* Report device capabilities and limits for ib_query_device(). */
static int ipath_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
{
        struct ipath_ibdev *dev = to_idev(ibdev);

        memset(props, 0, sizeof(*props));

        props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
                IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
                IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
                IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
        props->page_size_cap = PAGE_SIZE;
        props->vendor_id =
                IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3;
        props->vendor_part_id = dev->dd->ipath_deviceid;
        props->hw_ver = dev->dd->ipath_pcirev;

        props->sys_image_guid = dev->sys_image_guid;

        props->max_mr_size = ~0ull;
        props->max_qp = ib_ipath_max_qps;
        props->max_qp_wr = ib_ipath_max_qp_wrs;
        props->max_sge = ib_ipath_max_sges;
        props->max_cq = ib_ipath_max_cqs;
        props->max_ah = ib_ipath_max_ahs;
        props->max_cqe = ib_ipath_max_cqes;
        props->max_mr = dev->lk_table.max;
        props->max_fmr = dev->lk_table.max;
        props->max_map_per_fmr = 32767;
        props->max_pd = ib_ipath_max_pds;
props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC; props->max_qp_init_rd_atom = 255; /* props->max_res_rd_atom */ props->max_srq = ib_ipath_max_srqs; props->max_srq_wr = ib_ipath_max_srq_wrs; props->max_srq_sge = ib_ipath_max_srq_sges; /* props->local_ca_ack_delay */ props->atomic_cap = IB_ATOMIC_GLOB; props->max_pkeys = ipath_get_npkeys(dev->dd); props->max_mcast_grp = ib_ipath_max_mcast_grps; props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; return 0; } const u8 ipath_cvt_physportstate[32] = { [INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, [INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN, [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = IB_PHYSPORTSTATE_CFG_TRAIN, [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = IB_PHYSPORTSTATE_CFG_TRAIN, [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN, [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER, [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER, [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER, [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN }; u32 ipath_get_cr_errpkey(struct ipath_devdata *dd) { return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey); } static int ipath_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { struct ipath_ibdev 
*dev = to_idev(ibdev); struct ipath_devdata *dd = dev->dd; enum ib_mtu mtu; u16 lid = dd->ipath_lid; u64 ibcstat; memset(props, 0, sizeof(*props)); props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE); props->lmc = dd->ipath_lmc; props->sm_lid = dev->sm_lid; props->sm_sl = dev->sm_sl; ibcstat = dd->ipath_lastibcstat; /* map LinkState to IB portinfo values. */ props->state = ipath_ib_linkstate(dd, ibcstat) + 1; /* See phys_state_show() */ props->phys_state = /* MEA: assumes shift == 0 */ ipath_cvt_physportstate[dd->ipath_lastibcstat & dd->ibcs_lts_mask]; props->port_cap_flags = dev->port_cap_flags; props->gid_tbl_len = 1; props->max_msg_sz = 0x80000000; props->pkey_tbl_len = ipath_get_npkeys(dd); props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) - dev->z_pkey_violations; props->qkey_viol_cntr = dev->qkey_violations; props->active_width = dd->ipath_link_width_active; /* See rate_show() */ props->active_speed = dd->ipath_link_speed_active; props->max_vl_num = 1; /* VLCap = VL0 */ props->init_type_reply = 0; props->max_mtu = ipath_mtu4096 ? 
IB_MTU_4096 : IB_MTU_2048; switch (dd->ipath_ibmtu) { case 4096: mtu = IB_MTU_4096; break; case 2048: mtu = IB_MTU_2048; break; case 1024: mtu = IB_MTU_1024; break; case 512: mtu = IB_MTU_512; break; case 256: mtu = IB_MTU_256; break; default: mtu = IB_MTU_2048; } props->active_mtu = mtu; props->subnet_timeout = dev->subnet_timeout; return 0; } static int ipath_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) { int ret; if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | IB_DEVICE_MODIFY_NODE_DESC)) { ret = -EOPNOTSUPP; goto bail; } if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) memcpy(device->node_desc, device_modify->node_desc, 64); if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) to_idev(device)->sys_image_guid = cpu_to_be64(device_modify->sys_image_guid); ret = 0; bail: return ret; } static int ipath_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask, struct ib_port_modify *props) { struct ipath_ibdev *dev = to_idev(ibdev); dev->port_cap_flags |= props->set_port_cap_mask; dev->port_cap_flags &= ~props->clr_port_cap_mask; if (port_modify_mask & IB_PORT_SHUTDOWN) ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN); if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) dev->qkey_violations = 0; return 0; } static int ipath_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { struct ipath_ibdev *dev = to_idev(ibdev); int ret; if (index >= 1) { ret = -EINVAL; goto bail; } gid->global.subnet_prefix = dev->gid_prefix; gid->global.interface_id = dev->dd->ipath_guid; ret = 0; bail: return ret; } static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) { struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_pd *pd; struct ib_pd *ret; /* * This is actually totally arbitrary. Some correctness tests * assume there's a maximum number of PDs that can be allocated. 
* We don't actually have this limit, but we fail the test if * we allow allocations of more than we report for this value. */ pd = kmalloc(sizeof *pd, GFP_KERNEL); if (!pd) { ret = ERR_PTR(-ENOMEM); goto bail; } spin_lock(&dev->n_pds_lock); if (dev->n_pds_allocated == ib_ipath_max_pds) { spin_unlock(&dev->n_pds_lock); kfree(pd); ret = ERR_PTR(-ENOMEM); goto bail; } dev->n_pds_allocated++; spin_unlock(&dev->n_pds_lock); /* ib_alloc_pd() will initialize pd->ibpd. */ pd->user = udata != NULL; ret = &pd->ibpd; bail: return ret; } static int ipath_dealloc_pd(struct ib_pd *ibpd) { struct ipath_pd *pd = to_ipd(ibpd); struct ipath_ibdev *dev = to_idev(ibpd->device); spin_lock(&dev->n_pds_lock); dev->n_pds_allocated--; spin_unlock(&dev->n_pds_lock); kfree(pd); return 0; } /** * ipath_create_ah - create an address handle * @pd: the protection domain * @ah_attr: the attributes of the AH * * This may be called from interrupt context. */ static struct ib_ah *ipath_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { struct ipath_ah *ah; struct ib_ah *ret; struct ipath_ibdev *dev = to_idev(pd->device); unsigned long flags; /* A multicast address requires a GRH (see ch. 8.4.1). */ if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && ah_attr->dlid != IPATH_PERMISSIVE_LID && !(ah_attr->ah_flags & IB_AH_GRH)) { ret = ERR_PTR(-EINVAL); goto bail; } if (ah_attr->dlid == 0) { ret = ERR_PTR(-EINVAL); goto bail; } if (ah_attr->port_num < 1 || ah_attr->port_num > pd->device->phys_port_cnt) { ret = ERR_PTR(-EINVAL); goto bail; } ah = kmalloc(sizeof *ah, GFP_ATOMIC); if (!ah) { ret = ERR_PTR(-ENOMEM); goto bail; } spin_lock_irqsave(&dev->n_ahs_lock, flags); if (dev->n_ahs_allocated == ib_ipath_max_ahs) { spin_unlock_irqrestore(&dev->n_ahs_lock, flags); kfree(ah); ret = ERR_PTR(-ENOMEM); goto bail; } dev->n_ahs_allocated++; spin_unlock_irqrestore(&dev->n_ahs_lock, flags); /* ib_create_ah() will initialize ah->ibah. 
*/ ah->attr = *ah_attr; ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate); ret = &ah->ibah; bail: return ret; } /** * ipath_destroy_ah - destroy an address handle * @ibah: the AH to destroy * * This may be called from interrupt context. */ static int ipath_destroy_ah(struct ib_ah *ibah) { struct ipath_ibdev *dev = to_idev(ibah->device); struct ipath_ah *ah = to_iah(ibah); unsigned long flags; spin_lock_irqsave(&dev->n_ahs_lock, flags); dev->n_ahs_allocated--; spin_unlock_irqrestore(&dev->n_ahs_lock, flags); kfree(ah); return 0; } static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) { struct ipath_ah *ah = to_iah(ibah); *ah_attr = ah->attr; ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate); return 0; } /** * ipath_get_npkeys - return the size of the PKEY table for port 0 * @dd: the infinipath device */ unsigned ipath_get_npkeys(struct ipath_devdata *dd) { return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys); } /** * ipath_get_pkey - return the indexed PKEY from the port PKEY table * @dd: the infinipath device * @index: the PKEY index */ unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index) { unsigned ret; /* always a kernel port, no locking needed */ if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) ret = 0; else ret = dd->ipath_pd[0]->port_pkeys[index]; return ret; } static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) { struct ipath_ibdev *dev = to_idev(ibdev); int ret; if (index >= ipath_get_npkeys(dev->dd)) { ret = -EINVAL; goto bail; } *pkey = ipath_get_pkey(dev->dd, index); ret = 0; bail: return ret; } /** * ipath_alloc_ucontext - allocate a ucontest * @ibdev: the infiniband device * @udata: not used by the InfiniPath driver */ static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) { struct ipath_ucontext *context; struct ib_ucontext *ret; context = kmalloc(sizeof *context, GFP_KERNEL); if (!context) { ret = 
ERR_PTR(-ENOMEM); goto bail; } ret = &context->ibucontext; bail: return ret; } static int ipath_dealloc_ucontext(struct ib_ucontext *context) { kfree(to_iucontext(context)); return 0; } static int ipath_verbs_register_sysfs(struct ib_device *dev); static void __verbs_timer(unsigned long arg) { struct ipath_devdata *dd = (struct ipath_devdata *) arg; /* Handle verbs layer timeouts. */ ipath_ib_timer(dd->verbs_dev); mod_timer(&dd->verbs_timer, jiffies + 1); } static int enable_timer(struct ipath_devdata *dd) { /* * Early chips had a design flaw where the chip and kernel idea * of the tail register don't always agree, and therefore we won't * get an interrupt on the next packet received. * If the board supports per packet receive interrupts, use it. * Otherwise, the timer function periodically checks for packets * to cover this case. * Either way, the timer is needed for verbs layer related * processing. */ if (dd->ipath_flags & IPATH_GPIO_INTR) { ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, 0x2074076542310ULL); /* Enable GPIO bit 2 interrupt */ dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT); ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, dd->ipath_gpio_mask); } init_timer(&dd->verbs_timer); dd->verbs_timer.function = __verbs_timer; dd->verbs_timer.data = (unsigned long)dd; dd->verbs_timer.expires = jiffies + 1; add_timer(&dd->verbs_timer); return 0; } static int disable_timer(struct ipath_devdata *dd) { /* Disable GPIO bit 2 interrupt */ if (dd->ipath_flags & IPATH_GPIO_INTR) { /* Disable GPIO bit 2 interrupt */ dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT)); ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, dd->ipath_gpio_mask); /* * We might want to undo changes to debugportselect, * but how? */ } del_timer_sync(&dd->verbs_timer); return 0; } /** * ipath_register_ib_device - register our device with the infiniband core * @dd: the device data structure * Return the allocated ipath_ibdev pointer or NULL on error. 
*/ int ipath_register_ib_device(struct ipath_devdata *dd) { struct ipath_verbs_counters cntrs; struct ipath_ibdev *idev; struct ib_device *dev; struct ipath_verbs_txreq *tx; unsigned i; int ret; idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev); if (idev == NULL) { ret = -ENOMEM; goto bail; } dev = &idev->ibdev; if (dd->ipath_sdma_descq_cnt) { tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx, GFP_KERNEL); if (tx == NULL) { ret = -ENOMEM; goto err_tx; } } else tx = NULL; idev->txreq_bufs = tx; /* Only need to initialize non-zero fields. */ spin_lock_init(&idev->n_pds_lock); spin_lock_init(&idev->n_ahs_lock); spin_lock_init(&idev->n_cqs_lock); spin_lock_init(&idev->n_qps_lock); spin_lock_init(&idev->n_srqs_lock); spin_lock_init(&idev->n_mcast_grps_lock); spin_lock_init(&idev->qp_table.lock); spin_lock_init(&idev->lk_table.lock); idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); /* Set the prefix to the default value (see ch. 4.1.1) */ idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL); ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size); if (ret) goto err_qp; /* * The top ib_ipath_lkey_table_size bits are used to index the * table. The lower 8 bits can be owned by the user (copied from * the LKEY). The remaining bits act as a generation number or tag. 
*/ idev->lk_table.max = 1 << ib_ipath_lkey_table_size; idev->lk_table.table = kzalloc(idev->lk_table.max * sizeof(*idev->lk_table.table), GFP_KERNEL); if (idev->lk_table.table == NULL) { ret = -ENOMEM; goto err_lk; } INIT_LIST_HEAD(&idev->pending_mmaps); spin_lock_init(&idev->pending_lock); idev->mmap_offset = PAGE_SIZE; spin_lock_init(&idev->mmap_offset_lock); INIT_LIST_HEAD(&idev->pending[0]); INIT_LIST_HEAD(&idev->pending[1]); INIT_LIST_HEAD(&idev->pending[2]); INIT_LIST_HEAD(&idev->piowait); INIT_LIST_HEAD(&idev->rnrwait); INIT_LIST_HEAD(&idev->txreq_free); idev->pending_index = 0; idev->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP; if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY) idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP; idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; /* Snapshot current HW counters to "clear" them. 
*/ ipath_get_counters(dd, &cntrs); idev->z_symbol_error_counter = cntrs.symbol_error_counter; idev->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; idev->z_link_downed_counter = cntrs.link_downed_counter; idev->z_port_rcv_errors = cntrs.port_rcv_errors; idev->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; idev->z_port_xmit_discards = cntrs.port_xmit_discards; idev->z_port_xmit_data = cntrs.port_xmit_data; idev->z_port_rcv_data = cntrs.port_rcv_data; idev->z_port_xmit_packets = cntrs.port_xmit_packets; idev->z_port_rcv_packets = cntrs.port_rcv_packets; idev->z_local_link_integrity_errors = cntrs.local_link_integrity_errors; idev->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; idev->z_vl15_dropped = cntrs.vl15_dropped; for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++) list_add(&tx->txreq.list, &idev->txreq_free); /* * The system image GUID is supposed to be the same for all * IB HCAs in a single system but since there can be other * device types in the system, we can't be sure this is unique. 
*/ if (!sys_image_guid) sys_image_guid = dd->ipath_guid; idev->sys_image_guid = sys_image_guid; idev->ib_unit = dd->ipath_unit; idev->dd = dd; strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); dev->owner = THIS_MODULE; dev->node_guid = dd->ipath_guid; dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; dev->uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | (1ull << IB_USER_VERBS_CMD_CREATE_AH) | (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | (1ull << IB_USER_VERBS_CMD_QUERY_AH) | (1ull << IB_USER_VERBS_CMD_REG_MR) | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_POST_SEND) | (1ull << IB_USER_VERBS_CMD_POST_RECV) | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); dev->node_type = RDMA_NODE_IB_CA; dev->phys_port_cnt = 1; dev->num_comp_vectors = 1; dev->dma_device = &dd->pcidev->dev; dev->query_device = ipath_query_device; dev->modify_device = ipath_modify_device; dev->query_port = ipath_query_port; dev->modify_port = ipath_modify_port; dev->query_pkey = ipath_query_pkey; dev->query_gid = ipath_query_gid; dev->alloc_ucontext = ipath_alloc_ucontext; dev->dealloc_ucontext = ipath_dealloc_ucontext; dev->alloc_pd = ipath_alloc_pd; dev->dealloc_pd 
= ipath_dealloc_pd; dev->create_ah = ipath_create_ah; dev->destroy_ah = ipath_destroy_ah; dev->query_ah = ipath_query_ah; dev->create_srq = ipath_create_srq; dev->modify_srq = ipath_modify_srq; dev->query_srq = ipath_query_srq; dev->destroy_srq = ipath_destroy_srq; dev->create_qp = ipath_create_qp; dev->modify_qp = ipath_modify_qp; dev->query_qp = ipath_query_qp; dev->destroy_qp = ipath_destroy_qp; dev->post_send = ipath_post_send; dev->post_recv = ipath_post_receive; dev->post_srq_recv = ipath_post_srq_receive; dev->create_cq = ipath_create_cq; dev->destroy_cq = ipath_destroy_cq; dev->resize_cq = ipath_resize_cq; dev->poll_cq = ipath_poll_cq; dev->req_notify_cq = ipath_req_notify_cq; dev->get_dma_mr = ipath_get_dma_mr; dev->reg_phys_mr = ipath_reg_phys_mr; dev->reg_user_mr = ipath_reg_user_mr; dev->dereg_mr = ipath_dereg_mr; dev->alloc_fmr = ipath_alloc_fmr; dev->map_phys_fmr = ipath_map_phys_fmr; dev->unmap_fmr = ipath_unmap_fmr; dev->dealloc_fmr = ipath_dealloc_fmr; dev->attach_mcast = ipath_multicast_attach; dev->detach_mcast = ipath_multicast_detach; dev->process_mad = ipath_process_mad; dev->mmap = ipath_mmap; dev->dma_ops = &ipath_dma_mapping_ops; snprintf(dev->node_desc, sizeof(dev->node_desc), IPATH_IDSTR " %s", init_utsname()->nodename); ret = ib_register_device(dev, NULL); if (ret) goto err_reg; if (ipath_verbs_register_sysfs(dev)) goto err_class; enable_timer(dd); goto bail; err_class: ib_unregister_device(dev); err_reg: kfree(idev->lk_table.table); err_lk: kfree(idev->qp_table.table); err_qp: kfree(idev->txreq_bufs); err_tx: ib_dealloc_device(dev); ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret); idev = NULL; bail: dd->verbs_dev = idev; return ret; } void ipath_unregister_ib_device(struct ipath_ibdev *dev) { struct ib_device *ibdev = &dev->ibdev; u32 qps_inuse; ib_unregister_device(ibdev); disable_timer(dev->dd); if (!list_empty(&dev->pending[0]) || !list_empty(&dev->pending[1]) || !list_empty(&dev->pending[2])) ipath_dev_err(dev->dd, "pending 
list not empty!\n"); if (!list_empty(&dev->piowait)) ipath_dev_err(dev->dd, "piowait list not empty!\n"); if (!list_empty(&dev->rnrwait)) ipath_dev_err(dev->dd, "rnrwait list not empty!\n"); if (!ipath_mcast_tree_empty()) ipath_dev_err(dev->dd, "multicast table memory leak!\n"); /* * Note that ipath_unregister_ib_device() can be called before all * the QPs are destroyed! */ qps_inuse = ipath_free_all_qps(&dev->qp_table); if (qps_inuse) ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n", qps_inuse); kfree(dev->qp_table.table); kfree(dev->lk_table.table); kfree(dev->txreq_bufs); ib_dealloc_device(ibdev); } static ssize_t show_rev(struct device *device, struct device_attribute *attr, char *buf) { struct ipath_ibdev *dev = container_of(device, struct ipath_ibdev, ibdev.dev); return sprintf(buf, "%x\n", dev->dd->ipath_pcirev); } static ssize_t show_hca(struct device *device, struct device_attribute *attr, char *buf) { struct ipath_ibdev *dev = container_of(device, struct ipath_ibdev, ibdev.dev); int ret; ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128); if (ret < 0) goto bail; strcat(buf, "\n"); ret = strlen(buf); bail: return ret; } static ssize_t show_stats(struct device *device, struct device_attribute *attr, char *buf) { struct ipath_ibdev *dev = container_of(device, struct ipath_ibdev, ibdev.dev); int i; int len; len = sprintf(buf, "RC resends %d\n" "RC no QACK %d\n" "RC ACKs %d\n" "RC SEQ NAKs %d\n" "RC RDMA seq %d\n" "RC RNR NAKs %d\n" "RC OTH NAKs %d\n" "RC timeouts %d\n" "RC RDMA dup %d\n" "piobuf wait %d\n" "unaligned %d\n" "PKT drops %d\n" "WQE errs %d\n", dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, dev->n_other_naks, dev->n_timeouts, dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned, dev->n_pkt_drops, dev->n_wqe_errs); for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { const struct ipath_opcode_stats *si = &dev->opstats[i]; if (!si->n_packets && !si->n_bytes) continue; len += 
sprintf(buf + len, "%02x %llu/%llu\n", i, (unsigned long long) si->n_packets, (unsigned long long) si->n_bytes); } return len; } static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL); static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL); static struct device_attribute *ipath_class_attributes[] = { &dev_attr_hw_rev, &dev_attr_hca_type, &dev_attr_board_id, &dev_attr_stats }; static int ipath_verbs_register_sysfs(struct ib_device *dev) { int i; int ret; for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) if (device_create_file(&dev->dev, ipath_class_attributes[i])) { ret = 1; goto bail; } ret = 0; bail: return ret; }
gpl-2.0
aicjofs/android_kernel_fuhu_t8400n
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
5616
9123
/************************************************************************** * * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_drv.h" #include "vmwgfx_drm.h" #include "vmwgfx_kms.h" int vmw_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_getparam_arg *param = (struct drm_vmw_getparam_arg *)data; switch (param->param) { case DRM_VMW_PARAM_NUM_STREAMS: param->value = vmw_overlay_num_overlays(dev_priv); break; case DRM_VMW_PARAM_NUM_FREE_STREAMS: param->value = vmw_overlay_num_free_overlays(dev_priv); break; case DRM_VMW_PARAM_3D: param->value = vmw_fifo_have_3d(dev_priv) ? 
1 : 0; break; case DRM_VMW_PARAM_HW_CAPS: param->value = dev_priv->capabilities; break; case DRM_VMW_PARAM_FIFO_CAPS: param->value = dev_priv->fifo.capabilities; break; case DRM_VMW_PARAM_MAX_FB_SIZE: param->value = dev_priv->vram_size; break; case DRM_VMW_PARAM_FIFO_HW_VERSION: { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; const struct vmw_fifo_state *fifo = &dev_priv->fifo; param->value = ioread32(fifo_mem + ((fifo->capabilities & SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ? SVGA_FIFO_3D_HWVERSION_REVISED : SVGA_FIFO_3D_HWVERSION)); break; } default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", param->param); return -EINVAL; } return 0; } int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_vmw_get_3d_cap_arg *arg = (struct drm_vmw_get_3d_cap_arg *) data; struct vmw_private *dev_priv = vmw_priv(dev); uint32_t size; __le32 __iomem *fifo_mem; void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); void *bounce; int ret; if (unlikely(arg->pad64 != 0)) { DRM_ERROR("Illegal GET_3D_CAP argument.\n"); return -EINVAL; } size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2; if (arg->max_size < size) size = arg->max_size; bounce = vmalloc(size); if (unlikely(bounce == NULL)) { DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); return -ENOMEM; } fifo_mem = dev_priv->mmio_virt; memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); ret = copy_to_user(buffer, bounce, size); vfree(bounce); if (unlikely(ret != 0)) DRM_ERROR("Failed to report 3D caps info.\n"); return ret; } int vmw_present_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_present_arg *arg = (struct drm_vmw_present_arg *)data; struct vmw_surface *surface; struct vmw_master *vmaster = vmw_master(file_priv->master); struct drm_vmw_rect __user *clips_ptr; struct drm_vmw_rect *clips = 
NULL; struct drm_mode_object *obj; struct vmw_framebuffer *vfb; uint32_t num_clips; int ret; num_clips = arg->num_clips; clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; if (unlikely(num_clips == 0)) return 0; if (clips_ptr == NULL) { DRM_ERROR("Variable clips_ptr must be specified.\n"); ret = -EINVAL; goto out_clips; } clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); if (clips == NULL) { DRM_ERROR("Failed to allocate clip rect list.\n"); ret = -ENOMEM; goto out_clips; } ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); if (ret) { DRM_ERROR("Failed to copy clip rects from userspace.\n"); ret = -EFAULT; goto out_no_copy; } ret = mutex_lock_interruptible(&dev->mode_config.mutex); if (unlikely(ret != 0)) { ret = -ERESTARTSYS; goto out_no_mode_mutex; } obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { DRM_ERROR("Invalid framebuffer id.\n"); ret = -EINVAL; goto out_no_fb; } vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj)); ret = ttm_read_lock(&vmaster->lock, true); if (unlikely(ret != 0)) goto out_no_ttm_lock; ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid, &surface); if (ret) goto out_no_surface; ret = vmw_kms_present(dev_priv, file_priv, vfb, surface, arg->sid, arg->dest_x, arg->dest_y, clips, num_clips); /* vmw_user_surface_lookup takes one ref so does new_fb */ vmw_surface_unreference(&surface); out_no_surface: ttm_read_unlock(&vmaster->lock); out_no_ttm_lock: out_no_fb: mutex_unlock(&dev->mode_config.mutex); out_no_mode_mutex: out_no_copy: kfree(clips); out_clips: return ret; } int vmw_present_readback_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_present_readback_arg *arg = (struct drm_vmw_present_readback_arg *)data; struct drm_vmw_fence_rep __user *user_fence_rep = (struct drm_vmw_fence_rep __user *) (unsigned long)arg->fence_rep; struct vmw_master *vmaster = vmw_master(file_priv->master); struct 
drm_vmw_rect __user *clips_ptr; struct drm_vmw_rect *clips = NULL; struct drm_mode_object *obj; struct vmw_framebuffer *vfb; uint32_t num_clips; int ret; num_clips = arg->num_clips; clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; if (unlikely(num_clips == 0)) return 0; if (clips_ptr == NULL) { DRM_ERROR("Argument clips_ptr must be specified.\n"); ret = -EINVAL; goto out_clips; } clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); if (clips == NULL) { DRM_ERROR("Failed to allocate clip rect list.\n"); ret = -ENOMEM; goto out_clips; } ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); if (ret) { DRM_ERROR("Failed to copy clip rects from userspace.\n"); ret = -EFAULT; goto out_no_copy; } ret = mutex_lock_interruptible(&dev->mode_config.mutex); if (unlikely(ret != 0)) { ret = -ERESTARTSYS; goto out_no_mode_mutex; } obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { DRM_ERROR("Invalid framebuffer id.\n"); ret = -EINVAL; goto out_no_fb; } vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj)); if (!vfb->dmabuf) { DRM_ERROR("Framebuffer not dmabuf backed.\n"); ret = -EINVAL; goto out_no_fb; } ret = ttm_read_lock(&vmaster->lock, true); if (unlikely(ret != 0)) goto out_no_ttm_lock; ret = vmw_kms_readback(dev_priv, file_priv, vfb, user_fence_rep, clips, num_clips); ttm_read_unlock(&vmaster->lock); out_no_ttm_lock: out_no_fb: mutex_unlock(&dev->mode_config.mutex); out_no_mode_mutex: out_no_copy: kfree(clips); out_clips: return ret; } /** * vmw_fops_poll - wrapper around the drm_poll function * * @filp: See the linux fops poll documentation. * @wait: See the linux fops poll documentation. * * Wrapper around the drm_poll function that makes sure the device is * processing the fifo if drm_poll decides to wait. 
*/ unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait) { struct drm_file *file_priv = filp->private_data; struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); return drm_poll(filp, wait); } /** * vmw_fops_read - wrapper around the drm_read function * * @filp: See the linux fops read documentation. * @buffer: See the linux fops read documentation. * @count: See the linux fops read documentation. * offset: See the linux fops read documentation. * * Wrapper around the drm_read function that makes sure the device is * processing the fifo if drm_read decides to wait. */ ssize_t vmw_fops_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset) { struct drm_file *file_priv = filp->private_data; struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); return drm_read(filp, buffer, count, offset); }
gpl-2.0
hayoung-lee/willow_kernel
drivers/scsi/gdth_proc.c
8432
27071
/* gdth_proc.c
 * $Id: gdth_proc.c,v 1.43 2006/01/11 16:15:00 achim Exp $
 *
 * /proc/scsi interface for the GDT SCSI RAID controller driver.
 * This file is #included from gdth.c; helper prototypes and the
 * gdth_ha_str/gdth_cmd_str types come from the including file.
 */

#include <linux/completion.h>
#include <linux/slab.h>

/*
 * proc_info entry point for the SCSI midlayer.
 * inout != 0: a write to /proc (driver command), dispatched to gdth_set_info().
 * inout == 0: a read, dispatched to gdth_get_info().
 */
int gdth_proc_info(struct Scsi_Host *host, char *buffer, char **start,
                   off_t offset, int length, int inout)
{
    gdth_ha_str *ha = shost_priv(host);

    TRACE2(("gdth_proc_info() length %d offs %d inout %d\n",
            length,(int)offset,inout));

    if (inout)
        return(gdth_set_info(buffer,length,host,ha));
    else
        return(gdth_get_info(buffer,start,offset,length,host,ha));
}

/*
 * Handle a write to the proc file.  Commands must be prefixed with
 * "gdth " (prefix plus one separator char are skipped before the
 * ASCII command is parsed).  Returns the number of bytes consumed
 * or a negative errno.
 */
static int gdth_set_info(char *buffer, int length, struct Scsi_Host *host,
                         gdth_ha_str *ha)
{
    int ret_val = -EINVAL;

    /* FIX: removed stray trailing comma inside the TRACE2() argument
     * list; it was a syntax error in debug builds where TRACE2
     * expands its argument. */
    TRACE2(("gdth_set_info() ha %d\n",ha->hanum));

    if (length >= 4) {
        if (strncmp(buffer,"gdth",4) == 0) {
            buffer += 5;                    /* skip "gdth" + separator */
            length -= 5;
            ret_val = gdth_set_asc_info(host, buffer, length, ha);
        }
    }

    return ret_val;
}

/*
 * Parse and execute an ASCII proc command:
 *   "flush [<n>]"  - flush one (or all) host drive caches
 *   "wbp_off/on"   - permanently disable/enable write-back cache
 *   "wb_off/on"    - disable/enable write-back for the driver's commands
 * Returns the original request length on success (so the write is seen
 * as fully consumed) or a negative errno.
 */
static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
                             int length, gdth_ha_str *ha)
{
    int orig_length, drive, wb_mode;
    int i, found;
    gdth_cmd_str gdtcmd;
    gdth_cpar_str *pcpar;
    u64 paddr;

    char cmnd[MAX_COMMAND_SIZE];
    memset(cmnd, 0xff, 12);
    memset(&gdtcmd, 0, sizeof(gdth_cmd_str));

    TRACE2(("gdth_set_asc_info() ha %d\n",ha->hanum));
    orig_length = length + 5;               /* caller stripped "gdth " */
    drive = -1;
    wb_mode = 0;
    found = FALSE;

    if (length >= 5 && strncmp(buffer,"flush",5)==0) {
        buffer += 6;
        length -= 6;
        /* optional one- or two-digit host drive number */
        if (length && *buffer>='0' && *buffer<='9') {
            drive = (int)(*buffer-'0');
            ++buffer; --length;
            if (length && *buffer>='0' && *buffer<='9') {
                drive = drive*10 + (int)(*buffer-'0');
                ++buffer; --length;
            }
            printk("GDT: Flushing host drive %d .. ",drive);
        } else {
            printk("GDT: Flushing all host drives .. ");
        }
        for (i = 0; i < MAX_HDRIVES; ++i) {
            if (ha->hdr[i].present) {
                if (drive != -1 && i != drive)
                    continue;
                found = TRUE;
                gdtcmd.Service = CACHESERVICE;
                gdtcmd.OpCode = GDT_FLUSH;
                if (ha->cache_feat & GDT_64BIT) {
                    gdtcmd.u.cache64.DeviceNo = i;
                    gdtcmd.u.cache64.BlockNo = 1;
                } else {
                    gdtcmd.u.cache.DeviceNo = i;
                    gdtcmd.u.cache.BlockNo = 1;
                }
                gdth_execute(host, &gdtcmd, cmnd, 30, NULL);
            }
        }
        if (!found)
            printk("\nNo host drive found !\n");
        else
            printk("Done.\n");
        return(orig_length);
    }

    /* NOTE: longest match first, since "wbp_off" also matches "wb_o..." */
    if (length >= 7 && strncmp(buffer,"wbp_off",7)==0) {
        buffer += 8;
        length -= 8;
        printk("GDT: Disabling write back permanently .. ");
        wb_mode = 1;
    } else if (length >= 6 && strncmp(buffer,"wbp_on",6)==0) {
        buffer += 7;
        length -= 7;
        printk("GDT: Enabling write back permanently .. ");
        wb_mode = 2;
    } else if (length >= 6 && strncmp(buffer,"wb_off",6)==0) {
        buffer += 7;
        length -= 7;
        printk("GDT: Disabling write back commands .. ");
        if (ha->cache_feat & GDT_WR_THROUGH) {
            gdth_write_through = TRUE;
            printk("Done.\n");
        } else {
            printk("Not supported !\n");
        }
        return(orig_length);
    } else if (length >= 5 && strncmp(buffer,"wb_on",5)==0) {
        buffer += 6;
        length -= 6;
        printk("GDT: Enabling write back commands .. ");
        gdth_write_through = FALSE;
        printk("Done.\n");
        return(orig_length);
    }

    if (wb_mode) {
        /* push the modified cache parameters to the firmware via the
         * controller's scratch buffer */
        if (!gdth_ioctl_alloc(ha, sizeof(gdth_cpar_str), TRUE, &paddr))
            return(-EBUSY);
        pcpar = (gdth_cpar_str *)ha->pscratch;
        memcpy( pcpar, &ha->cpar, sizeof(gdth_cpar_str) );
        gdtcmd.Service = CACHESERVICE;
        gdtcmd.OpCode = GDT_IOCTL;
        gdtcmd.u.ioctl.p_param = paddr;
        gdtcmd.u.ioctl.param_size = sizeof(gdth_cpar_str);
        gdtcmd.u.ioctl.subfunc = CACHE_CONFIG;
        gdtcmd.u.ioctl.channel = INVALID_CHANNEL;
        pcpar->write_back = wb_mode==1 ? 0:1;
        gdth_execute(host, &gdtcmd, cmnd, 30, NULL);
        gdth_ioctl_free(ha, GDTH_SCRATCH, ha->pscratch, paddr);
        printk("Done.\n");
        return(orig_length);
    }

    printk("GDT: Unknown command: %s Length: %d\n",buffer,length);
    return(-EINVAL);
}

/*
 * Produce the /proc read output: driver parameters, controller info,
 * and (with more_proc) physical devices, logical/array/host drives and
 * the controller event log.  Uses the classic proc_info windowing
 * protocol: "begin/pos/offset/length" track which part of the virtual
 * output falls into the caller's buffer window.
 * Returns the number of bytes placed in the window, or -ENOMEM.
 */
static int gdth_get_info(char *buffer, char **start, off_t offset, int length,
                         struct Scsi_Host *host, gdth_ha_str *ha)
{
    int size = 0,len = 0;
    int hlen;
    off_t begin = 0,pos = 0;
    int id, i, j, k, sec, flag;
    int no_mdrv = 0, drv_no, is_mirr;
    u32 cnt;
    u64 paddr;
    int rc = -ENOMEM;

    gdth_cmd_str *gdtcmd;
    gdth_evt_str *estr;
    char hrec[161];
    struct timeval tv;

    char *buf;
    gdth_dskstat_str *pds;
    gdth_diskinfo_str *pdi;
    gdth_arrayinf_str *pai;
    gdth_defcnt_str *pdef;
    gdth_cdrinfo_str *pcdi;
    gdth_hget_str *phg;
    char cmnd[MAX_COMMAND_SIZE];

    gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
    estr = kmalloc(sizeof(*estr), GFP_KERNEL);
    if (!gdtcmd || !estr)
        goto free_fail;

    memset(cmnd, 0xff, 12);
    memset(gdtcmd, 0, sizeof(gdth_cmd_str));

    TRACE2(("gdth_get_info() ha %d\n",ha->hanum));

    /* request is i.e. "cat /proc/scsi/gdth/0" */
    /* format: %-15s\t%-10s\t%-15s\t%s */
    /* driver parameters */
    size = sprintf(buffer+len,"Driver Parameters:\n");
    len += size;  pos = begin + len;
    if (reserve_list[0] == 0xff)
        strcpy(hrec, "--");
    else {
        hlen = sprintf(hrec, "%d", reserve_list[0]);
        for (i = 1;  i < MAX_RES_ARGS; i++) {
            if (reserve_list[i] == 0xff)
                break;
            hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
        }
    }
    size = sprintf(buffer+len,
                   " reserve_mode: \t%d \treserve_list: \t%s\n",
                   reserve_mode, hrec);
    len += size;  pos = begin + len;
    size = sprintf(buffer+len,
                   " max_ids: \t%-3d \thdr_channel: \t%d\n",
                   max_ids, hdr_channel);
    len += size;  pos = begin + len;

    /* controller information */
    size = sprintf(buffer+len,"\nDisk Array Controller Information:\n");
    len += size;  pos = begin + len;
    strcpy(hrec, ha->binfo.type_string);
    size = sprintf(buffer+len,
                   " Number: \t%d \tName: \t%s\n",
                   ha->hanum, hrec);
    len += size;  pos = begin + len;

    if (ha->more_proc)
        sprintf(hrec, "%d.%02d.%02d-%c%03X",
                (u8)(ha->binfo.upd_fw_ver>>24),
                (u8)(ha->binfo.upd_fw_ver>>16),
                (u8)(ha->binfo.upd_fw_ver),
                ha->bfeat.raid ? 'R':'N',
                ha->binfo.upd_revision);
    else
        sprintf(hrec, "%d.%02d", (u8)(ha->cpar.version>>8),
                (u8)(ha->cpar.version));

    size = sprintf(buffer+len,
                   " Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n",
                   GDTH_VERSION_STR, hrec);
    len += size;  pos = begin + len;

    if (ha->more_proc) {
        /* more information: 1. about controller */
        size = sprintf(buffer+len,
                       " Serial No.: \t0x%8X\tCache RAM size:\t%d KB\n",
                       ha->binfo.ser_no, ha->binfo.memsize / 1024);
        len += size;  pos = begin + len;
    }

#ifdef GDTH_DMA_STATISTICS
    /* controller statistics */
    size = sprintf(buffer+len,"\nController Statistics:\n");
    len += size;  pos = begin + len;
    size = sprintf(buffer+len,
                   " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n",
                   ha->dma32_cnt, ha->dma64_cnt);
    len += size;  pos = begin + len;
#endif

    if (pos < offset) {
        len = 0;
        begin = pos;
    }
    if (pos > offset + length)
        goto stop_output;

    if (ha->more_proc) {
        /* more information: 2. about physical devices */
        size = sprintf(buffer+len,"\nPhysical Devices:");
        len += size;  pos = begin + len;
        flag = FALSE;

        buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
        if (!buf)
            goto stop_output;
        for (i = 0; i < ha->bus_cnt; ++i) {
            /* 2.a statistics (and retries/reassigns) */
            TRACE2(("pdr_statistics() chn %d\n",i));
            /* statistics block lives in the upper 3/4 of the buffer so
             * the per-device IOCTLs below can use the first quarter */
            pds = (gdth_dskstat_str *)(buf + GDTH_SCRATCH/4);
            gdtcmd->Service = CACHESERVICE;
            gdtcmd->OpCode = GDT_IOCTL;
            gdtcmd->u.ioctl.p_param = paddr + GDTH_SCRATCH/4;
            gdtcmd->u.ioctl.param_size = 3*GDTH_SCRATCH/4;
            gdtcmd->u.ioctl.subfunc = DSK_STATISTICS | L_CTRL_PATTERN;
            gdtcmd->u.ioctl.channel = ha->raw[i].address | INVALID_CHANNEL;
            pds->bid = ha->raw[i].local_no;
            pds->first = 0;
            pds->entries = ha->raw[i].pdev_cnt;
            cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(u32)) /
                sizeof(pds->list[0]);
            if (pds->entries > cnt)
                pds->entries = cnt;
            if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) != S_OK)
                pds->count = 0;

            /* other IOCTLs must fit into area GDTH_SCRATCH/4 */
            for (j = 0; j < ha->raw[i].pdev_cnt; ++j) {
                /* 2.b drive info */
                TRACE2(("scsi_drv_info() chn %d dev %d\n",
                    i, ha->raw[i].id_list[j]));
                pdi = (gdth_diskinfo_str *)buf;
                gdtcmd->Service = CACHESERVICE;
                gdtcmd->OpCode = GDT_IOCTL;
                gdtcmd->u.ioctl.p_param = paddr;
                gdtcmd->u.ioctl.param_size = sizeof(gdth_diskinfo_str);
                gdtcmd->u.ioctl.subfunc = SCSI_DR_INFO | L_CTRL_PATTERN;
                gdtcmd->u.ioctl.channel =
                    ha->raw[i].address | ha->raw[i].id_list[j];
                if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
                    strncpy(hrec,pdi->vendor,8);
                    strncpy(hrec+8,pdi->product,16);
                    strncpy(hrec+24,pdi->revision,4);
                    hrec[28] = 0;
                    size = sprintf(buffer+len,
                                   "\n Chn/ID/LUN: \t%c/%02d/%d \tName: \t%s\n",
                                   'A'+i,pdi->target_id,pdi->lun,hrec);
                    len += size;  pos = begin + len;
                    flag = TRUE;
                    pdi->no_ldrive &= 0xffff;
                    if (pdi->no_ldrive == 0xffff)
                        strcpy(hrec,"--");
                    else
                        sprintf(hrec,"%d",pdi->no_ldrive);
                    size = sprintf(buffer+len,
                                   " Capacity [MB]:\t%-6d \tTo Log. Drive: \t%s\n",
                                   pdi->blkcnt/(1024*1024/pdi->blksize),
                                   hrec);
                    len += size;  pos = begin + len;
                } else {
                    pdi->devtype = 0xff;
                }

                if (pdi->devtype == 0) {
                    /* search retries/reassigns */
                    for (k = 0; k < pds->count; ++k) {
                        if (pds->list[k].tid == pdi->target_id &&
                            pds->list[k].lun == pdi->lun) {
                            size = sprintf(buffer+len,
                                           " Retries: \t%-6d \tReassigns: \t%d\n",
                                           pds->list[k].retries,
                                           pds->list[k].reassigns);
                            len += size;  pos = begin + len;
                            break;
                        }
                    }
                    /* 2.c grown defects */
                    TRACE2(("scsi_drv_defcnt() chn %d dev %d\n",
                            i, ha->raw[i].id_list[j]));
                    pdef = (gdth_defcnt_str *)buf;
                    gdtcmd->Service = CACHESERVICE;
                    gdtcmd->OpCode = GDT_IOCTL;
                    gdtcmd->u.ioctl.p_param = paddr;
                    gdtcmd->u.ioctl.param_size = sizeof(gdth_defcnt_str);
                    gdtcmd->u.ioctl.subfunc = SCSI_DEF_CNT | L_CTRL_PATTERN;
                    gdtcmd->u.ioctl.channel =
                        ha->raw[i].address | ha->raw[i].id_list[j];
                    pdef->sddc_type = 0x08;
                    if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
                        size = sprintf(buffer+len,
                                       " Grown Defects:\t%d\n",
                                       pdef->sddc_cnt);
                        len += size;  pos = begin + len;
                    }
                }
                if (pos < offset) {
                    len = 0;
                    begin = pos;
                }
                if (pos > offset + length) {
                    gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                    goto stop_output;
                }
            }
        }
        gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);

        if (!flag) {
            size = sprintf(buffer+len, "\n --\n");
            len += size;  pos = begin + len;
        }

        /* 3. about logical drives */
        size = sprintf(buffer+len,"\nLogical Drives:");
        len += size;  pos = begin + len;
        flag = FALSE;

        buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
        if (!buf)
            goto stop_output;
        for (i = 0; i < MAX_LDRIVES; ++i) {
            if (!ha->hdr[i].is_logdrv)
                continue;
            drv_no = i;
            j = k = 0;
            is_mirr = FALSE;
            /* walk the master drive and its mirror slaves */
            do {
                /* 3.a log. drive info */
                TRACE2(("cache_drv_info() drive no %d\n",drv_no));
                pcdi = (gdth_cdrinfo_str *)buf;
                gdtcmd->Service = CACHESERVICE;
                gdtcmd->OpCode = GDT_IOCTL;
                gdtcmd->u.ioctl.p_param = paddr;
                gdtcmd->u.ioctl.param_size = sizeof(gdth_cdrinfo_str);
                gdtcmd->u.ioctl.subfunc = CACHE_DRV_INFO;
                gdtcmd->u.ioctl.channel = drv_no;
                if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) != S_OK)
                    break;
                pcdi->ld_dtype >>= 16;
                j++;
                if (pcdi->ld_dtype > 2) {
                    strcpy(hrec, "missing");
                } else if (pcdi->ld_error & 1) {
                    strcpy(hrec, "fault");
                } else if (pcdi->ld_error & 2) {
                    strcpy(hrec, "invalid");
                    k++; j--;
                } else {
                    strcpy(hrec, "ok");
                }
                if (drv_no == i) {
                    size = sprintf(buffer+len,
                                   "\n Number: \t%-2d \tStatus: \t%s\n",
                                   drv_no, hrec);
                    len += size;  pos = begin + len;
                    flag = TRUE;
                    no_mdrv = pcdi->cd_ldcnt;
                    if (no_mdrv > 1 || pcdi->ld_slave != -1) {
                        is_mirr = TRUE;
                        strcpy(hrec, "RAID-1");
                    } else if (pcdi->ld_dtype == 0) {
                        strcpy(hrec, "Disk");
                    } else if (pcdi->ld_dtype == 1) {
                        strcpy(hrec, "RAID-0");
                    } else if (pcdi->ld_dtype == 2) {
                        strcpy(hrec, "Chain");
                    } else {
                        strcpy(hrec, "???");
                    }
                    size = sprintf(buffer+len,
                                   " Capacity [MB]:\t%-6d \tType: \t%s\n",
                                   pcdi->ld_blkcnt/(1024*1024/pcdi->ld_blksize),
                                   hrec);
                    len += size;  pos = begin + len;
                } else {
                    size = sprintf(buffer+len,
                                   " Slave Number: \t%-2d \tStatus: \t%s\n",
                                   drv_no & 0x7fff, hrec);
                    len += size;  pos = begin + len;
                }
                drv_no = pcdi->ld_slave;
                if (pos < offset) {
                    len = 0;
                    begin = pos;
                }
                if (pos > offset + length) {
                    gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                    goto stop_output;
                }
            } while (drv_no != -1);

            if (is_mirr) {
                size = sprintf(buffer+len,
                               " Missing Drv.: \t%-2d \tInvalid Drv.: \t%d\n",
                               no_mdrv - j - k, k);
                len += size;  pos = begin + len;
            }

            if (!ha->hdr[i].is_arraydrv)
                strcpy(hrec, "--");
            else
                sprintf(hrec, "%d", ha->hdr[i].master_no);
            size = sprintf(buffer+len,
                           " To Array Drv.:\t%s\n", hrec);
            len += size;  pos = begin + len;
            if (pos < offset) {
                len = 0;
                begin = pos;
            }
            if (pos > offset + length) {
                gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                goto stop_output;
            }
        }
        gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);

        if (!flag) {
            size = sprintf(buffer+len, "\n --\n");
            len += size;  pos = begin + len;
        }

        /* 4. about array drives */
        size = sprintf(buffer+len,"\nArray Drives:");
        len += size;  pos = begin + len;
        flag = FALSE;

        buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
        if (!buf)
            goto stop_output;
        for (i = 0; i < MAX_LDRIVES; ++i) {
            if (!(ha->hdr[i].is_arraydrv && ha->hdr[i].is_master))
                continue;
            /* 4.a array drive info */
            TRACE2(("array_info() drive no %d\n",i));
            pai = (gdth_arrayinf_str *)buf;
            gdtcmd->Service = CACHESERVICE;
            gdtcmd->OpCode = GDT_IOCTL;
            gdtcmd->u.ioctl.p_param = paddr;
            gdtcmd->u.ioctl.param_size = sizeof(gdth_arrayinf_str);
            gdtcmd->u.ioctl.subfunc = ARRAY_INFO | LA_CTRL_PATTERN;
            gdtcmd->u.ioctl.channel = i;
            if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
                if (pai->ai_state == 0)
                    strcpy(hrec, "idle");
                else if (pai->ai_state == 2)
                    strcpy(hrec, "build");
                else if (pai->ai_state == 4)
                    strcpy(hrec, "ready");
                else if (pai->ai_state == 6)
                    strcpy(hrec, "fail");
                else if (pai->ai_state == 8 || pai->ai_state == 10)
                    strcpy(hrec, "rebuild");
                else
                    strcpy(hrec, "error");
                if (pai->ai_ext_state & 0x10)
                    strcat(hrec, "/expand");
                else if (pai->ai_ext_state & 0x1)
                    strcat(hrec, "/patch");
                size = sprintf(buffer+len,
                               "\n Number: \t%-2d \tStatus: \t%s\n",
                               i,hrec);
                len += size;  pos = begin + len;
                flag = TRUE;

                if (pai->ai_type == 0)
                    strcpy(hrec, "RAID-0");
                else if (pai->ai_type == 4)
                    strcpy(hrec, "RAID-4");
                else if (pai->ai_type == 5)
                    strcpy(hrec, "RAID-5");
                else
                    strcpy(hrec, "RAID-10");
                size = sprintf(buffer+len,
                               " Capacity [MB]:\t%-6d \tType: \t%s\n",
                               pai->ai_size/(1024*1024/pai->ai_secsize),
                               hrec);
                len += size;  pos = begin + len;
                if (pos < offset) {
                    len = 0;
                    begin = pos;
                }
                if (pos > offset + length) {
                    gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                    goto stop_output;
                }
            }
        }
        gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);

        if (!flag) {
            size = sprintf(buffer+len, "\n --\n");
            len += size;  pos = begin + len;
        }

        /* 5. about host drives */
        size = sprintf(buffer+len,"\nHost Drives:");
        len += size;  pos = begin + len;
        flag = FALSE;

        buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr);
        if (!buf)
            goto stop_output;
        for (i = 0; i < MAX_LDRIVES; ++i) {
            if (!ha->hdr[i].is_logdrv ||
                (ha->hdr[i].is_arraydrv && !ha->hdr[i].is_master))
                continue;
            /* 5.a get host drive list */
            TRACE2(("host_get() drv_no %d\n",i));
            phg = (gdth_hget_str *)buf;
            gdtcmd->Service = CACHESERVICE;
            gdtcmd->OpCode = GDT_IOCTL;
            gdtcmd->u.ioctl.p_param = paddr;
            gdtcmd->u.ioctl.param_size = sizeof(gdth_hget_str);
            gdtcmd->u.ioctl.subfunc = HOST_GET | LA_CTRL_PATTERN;
            gdtcmd->u.ioctl.channel = i;
            phg->entries = MAX_HDRIVES;
            phg->offset = GDTOFFSOF(gdth_hget_str, entry[0]);
            /* FIX: the condition was inverted ("== S_OK").  On success
             * the firmware has filled phg with the host drive list,
             * which the else branch parses; the identity-mapping
             * defaults below are the fallback when HOST_GET fails.
             * The inverted test also read uninitialized phg->entries
             * after a failed execute. */
            if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) != S_OK) {
                ha->hdr[i].ldr_no = i;
                ha->hdr[i].rw_attribs = 0;
                ha->hdr[i].start_sec = 0;
            } else {
                for (j = 0; j < phg->entries; ++j) {
                    k = phg->entry[j].host_drive;
                    if (k >= MAX_LDRIVES)
                        continue;
                    ha->hdr[k].ldr_no = phg->entry[j].log_drive;
                    ha->hdr[k].rw_attribs = phg->entry[j].rw_attribs;
                    ha->hdr[k].start_sec = phg->entry[j].start_sec;
                }
            }
        }
        gdth_ioctl_free(ha, sizeof(gdth_hget_str), buf, paddr);

        for (i = 0; i < MAX_HDRIVES; ++i) {
            if (!(ha->hdr[i].present))
                continue;

            size = sprintf(buffer+len,
                           "\n Number: \t%-2d \tArr/Log. Drive:\t%d\n",
                           i, ha->hdr[i].ldr_no);
            len += size;  pos = begin + len;
            flag = TRUE;

            size = sprintf(buffer+len,
                           " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n",
                           (u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
            len += size;  pos = begin + len;
            if (pos < offset) {
                len = 0;
                begin = pos;
            }
            if (pos > offset + length)
                goto stop_output;
        }

        if (!flag) {
            size = sprintf(buffer+len, "\n --\n");
            len += size;  pos = begin + len;
        }
    }

    /* controller events */
    size = sprintf(buffer+len,"\nController Events:\n");
    len += size;  pos = begin + len;

    for (id = -1;;) {
        id = gdth_read_event(ha, id, estr);
        if (estr->event_source == 0)
            break;
        if (estr->event_data.eu.driver.ionode == ha->hanum &&
            estr->event_source == ES_ASYNC) {
            gdth_log_event(&estr->event_data, hrec);
            do_gettimeofday(&tv);
            sec = (int)(tv.tv_sec - estr->first_stamp);
            if (sec < 0) sec = 0;
            size = sprintf(buffer+len," date- %02d:%02d:%02d\t%s\n",
                           sec/3600, sec%3600/60, sec%60, hrec);
            len += size;  pos = begin + len;
            if (pos < offset) {
                len = 0;
                begin = pos;
            }
            if (pos > offset + length)
                goto stop_output;
        }
        if (id == -1)
            break;
    }

stop_output:
    /* translate (begin,len) into the caller's window */
    *start = buffer +(offset-begin);
    len -= (offset-begin);
    if (len > length)
        len = length;
    TRACE2(("get_info() len %d pos %d begin %d offset %d length %d size %d\n",
            len,(int)pos,(int)begin,(int)offset,length,size));
    rc = len;

free_fail:
    kfree(gdtcmd);
    kfree(estr);
    return rc;
}

/*
 * Get a DMA-able buffer of 'size' bytes.  Prefers the controller's
 * pre-allocated scratch area; if it is busy (or too small) and
 * 'scratch' is FALSE, falls back to a fresh consistent allocation.
 * Returns NULL on failure, otherwise the virtual address with the bus
 * address in *paddr.  Pair with gdth_ioctl_free().
 */
static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
                              u64 *paddr)
{
    unsigned long flags;
    char *ret_val;

    if (size == 0)
        return NULL;

    spin_lock_irqsave(&ha->smp_lock, flags);

    if (!ha->scratch_busy && size <= GDTH_SCRATCH) {
        ha->scratch_busy = TRUE;
        ret_val = ha->pscratch;
        *paddr = ha->scratch_phys;
    } else if (scratch) {
        ret_val = NULL;
    } else {
        dma_addr_t dma_addr;

        ret_val = pci_alloc_consistent(ha->pdev, size, &dma_addr);
        *paddr = dma_addr;
    }

    spin_unlock_irqrestore(&ha->smp_lock, flags);
    return ret_val;
}

/*
 * Release a buffer obtained from gdth_ioctl_alloc(): either mark the
 * scratch area free again or return the consistent allocation.
 */
static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr)
{
    unsigned long flags;

    if (buf == ha->pscratch) {
        spin_lock_irqsave(&ha->smp_lock, flags);
        ha->scratch_busy = FALSE;
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    } else {
        pci_free_consistent(ha->pdev, size, buf, paddr);
    }
}

#ifdef GDTH_IOCTL_PROC
/*
 * Check whether the scratch buffer currently holds a binary ioctl
 * result of exactly 'size' bytes.  Returns TRUE/FALSE.
 */
static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size)
{
    unsigned long flags;
    int ret_val;

    spin_lock_irqsave(&ha->smp_lock, flags);

    ret_val = FALSE;
    if (ha->scratch_busy) {
        if (((gdth_iord_str *)ha->pscratch)->size == (u32)size)
            ret_val = TRUE;
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);
    return ret_val;
}
#endif

/*
 * Wait for every outstanding command addressed to (busnum, id) to
 * complete.  Clears wait_for_completion and busy-waits for the
 * completion path to set it again; the spinlock is dropped around the
 * busy-wait so the completion handler can run.
 */
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
{
    unsigned long flags;
    int i;
    Scsi_Cmnd *scp;
    struct gdth_cmndinfo *cmndinfo;
    u8 b, t;

    spin_lock_irqsave(&ha->smp_lock, flags);

    for (i = 0; i < GDTH_MAXCMDS; ++i) {
        scp = ha->cmd_tab[i].cmnd;
        cmndinfo = gdth_cmnd_priv(scp);

        b = scp->device->channel;
        t = scp->device->id;
        if (!SPECIAL_SCP(scp) && t == (u8)id && b == (u8)busnum) {
            cmndinfo->wait_for_completion = 0;
            spin_unlock_irqrestore(&ha->smp_lock, flags);
            while (!cmndinfo->wait_for_completion)
                barrier();
            spin_lock_irqsave(&ha->smp_lock, flags);
        }
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);
}
gpl-2.0
penhoi/linux-3.13.11.lbrpmu
drivers/scsi/aic7xxx/aic7xxx_core.c
8432
216265
/* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $ */ #ifdef __linux__ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aicasm/aicasm_insformat.h" #else #include <dev/aic7xxx/aic7xxx_osm.h> #include <dev/aic7xxx/aic7xxx_inline.h> #include <dev/aic7xxx/aicasm/aicasm_insformat.h> #endif /***************************** Lookup Tables **********************************/ static const char *const ahc_chip_names[] = { "NONE", "aic7770", "aic7850", "aic7855", "aic7859", "aic7860", "aic7870", "aic7880", "aic7895", "aic7895C", "aic7890/91", "aic7896/97", "aic7892", "aic7899" }; static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names); /* * Hardware error codes. */ struct ahc_hard_error_entry { uint8_t errno; const char *errmesg; }; static const struct ahc_hard_error_entry ahc_hard_errors[] = { { ILLHADDR, "Illegal Host Access" }, { ILLSADDR, "Illegal Sequencer Address referrenced" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { PCIERRSTAT, "PCI Error detected" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors); static const struct ahc_phase_table_entry ahc_phase_table[] = { { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, { P_COMMAND, MSG_NOOP, "in Command phase" }, { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, MSG_NOOP, "while idle" }, { 0, MSG_NOOP, "in unknown phase" } }; /* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count. */ static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1; /* * Valid SCSIRATE values. (p. 
3-17) * Provides a mapping of tranfer periods in ns to the proper value to * stick in the scsixfer reg. */ static const struct ahc_syncrate ahc_syncrates[] = { /* ultra2 fast/ultra period rate */ { 0x42, 0x000, 9, "80.0" }, { 0x03, 0x000, 10, "40.0" }, { 0x04, 0x000, 11, "33.0" }, { 0x05, 0x100, 12, "20.0" }, { 0x06, 0x110, 15, "16.0" }, { 0x07, 0x120, 18, "13.4" }, { 0x08, 0x000, 25, "10.0" }, { 0x19, 0x010, 31, "8.0" }, { 0x1a, 0x020, 37, "6.67" }, { 0x1b, 0x030, 43, "5.7" }, { 0x1c, 0x040, 50, "5.0" }, { 0x00, 0x050, 56, "4.4" }, { 0x00, 0x060, 62, "4.0" }, { 0x00, 0x070, 68, "3.6" }, { 0x00, 0x000, 0, NULL } }; /* Our Sequencer Program */ #include "aic7xxx_seq.h" /**************************** Function Declarations ***************************/ static void ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static struct ahc_tmode_tstate* ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel); #ifdef AHC_TARGET_MODE static void ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force); #endif static const struct ahc_syncrate* ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahc_update_pending_scbs(struct ahc_softc *ahc); static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_assert_atn(struct ahc_softc *ahc); static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset); static void ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int bus_width); static void ahc_construct_ppr(struct ahc_softc *ahc, struct 
ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahc_clear_msg_state(struct ahc_softc *ahc); static void ahc_handle_proto_violation(struct ahc_softc *ahc); static void ahc_handle_message_phase(struct ahc_softc *ahc); typedef enum { AHCMSG_1B, AHCMSG_2B, AHCMSG_EXT } ahc_msgtype; static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full); static int ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level); #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); #endif static bus_dmamap_callback_t ahc_dmamap_cb; static void ahc_build_free_scb_list(struct ahc_softc *ahc); static int ahc_init_scbdata(struct ahc_softc *ahc); static void ahc_fini_scbdata(struct ahc_softc *ahc); static void ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, struct scb *scb); static int ahc_qinfifo_count(struct ahc_softc *ahc); static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr); static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); static u_int ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev); static void ahc_reset_current_bus(struct ahc_softc *ahc); #ifdef AHC_DUMP_SEQ static void ahc_dumpseq(struct ahc_softc *ahc); #endif static int ahc_loadseq(struct ahc_softc *ahc); static int ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch, u_int start_instr, u_int *skip_addr); static void ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts); #ifdef 
AHC_TARGET_MODE static void ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask); static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd); #endif static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl); static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl); static void ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int busyid); /************************** SCB and SCB queue management **********************/ static void ahc_run_untagged_queues(struct ahc_softc *ahc); static void ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue); /****************************** Initialization ********************************/ static void ahc_alloc_scbs(struct ahc_softc *ahc); static void ahc_shutdown(void *arg); /*************************** Interrupt Services *******************************/ static void ahc_clear_intstat(struct ahc_softc *ahc); static void ahc_run_qoutfifo(struct ahc_softc *ahc); #ifdef AHC_TARGET_MODE static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused); #endif static void ahc_handle_brkadrint(struct ahc_softc *ahc); static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat); static void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat); static void ahc_clear_critical_section(struct ahc_softc *ahc); /***************************** Error Recovery *********************************/ static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb); static int ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); static void ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb); /*********************** Untagged Transaction Routines ************************/ static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc); static inline void 
ahc_release_untagged_queues(struct ahc_softc *ahc); /* * Block our completion routine from starting the next untagged * transaction for this target or target lun. */ static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc) { if ((ahc->flags & AHC_SCB_BTT) == 0) ahc->untagged_queue_lock++; } /* * Allow the next untagged transaction for this target or target lun * to be executed. We use a counting semaphore to allow the lock * to be acquired recursively. Once the count drops to zero, the * transaction queues will be run. */ static inline void ahc_release_untagged_queues(struct ahc_softc *ahc) { if ((ahc->flags & AHC_SCB_BTT) == 0) { ahc->untagged_queue_lock--; if (ahc->untagged_queue_lock == 0) ahc_run_untagged_queues(ahc); } } /************************* Sequencer Execution Control ************************/ /* * Work around any chip bugs related to halting sequencer execution. * On Ultra2 controllers, we must clear the CIOBUS stretch signal by * reading a register that will set this signal and deassert it. * Without this workaround, if the chip is paused, by an interrupt or * manual pause while accessing scb ram, accesses to certain registers * will hang the system (infinite pci retries). */ static void ahc_pause_bug_fix(struct ahc_softc *ahc) { if ((ahc->features & AHC_ULTRA2) != 0) (void)ahc_inb(ahc, CCSCBCTL); } /* * Determine whether the sequencer has halted code execution. * Returns non-zero status if the sequencer is stopped. */ int ahc_is_paused(struct ahc_softc *ahc) { return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0); } /* * Request that the sequencer stop and wait, indefinitely, for it * to stop. The sequencer will only acknowledge that it is paused * once it has reached an instruction boundary and PAUSEDIS is * cleared in the SEQCTL register. The sequencer may use PAUSEDIS * for critical sections. 
*/ void ahc_pause(struct ahc_softc *ahc) { ahc_outb(ahc, HCNTRL, ahc->pause); /* * Since the sequencer can disable pausing in a critical section, we * must loop until it actually stops. */ while (ahc_is_paused(ahc) == 0) ; ahc_pause_bug_fix(ahc); } /* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition. */ void ahc_unpause(struct ahc_softc *ahc) { if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0) ahc_outb(ahc, HCNTRL, ahc->unpause); } /************************** Memory mapping routines ***************************/ static struct ahc_dma_seg * ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr) { int sg_index; sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg); /* sg_list_phys points to entry 1, not 0 */ sg_index++; return (&scb->sg_list[sg_index]); } static uint32_t ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg) { int sg_index; /* sg_list_phys points to entry 1, not 0 */ sg_index = sg - &scb->sg_list[1]; return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list))); } static uint32_t ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index) { return (ahc->scb_data->hscb_busaddr + (sizeof(struct hardware_scb) * index)); } static void ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op) { ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat, ahc->scb_data->hscb_dmamap, /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb), /*len*/sizeof(*scb->hscb), op); } void ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op) { if (scb->sg_count == 0) return; ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap, /*offset*/(scb->sg_list - 
scb->sg_map->sg_vaddr) * sizeof(struct ahc_dma_seg), /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op); } #ifdef AHC_TARGET_MODE static uint32_t ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index) { return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo); } #endif /*********************** Miscellaneous Support Functions ***********************/ /* * Determine whether the sequencer reported a residual * for this SCB/transaction. */ static void ahc_update_residual(struct ahc_softc *ahc, struct scb *scb) { uint32_t sgptr; sgptr = ahc_le32toh(scb->hscb->sgptr); if ((sgptr & SG_RESID_VALID) != 0) ahc_calc_residual(ahc, scb); } /* * Return pointers to the transfer negotiation information * for the specified our_id/remote_id pair. */ struct ahc_initiator_tinfo * ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, u_int remote_id, struct ahc_tmode_tstate **tstate) { /* * Transfer data structures are stored from the perspective * of the target role. Since the parameters for a connection * in the initiator role to a given target are the same as * when the roles are reversed, we pretend we are the target. 
*/ if (channel == 'B') our_id += 8; *tstate = ahc->enabled_targets[our_id]; return (&(*tstate)->transinfo[remote_id]); } uint16_t ahc_inw(struct ahc_softc *ahc, u_int port) { uint16_t r = ahc_inb(ahc, port+1) << 8; return r | ahc_inb(ahc, port); } void ahc_outw(struct ahc_softc *ahc, u_int port, u_int value) { ahc_outb(ahc, port, value & 0xFF); ahc_outb(ahc, port+1, (value >> 8) & 0xFF); } uint32_t ahc_inl(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) | (ahc_inb(ahc, port+3) << 24)); } void ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value) { ahc_outb(ahc, port, (value) & 0xFF); ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF); ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF); ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF); } uint64_t ahc_inq(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) | (ahc_inb(ahc, port+3) << 24) | (((uint64_t)ahc_inb(ahc, port+4)) << 32) | (((uint64_t)ahc_inb(ahc, port+5)) << 40) | (((uint64_t)ahc_inb(ahc, port+6)) << 48) | (((uint64_t)ahc_inb(ahc, port+7)) << 56)); } void ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value) { ahc_outb(ahc, port, value & 0xFF); ahc_outb(ahc, port+1, (value >> 8) & 0xFF); ahc_outb(ahc, port+2, (value >> 16) & 0xFF); ahc_outb(ahc, port+3, (value >> 24) & 0xFF); ahc_outb(ahc, port+4, (value >> 32) & 0xFF); ahc_outb(ahc, port+5, (value >> 40) & 0xFF); ahc_outb(ahc, port+6, (value >> 48) & 0xFF); ahc_outb(ahc, port+7, (value >> 56) & 0xFF); } /* * Get a free scb. If there are none, see if we can allocate a new SCB. 
*/ struct scb * ahc_get_scb(struct ahc_softc *ahc) { struct scb *scb; if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) { ahc_alloc_scbs(ahc); scb = SLIST_FIRST(&ahc->scb_data->free_scbs); if (scb == NULL) return (NULL); } SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle); return (scb); } /* * Return an SCB resource to the free list. */ void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *hscb; hscb = scb->hscb; /* Clean up for the next user */ ahc->scb_data->scbindex[hscb->tag] = NULL; scb->flags = SCB_FREE; hscb->control = 0; SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle); /* Notify the OSM that a resource is now available. */ ahc_platform_scb_free(ahc, scb); } struct scb * ahc_lookup_scb(struct ahc_softc *ahc, u_int tag) { struct scb* scb; scb = ahc->scb_data->scbindex[tag]; if (scb != NULL) ahc_sync_scb(ahc, scb, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); return (scb); } static void ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *q_hscb; u_int saved_tag; /* * Our queuing method is a bit tricky. The card * knows in advance which HSCB to download, and we * can't disappoint it. To achieve this, the next * SCB to download is saved off in ahc->next_queued_scb. * When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG. */ q_hscb = ahc->next_queued_scb->hscb; saved_tag = q_hscb->tag; memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); if ((scb->flags & SCB_CDB32_PTR) != 0) { q_hscb->shared_data.cdb_ptr = ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag) + offsetof(struct hardware_scb, cdb32)); } q_hscb->tag = saved_tag; q_hscb->next = scb->hscb->tag; /* Now swap HSCB pointers. 
 */
	ahc->next_queued_scb->hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahc->scb_data->scbindex[scb->hscb->tag] = scb;
}

/*
 * Tell the sequencer about a new transaction to execute.
 */
void
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_swap_with_next_hscb(ahc, scb);

	if (scb->hscb->tag == SCB_LIST_NULL
	 || scb->hscb->next == SCB_LIST_NULL)
		panic("Attempt to queue invalid SCB tag %x:%x\n",
		      scb->hscb->tag, scb->hscb->next);

	/*
	 * Setup data "oddness".
	 */
	scb->hscb->lun &= LID;
	if (ahc_get_transfer_length(scb) & 0x1)
		scb->hscb->lun |= SCB_XFERLEN_ODD;

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Tell the adapter about the newly queued SCB */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		/* Older chips require a pause around the KERNEL_QINPOS
		 * update unless auto-pause is available. */
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_pause(ahc);
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_unpause(ahc);
	}
}

/*
 * Return the host-visible sense buffer reserved for this SCB.
 */
struct scsi_sense_data *
ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (&ahc->scb_data->sense[offset]);
}

/*
 * Return the bus (DMA) address of the sense buffer for this SCB.
 */
static uint32_t
ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (ahc->scb_data->sense_busaddr
	      + (offset * sizeof(struct scsi_sense_data)));
}

/************************** Interrupt Processing ******************************/
static void
ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
{
	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/0, /*len*/256, op);
}

static void
ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
{
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_dmamap_sync(ahc,
ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, 0), sizeof(struct target_cmd) * AHC_TMODE_CMDS, op); } #endif } /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. */ #define AHC_RUN_QOUTFIFO 0x1 #define AHC_RUN_TQINFIFO 0x2 static u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc) { u_int retval; retval = 0; ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/ahc->qoutfifonext, /*len*/1, BUS_DMASYNC_POSTREAD); if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) retval |= AHC_RUN_QOUTFIFO; #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) { ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, ahc->tqinfifofnext), /*len*/sizeof(struct target_cmd), BUS_DMASYNC_POSTREAD); if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0) retval |= AHC_RUN_TQINFIFO; } #endif return (retval); } /* * Catch an interrupt from the adapter */ int ahc_intr(struct ahc_softc *ahc) { u_int intstat; if ((ahc->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return. This is likely just a shared * interrupt. */ return (0); } /* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases. 
	 */
	if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
	 && (ahc_check_cmdcmpltqueues(ahc) != 0))
		intstat = CMDCMPLT;
	else {
		intstat = ahc_inb(ahc, INTSTAT);
	}

	if ((intstat & INT_PEND) == 0) {
#if AHC_PCI_CONFIG > 0
		/* Repeated spurious interrupts may indicate a latched
		 * PCI error; let the bus-specific handler look. */
		if (ahc->unsolicited_ints > 500) {
			ahc->unsolicited_ints = 0;
			if ((ahc->chip & AHC_PCI) != 0
			 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc->bus_intr(ahc);
		}
#endif
		ahc->unsolicited_ints++;
		return (0);
	}
	ahc->unsolicited_ints = 0;

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		ahc_flush_device_writes(ahc);
		ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & BRKADRINT) {
		ahc_handle_brkadrint(ahc);
	} else if ((intstat & (SEQINT|SCSIINT)) != 0) {

		ahc_pause_bug_fix(ahc);

		if ((intstat & SEQINT) != 0)
			ahc_handle_seqint(ahc, intstat);

		if ((intstat & SCSIINT) != 0)
			ahc_handle_scsiint(ahc, intstat);
	}
	return (1);
}

/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero
 */
static void
ahc_restart(struct ahc_softc *ahc)
{
	uint8_t	sblkctl;

	ahc_pause(ahc);

	/* No more pending messages.
 */
	ahc_clear_msg_state(ahc);

	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
	ahc_outb(ahc, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);

	/* Always allow reselection */
	ahc_outb(ahc, SCSISEQ,
		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	if ((ahc->features & AHC_CMD_CHAN) != 0) {
		/* Ensure that no DMA operations are in progress */
		ahc_outb(ahc, CCSCBCNT, 0);
		ahc_outb(ahc, CCSGCTL, 0);
		ahc_outb(ahc, CCSCBCTL, 0);
	}
	/*
	 * If we were in the process of DMA'ing SCB data into
	 * an SCB, replace that SCB on the free list.  This prevents
	 * an SCB leak.
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
		ahc_add_curscb_to_free_list(ahc);
		ahc_outb(ahc, SEQ_FLAGS2,
			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
	}

	/*
	 * Clear any pending sequencer interrupt.  It is no
	 * longer relevant since we're resetting the Program
	 * Counter.
	 */
	ahc_outb(ahc, CLRINT, CLRSEQINT);

	ahc_outb(ahc, MWI_RESIDUAL, 0);
	ahc_outb(ahc, SEQCTL, ahc->seqctl);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	/*
	 * Take the LED out of diagnostic mode on PM resume, too
	 */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));

	ahc_unpause(ahc);
}

/************************* Input/Output Queues ********************************/
/*
 * Drain the in-core command-complete fifo, completing each SCB the
 * sequencer has posted there.
 */
static void
ahc_run_qoutfifo(struct ahc_softc *ahc)
{
	struct scb *scb;
	u_int  scb_index;

	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {

		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
		if ((ahc->qoutfifonext & 0x03) == 0x03) {
			u_int modnext;

			/*
			 * Clear 32bits of QOUTFIFO at a time
			 * so that we don't clobber an incoming
			 * byte DMA to the array on architectures
			 * that only support 32bit load and store
			 * operations.
			 */
			modnext = ahc->qoutfifonext & ~0x3;
			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
			ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
					ahc->shared_data_dmamap,
					/*offset*/modnext, /*len*/4,
					BUS_DMASYNC_PREREAD);
		}
		ahc->qoutfifonext++;

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printk("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahc_name(ahc), scb_index,
			       (ahc->qoutfifonext - 1) & 0xFF);
			continue;
		}

		/*
		 * Save off the residual
		 * if there is one.
		 */
		ahc_update_residual(ahc, scb);
		ahc_done(ahc, scb);
	}
}

/*
 * Kick each of the sixteen per-target untagged queues.
 */
static void
ahc_run_untagged_queues(struct ahc_softc *ahc)
{
	int i;

	for (i = 0; i < 16; i++)
		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
}

/*
 * Start the next untagged transaction on this queue, unless the queue
 * is locked or its head is already active.
 */
static void
ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
{
	struct scb *scb;

	if (ahc->untagged_queue_lock != 0)
		return;

	if ((scb = TAILQ_FIRST(queue)) != NULL
	 && (scb->flags & SCB_ACTIVE) == 0) {
		scb->flags |= SCB_ACTIVE;
		ahc_queue_scb(ahc, scb);
	}
}

/************************* Interrupt Handling *********************************/
static void
ahc_handle_brkadrint(struct ahc_softc *ahc)
{
	/*
	 * We upset the sequencer :-(
	 * Lookup the error message
	 */
	int i;
	int error;

	error = ahc_inb(ahc, ERROR);
	/* ERROR is one-hot; find the set bit's index into the table. */
	for (i = 0; error != 1 && i < num_errors; i++)
		error >>= 1;
	printk("%s: brkadrint, %s at seqaddr = 0x%x\n",
	       ahc_name(ahc), ahc_hard_errors[i].errmesg,
	       ahc_inb(ahc, SEQADDR0) |
	       (ahc_inb(ahc, SEQADDR1) << 8));

	ahc_dump_card_state(ahc);

	/* Tell everyone that this HBA is no longer available */
	ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
		       CAM_NO_HBA);

	/* Disable all interrupt sources by resetting the controller */
	ahc_shutdown(ahc);
}

/*
 * Service a SEQINT: the sequencer has paused itself and posted a status
 * code in the upper bits of INTSTAT for the host to act on.
 */
static void
ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
{
	struct scb *scb;
	struct ahc_devinfo devinfo;

	ahc_fetch_devinfo(ahc, &devinfo);

	/*
	 * Clear the upper byte that holds SEQINT status
	 * codes and clear the SEQINT bit. We will unpause
	 * the sequencer, if appropriate, after servicing
	 * the request.
	 */
	ahc_outb(ahc, CLRINT, CLRSEQINT);
	switch (intstat & SEQINT_MASK) {
	case BAD_STATUS:
	{
		u_int  scb_index;
		struct hardware_scb *hscb;

		/*
		 * Set the default return value to 0 (don't
		 * send sense).  The sense code will change
		 * this if needed.
		 */
		ahc_outb(ahc, RETURN_1, 0);

		/*
		 * The sequencer will notify us when a command
		 * has an error that would be of interest to
		 * the kernel.
		 * This allows us to leave the sequencer
		 * running in the common case of command completes
		 * without error.  The sequencer will already have
		 * dma'd the SCB back up to us, so we can reference
		 * the in kernel copy directly.
		 */
		scb_index = ahc_inb(ahc, SCB_TAG);
		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			ahc_print_devinfo(ahc, &devinfo);
			printk("ahc_intr - referenced scb "
			       "not valid during seqint 0x%x scb(%d)\n",
			       intstat, scb_index);
			ahc_dump_card_state(ahc);
			panic("for safety");
			goto unpause;
		}

		hscb = scb->hscb;

		/* Don't want to clobber the original sense code */
		if ((scb->flags & SCB_SENSE) != 0) {
			/*
			 * Clear the SCB_SENSE Flag and have
			 * the sequencer do a normal command
			 * complete.
			 */
			scb->flags &= ~SCB_SENSE;
			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
			break;
		}
		ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
		/* Freeze the queue until the client sees the error. */
		ahc_freeze_devq(ahc, scb);
		ahc_freeze_scb(scb);
		ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
		switch (hscb->shared_data.status.scsi_status) {
		case SCSI_STATUS_OK:
			printk("%s: Interrupted for status of 0???\n",
			       ahc_name(ahc));
			break;
		case SCSI_STATUS_CMD_TERMINATED:
		case SCSI_STATUS_CHECK_COND:
		{
			struct ahc_dma_seg *sg;
			struct scsi_sense *sc;
			struct ahc_initiator_tinfo *targ_info;
			struct ahc_tmode_tstate *tstate;
			struct ahc_transinfo *tinfo;
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOW_SENSE) {
				ahc_print_path(ahc, scb);
				printk("SCB %d: requests Check Status\n",
				       scb->hscb->tag);
			}
#endif

			if (ahc_perform_autosense(scb) == 0)
				break;

			targ_info = ahc_fetch_transinfo(ahc,
							devinfo.channel,
							devinfo.our_scsiid,
							devinfo.target,
							&tstate);
			tinfo = &targ_info->curr;
			sg = scb->sg_list;
			/* Reuse the HSCB's CDB area for the REQUEST SENSE CDB. */
			sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
			/*
			 * Save off the residual if there is one.
			 */
			ahc_update_residual(ahc, scb);
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOW_SENSE) {
				ahc_print_path(ahc, scb);
				printk("Sending Sense\n");
			}
#endif
			sg->addr = ahc_get_sense_bufaddr(ahc, scb);
			sg->len = ahc_get_sense_bufsize(ahc, scb);
			sg->len |= AHC_DMA_LAST_SEG;

			/* Fixup byte order */
			sg->addr = ahc_htole32(sg->addr);
			sg->len = ahc_htole32(sg->len);

			sc->opcode = REQUEST_SENSE;
			sc->byte2 = 0;
			/* Pre-SCSI-3 devices take the LUN in the CDB. */
			if (tinfo->protocol_version <= SCSI_REV_2
			 && SCB_GET_LUN(scb) < 8)
				sc->byte2 = SCB_GET_LUN(scb) << 5;
			sc->unused[0] = 0;
			sc->unused[1] = 0;
			sc->length = sg->len;
			sc->control = 0;

			/*
			 * We can't allow the target to disconnect.
			 * This will be an untagged transaction and
			 * having the target disconnect will make this
			 * transaction indistinguishable from outstanding
			 * tagged transactions.
			 */
			hscb->control = 0;

			/*
			 * This request sense could be because the
			 * device lost power or in some other
			 * way has lost our transfer negotiations.
			 * Renegotiate if appropriate.  Unit attention
			 * errors will be reported before any data
			 * phases occur.
			 */
			if (ahc_get_residual(scb)
			 == ahc_get_transfer_length(scb)) {
				ahc_update_neg_request(ahc, &devinfo,
						       tstate, targ_info,
						       AHC_NEG_IF_NON_ASYNC);
			}
			if (tstate->auto_negotiate & devinfo.target_mask) {
				hscb->control |= MK_MESSAGE;
				scb->flags &= ~SCB_NEGOTIATE;
				scb->flags |= SCB_AUTO_NEGOTIATE;
			}
			hscb->cdb_len = sizeof(*sc);
			hscb->dataptr = sg->addr;
			hscb->datacnt = sg->len;
			hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
			hscb->sgptr = ahc_htole32(hscb->sgptr);
			scb->sg_count = 1;
			scb->flags |= SCB_SENSE;
			ahc_qinfifo_requeue_tail(ahc, scb);
			ahc_outb(ahc, RETURN_1, SEND_SENSE);
			/*
			 * Ensure we have enough time to actually
			 * retrieve the sense.
			 */
			ahc_scb_timer_reset(scb, 5 * 1000000);
			break;
		}
		default:
			break;
		}
		break;
	}
	case NO_MATCH:
	{
		/* Ensure we don't leave the selection hardware on */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

		printk("%s:%c:%d: no active SCB for reconnecting "
		       "target - issuing BUS DEVICE RESET\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target);
		printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
		printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n",
		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
		       ahc_index_busy_tcl(ahc,
			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
				      ahc_inb(ahc, SAVED_LUN))),
		       ahc_inb(ahc, SINDEX));
		printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
		       ahc_inb(ahc, SCB_CONTROL));
		printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
		printk("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
		printk("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
		ahc_dump_card_state(ahc);
		/* Queue a BUS DEVICE RESET message to the offending target. */
		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
		ahc->msgout_len = 1;
		ahc->msgout_index = 0;
		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		ahc_assert_atn(ahc);
		break;
	}
	case SEND_REJECT:
	{
		u_int rejbyte = ahc_inb(ahc, ACCUM);
		printk("%s:%c:%d: Warning - unknown message received from "
		       "target (0x%x).  Rejecting\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
		break;
	}
	case PROTO_VIOLATION:
	{
		ahc_handle_proto_violation(ahc);
		break;
	}
	case IGN_WIDE_RES:
		ahc_handle_ign_wide_residue(ahc, &devinfo);
		break;
	case PDATA_REINIT:
		ahc_reinitialize_dataptrs(ahc);
		break;
	case BAD_PHASE:
	{
		u_int lastphase;

		lastphase = ahc_inb(ahc, LASTPHASE);
		printk("%s:%c:%d: unknown scsi bus phase %x, "
		       "lastphase = 0x%x.  Attempting to continue\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
		break;
	}
	case MISSED_BUSFREE:
	{
		u_int lastphase;

		lastphase = ahc_inb(ahc, LASTPHASE);
		printk("%s:%c:%d: Missed busfree. "
		       "Lastphase = 0x%x, Curphase = 0x%x\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
		ahc_restart(ahc);
		return;
	}
	case HOST_MSG_LOOP:
	{
		/*
		 * The sequencer has encountered a message phase
		 * that requires host assistance for completion.
		 * While handling the message phase(s), we will be
		 * notified by the sequencer after each byte is
		 * transferred so we can track bus phase changes.
		 *
		 * If this is the first time we've seen a HOST_MSG_LOOP
		 * interrupt, initialize the state of the host message
		 * loop.
		 */
		if (ahc->msg_type == MSG_TYPE_NONE) {
			struct scb *scb;
			u_int scb_index;
			u_int bus_phase;

			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
			if (bus_phase != P_MESGIN
			 && bus_phase != P_MESGOUT) {
				printk("ahc_intr: HOST_MSG_LOOP bad "
				       "phase 0x%x\n",
				      bus_phase);
				/*
				 * Probably transitioned to bus free before
				 * we got here.  Just punt the message.
				 */
				ahc_clear_intstat(ahc);
				ahc_restart(ahc);
				return;
			}

			scb_index = ahc_inb(ahc, SCB_TAG);
			scb = ahc_lookup_scb(ahc, scb_index);
			if (devinfo.role == ROLE_INITIATOR) {
				if (bus_phase == P_MESGOUT) {
					if (scb == NULL)
						panic("HOST_MSG_LOOP with "
						      "invalid SCB %x\n",
						      scb_index);
					ahc_setup_initiator_msgout(ahc,
								   &devinfo,
								   scb);
				} else {
					ahc->msg_type =
					    MSG_TYPE_INITIATOR_MSGIN;
					ahc->msgin_index = 0;
				}
			}
#ifdef AHC_TARGET_MODE
			else {
				if (bus_phase == P_MESGOUT) {
					ahc->msg_type =
					    MSG_TYPE_TARGET_MSGOUT;
					ahc->msgin_index = 0;
				}
				else
					ahc_setup_target_msgin(ahc,
							       &devinfo,
							       scb);
			}
#endif
		}
		ahc_handle_message_phase(ahc);
		break;
	}
	case PERR_DETECTED:
	{
		/*
		 * If we've cleared the parity error interrupt
		 * but the sequencer still believes that SCSIPERR
		 * is true, it must be that the parity error is
		 * for the currently presented byte on the bus,
		 * and we are not in a phase (data-in) where we will
		 * eventually ack this byte.  Ack the byte and
		 * throw it away in the hope that the target will
		 * take us to message out to deliver the appropriate
		 * error message.
		 */
		if ((intstat & SCSIINT) == 0
		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {

			if ((ahc->features & AHC_DT) == 0) {
				u_int curphase;

				/*
				 * The hardware will only let you ack bytes
				 * if the expected phase in SCSISIGO matches
				 * the current phase.  Make sure this is
				 * currently the case.
				 */
				curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
				ahc_outb(ahc, LASTPHASE, curphase);
				ahc_outb(ahc, SCSISIGO, curphase);
			}
			if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) {
				int wait;

				/*
				 * In a data phase.  Faster to bitbucket
				 * the data than to individually ack each
				 * byte.  This is also the only strategy
				 * that will work with AUTOACK enabled.
				 */
				ahc_outb(ahc, SXFRCTL1,
					 ahc_inb(ahc, SXFRCTL1) | BITBUCKET);
				/* Poll for up to 500ms for a phase change. */
				wait = 5000;
				while (--wait != 0) {
					if ((ahc_inb(ahc, SCSISIGI)
					  & (CDI|MSGI)) != 0)
						break;
					ahc_delay(100);
				}
				ahc_outb(ahc, SXFRCTL1,
					 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
				if (wait == 0) {
					struct	scb *scb;
					u_int	scb_index;

					ahc_print_devinfo(ahc, &devinfo);
					printk("Unable to clear parity error.  "
					       "Resetting bus.\n");
					scb_index = ahc_inb(ahc, SCB_TAG);
					scb = ahc_lookup_scb(ahc, scb_index);
					if (scb != NULL)
						ahc_set_transaction_status(scb,
						    CAM_UNCOR_PARITY);
					ahc_reset_channel(ahc, devinfo.channel,
							  /*init reset*/TRUE);
				}
			} else {
				/* Ack and discard the single offending byte. */
				ahc_inb(ahc, SCSIDATL);
			}
		}
		break;
	}
	case DATA_OVERRUN:
	{
		/*
		 * When the sequencer detects an overrun, it
		 * places the controller in "BITBUCKET" mode
		 * and allows the target to complete its transfer.
		 * Unfortunately, none of the counters get updated
		 * when the controller is in this mode, so we have
		 * no way of knowing how large the overrun was.
		 */
		u_int scbindex = ahc_inb(ahc, SCB_TAG);
		u_int lastphase = ahc_inb(ahc, LASTPHASE);
		u_int i;

		scb = ahc_lookup_scb(ahc, scbindex);
		for (i = 0; i < num_phases; i++) {
			if (lastphase == ahc_phase_table[i].phase)
				break;
		}
		ahc_print_path(ahc, scb);
		printk("data overrun detected %s."
		       "  Tag == 0x%x.\n",
		       ahc_phase_table[i].phasemsg,
  		       scb->hscb->tag);
		ahc_print_path(ahc, scb);
		printk("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
		       ahc_get_transfer_length(scb), scb->sg_count);
		if (scb->sg_count > 0) {
			for (i = 0; i < scb->sg_count; i++) {

				printk("sg[%d] - Addr 0x%x%x : Length %d\n",
				       i,
				       (ahc_le32toh(scb->sg_list[i].len) >> 24
				        & SG_HIGH_ADDR_BITS),
				       ahc_le32toh(scb->sg_list[i].addr),
				       ahc_le32toh(scb->sg_list[i].len)
				       & AHC_SG_LEN_MASK);
			}
		}
		/*
		 * Set this and it will take effect when the
		 * target does a command complete.
		 */
		ahc_freeze_devq(ahc, scb);
		if ((scb->flags & SCB_SENSE) == 0) {
			ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		} else {
			scb->flags &= ~SCB_SENSE;
			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
		}
		ahc_freeze_scb(scb);

		if ((ahc->features & AHC_ULTRA2) != 0) {
			/*
			 * Clear the channel in case we return
			 * to data phase later.
			 */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
		}
		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
			u_int dscommand1;

			/* Ensure HHADDR is 0 for future DMA operations. */
			dscommand1 = ahc_inb(ahc, DSCOMMAND1);
			ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
			ahc_outb(ahc, HADDR, 0);
			ahc_outb(ahc, DSCOMMAND1, dscommand1);
		}
		break;
	}
	case MKMSG_FAILED:
	{
		u_int scbindex;

		printk("%s:%c:%d:%d: Attempt to issue message failed\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       devinfo.lun);
		scbindex = ahc_inb(ahc, SCB_TAG);
		scb = ahc_lookup_scb(ahc, scbindex);
		if (scb != NULL
		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
			/*
			 * Ensure that we didn't put a second instance of this
			 * SCB into the QINFIFO.
			 */
			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
					   SCB_GET_CHANNEL(ahc, scb),
					   SCB_GET_LUN(scb), scb->hscb->tag,
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		break;
	}
	case NO_FREE_SCB:
	{
		printk("%s: No free or disconnected SCBs\n", ahc_name(ahc));
		ahc_dump_card_state(ahc);
		panic("for safety");
		break;
	}
	case SCB_MISMATCH:
	{
		u_int scbptr;

		scbptr = ahc_inb(ahc, SCBPTR);
		printk("Bogus TAG after DMA.  SCBPTR %d, tag %d, our tag %d\n",
		       scbptr, ahc_inb(ahc, ARG_1),
		       ahc->scb_data->hscbs[scbptr].tag);
		ahc_dump_card_state(ahc);
		panic("for safety");
		break;
	}
	case OUT_OF_RANGE:
	{
		printk("%s: BTT calculation out of range\n", ahc_name(ahc));
		printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
		printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n, A == 0x%x\n",
		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
		       ahc_index_busy_tcl(ahc,
			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
				      ahc_inb(ahc, SAVED_LUN))),
		       ahc_inb(ahc, SINDEX),
		       ahc_inb(ahc, ACCUM));
		printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
		       ahc_inb(ahc, SCB_CONTROL));
		printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
		ahc_dump_card_state(ahc);
		panic("for safety");
		break;
	}
	default:
		printk("ahc_intr: seqint, "
		       "intstat == 0x%x, scsisigi = 0x%x\n",
		       intstat, ahc_inb(ahc, SCSISIGI));
		break;
	}
unpause:
	/*
	 * The sequencer is paused immediately on
	 * a SEQINT, so we should restart it when
	 * we're done.
	 */
	ahc_unpause(ahc);
}

/*
 * Service a SCSIINT: decode which bus condition (transceiver change,
 * bus reset, parity error, selection timeout, or unexpected busfree)
 * raised the interrupt and dispatch to the appropriate recovery path.
 */
static void
ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
{
	u_int	scb_index;
	u_int	status0;
	u_int	status;
	struct	scb *scb;
	char	cur_channel;
	char	intr_channel;

	if ((ahc->features & AHC_TWIN) != 0
	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
		cur_channel = 'B';
	else
		cur_channel = 'A';
	intr_channel = cur_channel;

	if ((ahc->features & AHC_ULTRA2) != 0)
		status0 = ahc_inb(ahc, SSTAT0) & IOERR;
	else
		status0 = 0;
	status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
	if (status == 0 && status0 == 0) {
		if ((ahc->features & AHC_TWIN) != 0) {
			/* Try the other channel */
		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
			status = ahc_inb(ahc, SSTAT1)
			       & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
		}
		if (status == 0) {
			printk("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_unpause(ahc);
			return;
		}
	}

	/* Make sure the sequencer is in a safe location. */
	ahc_clear_critical_section(ahc);

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	if (scb != NULL
	 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
		/* Connection not yet identified; SCB_TAG is stale. */
		scb = NULL;

	if ((ahc->features & AHC_ULTRA2) != 0
	 && (status0 & IOERR) != 0) {
		int now_lvd;

		now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
		printk("%s: Transceiver State Has Changed to %s mode\n",
		       ahc_name(ahc), now_lvd ? "LVD" : "SE");
		ahc_outb(ahc, CLRSINT0, CLRIOERR);
		/*
		 * When transitioning to SE mode, the reset line
		 * glitches, triggering an arbitration bug in some
		 * Ultra2 controllers.  This bug is cleared when we
		 * assert the reset line.  Since a reset glitch has
		 * already occurred with this transition and a
		 * transceiver state change is handled just like
		 * a bus reset anyway, asserting the reset line
		 * ourselves is safe.
		 */
		ahc_reset_channel(ahc, intr_channel,
				 /*Initiate Reset*/now_lvd == 0);
	} else if ((status & SCSIRSTI) != 0) {
		printk("%s: Someone reset channel %c\n",
			ahc_name(ahc), intr_channel);
		if (intr_channel != cur_channel)
		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
		ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
	} else if ((status & SCSIPERR) != 0) {
		/*
		 * Determine the bus phase and queue an appropriate message.
		 * SCSIPERR is latched true as soon as a parity error
		 * occurs.  If the sequencer acked the transfer that
		 * caused the parity error and the currently presented
		 * transfer on the bus has correct parity, SCSIPERR will
		 * be cleared by CLRSCSIPERR.  Use this to determine if
		 * we should look at the last phase the sequencer recorded,
		 * or the current phase presented on the bus.
		 */
		struct	ahc_devinfo devinfo;
		u_int	mesg_out;
		u_int	curphase;
		u_int	errorphase;
		u_int	lastphase;
		u_int	scsirate;
		u_int	i;
		u_int	sstat2;
		int	silent;

		lastphase = ahc_inb(ahc, LASTPHASE);
		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
		sstat2 = ahc_inb(ahc, SSTAT2);
		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
		/*
		 * For all phases save DATA, the sequencer won't
		 * automatically ack a byte that has a parity error
		 * in it.  So the only way that the current phase
		 * could be 'data-in' is if the parity error is for
		 * an already acked byte in the data phase.  During
		 * synchronous data-in transfers, we may actually
		 * ack bytes before latching the current phase in
		 * LASTPHASE, leading to the discrepancy between
		 * curphase and lastphase.
*/ if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 || curphase == P_DATAIN || curphase == P_DATAIN_DT) errorphase = curphase; else errorphase = lastphase; for (i = 0; i < num_phases; i++) { if (errorphase == ahc_phase_table[i].phase) break; } mesg_out = ahc_phase_table[i].mesg_out; silent = FALSE; if (scb != NULL) { if (SCB_IS_SILENT(scb)) silent = TRUE; else ahc_print_path(ahc, scb); scb->flags |= SCB_TRANSMISSION_ERROR; } else printk("%s:%c:%d: ", ahc_name(ahc), intr_channel, SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); scsirate = ahc_inb(ahc, SCSIRATE); if (silent == FALSE) { printk("parity error detected %s. " "SEQADDR(0x%x) SCSIRATE(0x%x)\n", ahc_phase_table[i].phasemsg, ahc_inw(ahc, SEQADDR0), scsirate); if ((ahc->features & AHC_DT) != 0) { if ((sstat2 & CRCVALERR) != 0) printk("\tCRC Value Mismatch\n"); if ((sstat2 & CRCENDERR) != 0) printk("\tNo terminal CRC packet " "recevied\n"); if ((sstat2 & CRCREQERR) != 0) printk("\tIllegal CRC packet " "request\n"); if ((sstat2 & DUAL_EDGE_ERR) != 0) printk("\tUnexpected %sDT Data Phase\n", (scsirate & SINGLE_EDGE) ? "" : "non-"); } } if ((ahc->features & AHC_DT) != 0 && (sstat2 & DUAL_EDGE_ERR) != 0) { /* * This error applies regardless of * data direction, so ignore the value * in the phase table. */ mesg_out = MSG_INITIATOR_DET_ERR; } /* * We've set the hardware to assert ATN if we * get a parity error on "in" phases, so all we * need to do is stuff the message buffer with * the appropriate message. "In" phases have set * mesg_out to something other than MSG_NOP. */ if (mesg_out != MSG_NOOP) { if (ahc->msg_type != MSG_TYPE_NONE) ahc->send_msg_perror = TRUE; else ahc_outb(ahc, MSG_OUT, mesg_out); } /* * Force a renegotiation with this target just in * case we are out of sync for some external reason * unknown (or unreported) by the target. 
		 */
		ahc_fetch_devinfo(ahc, &devinfo);
		ahc_force_renegotiation(ahc, &devinfo);

		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_unpause(ahc);
	} else if ((status & SELTO) != 0) {
		u_int	scbptr;

		/* Stop the selection */
		ahc_outb(ahc, SCSISEQ, 0);

		/* No more pending messages */
		ahc_clear_msg_state(ahc);

		/* Clear interrupt state */
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to insure
		 * the LED turns off just incase no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahc_outb(ahc, CLRSINT0, CLRSELINGO);

		/* The SCB that timed out is at the head of the waiting list. */
		scbptr = ahc_inb(ahc, WAITING_SCBH);
		ahc_outb(ahc, SCBPTR, scbptr);
		scb_index = ahc_inb(ahc, SCB_TAG);

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printk("%s: ahc_intr - referenced scb not "
			       "valid during SELTO scb(%d, %d)\n",
			       ahc_name(ahc), scbptr, scb_index);
			ahc_dump_card_state(ahc);
		} else {
			struct ahc_devinfo devinfo;
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
				ahc_print_path(ahc, scb);
				printk("Saw Selection Timeout for SCB 0x%x\n",
				       scb_index);
			}
#endif
			ahc_scb_devinfo(ahc, &devinfo, scb);
			ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahc_freeze_devq(ahc, scb);

			/*
			 * Cancel any pending transactions on the device
			 * now that it seems to be missing.  This will
			 * also revert us to async/narrow transfers until
			 * we can renegotiate with the device.
			 */
			ahc_handle_devreset(ahc, &devinfo,
					    CAM_SEL_TIMEOUT,
					    "Selection Timeout",
					    /*verbose_level*/1);
		}
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_restart(ahc);
	} else if ((status & BUSFREE) != 0
		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
		struct	ahc_devinfo devinfo;
		u_int	lastphase;
		u_int	saved_scsiid;
		u_int	saved_lun;
		u_int	target;
		u_int	initiator_role_id;
		char	channel;
		int	printerror;

		/*
		 * Clear our selection hardware as soon as possible.
		 * We may have an entry in the waiting Q for this target,
		 * that is affected by this busfree and we don't want to
		 * go about selecting the target while we handle the event.
		 */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

		/*
		 * Disable busfree interrupts and clear the busfree
		 * interrupt status.  We do this here so that several
		 * bus transactions occur prior to clearing the SCSIINT
		 * latch.  It can take a bit for the clearing to take effect.
		 */
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Look at what phase we were last in.
		 * If its message out, chances are pretty good
		 * that the busfree was in response to one of
		 * our abort requests.
		 */
		lastphase = ahc_inb(ahc, LASTPHASE);
		saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
		saved_lun = ahc_inb(ahc, SAVED_LUN);
		target = SCSIID_TARGET(ahc, saved_scsiid);
		initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
		channel = SCSIID_CHANNEL(ahc, saved_scsiid);
		ahc_compile_devinfo(&devinfo, initiator_role_id,
				    target, saved_lun, channel, ROLE_INITIATOR);
		printerror = 1;

		if (lastphase == P_MESGOUT) {
			u_int tag;

			tag = SCB_LIST_NULL;
			if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
			 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
				if (ahc->msgout_buf[ahc->msgout_index - 1]
				 == MSG_ABORT_TAG)
					tag = scb->hscb->tag;
				ahc_print_path(ahc, scb);
				printk("SCB %d - Abort%s Completed.\n",
				       scb->hscb->tag, tag == SCB_LIST_NULL ?
				       "" : " Tag");
				ahc_abort_scbs(ahc, target, channel,
					       saved_lun, tag,
					       ROLE_INITIATOR,
					       CAM_REQ_ABORTED);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_1B,
						MSG_BUS_DEV_RESET, TRUE)) {
#ifdef __FreeBSD__
				/*
				 * Don't mark the user's request for this BDR
				 * as completing with CAM_BDR_SENT.  CAM3
				 * specifies CAM_REQ_CMP.
				 */
				if (scb != NULL
				 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
				 && ahc_match_scb(ahc, scb, target, channel,
						  CAM_LUN_WILDCARD,
						  SCB_LIST_NULL,
						  ROLE_INITIATOR)) {
					ahc_set_transaction_status(scb, CAM_REQ_CMP);
				}
#endif
				ahc_compile_devinfo(&devinfo,
						    initiator_role_id,
						    target,
						    CAM_LUN_WILDCARD,
						    channel,
						    ROLE_INITIATOR);
				ahc_handle_devreset(ahc, &devinfo,
						    CAM_BDR_SENT,
						    "Bus Device Reset",
						    /*verbose_level*/0);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_PPR, FALSE)) {
				struct ahc_initiator_tinfo *tinfo;
				struct ahc_tmode_tstate *tstate;

				/*
				 * PPR Rejected.  Try non-ppr negotiation
				 * and retry command.
				 */
				tinfo = ahc_fetch_transinfo(ahc,
							    devinfo.channel,
							    devinfo.our_scsiid,
							    devinfo.target,
							    &tstate);
				/* Drop back to SPI-2 style negotiation. */
				tinfo->curr.transport_version = 2;
				tinfo->goal.transport_version = 2;
				tinfo->goal.ppr_options = 0;
				ahc_qinfifo_requeue_tail(ahc, scb);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_WDTR, FALSE)) {
				/*
				 * Negotiation Rejected.  Go-narrow and
				 * retry command.
				 */
				ahc_set_width(ahc, &devinfo,
					      MSG_EXT_WDTR_BUS_8_BIT,
					      AHC_TRANS_CUR|AHC_TRANS_GOAL,
					      /*paused*/TRUE);
				ahc_qinfifo_requeue_tail(ahc, scb);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_SDTR, FALSE)) {
				/*
				 * Negotiation Rejected.  Go-async and
				 * retry command.
*/ ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } } if (printerror != 0) { u_int i; if (scb != NULL) { u_int tag; if ((scb->hscb->control & TAG_ENB) != 0) tag = scb->hscb->tag; else tag = SCB_LIST_NULL; ahc_print_path(ahc, scb); ahc_abort_scbs(ahc, target, channel, SCB_GET_LUN(scb), tag, ROLE_INITIATOR, CAM_UNEXP_BUSFREE); } else { /* * We had not fully identified this connection, * so we cannot abort anything. */ printk("%s: ", ahc_name(ahc)); } for (i = 0; i < num_phases; i++) { if (lastphase == ahc_phase_table[i].phase) break; } if (lastphase != P_BUSFREE) { /* * Renegotiate with this device at the * next opportunity just in case this busfree * is due to a negotiation mismatch with the * device. */ ahc_force_renegotiation(ahc, &devinfo); } printk("Unexpected busfree %s\n" "SEQADDR == 0x%x\n", ahc_phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); } ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_restart(ahc); } else { printk("%s: Missing case in ahc_handle_scsiint. status = %x\n", ahc_name(ahc), status); ahc_outb(ahc, CLRINT, CLRSCSIINT); } } /* * Force renegotiation to occur the next time we initiate * a command to the current device. 
 */
static void
ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;

	/* Look up the per-target transfer info for this channel/id/target. */
	targ_info = ahc_fetch_transinfo(ahc,
					devinfo->channel,
					devinfo->our_scsiid,
					devinfo->target,
					&tstate);
	ahc_update_neg_request(ahc, devinfo, tstate,
			       targ_info, AHC_NEG_IF_NON_ASYNC);
}

/* Upper bound on single-step iterations before we declare the sequencer hung. */
#define AHC_MAX_STEPS 2000

/*
 * Single-step the sequencer out of any firmware critical section.
 * The sequencer must not be left paused inside a region listed in
 * ahc->critical_sections, so we step it one instruction at a time
 * (SEQCTL | STEP) with interrupt sources masked until SEQADDR falls
 * outside every critical section, then restore SIMODE0/SIMODE1/SEQCTL.
 * Panics if AHC_MAX_STEPS is exceeded.
 */
static void
ahc_clear_critical_section(struct ahc_softc *ahc)
{
	int	stepping;
	int	steps;
	u_int	simode0;
	u_int	simode1;

	if (ahc->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	simode0 = 0;
	simode1 = 0;
	for (;;) {
		struct	cs *cs;
		u_int	seqaddr;
		u_int	i;

		seqaddr = ahc_inb(ahc, SEQADDR0)
			| (ahc_inb(ahc, SEQADDR1) << 8);

		/*
		 * Seqaddr represents the next instruction to execute,
		 * so we are really executing the instruction just
		 * before it.
		 */
		if (seqaddr != 0)
			seqaddr -= 1;
		cs = ahc->critical_sections;
		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		/* Not inside any critical section - we are done. */
		if (i == ahc->num_critical_sections)
			break;

		if (steps > AHC_MAX_STEPS) {
			printk("%s: Infinite loop in critical section\n",
			       ahc_name(ahc));
			ahc_dump_card_state(ahc);
			panic("critical section loop");
		}

		steps++;
		if (stepping == FALSE) {

			/*
			 * Disable all interrupt sources so that the
			 * sequencer will not be stuck by a pausing
			 * interrupt condition while we attempt to
			 * leave a critical section.
			 */
			simode0 = ahc_inb(ahc, SIMODE0);
			ahc_outb(ahc, SIMODE0, 0);
			simode1 = ahc_inb(ahc, SIMODE1);
			if ((ahc->features & AHC_DT) != 0)
				/*
				 * On DT class controllers, we
				 * use the enhanced busfree logic.
				 * Unfortunately we cannot re-enable
				 * busfree detection within the
				 * current connection, so we must
				 * leave it on while single stepping.
				 */
				ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE);
			else
				ahc_outb(ahc, SIMODE1, 0);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP);
			stepping = TRUE;
		}
		if ((ahc->features & AHC_DT) != 0) {
			ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
		}
		/* Unpause for one instruction, then wait for the re-pause. */
		ahc_outb(ahc, HCNTRL, ahc->unpause);
		while (!ahc_is_paused(ahc))
			ahc_delay(200);
	}
	if (stepping) {
		/* Restore the interrupt masks and leave single-step mode. */
		ahc_outb(ahc, SIMODE0, simode0);
		ahc_outb(ahc, SIMODE1, simode1);
		ahc_outb(ahc, SEQCTL, ahc->seqctl);
	}
}

/*
 * Clear any pending interrupt status.
 */
static void
ahc_clear_intstat(struct ahc_softc *ahc)
{
	/* Clear any interrupt conditions this may have caused */
	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
				CLRREQINIT);
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRINT, CLRSCSIINT);
	ahc_flush_device_writes(ahc);
}

/**************************** Debugging Routines ******************************/
#ifdef AHC_DEBUG
uint32_t ahc_debug = AHC_DEBUG_OPTS;
#endif

#if 0 /* unused */
/*
 * Debug helper: dump the host view of an SCB (control/scsiid/lun/cdb,
 * shared data bytes, DMA pointers and the S/G list).  Compiled out.
 */
static void
ahc_print_scb(struct scb *scb)
{
	int i;

	struct hardware_scb *hscb = scb->hscb;

	printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
	       (void *)scb,
	       hscb->control,
	       hscb->scsiid,
	       hscb->lun,
	       hscb->cdb_len);
	printk("Shared Data: ");
	for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
		printk("%#02x", hscb->shared_data.cdb[i]);
	printk(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
	       ahc_le32toh(hscb->dataptr),
	       ahc_le32toh(hscb->datacnt),
	       ahc_le32toh(hscb->sgptr),
	       hscb->tag);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printk("sg[%d] - Addr 0x%x%x : Length %d\n",
			       i,
			       (ahc_le32toh(scb->sg_list[i].len) >> 24
				& SG_HIGH_ADDR_BITS),
			       ahc_le32toh(scb->sg_list[i].addr),
			       ahc_le32toh(scb->sg_list[i].len));
		}
	}
}
#endif

/************************* Transfer Negotiation *******************************/
/*
 * Allocate per target mode instance (ID we respond to as a target)
 *
transfer negotiation data structures. */ static struct ahc_tmode_tstate * ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) { struct ahc_tmode_tstate *master_tstate; struct ahc_tmode_tstate *tstate; int i; master_tstate = ahc->enabled_targets[ahc->our_id]; if (channel == 'B') { scsi_id += 8; master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; } if (ahc->enabled_targets[scsi_id] != NULL && ahc->enabled_targets[scsi_id] != master_tstate) panic("%s: ahc_alloc_tstate - Target already allocated", ahc_name(ahc)); tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC); if (tstate == NULL) return (NULL); /* * If we have allocated a master tstate, copy user settings from * the master tstate (taken from SRAM or the EEPROM) for this * channel, but reset our current and goal settings to async/narrow * until an initiator talks to us. */ if (master_tstate != NULL) { memcpy(tstate, master_tstate, sizeof(*tstate)); memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); tstate->ultraenb = 0; for (i = 0; i < AHC_NUM_TARGETS; i++) { memset(&tstate->transinfo[i].curr, 0, sizeof(tstate->transinfo[i].curr)); memset(&tstate->transinfo[i].goal, 0, sizeof(tstate->transinfo[i].goal)); } } else memset(tstate, 0, sizeof(*tstate)); ahc->enabled_targets[scsi_id] = tstate; return (tstate); } #ifdef AHC_TARGET_MODE /* * Free per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static void ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) { struct ahc_tmode_tstate *tstate; /* * Don't clean up our "master" tstate. * It has our default user settings. 
*/ if (((channel == 'B' && scsi_id == ahc->our_id_b) || (channel == 'A' && scsi_id == ahc->our_id)) && force == FALSE) return; if (channel == 'B') scsi_id += 8; tstate = ahc->enabled_targets[scsi_id]; if (tstate != NULL) kfree(tstate); ahc->enabled_targets[scsi_id] = NULL; } #endif /* * Called when we have an active connection to a target on the bus, * this function finds the nearest syncrate to the input period limited * by the capabilities of the bus connectivity of and sync settings for * the target. */ const struct ahc_syncrate * ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *period, u_int *ppr_options, role_t role) { struct ahc_transinfo *transinfo; u_int maxsync; if ((ahc->features & AHC_ULTRA2) != 0) { if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { maxsync = AHC_SYNCRATE_DT; } else { maxsync = AHC_SYNCRATE_ULTRA; /* Can't do DT on an SE bus */ *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } } else if ((ahc->features & AHC_ULTRA) != 0) { maxsync = AHC_SYNCRATE_ULTRA; } else { maxsync = AHC_SYNCRATE_FAST; } /* * Never allow a value higher than our current goal * period otherwise we may allow a target initiated * negotiation to go above the limit as set by the * user. In the case of an initiator initiated * sync negotiation, we limit based on the user * setting. This allows the system to still accept * incoming negotiations even if target initiated * negotiation is not performed. 
*/ if (role == ROLE_TARGET) transinfo = &tinfo->user; else transinfo = &tinfo->goal; *ppr_options &= transinfo->ppr_options; if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { maxsync = max(maxsync, (u_int)AHC_SYNCRATE_ULTRA2); *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } if (transinfo->period == 0) { *period = 0; *ppr_options = 0; return (NULL); } *period = max(*period, (u_int)transinfo->period); return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); } /* * Look up the valid period to SCSIRATE conversion in our table. * Return the period and offset that should be sent to the target * if this was the beginning of an SDTR. */ const struct ahc_syncrate * ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, u_int *ppr_options, u_int maxsync) { const struct ahc_syncrate *syncrate; if ((ahc->features & AHC_DT) == 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; /* Skip all DT only entries if DT is not available */ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && maxsync < AHC_SYNCRATE_ULTRA2) maxsync = AHC_SYNCRATE_ULTRA2; /* Now set the maxsync based on the card capabilities * DT is already done above */ if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0 && maxsync < AHC_SYNCRATE_ULTRA) maxsync = AHC_SYNCRATE_ULTRA; if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0 && maxsync < AHC_SYNCRATE_FAST) maxsync = AHC_SYNCRATE_FAST; for (syncrate = &ahc_syncrates[maxsync]; syncrate->rate != NULL; syncrate++) { /* * The Ultra2 table doesn't go as low * as for the Fast/Ultra cards. */ if ((ahc->features & AHC_ULTRA2) != 0 && (syncrate->sxfr_u2 == 0)) break; if (*period <= syncrate->period) { /* * When responding to a target that requests * sync, the requested rate may fall between * two rates that we can output, but still be * a rate that we can receive. Because of this, * we want to respond to the target with * the same rate that it sent to us even * if the period we use to send data to it * is lower. Only lower the response period * if we must. 
*/ if (syncrate == &ahc_syncrates[maxsync]) *period = syncrate->period; /* * At some speeds, we only support * ST transfers. */ if ((syncrate->sxfr_u2 & ST_SXFR) != 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; break; } } if ((*period == 0) || (syncrate->rate == NULL) || ((ahc->features & AHC_ULTRA2) != 0 && (syncrate->sxfr_u2 == 0))) { /* Use asynchronous transfers. */ *period = 0; syncrate = NULL; *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } return (syncrate); } /* * Convert from an entry in our syncrate table to the SCSI equivalent * sync "period" factor. */ u_int ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) { const struct ahc_syncrate *syncrate; if ((ahc->features & AHC_ULTRA2) != 0) scsirate &= SXFR_ULTRA2; else scsirate &= SXFR; /* now set maxsync based on card capabilities */ if ((ahc->features & AHC_DT) == 0 && maxsync < AHC_SYNCRATE_ULTRA2) maxsync = AHC_SYNCRATE_ULTRA2; if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0 && maxsync < AHC_SYNCRATE_ULTRA) maxsync = AHC_SYNCRATE_ULTRA; if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0 && maxsync < AHC_SYNCRATE_FAST) maxsync = AHC_SYNCRATE_FAST; syncrate = &ahc_syncrates[maxsync]; while (syncrate->rate != NULL) { if ((ahc->features & AHC_ULTRA2) != 0) { if (syncrate->sxfr_u2 == 0) break; else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) return (syncrate->period); } else if (scsirate == (syncrate->sxfr & SXFR)) { return (syncrate->period); } syncrate++; } return (0); /* async */ } /* * Truncate the given synchronous offset to a value the * current adapter type and syncrate are capable of. 
 */
static void
ahc_validate_offset(struct ahc_softc *ahc,
		    struct ahc_initiator_tinfo *tinfo,
		    const struct ahc_syncrate *syncrate,
		    u_int *offset, int wide, role_t role)
{
	u_int maxoffset;

	/* Limit offset to what we can do */
	if (syncrate == NULL) {
		/* Async transfers: offset must be zero. */
		maxoffset = 0;
	} else if ((ahc->features & AHC_ULTRA2) != 0) {
		maxoffset = MAX_OFFSET_ULTRA2;
	} else {
		if (wide)
			maxoffset = MAX_OFFSET_16BIT;
		else
			maxoffset = MAX_OFFSET_8BIT;
	}
	*offset = min(*offset, maxoffset);
	if (tinfo != NULL) {
		/* Further clamp to the user or goal setting for this target. */
		if (role == ROLE_TARGET)
			*offset = min(*offset, (u_int)tinfo->user.offset);
		else
			*offset = min(*offset, (u_int)tinfo->goal.offset);
	}
}

/*
 * Truncate the given transfer width parameter to a value the
 * current adapter type is capable of.
 */
static void
ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
		   u_int *bus_width, role_t role)
{
	switch (*bus_width) {
	default:
		if (ahc->features & AHC_WIDE) {
			/* Respond Wide */
			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			break;
		}
		/* FALLTHROUGH */
	case MSG_EXT_WDTR_BUS_8_BIT:
		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		break;
	}

	if (tinfo != NULL) {
		/* Further clamp to the user or goal setting for this target. */
		if (role == ROLE_TARGET)
			*bus_width = min((u_int)tinfo->user.width, *bus_width);
		else
			*bus_width = min((u_int)tinfo->goal.width, *bus_width);
	}
}

/*
 * Update the bitmask of targets for which the controller should
 * negotiate with at the next convenient opportunity.  This currently
 * means the next time we send the initial identify messages for
 * a new transaction.
 *
 * Returns non-zero if the auto_negotiate mask actually changed.
 */
int
ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct ahc_tmode_tstate *tstate,
		       struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type)
{
	u_int auto_negotiate_orig;

	auto_negotiate_orig = tstate->auto_negotiate;
	if (neg_type == AHC_NEG_ALWAYS) {
		/*
		 * Force our "current" settings to be
		 * unknown so that unless a bus reset
		 * occurs the need to renegotiate is
		 * recorded persistently.
		 */
		if ((ahc->features & AHC_WIDE) != 0)
			tinfo->curr.width = AHC_WIDTH_UNKNOWN;
		tinfo->curr.period = AHC_PERIOD_UNKNOWN;
		tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
	}
	/* Negotiate when curr and goal disagree, or (for IF_NON_ASYNC)
	 * whenever the goal is anything other than async/narrow. */
	if (tinfo->curr.period != tinfo->goal.period
	 || tinfo->curr.width != tinfo->goal.width
	 || tinfo->curr.offset != tinfo->goal.offset
	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
	 || (neg_type == AHC_NEG_IF_NON_ASYNC
	  && (tinfo->goal.offset != 0
	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
	   || tinfo->goal.ppr_options != 0)))
		tstate->auto_negotiate |= devinfo->target_mask;
	else
		tstate->auto_negotiate &= ~devinfo->target_mask;

	return (auto_negotiate_orig != tstate->auto_negotiate);
}

/*
 * Update the user/goal/curr tables of synchronous negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		 const struct ahc_syncrate *syncrate, u_int period,
		 u_int offset, u_int ppr_options, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	u_int	old_ppr;
	int	active;
	int	update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;

	if (syncrate == NULL) {
		/* No table entry means asynchronous transfers. */
		period = 0;
		offset = 0;
	}

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	if ((type & AHC_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	old_period = tinfo->curr.period;
	old_offset = tinfo->curr.offset;
	old_ppr	   = tinfo->curr.ppr_options;

	if ((type & AHC_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {
		u_int	scsirate;

		update_needed++;
		scsirate = tinfo->scsirate;
		if ((ahc->features & AHC_ULTRA2) != 0) {

			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
			if (syncrate != NULL) {
				scsirate |= syncrate->sxfr_u2;
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
					scsirate |= ENABLE_CRC;
				else
					scsirate |= SINGLE_EDGE;
			}
		} else {

			scsirate &= ~(SXFR|SOFS);
			/*
			 * Ensure Ultra mode is set properly for
			 * this target.
			 */
			tstate->ultraenb &= ~devinfo->target_mask;
			if (syncrate != NULL) {
				if (syncrate->sxfr & ULTRA_SXFR) {
					tstate->ultraenb |=
						devinfo->target_mask;
				}
				scsirate |= syncrate->sxfr & SXFR;
				scsirate |= offset & SOFS;
			}
			if (active) {
				u_int sxfrctl0;

				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
				sxfrctl0 &= ~FAST20;
				if (tstate->ultraenb & devinfo->target_mask)
					sxfrctl0 |= FAST20;
				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
			}
		}
		if (active) {
			/* Target is on the bus now: update the hardware
			 * registers immediately. */
			ahc_outb(ahc, SCSIRATE, scsirate);
			if ((ahc->features & AHC_ULTRA2) != 0)
				ahc_outb(ahc, SCSIOFFSET, offset);
		}

		tinfo->scsirate = scsirate;
		tinfo->curr.period = period;
		tinfo->curr.offset = offset;
		tinfo->curr.ppr_options = ppr_options;

		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			if (offset != 0) {
				printk("%s: target %d synchronous at %sMHz%s, "
				       "offset = 0x%x\n", ahc_name(ahc),
				       devinfo->target, syncrate->rate,
				       (ppr_options & MSG_EXT_PPR_DT_REQ)
				       ? " DT" : "", offset);
			} else {
				printk("%s: target %d using "
				       "asynchronous transfers\n",
				       ahc_name(ahc), devinfo->target);
			}
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);

	if (update_needed)
		ahc_update_pending_scbs(ahc);
}

/*
 * Update the user/goal/curr tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	      u_int width, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	oldwidth;
	int	active;
	int	update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0)
		tinfo->user.width = width;

	if ((type & AHC_TRANS_GOAL) != 0)
		tinfo->goal.width = width;

	oldwidth = tinfo->curr.width;
	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
		u_int	scsirate;

		update_needed++;
		/* Reflect the new width in the cached SCSIRATE value. */
		scsirate =  tinfo->scsirate;
		scsirate &= ~WIDEXFER;
		if (width == MSG_EXT_WDTR_BUS_16_BIT)
			scsirate |= WIDEXFER;

		tinfo->scsirate = scsirate;

		if (active)
			/* Target is on the bus: update the hardware now. */
			ahc_outb(ahc, SCSIRATE, scsirate);

		tinfo->curr.width = width;

		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			printk("%s: target %d using %dbit transfers\n",
			       ahc_name(ahc), devinfo->target,
			       8 * (0x01 << width));
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);
	if (update_needed)
		ahc_update_pending_scbs(ahc);
}

/*
 * Update the current state of tagged queuing for a given target.
 */
static void
ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
	     struct ahc_devinfo *devinfo, ahc_queue_alg alg)
{
	struct scsi_device *sdev = cmd->device;

	ahc_platform_set_tags(ahc, sdev, devinfo, alg);
	/* Notify upper layers that the negotiation state changed. */
	ahc_send_async(ahc, devinfo->channel, devinfo->target,
		       devinfo->lun, AC_TRANSFER_NEG);
}

/*
 * When the transfer settings for a connection change, update any
 * in-transit SCBs to contain the new data so the hardware will
 * be set correctly during future (re)selections.
 */
static void
ahc_update_pending_scbs(struct ahc_softc *ahc)
{
	struct	scb *pending_scb;
	int	pending_scb_count;
	int	i;
	int	paused;
	u_int	saved_scbptr;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.
	 */
	pending_scb_count = 0;
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		struct ahc_devinfo devinfo;
		struct hardware_scb *pending_hscb;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;

		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		/* Refresh the host copy of the hardware SCB. */
		pending_hscb = pending_scb->hscb;
		pending_hscb->control &= ~ULTRAENB;
		if ((tstate->ultraenb & devinfo.target_mask) != 0)
			pending_hscb->control |= ULTRAENB;
		pending_hscb->scsirate = tinfo->scsirate;
		pending_hscb->scsioffset = tinfo->curr.offset;
		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
			/* Negotiation no longer required for this target. */
			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
			pending_hscb->control &= ~MK_MESSAGE;
		}
		ahc_sync_scb(ahc, pending_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		pending_scb_count++;
	}

	if (pending_scb_count == 0)
		return;

	/* Pause the sequencer (if not already paused) before touching
	 * the on-card SCB array; remember whether we must unpause. */
	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	/* Ensure that the hscbs down on the card match the new information */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		struct	hardware_scb *pending_hscb;
		u_int	control;
		u_int	scb_tag;

		ahc_outb(ahc, SCBPTR, i);
		scb_tag = ahc_inb(ahc, SCB_TAG);
		pending_scb = ahc_lookup_scb(ahc, scb_tag);
		if (pending_scb == NULL)
			continue;

		pending_hscb = pending_scb->hscb;
		control = ahc_inb(ahc, SCB_CONTROL);
		control &= ~(ULTRAENB|MK_MESSAGE);
		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
		ahc_outb(ahc, SCB_CONTROL, control);
		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
	}
	ahc_outb(ahc, SCBPTR, saved_scbptr);

	if (paused == 0)
		ahc_unpause(ahc);
}

/**************************** Pathing Information *****************************/
/*
 * Derive an ahc_devinfo describing the current connection from the
 * controller's hardware state (SSTAT0, SCSIID/TARGIDIN, SAVED_SCSIID,
 * SAVED_LUN).
 */
static void
ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int	saved_scsiid;
	role_t	role;
	int	our_id;

	if (ahc_inb(ahc, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahc->features & AHC_MULTI_TID) != 0
	 && (ahc_inb(ahc, SEQ_FLAGS)
	   & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahc_inb(ahc, TARGIDIN) & OID;
	} else if ((ahc->features & AHC_ULTRA2) != 0)
		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
	else
		our_id = ahc_inb(ahc, SCSIID) & OID;

	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	ahc_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahc, saved_scsiid),
			    ahc_inb(ahc, SAVED_LUN),
			    SCSIID_CHANNEL(ahc, saved_scsiid),
			    role);
}

/*
 * Map a raw bus phase value to its ahc_phase_table entry.
 */
static const struct ahc_phase_table_entry*
ahc_lookup_phase_entry(int phase)
{
	const struct ahc_phase_table_entry *entry;
	const struct ahc_phase_table_entry *last_entry;

	/*
	 * num_phases doesn't include the default entry which
	 * will be returned if the phase doesn't match.
	 */
	last_entry = &ahc_phase_table[num_phases];
	for (entry = ahc_phase_table; entry < last_entry; entry++) {
		if (phase == entry->phase)
			break;
	}
	return (entry);
}

/*
 * Fill in an ahc_devinfo from explicit id/target/lun/channel/role values.
 * Channel B targets occupy offsets 8-15 of the target mask space.
 */
void
ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
		    u_int lun, char channel, role_t role)
{
	devinfo->our_scsiid = our_id;
	devinfo->target = target;
	devinfo->lun = lun;
	devinfo->target_offset = target;
	devinfo->channel = channel;
	devinfo->role = role;
	if (channel == 'B')
		devinfo->target_offset += 8;
	devinfo->target_mask = (0x01 << devinfo->target_offset);
}

/*
 * Print a "controller:channel:target:lun: " prefix for log messages.
 */
void
ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	printk("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
	       devinfo->target, devinfo->lun);
}

/*
 * Fill in an ahc_devinfo from the addressing information in an SCB.
 */
static void
ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		struct scb *scb)
{
	role_t	role;
	int	our_id;

	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
	role = ROLE_INITIATOR;
	if ((scb->flags & SCB_TARGET_SCB) != 0)
		role = ROLE_TARGET;
	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
}


/************************ Message Phase Processing ****************************/
/*
 * Raise ATN on the bus.  On non-DT controllers, SCSISIGO must carry the
 * current input signals as well, so fold in SCSISIGI first.
 */
static void
ahc_assert_atn(struct ahc_softc *ahc)
{
	u_int scsisigo;

	scsisigo = ATNO;
	if ((ahc->features & AHC_DT) == 0)
		scsisigo |= ahc_inb(ahc, SCSISIGI);
	ahc_outb(ahc, SCSISIGO, scsisigo);
}

/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		/* Lead with IDENTIFY (plus disconnect privilege if enabled),
		 * then the tag message pair when tagged queuing is on. */
		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printk("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printk("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		printk("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	const struct ahc_syncrate *rate;
	int	dowide;
	int	dosync;
	int	doppr;
	u_int	period;
	u_int	ppr_options;
	u_int	offset;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	offset = tinfo->goal.offset;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
				       &ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
	/*
	 * Only use PPR if we have options that need it, even if the device
	 * claims to support it.  There might be an expander in the way
	 * that doesn't.
	 */
	doppr = ppr_options != 0;

	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.offset != 0;
	}

	if (!dowide && !dosync && !doppr) {
		/*
		 * Force async with a WDTR message if we have a wide bus,
		 * or just issue an SDTR with a 0 offset.
		 */
		if ((ahc->features & AHC_WIDE) != 0)
			dowide = 1;
		else
			dosync = 1;
		if (bootverbose) {
			ahc_print_devinfo(ahc, devinfo);
			printk("Ensuring async\n");
		}
	}

	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		doppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (doppr || (dosync && !dowide)) {

		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, tinfo, rate, &offset,
				    doppr ? tinfo->goal.width
					  : tinfo->curr.width,
				    devinfo->role);
		if (doppr) {
			ahc_construct_ppr(ahc, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahc_construct_sdtr(ahc, devinfo, period, offset);
		}
	} else {
		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int period, u_int offset)
{
	/* A zero offset means async; report the async period factor. */
	if (offset == 0)
		period = AHC_ASYNC_XFER_PERIOD;
	ahc->msgout_index += spi_populate_sync_msg(
			ahc->msgout_buf + ahc->msgout_index, period, offset);
	ahc->msgout_len += 5;
	if (bootverbose) {
		printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int bus_width)
{
	ahc->msgout_index += spi_populate_width_msg(
			ahc->msgout_buf + ahc->msgout_index, bus_width);
	ahc->msgout_len += 4;
	if (bootverbose) {
		printk("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	/* A zero offset means async; report the async period factor. */
	if (offset == 0)
		period = AHC_ASYNC_XFER_PERIOD;
	ahc->msgout_index += spi_populate_ppr_msg(
			ahc->msgout_buf + ahc->msgout_index, period, offset,
			bus_width, ppr_options);
	ahc->msgout_len += 8;
	if (bootverbose) {
		printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}

/*
 * Clear any active message state.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahc_outb(ahc, CLRSINT1, CLRATNO);
	}
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
	ahc_outb(ahc, SEQ_FLAGS2,
		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
}

/*
 * Recover from a protocol violation reported by the sequencer.
 * Depending on the firmware flags (NOT_IDENTIFIED, NO_CDB_SENT,
 * STATUS_RCVD) and whether an SCB is active, either fail the
 * transaction and attempt a targeted abort, or reset the bus.
 */
static void
ahc_handle_proto_violation(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	struct	scb *scb;
	u_int	scbid;
	u_int	seq_flags;
	u_int	curphase;
	u_int	lastphase;
	int	found;

	ahc_fetch_devinfo(ahc, &devinfo);
	scbid = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scbid);
	seq_flags = ahc_inb(ahc, SEQ_FLAGS);
	curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
	lastphase = ahc_inb(ahc, LASTPHASE);
	if ((seq_flags & NOT_IDENTIFIED) != 0) {

		/*
		 * The reconnecting target either did not send an
		 * identify message, or did, but we didn't find an SCB
		 * to match.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printk("Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x.\n", lastphase);
		scb = NULL;
	} else if (scb == NULL) {
		/*
		 * We don't seem to have an SCB active for this
		 * transaction.  Print an error and reset the bus.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printk("No SCB found during protocol violation\n");
		goto proto_violation_reset;
	} else {
		ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
		if ((seq_flags & NO_CDB_SENT) != 0) {
			ahc_print_path(ahc, scb);
			printk("No or incomplete CDB sent to device.\n");
		} else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
			/*
			 * The target never bothered to provide status to
			 * us prior to completing the command.  Since we don't
			 * know the disposition of this command, we must attempt
			 * to abort it.  Assert ATN and prepare to send an abort
			 * message.
			 */
			ahc_print_path(ahc, scb);
			printk("Completed command without status.\n");
		} else {
			ahc_print_path(ahc, scb);
			printk("Unknown protocol violation.\n");
			ahc_dump_card_state(ahc);
		}
	}
	if ((lastphase & ~P_DATAIN_DT) == 0
	 || lastphase == P_COMMAND) {
proto_violation_reset:
		/*
		 * Target either went directly to data/command
		 * phase or didn't respond to our ATN.
		 * The only safe thing to do is to blow
		 * it away with a bus reset.
		 */
*/ found = ahc_reset_channel(ahc, 'A', TRUE); printk("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahc_name(ahc), 'A', found); } else { /* * Leave the selection hardware off in case * this abort attempt will affect yet to * be sent commands. */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); ahc_assert_atn(ahc); ahc_outb(ahc, MSG_OUT, HOST_MSG); if (scb == NULL) { ahc_print_devinfo(ahc, &devinfo); ahc->msgout_buf[0] = MSG_ABORT_TASK; ahc->msgout_len = 1; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } else { ahc_print_path(ahc, scb); scb->flags |= SCB_ABORT; } printk("Protocol violation %s. Attempting to abort.\n", ahc_lookup_phase_entry(curphase)->phasemsg); } } /* * Manual message loop handler. */ static void ahc_handle_message_phase(struct ahc_softc *ahc) { struct ahc_devinfo devinfo; u_int bus_phase; int end_session; ahc_fetch_devinfo(ahc, &devinfo); end_session = FALSE; bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; reswitch: switch (ahc->msg_type) { case MSG_TYPE_INITIATOR_MSGOUT: { int lastbyte; int phasemis; int msgdone; if (ahc->msgout_len == 0) panic("HOST_MSG_LOOP interrupt with no active message"); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printk("INITIATOR_MSG_OUT"); } #endif phasemis = bus_phase != P_MESGOUT; if (phasemis) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { printk(" PHASEMIS %s\n", ahc_lookup_phase_entry(bus_phase) ->phasemsg); } #endif if (bus_phase == P_MESGIN) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. 
*/ ahc_outb(ahc, CLRSINT1, CLRATNO); ahc->send_msg_perror = FALSE; ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahc->msgin_index = 0; goto reswitch; } end_session = TRUE; break; } if (ahc->send_msg_perror) { ahc_outb(ahc, CLRSINT1, CLRATNO); ahc_outb(ahc, CLRSINT1, CLRREQINIT); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahc->send_msg_perror); #endif ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); break; } msgdone = ahc->msgout_index == ahc->msgout_len; if (msgdone) { /* * The target has requested a retry. * Re-assert ATN, reset our message index to * 0, and try again. */ ahc->msgout_index = 0; ahc_assert_atn(ahc); } lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); if (lastbyte) { /* Last byte is signified by dropping ATN */ ahc_outb(ahc, CLRSINT1, CLRATNO); } /* * Clear our interrupt status and present * the next byte on the bus. */ ahc_outb(ahc, CLRSINT1, CLRREQINIT); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahc->msgout_buf[ahc->msgout_index]); #endif ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); break; } case MSG_TYPE_INITIATOR_MSGIN: { int phasemis; int message_done; #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printk("INITIATOR_MSG_IN"); } #endif phasemis = bus_phase != P_MESGIN; if (phasemis) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { printk(" PHASEMIS %s\n", ahc_lookup_phase_entry(bus_phase) ->phasemsg); } #endif ahc->msgin_index = 0; if (bus_phase == P_MESGOUT && (ahc->send_msg_perror == TRUE || (ahc->msgout_len != 0 && ahc->msgout_index == 0))) { ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; goto reswitch; } end_session = TRUE; break; } /* Pull the byte in without acking it */ ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahc->msgin_buf[ahc->msgin_index]); #endif message_done = ahc_parse_msg(ahc, 
&devinfo); if (message_done) { /* * Clear our incoming message buffer in case there * is another message following this one. */ ahc->msgin_index = 0; /* * If this message illicited a response, * assert ATN so the target takes us to the * message out phase. */ if (ahc->msgout_len != 0) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printk("Asserting ATN for response\n"); } #endif ahc_assert_atn(ahc); } } else ahc->msgin_index++; if (message_done == MSGLOOP_TERMINATED) { end_session = TRUE; } else { /* Ack the byte */ ahc_outb(ahc, CLRSINT1, CLRREQINIT); ahc_inb(ahc, SCSIDATL); } break; } case MSG_TYPE_TARGET_MSGIN: { int msgdone; int msgout_request; if (ahc->msgout_len == 0) panic("Target MSGIN with no active message"); /* * If we interrupted a mesgout session, the initiator * will not know this until our first REQ. So, we * only honor mesgout requests after we've sent our * first byte. */ if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 && ahc->msgout_index > 0) msgout_request = TRUE; else msgout_request = FALSE; if (msgout_request) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); ahc->msgin_index = 0; /* Dummy read to REQ for first byte */ ahc_inb(ahc, SCSIDATL); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); break; } msgdone = ahc->msgout_index == ahc->msgout_len; if (msgdone) { ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); end_session = TRUE; break; } /* * Present the next byte on the bus. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); break; } case MSG_TYPE_TARGET_MSGOUT: { int lastbyte; int msgdone; /* * The initiator signals that this is * the last byte by dropping ATN. 
*/ lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; /* * Read the latched byte, but turn off SPIOEN first * so that we don't inadvertently cause a REQ for the * next byte. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); msgdone = ahc_parse_msg(ahc, &devinfo); if (msgdone == MSGLOOP_TERMINATED) { /* * The message is *really* done in that it caused * us to go to bus free. The sequencer has already * been reset at this point, so pull the ejection * handle. */ return; } ahc->msgin_index++; /* * XXX Read spec about initiator dropping ATN too soon * and use msgdone to detect it. */ if (msgdone == MSGLOOP_MSGCOMPLETE) { ahc->msgin_index = 0; /* * If this message illicited a response, transition * to the Message in phase and send it. */ if (ahc->msgout_len != 0) { ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); ahc->msg_type = MSG_TYPE_TARGET_MSGIN; ahc->msgin_index = 0; break; } } if (lastbyte) end_session = TRUE; else { /* Ask for the next byte. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); } break; } default: panic("Unknown REQINIT message type"); } if (end_session) { ahc_clear_msg_state(ahc); ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); } else ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); } /* * See if we sent a particular extended message to the target. * If "full" is true, return true only if the target saw the full * message. If "full" is false, return true if the target saw at * least the first byte of the message. 
 */
static int
ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	/* Walk the outgoing message buffer one message at a time. */
	while (index < ahc->msgout_len) {
		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/*
			 * Extended message layout: opcode, length byte,
			 * then the extended message code at index+2.
			 */
			end_index = index + 1 + ahc->msgout_buf[index + 1];
			if (ahc->msgout_buf[index+2] == msgval
			 && type == AHCMSG_EXT) {

				if (full) {
					/*
					 * "Fully sent" means our transmit
					 * index has advanced past the last
					 * byte of this message.
					 */
					if (ahc->msgout_index > end_index)
						found = TRUE;
				} else if (ahc->msgout_index > index)
					/* At least the first byte went out. */
					found = TRUE;
			}
			index = end_index;
		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHCMSG_1B
			 && ahc->msgout_buf[index] == msgval
			 && ahc->msgout_index > index)
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}

/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
 *
 * Returns MSGLOOP_IN_PROG while more bytes are needed,
 * MSGLOOP_MSGCOMPLETE once a whole message has been handled, or
 * MSGLOOP_TERMINATED when the sequencer should take over.
 */
static int
ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	int	reject;
	int	done;
	int	response;
	u_int	targ_scsirate;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	targ_scsirate = tinfo->scsirate;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahc->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
*/ done = MSGLOOP_TERMINATED; break; case MSG_MESSAGE_REJECT: response = ahc_handle_msg_reject(ahc, devinfo); /* FALLTHROUGH */ case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; case MSG_EXTENDED: { /* Wait for enough of the message to begin validation */ if (ahc->msgin_index < 2) break; switch (ahc->msgin_buf[2]) { case MSG_EXT_SDTR: { const struct ahc_syncrate *syncrate; u_int period; u_int ppr_options; u_int offset; u_int saved_offset; if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. * * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = ahc->msgin_buf[3]; ppr_options = 0; saved_offset = offset = ahc->msgin_buf[4]; syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); ahc_validate_offset(ahc, tinfo, syncrate, &offset, targ_scsirate & WIDEXFER, devinfo->role); if (bootverbose) { printk("(%s:%c:%d:%d): Received " "SDTR period %x, offset %x\n\t" "Filtered to period %x, offset %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, ahc->msgin_buf[3], saved_offset, period, offset); } ahc_set_syncrate(ahc, devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. 
*/ if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printk("(%s:%c:%d:%d): Target " "Initiated SDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_sdtr(ahc, devinfo, period, offset); ahc->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_WDTR: { u_int bus_width; u_int saved_width; u_int sending_reply; sending_reply = FALSE; if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { reject = TRUE; break; } /* * Wait until we have our arg before validating * and acting on this message. * * Add one to MSG_EXT_WDTR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) break; bus_width = ahc->msgin_buf[3]; saved_width = bus_width; ahc_validate_width(ahc, tinfo, &bus_width, devinfo->role); if (bootverbose) { printk("(%s:%c:%d:%d): Received WDTR " "%x filtered to %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, saved_width, bus_width); } if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { /* * Don't send a WDTR back to the * target, since we asked first. * If the width went higher than our * request, reject it. */ if (saved_width > bus_width) { reject = TRUE; printk("(%s:%c:%d:%d): requested %dBit " "transfers. 
Rejecting...\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, 8 * (0x01 << bus_width)); bus_width = 0; } } else { /* * Send our own WDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printk("(%s:%c:%d:%d): Target " "Initiated WDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_wdtr(ahc, devinfo, bus_width); ahc->msgout_index = 0; response = TRUE; sending_reply = TRUE; } /* * After a wide message, we are async, but * some devices don't seem to honor this portion * of the spec. Force a renegotiation of the * sync component of our transfer agreement even * if our goal is async. By updating our width * after forcing the negotiation, we avoid * renegotiating for width. */ ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_ALWAYS); ahc_set_width(ahc, devinfo, bus_width, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); if (sending_reply == FALSE && reject == FALSE) { /* * We will always have an SDTR to send. */ ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_PPR: { const struct ahc_syncrate *syncrate; u_int period; u_int offset; u_int bus_width; u_int ppr_options; u_int saved_width; u_int saved_offset; u_int saved_ppr_options; if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { reject = TRUE; break; } /* * Wait until we have all args before validating * and acting on this message. * * Add one to MSG_EXT_PPR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) break; period = ahc->msgin_buf[3]; offset = ahc->msgin_buf[5]; bus_width = ahc->msgin_buf[6]; saved_width = bus_width; ppr_options = ahc->msgin_buf[7]; /* * According to the spec, a DT only * period factor with no DT option * set implies async. 
*/ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period == 9) offset = 0; saved_ppr_options = ppr_options; saved_offset = offset; /* * Mask out any options we don't support * on any controller. Transfer options are * only available if we are negotiating wide. */ ppr_options &= MSG_EXT_PPR_DT_REQ; if (bus_width == 0) ppr_options = 0; ahc_validate_width(ahc, tinfo, &bus_width, devinfo->role); syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); ahc_validate_offset(ahc, tinfo, syncrate, &offset, bus_width, devinfo->role); if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { /* * If we are unable to do any of the * requested options (we went too low), * then we'll have to reject the message. */ if (saved_width > bus_width || saved_offset != offset || saved_ppr_options != ppr_options) { reject = TRUE; period = 0; offset = 0; bus_width = 0; ppr_options = 0; syncrate = NULL; } } else { if (devinfo->role != ROLE_TARGET) printk("(%s:%c:%d:%d): Target " "Initiated PPR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); else printk("(%s:%c:%d:%d): Initiator " "Initiated PPR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_ppr(ahc, devinfo, period, offset, bus_width, ppr_options); ahc->msgout_index = 0; response = TRUE; } if (bootverbose) { printk("(%s:%c:%d:%d): Received PPR width %x, " "period %x, offset %x,options %x\n" "\tFiltered to width %x, period %x, " "offset %x, options %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, saved_width, ahc->msgin_buf[3], saved_offset, saved_ppr_options, bus_width, period, offset, ppr_options); } ahc_set_width(ahc, devinfo, bus_width, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_set_syncrate(ahc, devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); done = MSGLOOP_MSGCOMPLETE; break; } default: /* Unknown extended message. Reject it. 
*/ reject = TRUE; break; } break; } #ifdef AHC_TARGET_MODE case MSG_BUS_DEV_RESET: ahc_handle_devreset(ahc, devinfo, CAM_BDR_SENT, "Bus Device Reset Received", /*verbose_level*/0); ahc_restart(ahc); done = MSGLOOP_TERMINATED; break; case MSG_ABORT_TAG: case MSG_ABORT: case MSG_CLEAR_QUEUE: { int tag; /* Target mode messages */ if (devinfo->role != ROLE_TARGET) { reject = TRUE; break; } tag = SCB_LIST_NULL; if (ahc->msgin_buf[0] == MSG_ABORT_TAG) tag = ahc_inb(ahc, INITIATOR_TAG); ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, devinfo->lun, tag, ROLE_TARGET, CAM_REQ_ABORTED); tstate = ahc->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[devinfo->lun]; if (lstate != NULL) { ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, ahc->msgin_buf[0], /*arg*/tag); ahc_send_lstate_events(ahc, lstate); } } ahc_restart(ahc); done = MSGLOOP_TERMINATED; break; } #endif case MSG_TERM_IO_PROC: default: reject = TRUE; break; } if (reject) { /* * Setup to reject the message. */ ahc->msgout_index = 0; ahc->msgout_len = 1; ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; done = MSGLOOP_MSGCOMPLETE; response = TRUE; } if (done != MSGLOOP_IN_PROG && !response) /* Clear the outgoing message buffer */ ahc->msgout_len = 0; return (done); } /* * Process a message reject message. */ static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { /* * What we care about here is if we had an * outstanding SDTR or WDTR message for this * target. If we did, this is a signal that * the target is refusing negotiation. 
*/ struct scb *scb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int scb_index; u_int last_msg; int response = 0; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* Might be necessary */ last_msg = ahc_inb(ahc, LAST_MSG); if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { /* * Target does not support the PPR message. * Attempt to negotiate SPI-2 style. */ if (bootverbose) { printk("(%s:%c:%d:%d): PPR Rejected. " "Trying WDTR/SDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.ppr_options = 0; tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = 1; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { /* note 8bit xfers */ printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using " "8bit transfers\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); /* * No need to clear the sync rate. If the target * did not accept the command, our syncrate is * unaffected. If the target started the negotiation, * but rejected our response, we already cleared the * sync rate before sending our WDTR. */ if (tinfo->goal.offset != tinfo->curr.offset) { /* Start the sync negotiation */ ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = 1; } } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { /* note asynch xfers and clear flag */ ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); printk("(%s:%c:%d:%d): refuses synchronous negotiation. 
" "Using asynchronous transfers\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { int tag_type; int mask; tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); if (tag_type == MSG_SIMPLE_TASK) { printk("(%s:%c:%d:%d): refuses tagged commands. " "Performing non-tagged I/O\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_NONE); mask = ~0x23; } else { printk("(%s:%c:%d:%d): refuses %s tagged commands. " "Performing simple queue tagged I/O only\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, tag_type == MSG_ORDERED_TASK ? "ordered" : "head of queue"); ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_BASIC); mask = ~0x03; } /* * Resend the identify for this CCB as the target * may believe that the selection is invalid otherwise. */ ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & mask); scb->hscb->control &= mask; ahc_set_transaction_tag(scb, /*enabled*/FALSE, /*type*/MSG_SIMPLE_TASK); ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); ahc_assert_atn(ahc); /* * This transaction is now at the head of * the untagged queue for this target. */ if ((ahc->flags & AHC_SCB_BTT) == 0) { struct scb_tailq *untagged_q; untagged_q = &(ahc->untagged_queues[devinfo->target_offset]); TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); scb->flags |= SCB_UNTAGGEDQ; } ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), scb->hscb->tag); /* * Requeue all tagged commands for this target * currently in our possession so they can be * converted to untagged commands. */ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); } else { /* * Otherwise, we ignore it. 
 */
		printk("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}

/*
 * Process an ignore wide residue message.
 *
 * Rewinds the active data pointers by one byte so that the final,
 * unused byte of a wide (16-bit) transfer is not counted, adjusting
 * the residual S/G pointer and count in the SCB as needed.
 */
static void
ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
		if ((sgptr & SG_LIST_NULL) != 0
		 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			struct ahc_dma_seg *sg;
			uint32_t data_cnt;
			uint32_t data_addr;
			uint32_t sglen;

			/* Pull in all of the sgptr */
			sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR);
			data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT);

			if ((sgptr & SG_LIST_NULL) != 0) {
				/*
				 * The residual data count is not updated
				 * for the command run to completion case.
				 * Explicitly zero the count.
				 */
				data_cnt &= ~AHC_SG_LEN_MASK;
			}

			data_addr = ahc_inl(ahc, SHADDR);
			/* Back the transfer up by exactly one byte. */
			data_cnt += 1;
			data_addr -= 1;
			sgptr &= SG_PTR_MASK;

			sg = ahc_sg_bus_to_virt(scb, sgptr);

			/*
			 * The residual sg ptr points to the next S/G
			 * to load so we must go back one.
*/ sg--; sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHC_SG_LEN_MASK)) { sg--; sglen = ahc_le32toh(sg->len); /* * Preserve High Address and SG_LIST bits * while setting the count to 1. */ data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); data_addr = ahc_le32toh(sg->addr) + (sglen & AHC_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahc_sg_virt_to_bus(scb, sg); } ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr); ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt); /* * Toggle the "oddness" of the transfer length * to handle this mid-transfer ignore wide * residue. This ensures that the oddness is * correct for subsequent data transfers. */ ahc_outb(ahc, SCB_LUN, ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD); } } } /* * Reinitialize the data pointers for the active transfer * based on its current residual. */ static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc) { struct scb *scb; struct ahc_dma_seg *sg; u_int scb_index; uint32_t sgptr; uint32_t resid; uint32_t dataptr; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; sg = ahc_sg_bus_to_virt(scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); dataptr = ahc_le32toh(sg->addr) + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK) - resid; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { u_int dscommand1; dscommand1 = ahc_inb(ahc, DSCOMMAND1); ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); ahc_outb(ahc, HADDR, (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); ahc_outb(ahc, DSCOMMAND1, dscommand1); } ahc_outb(ahc, HADDR + 3, dataptr >> 24); ahc_outb(ahc, HADDR + 2, dataptr 
>> 16); ahc_outb(ahc, HADDR + 1, dataptr >> 8); ahc_outb(ahc, HADDR, dataptr); ahc_outb(ahc, HCNT + 2, resid >> 16); ahc_outb(ahc, HCNT + 1, resid >> 8); ahc_outb(ahc, HCNT, resid); if ((ahc->features & AHC_ULTRA2) == 0) { ahc_outb(ahc, STCNT + 2, resid >> 16); ahc_outb(ahc, STCNT + 1, resid >> 8); ahc_outb(ahc, STCNT, resid); } } /* * Handle the effects of issuing a bus device reset message. */ static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level) { #ifdef AHC_TARGET_MODE struct ahc_tmode_tstate* tstate; u_int lun; #endif int found; found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, status); #ifdef AHC_TARGET_MODE /* * Send an immediate notify ccb to all target mord peripheral * drivers affected by this action. */ tstate = ahc->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, MSG_BUS_DEV_RESET, /*arg*/0); ahc_send_lstate_events(ahc, lstate); } } #endif /* * Go back to async/narrow transfers and renegotiate. */ ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR, /*paused*/TRUE); ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR, /*paused*/TRUE); if (status != CAM_SEL_TIMEOUT) ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_SENT_BDR); if (message != NULL && (verbose_level <= bootverbose)) printk("%s: %s on %c:%d. 
%d SCBs aborted\n", ahc_name(ahc), message, devinfo->channel, devinfo->target, found); } #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahc->msgout_index = 0; ahc->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahc_build_transfer_msg(ahc, devinfo); else panic("ahc_intr: AWAITING target message with no message"); ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ /* * Allocate a controller structure for a new device * and perform initial initializion. */ struct ahc_softc * ahc_alloc(void *platform_arg, char *name) { struct ahc_softc *ahc; int i; #ifndef __FreeBSD__ ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC); if (!ahc) { printk("aic7xxx: cannot malloc softc!\n"); kfree(name); return NULL; } #else ahc = device_get_softc((device_t)platform_arg); #endif memset(ahc, 0, sizeof(*ahc)); ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC); if (ahc->seep_config == NULL) { #ifndef __FreeBSD__ kfree(ahc); #endif kfree(name); return (NULL); } LIST_INIT(&ahc->pending_scbs); /* We don't know our unit number until the OSM sets it */ ahc->name = name; ahc->unit = -1; ahc->description = NULL; ahc->channel = 'A'; ahc->channel_b = 'B'; ahc->chip = AHC_NONE; ahc->features = AHC_FENONE; ahc->bugs = AHC_BUGNONE; ahc->flags = AHC_FNONE; /* * Default to all error reporting enabled with the * sequencer operating at its fastest speed. * The bus attach code may modify this. 
*/ ahc->seqctl = FASTMODE; for (i = 0; i < AHC_NUM_TARGETS; i++) TAILQ_INIT(&ahc->untagged_queues[i]); if (ahc_platform_alloc(ahc, platform_arg) != 0) { ahc_free(ahc); ahc = NULL; } return (ahc); } int ahc_softc_init(struct ahc_softc *ahc) { /* The IRQMS bit is only valid on VL and EISA chips */ if ((ahc->chip & AHC_PCI) == 0) ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; else ahc->unpause = 0; ahc->pause = ahc->unpause | PAUSE; /* XXX The shared scb data stuff should be deprecated */ if (ahc->scb_data == NULL) { ahc->scb_data = kmalloc(sizeof(*ahc->scb_data), GFP_ATOMIC); if (ahc->scb_data == NULL) return (ENOMEM); memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); } return (0); } void ahc_set_unit(struct ahc_softc *ahc, int unit) { ahc->unit = unit; } void ahc_set_name(struct ahc_softc *ahc, char *name) { if (ahc->name != NULL) kfree(ahc->name); ahc->name = name; } void ahc_free(struct ahc_softc *ahc) { int i; switch (ahc->init_level) { default: case 5: ahc_shutdown(ahc); /* FALLTHROUGH */ case 4: ahc_dmamap_unload(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); /* FALLTHROUGH */ case 3: ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, ahc->shared_data_dmamap); ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); /* FALLTHROUGH */ case 2: ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); case 1: #ifndef __linux__ ahc_dma_tag_destroy(ahc, ahc->buffer_dmat); #endif break; case 0: break; } #ifndef __linux__ ahc_dma_tag_destroy(ahc, ahc->parent_dmat); #endif ahc_platform_free(ahc); ahc_fini_scbdata(ahc); for (i = 0; i < AHC_NUM_TARGETS; i++) { struct ahc_tmode_tstate *tstate; tstate = ahc->enabled_targets[i]; if (tstate != NULL) { #ifdef AHC_TARGET_MODE int j; for (j = 0; j < AHC_NUM_LUNS; j++) { struct ahc_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); kfree(lstate); } } #endif kfree(tstate); } } #ifdef AHC_TARGET_MODE if (ahc->black_hole != NULL) { 
xpt_free_path(ahc->black_hole->path); kfree(ahc->black_hole); } #endif if (ahc->name != NULL) kfree(ahc->name); if (ahc->seep_config != NULL) kfree(ahc->seep_config); #ifndef __FreeBSD__ kfree(ahc); #endif return; } static void ahc_shutdown(void *arg) { struct ahc_softc *ahc; int i; ahc = (struct ahc_softc *)arg; /* This will reset most registers to 0, but not all */ ahc_reset(ahc, /*reinit*/FALSE); ahc_outb(ahc, SCSISEQ, 0); ahc_outb(ahc, SXFRCTL0, 0); ahc_outb(ahc, DSPCISTATUS, 0); for (i = TARG_SCSIRATE; i < SCSICONF; i++) ahc_outb(ahc, i, 0); } /* * Reset the controller and record some information about it * that is only available just after a reset. If "reinit" is * non-zero, this reset occurred after initial configuration * and the caller requests that the chip be fully reinitialized * to a runable state. Chip interrupts are *not* enabled after * a reinitialization. The caller must enable interrupts via * ahc_intr_enable(). */ int ahc_reset(struct ahc_softc *ahc, int reinit) { u_int sblkctl; u_int sxfrctl1_a, sxfrctl1_b; int error; int wait; /* * Preserve the value of the SXFRCTL1 register for all channels. * It contains settings that affect termination and we don't want * to disturb the integrity of the bus. */ ahc_pause(ahc); sxfrctl1_b = 0; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { u_int sblkctl; /* * Save channel B's settings in case this chip * is setup for TWIN channel operation. */ sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); } sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); /* * Ensure that the reset has finished. We delay 1000us * prior to reading the register to make sure the chip * has sufficiently completed its reset to handle register * accesses. 
 */
	wait = 1000;
	do {
		ahc_delay(1000);
	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printk("%s: WARNING - Failed chip reset!  "
		       "Trying to initialize anyway.\n", ahc_name(ahc));
	}
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/* Determine channel configuration */
	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
	/* No Twin Channel PCI cards */
	if ((ahc->chip & AHC_PCI) != 0)
		sblkctl &= ~SELBUSB;
	switch (sblkctl) {
	case 0:
		/* Single Narrow Channel */
		break;
	case 2:
		/* Wide Channel */
		ahc->features |= AHC_WIDE;
		break;
	case 8:
		/* Twin Channel */
		ahc->features |= AHC_TWIN;
		break;
	default:
		printk(" Unsupported adapter type.  Ignoring\n");
		return(-1);
	}

	/*
	 * Reload sxfrctl1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values.  STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	if ((ahc->features & AHC_TWIN) != 0) {
		u_int sblkctl;

		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);

	error = 0;
	if (reinit != 0)
		/*
		 * If a recovery action has forced a chip reset,
		 * re-initialize the chip to our liking.
 */
		error = ahc->bus_chip_init(ahc);
#ifdef AHC_DUMP_SEQ
	else
		ahc_dumpseq(ahc);
#endif

	return (error);
}

/*
 * Determine the number of SCBs available on the controller
 * by probing SCB RAM: write each index to its own slot and
 * stop at the first slot that fails read-back (also verify the
 * write did not alias onto SCB 0).
 */
int
ahc_probe_scbs(struct ahc_softc *ahc) {
	int i;

	for (i = 0; i < AHC_SCB_MAX; i++) {
		ahc_outb(ahc, SCBPTR, i);
		ahc_outb(ahc, SCB_BASE, i);
		if (ahc_inb(ahc, SCB_BASE) != i)
			break;
		ahc_outb(ahc, SCBPTR, 0);
		if (ahc_inb(ahc, SCB_BASE) != 0)
			break;
	}
	return (i);
}

/*
 * ahc_dmamap_load() callback: the mappings used here always have a
 * single segment, so just record its bus address in *arg.
 */
static void
ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	dma_addr_t *baddr;

	baddr = (dma_addr_t *)arg;
	*baddr = segs->ds_addr;
}

/*
 * Initialize every hardware SCB on the chip and link them into the
 * sequencer's free list (when SCB paging is enabled).
 */
static void
ahc_build_free_scb_list(struct ahc_softc *ahc)
{
	int scbsize;
	int i;

	/* Large SCBs are 64 bytes; standard SCBs are 32. */
	scbsize = 32;
	if ((ahc->flags & AHC_LSCBS_ENABLED) != 0)
		scbsize = 64;

	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		int j;

		ahc_outb(ahc, SCBPTR, i);

		/*
		 * Touch all SCB bytes to avoid parity errors
		 * should one of our debugging routines read
		 * an otherwise uninitialized byte.
		 */
		for (j = 0; j < scbsize; j++)
			ahc_outb(ahc, SCB_BASE+j, 0xFF);

		/* Clear the control byte. */
		ahc_outb(ahc, SCB_CONTROL, 0);

		/* Set the next pointer */
		if ((ahc->flags & AHC_PAGESCBS) != 0)
			ahc_outb(ahc, SCB_NEXT, i+1);
		else
			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);

		/* Make the tag number, SCSIID, and lun invalid */
		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
		ahc_outb(ahc, SCB_SCSIID, 0xFF);
		ahc_outb(ahc, SCB_LUN, 0xFF);
	}

	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* SCB 0 heads the free list. */
		ahc_outb(ahc, FREE_SCBH, 0);
	} else {
		/* No free list.
 */
		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
	}

	/* Make sure that the last SCB terminates the free list */
	ahc_outb(ahc, SCBPTR, i-1);
	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
}

/*
 * Allocate and map all SCB-related resources: the kernel scb array,
 * hardware SCBs, sense buffers, and the S/G element pool.
 * scb_data->init_level tracks progress so ahc_fini_scbdata() can
 * unwind a partial initialization.  Returns 0, ENOMEM, or ENXIO.
 */
static int
ahc_init_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources */
	scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
				     GFP_ATOMIC);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);

	/* Determine the number of hardware SCBs and initialize them */
	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	if (ahc->scb_data->maxhscbs == 0) {
		printk("%s: No SCB space found\n", ahc_name(ahc));
		return (ENXIO);
	}

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our hscbs */
	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
			     (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them */
	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
			     (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	memset(scb_data->hscbs, 0,
	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
	ahc_alloc_scbs(ahc);

	if (scb_data->numscbs == 0) {
		printk("%s: ahc_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahc_name(ahc));
		goto error_exit;
	}

	/*
	 * Reserve the next queued SCB.
	 */
	ahc->next_queued_scb = ahc_get_scb(ahc);

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:
	/* Partial setup is released later via ahc_fini_scbdata(). */
	return (ENOMEM);
}

/*
 * Release everything ahc_init_scbdata() acquired.  The switch enters
 * at the recorded init_level and falls through, undoing each step in
 * reverse order.
 */
static void
ahc_fini_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
					  sg_map->sg_dmamap);
			ahc_dmamem_free(ahc, scb_data->sg_dmat,
					sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			kfree(sg_map);
		}
		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
	}
	/* FALLTHROUGH */
	case 6:
		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
				  scb_data->sense_dmamap);
		/* FALLTHROUGH */
	case 5:
		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
				scb_data->sense_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
				   scb_data->sense_dmamap);
		/* FALLTHROUGH */
	case 4:
		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
		/* FALLTHROUGH */
	case 3:
		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
				  scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 2:
		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
				   scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 1:
		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
		break;
	case 0:
		break;
	}
	if (scb_data->scbarray != NULL)
		kfree(scb_data->scbarray);
}

/*
 * Grow the pool of kernel SCBs by one page's worth of S/G lists.
 * Allocation failures are not fatal here: the caller checks
 * scb_data->numscbs afterwards.
 */
static void
ahc_alloc_scbs(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;
	struct scb *next_scb;
	struct sg_map_node *sg_map;
	dma_addr_t physaddr;
	struct ahc_dma_seg *segs;
	int newcount;
	int i;

	scb_data = ahc->scb_data;
	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return;

	next_scb = &scb_data->scbarray[scb_data->numscbs];

	sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC);

	if (sg_map == NULL)
		return;

	/* Allocate S/G space for the next batch of SCBS */
	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
			     (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		kfree(sg_map);
		return;
	}

	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
			&sg_map->sg_physaddr, /*flags*/0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;

	/* One S/G list of AHC_NSEG entries per new SCB. */
	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
	newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
	for (i = 0; i < newcount; i++) {
		struct scb_platform_data *pdata;
#ifndef __linux__
		int error;
#endif
		pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
		if (pdata == NULL)
			break;
		next_scb->platform_data = pdata;
		next_scb->sg_map = sg_map;
		next_scb->sg_list = segs;
		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
		next_scb->ahc_softc = ahc;
		next_scb->flags = SCB_FREE;
#ifndef __linux__
		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0)
			break;
#endif
		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
		next_scb->hscb->tag = ahc->scb_data->numscbs;
		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
				  next_scb, links.sle);
		segs += AHC_NSEG;
		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
		next_scb++;
		ahc->scb_data->numscbs++;
	}
}

/*
 * Format a one-line, human-readable description of the controller
 * into @buf.  The caller supplies a buffer large enough for the
 * longest description (no bound is passed in).
 */
void
ahc_controller_info(struct ahc_softc *ahc, char *buf)
{
	int len;

	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
	buf += len;
	if ((ahc->features & AHC_TWIN) != 0)
		/*
		 * NOTE(review): assumes AHC_PRIMARY_CHANNEL occupies the
		 * low flag bit so that adding 'A' yields 'A' or 'B' —
		 * confirm against the flag definitions.
		 */
		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
			      "B SCSI Id=%d, primary %c, ",
			      ahc->our_id, ahc->our_id_b,
			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
	else {
		const char *speed;
		const char *type;

		speed = "";
		if ((ahc->features & AHC_ULTRA) != 0) {
			speed = "Ultra ";
		} else if ((ahc->features & AHC_DT) != 0) {
			speed = "Ultra160 ";
		} else if ((ahc->features & AHC_ULTRA2) != 0) {
			speed = "Ultra2 ";
		}
		if ((ahc->features & AHC_WIDE) != 0) {
			type = "Wide";
		} else {
			type = "Single";
		}
		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
			      speed, type, ahc->channel, ahc->our_id);
	}
	buf += len;

	if ((ahc->flags & AHC_PAGESCBS) != 0)
		sprintf(buf, "%d/%d SCBs", ahc->scb_data->maxhscbs,
			AHC_MAX_QUEUE);
	else
		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
}

/*
 * Program the chip into its operating state: SCSI IDs, termination,
 * timers, busy-target table, queue pointers, sequencer arrays, and
 * finally the sequencer program itself.  Returns 0 or the error from
 * ahc_loadseq().
 */
int
ahc_chip_init(struct ahc_softc *ahc)
{
	int	 term;
	int	 error;
	u_int	 i;
	u_int	 scsi_conf;
	u_int	 scsiseq_template;
	uint32_t physaddr;

	ahc_outb(ahc, SEQ_FLAGS, 0);
	ahc_outb(ahc, SEQ_FLAGS2, 0);

	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
	if (ahc->features & AHC_TWIN) {
		/*
		 * Setup Channel B first.
		 */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ?
			STPWEN : 0;
		ahc_outb(ahc, SCSIID, ahc->our_id_b);
		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
		if ((ahc->features & AHC_ULTRA2) != 0)
			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);

		/* Select Channel A */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
	}
	/* Now program channel A (or the only channel). */
	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
	else
		ahc_outb(ahc, SCSIID, ahc->our_id);
	scsi_conf = ahc_inb(ahc, SCSICONF);
	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
				|term|ahc->seltime
				|ENSTIMER|ACTNEGEN);
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);

	/* There are no untagged SCBs active yet. */
	for (i = 0; i < 16; i++) {
		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
		if ((ahc->flags & AHC_SCB_BTT) != 0) {
			int lun;

			/*
			 * The SCB based BTT allows an entry per
			 * target and lun pair.
			 */
			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
		}
	}

	/* All of our queues are empty */
	for (i = 0; i < 256; i++)
		ahc->qoutfifo[i] = SCB_LIST_NULL;
	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);

	for (i = 0; i < 256; i++)
		ahc->qinfifo[i] = SCB_LIST_NULL;

	if ((ahc->features & AHC_MULTI_TID) != 0) {
		ahc_outb(ahc, TARGID, 0);
		ahc_outb(ahc, TARGID + 1, 0);
	}

	/*
	 * Tell the sequencer where it can find our arrays in memory.
	 */
	physaddr = ahc->scb_data->hscb_busaddr;
	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);

	physaddr = ahc->shared_data_busaddr;
	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);

	/*
	 * Initialize the group code to command length table.
	 * This overrides the values in TARG_SCSIRATE, so only
	 * setup the table after we have processed that information.
	 */
	ahc_outb(ahc, CMDSIZE_TABLE, 5);
	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);

	if ((ahc->features & AHC_HS_MAILBOX) != 0)
		ahc_outb(ahc, HS_MAILBOX, 0);

	/* Tell the sequencer of our initial queue positions */
	if ((ahc->features & AHC_TARGETMODE) != 0) {
		ahc->tqinfifonext = 1;
		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
	}
	ahc->qinfifonext = 0;
	ahc->qoutfifonext = 0;
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
		ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext);
		ahc_outb(ahc, SDSCB_QOFF, 0);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		ahc_outb(ahc, QINPOS, ahc->qinfifonext);
		ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext);
	}

	/* We don't have any waiting selections */
	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);

	/* Our disconnection list is empty too */
	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);

	/* Message out buffer starts empty */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);

	/*
	 * Setup the allowed SCSI Sequences based on operational mode.
	 * If we are a target, we'll enable select in operations once
	 * we've had a lun enabled.
	 */
	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
	if ((ahc->flags & AHC_INITIATORROLE) != 0)
		scsiseq_template |= ENRSELI;
	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);

	/* Initialize our list of free SCBs. */
	ahc_build_free_scb_list(ahc);

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	/*
	 * Load the Sequencer program and Enable the adapter
	 * in "fast" mode.
	 */
	if (bootverbose)
		printk("%s: Downloading Sequencer Program...",
		       ahc_name(ahc));

	error = ahc_loadseq(ahc);
	if (error != 0)
		return (error);

	if ((ahc->features & AHC_ULTRA2) != 0) {
		int wait;

		/*
		 * Wait for up to 500ms for our transceivers
		 * to settle.  If the adapter does not have
		 * a cable attached, the transceivers may
		 * never settle, so don't complain if we
		 * fail here.
		 */
		for (wait = 5000;
		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
		     wait--)
			ahc_delay(100);
	}
	ahc_restart(ahc);
	return (0);
}

/*
 * Start the board, ready for normal operation
 */
int
ahc_init(struct ahc_softc *ahc)
{
	int	 max_targ;
	u_int	 i;
	u_int	 scsi_conf;
	u_int	 ultraenb;
	u_int	 discenable;
	u_int	 tagenable;
	size_t	 driver_data_size;

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0)
		ahc->flags |= AHC_SEQUENCER_DEBUG;
#endif

#ifdef AHC_PRINT_SRAM
	printk("Scratch Ram:");
	for (i = 0x20; i < 0x5f; i++) {
		if (((i % 8) == 0) && (i != 0)) {
			printk ("\n              ");
		}
		printk (" 0x%x", ahc_inb(ahc, i));
	}
	if ((ahc->features & AHC_MORE_SRAM) != 0) {
		for (i = 0x70; i < 0x7f; i++) {
			if (((i % 8) == 0) && (i != 0)) {
				printk ("\n              ");
			}
			printk (" 0x%x", ahc_inb(ahc, i));
		}
	}
	printk ("\n");
	/*
	 * Reading uninitialized scratch ram may
	 * generate parity errors.
	 */
	ahc_outb(ahc, CLRINT, CLRPARERR);
	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
#endif
	max_targ = 15;

	/*
	 * Assume we have a board at this stage and it has been reset.
	 */
	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
		ahc->our_id = ahc->our_id_b = 7;

	/*
	 * Default to allowing initiator operations.
	 */
	ahc->flags |= AHC_INITIATORROLE;

	/*
	 * Only allow target mode features if this unit has them enabled.
	 */
	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
		ahc->features &= ~AHC_TARGETMODE;

#ifndef __linux__
	/* DMA tag for mapping buffers into device visible space. */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
					? (dma_addr_t)0x7FFFFFFFFFULL
					: BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
			       /*nsegments*/AHC_NSEG,
			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &ahc->buffer_dmat) != 0) {
		return (ENOMEM);
	}
#endif

	ahc->init_level++;

	/*
	 * DMA tag for our command fifos and other data in system memory
	 * the card's sequencer must be able to access.  For initiator
	 * roles, we need to allocate space for the qinfifo and qoutfifo.
	 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
	 * When providing for the target mode role, we must additionally
	 * provide space for the incoming target command fifo and an extra
	 * byte to deal with a dma bug in some chip versions.
	 */
	driver_data_size = 2 * 256 * sizeof(uint8_t);
	if ((ahc->features & AHC_TARGETMODE) != 0)
		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
				 + /*DMA WideOdd Bug Buffer*/1;
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       driver_data_size,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
		return (ENOMEM);
	}

	ahc->init_level++;

	/* Allocation of driver data */
	if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
			     (void **)&ahc->qoutfifo,
			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
		return (ENOMEM);
	}

	ahc->init_level++;

	/* And permanently map it in */
	ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
			&ahc->shared_data_busaddr, /*flags*/0);

	if ((ahc->features & AHC_TARGETMODE) != 0) {
		/* Carve the shared area: target cmds first, then qoutfifo. */
		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
		ahc->dma_bug_buf = ahc->shared_data_busaddr
				 + driver_data_size - 1;
		/* All target command blocks start out invalid. */
		for (i = 0; i < AHC_TMODE_CMDS; i++)
			ahc->targetcmds[i].cmd_valid = 0;
		ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
	}
	ahc->qinfifo = &ahc->qoutfifo[256];

	ahc->init_level++;

	/* Allocate SCB data now that buffer_dmat is initialized */
	if (ahc->scb_data->maxhscbs == 0)
		if (ahc_init_scbdata(ahc) != 0)
			return (ENOMEM);

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
		printk("%s: unable to allocate ahc_tmode_tstate.  "
		       "Failing attach\n", ahc_name(ahc));
		return (ENOMEM);
	}

	if ((ahc->features & AHC_TWIN) != 0) {
		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
			printk("%s: unable to allocate ahc_tmode_tstate.  "
			       "Failing attach\n", ahc_name(ahc));
			return (ENOMEM);
		}
	}

	/* Page SCBs only when we have more kernel SCBs than hardware ones. */
	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) {
		ahc->flags |= AHC_PAGESCBS;
	} else {
		ahc->flags &= ~AHC_PAGESCBS;
	}

#ifdef AHC_DEBUG
	if (ahc_debug & AHC_SHOW_MISC) {
		printk("%s: hardware scb %u bytes; kernel scb %u bytes; "
		       "ahc_dma %u bytes\n", ahc_name(ahc),
		       (u_int)sizeof(struct hardware_scb),
		       (u_int)sizeof(struct scb),
		       (u_int)sizeof(struct ahc_dma_seg));
	}
#endif /* AHC_DEBUG */

	/*
	 * Look at the information that board initialization or
	 * the board bios has left us.
	 */
	if (ahc->features & AHC_TWIN) {
		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
		if ((scsi_conf & RESET_SCSI) != 0
		 && (ahc->flags & AHC_INITIATORROLE) != 0)
			ahc->flags |= AHC_RESET_BUS_B;
	}

	scsi_conf = ahc_inb(ahc, SCSICONF);
	if ((scsi_conf & RESET_SCSI) != 0
	 && (ahc->flags & AHC_INITIATORROLE) != 0)
		ahc->flags |= AHC_RESET_BUS_A;

	ultraenb = 0;
	tagenable = ALL_TARGETS_MASK;

	/* Grab the disconnection disable table and invert it for our needs */
	if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
		printk("%s: Host Adapter Bios disabled.  
Using default SCSI " "device parameters\n", ahc_name(ahc)); ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| AHC_TERM_ENB_A|AHC_TERM_ENB_B; discenable = ALL_TARGETS_MASK; if ((ahc->features & AHC_ULTRA) != 0) ultraenb = ALL_TARGETS_MASK; } else { discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) | ahc_inb(ahc, DISC_DSB)); if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) | ahc_inb(ahc, ULTRA_ENB); } if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) max_targ = 7; for (i = 0; i <= max_targ; i++) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int our_id; u_int target_id; char channel; channel = 'A'; our_id = ahc->our_id; target_id = i; if (i > 7 && (ahc->features & AHC_TWIN) != 0) { channel = 'B'; our_id = ahc->our_id_b; target_id = i % 8; } tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id, &tstate); /* Default to async narrow across the board */ memset(tinfo, 0, sizeof(*tinfo)); if (ahc->flags & AHC_USEDEFAULTS) { if ((ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; /* * These will be truncated when we determine the * connection type we have with the target. */ tinfo->user.period = ahc_syncrates->period; tinfo->user.offset = MAX_OFFSET; } else { u_int scsirate; uint16_t mask; /* Take the settings leftover in scratch RAM. */ scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); mask = (0x01 << i); if ((ahc->features & AHC_ULTRA2) != 0) { u_int offset; u_int maxsync; if ((scsirate & SOFS) == 0x0F) { /* * Haven't negotiated yet, * so the format is different. */ scsirate = (scsirate & SXFR) >> 4 | (ultraenb & mask) ? 
0x08 : 0x0 | (scsirate & WIDEXFER); offset = MAX_OFFSET_ULTRA2; } else offset = ahc_inb(ahc, TARG_OFFSET + i); if ((scsirate & ~WIDEXFER) == 0 && offset != 0) /* Set to the lowest sync rate, 5MHz */ scsirate |= 0x1c; maxsync = AHC_SYNCRATE_ULTRA2; if ((ahc->features & AHC_DT) != 0) maxsync = AHC_SYNCRATE_DT; tinfo->user.period = ahc_find_period(ahc, scsirate, maxsync); if (offset == 0) tinfo->user.period = 0; else tinfo->user.offset = MAX_OFFSET; if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ && (ahc->features & AHC_DT) != 0) tinfo->user.ppr_options = MSG_EXT_PPR_DT_REQ; } else if ((scsirate & SOFS) != 0) { if ((scsirate & SXFR) == 0x40 && (ultraenb & mask) != 0) { /* Treat 10MHz as a non-ultra speed */ scsirate &= ~SXFR; ultraenb &= ~mask; } tinfo->user.period = ahc_find_period(ahc, scsirate, (ultraenb & mask) ? AHC_SYNCRATE_ULTRA : AHC_SYNCRATE_FAST); if (tinfo->user.period != 0) tinfo->user.offset = MAX_OFFSET; } if (tinfo->user.period == 0) tinfo->user.offset = 0; if ((scsirate & WIDEXFER) != 0 && (ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; tinfo->user.protocol_version = 4; if ((ahc->features & AHC_DT) != 0) tinfo->user.transport_version = 3; else tinfo->user.transport_version = 2; tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; } tstate->ultraenb = 0; } ahc->user_discenable = discenable; ahc->user_tagenable = tagenable; return (ahc->bus_chip_init(ahc)); } void ahc_intr_enable(struct ahc_softc *ahc, int enable) { u_int hcntrl; hcntrl = ahc_inb(ahc, HCNTRL); hcntrl &= ~INTEN; ahc->pause &= ~INTEN; ahc->unpause &= ~INTEN; if (enable) { hcntrl |= INTEN; ahc->pause |= INTEN; ahc->unpause |= INTEN; } ahc_outb(ahc, HCNTRL, hcntrl); } /* * Ensure that the card is paused in a location * outside of all critical sections and that all * pending work is completed prior to returning. * This routine should only be called from outside * an interrupt context. 
 */
void
ahc_pause_and_flushwork(struct ahc_softc *ahc)
{
	int intstat;
	int maxloops;
	int paused;

	maxloops = 1000;
	ahc->flags |= AHC_ALL_INTERRUPTS;
	paused = FALSE;
	do {
		if (paused) {
			ahc_unpause(ahc);
			/*
			 * Give the sequencer some time to service
			 * any active selections.
			 */
			ahc_delay(500);
		}
		ahc_intr(ahc);
		ahc_pause(ahc);
		paused = TRUE;
		/* Block new selections while we drain pending work. */
		ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
		intstat = ahc_inb(ahc, INTSTAT);
		if ((intstat & INT_PEND) == 0) {
			ahc_clear_critical_section(ahc);
			intstat = ahc_inb(ahc, INTSTAT);
		}
	} while (--maxloops
	      && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
	      && ((intstat & INT_PEND) != 0
	       || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
	if (maxloops == 0) {
		printk("Infinite interrupt loop, INTSTAT = %x",
		       ahc_inb(ahc, INTSTAT));
	}
	ahc_platform_flushwork(ahc);
	ahc->flags &= ~AHC_ALL_INTERRUPTS;
}

#ifdef CONFIG_PM
/*
 * Prepare for suspend.  Refuses (EBUSY) while SCBs or target-mode
 * work are still outstanding; otherwise quiesces the chip.
 */
int
ahc_suspend(struct ahc_softc *ahc)
{

	ahc_pause_and_flushwork(ahc);

	if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
		ahc_unpause(ahc);
		return (EBUSY);
	}

#ifdef AHC_TARGET_MODE
	/*
	 * XXX What about ATIOs that have not yet been serviced?
	 * Perhaps we should just refuse to be suspended if we
	 * are acting in a target role.
	 */
	if (ahc->pending_device != NULL) {
		ahc_unpause(ahc);
		return (EBUSY);
	}
#endif
	ahc_shutdown(ahc);
	return (0);
}

/* Resume from suspend: full chip re-init, then restart the sequencer. */
int
ahc_resume(struct ahc_softc *ahc)
{

	ahc_reset(ahc, /*reinit*/TRUE);
	ahc_intr_enable(ahc, TRUE);
	ahc_restart(ahc);
	return (0);
}
#endif
/************************** Busy Target Table *********************************/
/*
 * Return the untagged transaction id for a given target/channel lun.
 * Optionally, clear the entry.
 */
static u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int scbid;
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		/* Per target/lun table lives in SCB RAM; preserve SCBPTR. */
		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
	}

	return (scbid);
}

/* Mark the untagged slot for @tcl empty (SCB_LIST_NULL). */
static void
ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
	}
}

/* Record @scbid as the active untagged transaction for @tcl. */
static void
ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
	}
}

/************************** SCB and SCB queue management **********************/
/*
 * Test whether @scb matches the target/channel/lun/tag/role criteria.
 * Wildcards (ALL_CHANNELS, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
 * SCB_LIST_NULL for tag) match anything.  Returns non-zero on match.
 */
int
ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahc, scb);
	char chan = SCB_GET_CHANNEL(ahc, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#ifdef AHC_TARGET_MODE
		int group;

		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group !=
			 XPT_FC_GROUP_TMODE)
			      && ((tag == scb->hscb->tag)
			       || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			      && ((tag == scb->io_ctx->csio.tag_id)
			       || (tag == SCB_LIST_NULL));
		}
#else /* !AHC_TARGET_MODE */
		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
#endif /* AHC_TARGET_MODE */
	}

	return match;
}

/*
 * Requeue (with CAM_REQUEUE_REQ) everything queued for @scb's device
 * and ask the platform layer to freeze that device's queue.
 */
static void
ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	int	target;
	char	channel;
	int	lun;

	target = SCB_GET_TARGET(ahc, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahc, scb);

	ahc_search_qinfifo(ahc, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	ahc_platform_freeze_devq(ahc, scb);
}

/*
 * Place @scb at the tail of the qinfifo and notify the sequencer of
 * the new queue position.
 */
void
ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
{
	struct scb *prev_scb;

	prev_scb = NULL;
	if (ahc_qinfifo_count(ahc) != 0) {
		u_int prev_tag;
		uint8_t prev_pos;

		/* uint8_t arithmetic wraps naturally around the 256-entry fifo. */
		prev_pos = ahc->qinfifonext - 1;
		prev_tag = ahc->qinfifo[prev_pos];
		prev_scb = ahc_lookup_scb(ahc, prev_tag);
	}
	ahc_qinfifo_requeue(ahc, prev_scb, scb);
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}
}

/*
 * Link @scb into the qinfifo after @prev_scb (or at the head when
 * prev_scb is NULL) and fix up the hardware SCB "next" chain the
 * sequencer follows when dma'ing SCBs down.
 */
static void
ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
	} else {
		prev_scb->hscb->next = scb->hscb->tag;
		ahc_sync_scb(ahc, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Number of SCBs currently queued to (but not yet taken by) the
 * sequencer: the distance between our producer index and the chip's
 * consumer index, modulo 256 via uint8_t wraparound.
 */
static int
ahc_qinfifo_count(struct ahc_softc *ahc)
{
	uint8_t qinpos;
	uint8_t diff;

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		/* Reading SNSCB_QOFF latches it; write it back unchanged. */
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	diff = ahc->qinfifonext - qinpos;
	return (diff);
}

/*
 * Walk the qinfifo looking for SCBs matching the given criteria and
 * apply @action to each match: SEARCH_COMPLETE finishes them with
 * @status, SEARCH_REMOVE drops them from the queue, SEARCH_COUNT only
 * counts (matches are requeued).  Returns the number of matches.
 */
int
ahc_search_qinfifo(struct ahc_softc *ahc, int target,
		   char channel, int lun, u_int tag, role_t role,
		   uint32_t status, ahc_search_action action)
{
	struct scb *scb;
	struct scb *prev_scb;
	uint8_t qinstart;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t next;
	uint8_t prev;
	uint8_t curscbptr;
	int	found;
	int	have_qregs;

	qintail = ahc->qinfifonext;
	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
	if (have_qregs) {
		qinstart = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinstart);
	} else
		qinstart = ahc_inb(ahc, QINPOS);
	qinpos = qinstart;
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahc->qinfifonext = qinpos;
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	while (qinpos != qintail) {
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
		if (scb == NULL) {
			printk("qinpos = %d, SCB index = %d\n",
				qinpos, ahc->qinfifo[qinpos]);
			panic("Loop 1\n");
		}
		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb, status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printk("Inactive SCB in qinfifo\n");
				ahc_done(ahc, scb);

				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				break;
			case SEARCH_COUNT:
				ahc_qinfifo_requeue(ahc, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			ahc_qinfifo_requeue(ahc, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos++;
	}

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}

	if (action != SEARCH_COUNT
	 && (found != 0)
	 && (qinstart != ahc->qinfifonext)) {
		/*
		 * The sequencer may be in the process of dmaing
		 * down the SCB at the beginning of the queue.
		 * This could be problematic if either the first,
		 * or the second SCB is removed from the queue
		 * (the first SCB includes a pointer to the "next"
		 * SCB to dma).  If we have removed any entries, swap
		 * the first element in the queue with the next HSCB
		 * so the sequencer will notice that NEXT_QUEUED_SCB
		 * has changed during its dma attempt and will retry
		 * the DMA.
		 */
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);

		if (scb == NULL) {
			printk("found = %d, qinstart = %d, qinfifionext = %d\n",
				found, qinstart, ahc->qinfifonext);
			panic("First/Second Qinfifo fixup\n");
		}
		/*
		 * ahc_swap_with_next_hscb forces our next pointer to
		 * point to the reserved SCB for future commands.  Save
		 * and restore our original next pointer to maintain
		 * queue integrity.
		 */
		next = scb->hscb->next;
		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
		ahc_swap_with_next_hscb(ahc, scb);
		scb->hscb->next = next;
		ahc->qinfifo[qinstart] = scb->hscb->tag;

		/* Tell the card about the new head of the qinfifo. */
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);

		/* Fixup the tail "next" pointer.
*/ qintail = ahc->qinfifonext - 1; scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); scb->hscb->next = ahc->next_queued_scb->hscb->tag; } /* * Search waiting for selection list. */ curscbptr = ahc_inb(ahc, SCBPTR); next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ prev = SCB_LIST_NULL; while (next != SCB_LIST_NULL) { uint8_t scb_index; ahc_outb(ahc, SCBPTR, next); scb_index = ahc_inb(ahc, SCB_TAG); if (scb_index >= ahc->scb_data->numscbs) { printk("Waiting List inconsistency. " "SCB index == %d, yet numscbs == %d.", scb_index, ahc->scb_data->numscbs); ahc_dump_card_state(ahc); panic("for safety"); } scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printk("scb_index = %d, next = %d\n", scb_index, next); panic("Waiting List traversal\n"); } if (ahc_match_scb(ahc, scb, target, channel, lun, SCB_LIST_NULL, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = ahc_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) ahc_set_transaction_status(scb, status); cstat = ahc_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) ahc_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in Waiting List\n"); ahc_done(ahc, scb); /* FALLTHROUGH */ } case SEARCH_REMOVE: next = ahc_rem_wscb(ahc, next, prev); break; case SEARCH_COUNT: prev = next; next = ahc_inb(ahc, SCB_NEXT); break; } } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } } ahc_outb(ahc, SCBPTR, curscbptr); found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, channel, lun, status, action); if (action == SEARCH_COMPLETE) ahc_release_untagged_queues(ahc); return (found); } int ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx, int target, char channel, int lun, uint32_t status, ahc_search_action action) { struct scb *scb; int maxtarget; int found; int i; if (action == SEARCH_COMPLETE) { /* * Don't attempt to run any queued untagged 
transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); } found = 0; i = 0; if ((ahc->flags & AHC_SCB_BTT) == 0) { maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } } else { maxtarget = 0; } for (; i < maxtarget; i++) { struct scb_tailq *untagged_q; struct scb *next_scb; untagged_q = &(ahc->untagged_queues[i]); next_scb = TAILQ_FIRST(untagged_q); while (next_scb != NULL) { scb = next_scb; next_scb = TAILQ_NEXT(scb, links.tqe); /* * The head of the list may be the currently * active untagged command for a device. * We're only searching for commands that * have not been started. A transaction * marked active but still in the qinfifo * is removed by the qinfifo scanning code * above. */ if ((scb->flags & SCB_ACTIVE) != 0) continue; if (ahc_match_scb(ahc, scb, target, channel, lun, SCB_LIST_NULL, ROLE_INITIATOR) == 0 || (ctx != NULL && ctx != scb->io_ctx)) continue; /* * We found an scb that needs to be acted on. 
*/ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = ahc_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) ahc_set_transaction_status(scb, status); cstat = ahc_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) ahc_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in untaggedQ\n"); ahc_done(ahc, scb); break; } case SEARCH_REMOVE: scb->flags &= ~SCB_UNTAGGEDQ; TAILQ_REMOVE(untagged_q, scb, links.tqe); break; case SEARCH_COUNT: break; } } } if (action == SEARCH_COMPLETE) ahc_release_untagged_queues(ahc); return (found); } int ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, int stop_on_first, int remove, int save_state) { struct scb *scbp; u_int next; u_int prev; u_int count; u_int active_scb; count = 0; next = ahc_inb(ahc, DISCONNECTED_SCBH); prev = SCB_LIST_NULL; if (save_state) { /* restore this when we're done */ active_scb = ahc_inb(ahc, SCBPTR); } else /* Silence compiler */ active_scb = SCB_LIST_NULL; while (next != SCB_LIST_NULL) { u_int scb_index; ahc_outb(ahc, SCBPTR, next); scb_index = ahc_inb(ahc, SCB_TAG); if (scb_index >= ahc->scb_data->numscbs) { printk("Disconnected List inconsistency. " "SCB index == %d, yet numscbs == %d.", scb_index, ahc->scb_data->numscbs); ahc_dump_card_state(ahc); panic("for safety"); } if (next == prev) { panic("Disconnected List Loop. " "cur SCBPTR == %x, prev SCBPTR == %x.", next, prev); } scbp = ahc_lookup_scb(ahc, scb_index); if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, ROLE_INITIATOR)) { count++; if (remove) { next = ahc_rem_scb_from_disc_list(ahc, prev, next); } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } if (stop_on_first) break; } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } } if (save_state) ahc_outb(ahc, SCBPTR, active_scb); return (count); } /* * Remove an SCB from the on chip list of disconnected transactions. * This is empty/unused if we are not performing SCB paging. 
 */
/*
 * Unlink the SCB at hardware slot @scbptr from the disconnected list
 * (@prev is its predecessor slot or SCB_LIST_NULL if it is the head),
 * clear its control byte, and push it onto the chip's free-SCB list.
 * Returns the slot that followed the removed entry so the caller's
 * traversal can continue.
 */
static u_int
ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
{
	u_int next;

	ahc_outb(ahc, SCBPTR, scbptr);
	next = ahc_inb(ahc, SCB_NEXT);

	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	if (prev != SCB_LIST_NULL) {
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	} else
		ahc_outb(ahc, DISCONNECTED_SCBH, next);

	return (next);
}

/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
{
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	 */
	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);

	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
	}
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
{
	u_int curscb, next;

	/*
	 * Select the SCB we want to abort and
	 * pull the next pointer out of it.
	 */
	curscb = ahc_inb(ahc, SCBPTR);
	ahc_outb(ahc, SCBPTR, scbpos);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the necessary fields */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* update the waiting list */
	if (prev == SCB_LIST_NULL) {
		/* First in the list */
		ahc_outb(ahc, WAITING_SCBH, next);

		/*
		 * Ensure we aren't attempting to perform
		 * selection for this entry.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else {
		/*
		 * Select the scb that pointed to us
		 * and update its next pointer.
		 */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	}

	/*
	 * Point us back at the original scb position.
	 */
	ahc_outb(ahc, SCBPTR, curscb);
	return next;
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.  Sweeps, in order: the qinfifo and waiting
 * list (via ahc_search_qinfifo), the busy-target table, the disconnected
 * list, the raw hardware SCB array, and finally the kernel pending list.
 * Returns the number of aborted commands.
 */
static int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct	scb *scbp;
	struct	scb *scbp_next;
	u_int	active_scb;
	int	i, j;
	int	maxtarget;
	int	minlun;
	int	maxlun;
	int	found;

	/*
	 * Don't attempt to run any queued untagged transactions
	 * until we are done with the abort process.
	 */
	ahc_freeze_untagged_queues(ahc);

	/* restore this when we're done */
	active_scb = ahc_inb(ahc, SCBPTR);

	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {
		/*
		 * Unless we are using an SCB based
		 * busy targets table, there is only
		 * one table entry for all luns of
		 * a target.
		 */
		minlun = 0;
		maxlun = 1;
		if ((ahc->flags & AHC_SCB_BTT) != 0)
			maxlun = AHC_NUM_LUNS;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				scbid = ahc_index_busy_tcl(ahc, tcl);
				scbp = ahc_lookup_scb(ahc, scbid);
				if (scbp == NULL
				 || ahc_match_scb(ahc, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
			}
		}

		/*
		 * Go through the disconnected list and remove any entries we
		 * have queued for completion, 0'ing their control byte too.
		 * We save the active SCB and restore it ourselves, so there
		 * is no reason for this search to restore it too.
		 */
		ahc_search_disc_list(ahc, target, channel, lun, tag,
				     /*stop_on_first*/FALSE, /*remove*/TRUE,
				     /*save_state*/FALSE);
	}

	/*
	 * Go through the hardware SCB array looking for commands that
	 * were active but not on any list.  In some cases, these remnants
	 * might not still have mappings in the scbindex array (e.g. unexpected
	 * bus free with the same scb queued for an abort).  Don't hold this
	 * against them.
	 */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		u_int scbid;

		ahc_outb(ahc, SCBPTR, i);
		scbid = ahc_inb(ahc, SCB_TAG);
		scbp = ahc_lookup_scb(ahc, scbid);
		if ((scbp == NULL && scbid != SCB_LIST_NULL)
		 || (scbp != NULL
		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
			ahc_add_curscb_to_free_list(ahc);
	}

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahc->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* Fetch the successor first: ahc_done unlinks scbp. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = ahc_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahc_set_transaction_status(scbp, status);
			if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahc_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printk("Inactive SCB on pending list\n");
			ahc_done(ahc, scbp);
			found++;
		}
	}
	ahc_outb(ahc, SCBPTR, active_scb);
	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
	ahc_release_untagged_queues(ahc);
	return found;
}

/*
 * Assert SCSIRSTO to reset the currently selected bus, wait the mandated
 * reset-hold delay, then deassert it.  Reset interrupts are masked around
 * the operation so our own reset does not re-enter the ISR, and pending
 * interrupt status is cleared before re-enabling ENSCSIRST.
 */
static void
ahc_reset_current_bus(struct ahc_softc *ahc)
{
	uint8_t scsiseq;

	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
	ahc_flush_device_writes(ahc);
	ahc_delay(AHC_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);

	ahc_clear_intstat(ahc);

	/* Re-enable reset interrupts */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
}

/*
 * Reset SCSI bus @channel ('A' or 'B'), optionally driving the reset
 * ourselves (@initiate_reset).  Completes already-finished commands,
 * aborts everything outstanding on the channel with CAM_SCSI_BUS_RESET,
 * notifies the transport layer, and reverts all transfer negotiations to
 * async/narrow.  On twin-channel chips the "other" bus can be reset
 * stealthily without disturbing the active one.  Returns the number of
 * commands aborted.
 */
int
ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
{
	struct	ahc_devinfo devinfo;
	u_int	initiator, target, max_scsiid;
	u_int	sblkctl;
	u_int	scsiseq;
	u_int	simode1;
	int	found;
	int	restart_needed;
	char	cur_channel;

	ahc->pending_device = NULL;

	ahc_compile_devinfo(&devinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahc_pause(ahc);

	/* Make sure the sequencer is in a safe location. */
	ahc_clear_critical_section(ahc);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
	/*
	 * XXX - In Twin mode, the tqinfifo may have commands
	 *	 for an unaffected channel in it.  However, if
	 *	 we have run out of ATIO resources to drain that
	 *	 queue, we may not get them all out here.  Further,
	 *	 the blocked transactions for the reset channel
	 *	 should just be killed off, irrespecitve of whether
	 *	 we are blocked on ATIO resources.  Write a routine
	 *	 to compact the tqinfifo appropriately.
	 */
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
	}
#endif

	/*
	 * Reset the bus if we are initiating this reset
	 */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	cur_channel = 'A';
	if ((ahc->features & AHC_TWIN) != 0
	 && ((sblkctl & SELBUSB) != 0))
		cur_channel = 'B';
	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
	if (cur_channel != channel) {
		/* Case 1: Command for another bus is active
		 * Stealthily reset the other bus without
		 * upsetting the current bus.
		 */
		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#ifdef AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		ahc_outb(ahc, SBLKCTL, sblkctl);
		restart_needed = FALSE;
	} else {
		/* Case 2: A command from this bus is active or we're idle */
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#ifdef AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		restart_needed = TRUE;
	}

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahc_tmode_tstate* tstate;
		u_int lun;

		tstate = ahc->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif
	/* Notify the XPT that a bus reset occurred */
	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {

		if (ahc->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahc_devinfo devinfo;

			ahc_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    channel, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHC_TRANS_CUR, /*paused*/TRUE);
			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
					 /*period*/0, /*offset*/0,
					 /*ppr_options*/0, AHC_TRANS_CUR,
					 /*paused*/TRUE);
		}
	}

	if (restart_needed)
		ahc_restart(ahc);
	else
		ahc_unpause(ahc);
	return found;
}

/***************************** Residual Processing ****************************/
/*
 * Calculate the residual for a just completed SCB.
 */
static void
ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct status_pkt *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_RESID_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */
	hscb = scb->hscb;
	sgptr = ahc_le32toh(hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_RESID_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	spkt = &hscb->shared_data.status;
	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = ahc_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
	} else {
		struct ahc_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
			sg++;
			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
		}
	}
	if ((scb->flags & SCB_SENSE) == 0)
		ahc_set_residual(scb, resid);
	else
		ahc_set_sense_residual(scb, resid);

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_SHOW_MISC) != 0) {
		ahc_print_path(ahc, scb);
		printk("Handled %sResidual of %d bytes\n",
		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
	}
#endif
}

/******************************* Target Mode **********************************/
#ifdef AHC_TARGET_MODE
/*
 * Add a target mode event to this lun's queue
 */
static void
ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahc_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	/* Ring-buffer occupancy (event_buffer holds pending events). */
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
		/* Buffer full: drop the oldest event and say so. */
		xpt_print_path(lstate->path);
		printk("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}

/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 */
void
ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/* Drain queued events while immediate-notify CCBs are available. */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahc_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHC_DUMP_SEQ
/*
 * Debug helper: read back the sequencer instruction RAM and print each
 * 32-bit instruction word.  Leaves SEQCTL in LOADRAM mode (debug-only
 * code path).
 */
void
ahc_dumpseq(struct ahc_softc* ahc)
{
	int i;

	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	for (i = 0; i < ahc->instruction_ram_size; i++) {
		uint8_t ins_bytes[4];

		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
		printk("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the sequencer firmware (seqprog) into instruction RAM, applying
 * the chip-specific patch table as we go and recording, in downloaded-
 * address terms, the critical sections the kernel must not interrupt the
 * sequencer inside.  Also fills in the downloadable-constants table
 * (queue offsets, cache-line masks, SG prefetch sizes) referenced by
 * patched instructions.  Returns 0 on success or ENOMEM if the program
 * does not fit in this chip's instruction RAM.
 */
static int
ahc_loadseq(struct ahc_softc *ahc)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	const struct patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	int	downloaded;
	uint8_t	download_consts[7];

	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/* Setup downloadable constant table */
	download_consts[QOUTFIFO_OFFSET] = 0;
	if (ahc->targetcmds != NULL)
		download_consts[QOUTFIFO_OFFSET] += 32;
	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
	sg_prefetch_cnt = ahc->pci_cachesize;
	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);

	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}

		if (downloaded == ahc->instruction_ram_size) {
			/*
			 * We're about to exceed the instruction
			 * storage capacity for this chip.  Fail
			 * the load.
			 */
			printk("\n%s: Program too large for instruction memory "
			       "size of %d!\n", ahc_name(ahc),
			       ahc->instruction_ram_size);
			return (ENOMEM);
		}

		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {
		cs_count *= sizeof(struct cs);
		ahc->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);

	if (bootverbose) {
		printk(" %d instructions downloaded\n", downloaded);
		printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
		       ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
	}
	return (0);
}

/*
 * Advance the patch cursor for instruction @start_instr and decide whether
 * that instruction should be downloaded.  Patches whose predicate
 * (patch_func) rejects this chip cause a run of instructions to be
 * skipped (*skip_addr).  Returns 0 to skip the instruction, 1 to download
 * it.  *start_patch is updated so the caller can continue scanning from
 * where we left off.
 */
static int
ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	const struct patch *cur_patch;
	const struct patch *last_patch;
	u_int	num_patches;

	num_patches = ARRAY_SIZE(patches);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
		if (cur_patch->patch_func(ahc) == 0) {
			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our intruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Download one sequencer instruction (@instrptr'th word of seqprog),
 * relocating jump targets to account for patched-out instructions,
 * substituting downloadable constants (@dconsts) where the parity bit
 * flags an immediate as a constant index, converting BMOV to MOV on chips
 * without a command channel, and computing the parity bit for Ultra2
 * parts (older parts use a compressed instruction encoding instead).
 */
static void
ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		const struct patch *cur_patch;
		int address_offset;
		u_int address;
		u_int skip_addr;
		u_int i;

		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		/*
		 * Count how many instructions before the jump target were
		 * removed by patches; the target shifts down by that much.
		 */
		for (i = 0; i < address;) {
			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				end_addr = min(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		if (fmt1_ins->parity != 0) {
			/* Parity bit doubles as "immediate is a dconst". */
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		if ((ahc->features & AHC_CMD_CHAN) == 0
		 && opcode == AIC_OP_BMOV) {
			/*
			 * Block move was added at the same time
			 * as the command channel.  Verify that
			 * this is only a move of a single element
			 * and convert the BMOV to a MOV
			 * (AND with an immediate of FF).
			 */
			if (fmt1_ins->immediate != 1)
				panic("%s: BMOV not supported\n",
				      ahc_name(ahc));
			fmt1_ins->opcode = AIC_OP_AND;
			fmt1_ins->immediate = 0xff;
		}
		/* FALLTHROUGH */
	case AIC_OP_ROL:
		if ((ahc->features & AHC_ULTRA2) != 0) {
			int i, count;

			/* Calculate odd parity for the instruction */
			for (i = 0, count = 0; i < 31; i++) {
				uint32_t mask;

				mask = 0x01 << i;
				if ((instr.integer & mask) != 0)
					count++;
			}
			if ((count & 0x01) == 0)
				instr.format1.parity = 1;
		} else {
			/* Compress the instruction for older sequencers */
			if (fmt3_ins != NULL) {
				instr.integer =
					fmt3_ins->immediate
				      | (fmt3_ins->source << 8)
				      | (fmt3_ins->address << 16)
				      | (fmt3_ins->opcode << 25);
			} else {
				instr.integer =
					fmt1_ins->immediate
				      | (fmt1_ins->source << 8)
				      | (fmt1_ins->destination << 16)
				      | (fmt1_ins->ret << 24)
				      | (fmt1_ins->opcode << 25);
			}
		}
		/* The sequencer is a little endian cpu */
		instr.integer = ahc_htole32(instr.integer);
		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
		break;
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}

/*
 * Pretty-print a register value for ahc_dump_card_state(): prints
 * "name[0xvalue]" followed by the symbolic names of the bits set in
 * @value, as described by @table.  *cur_column tracks output width and a
 * newline is emitted when it passes @wrap_point.  Returns the number of
 * characters printed.
 */
int
ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
		   const char *name, u_int address, u_int value,
		   u_int *cur_column, u_int wrap_point)
{
	int	printed;
	u_int	printed_mask;

	if (cur_column != NULL && *cur_column >= wrap_point) {
		printk("\n");
		*cur_column = 0;
	}
	printed = printk("%s[0x%x]", name, value);
	if (table == NULL) {
		printed += printk(" ");
		*cur_column += printed;
		return (printed);
	}
	printed_mask = 0;
	while (printed_mask != 0xFF) {
		int entry;

		for (entry = 0; entry < num_entries; entry++) {
			if (((value & table[entry].mask)
			  != table[entry].value)
			 || ((printed_mask & table[entry].mask)
			  == table[entry].mask))
				continue;

			printed += printk("%s%s",
					  printed_mask == 0 ? ":(" : "|",
					  table[entry].name);
			printed_mask |= table[entry].mask;
			break;
		}
		if (entry >= num_entries)
			break;
	}
	if (printed_mask != 0)
		printed += printk(") ");
	else
		printed += printk(" ");
	if (cur_column != NULL)
		*cur_column += printed;
	return (printed);
}

/*
 * Dump the controller's register and queue state to the console for
 * debugging.  Pauses the chip if it was running (and notes whether it was
 * already paused).  NOTE(review): this function continues beyond the end
 * of this excerpt; the trailing statement is completed in the following
 * chunk of the file.
 */
void
ahc_dump_card_state(struct ahc_softc *ahc)
{
	struct	scb *scb;
	struct	scb_tailq *untagged_q;
	u_int	cur_col;
	int	paused;
	int	target;
	int	maxtarget;
	int	i;
	uint8_t last_phase;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t qoutpos;
	uint8_t scb_index;
	uint8_t saved_scbptr;

	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	last_phase = ahc_inb(ahc, LASTPHASE);
	printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
	       "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
	if (paused)
		printk("Card was paused\n");
	printk("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
	       ahc_inb(ahc, ARG_2));
	printk("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
	       ahc_inb(ahc, SCBPTR));
	cur_col = 0;
	if ((ahc->features & AHC_DT) != 0)
		ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50);
	ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50);
	ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50);
	ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50);
	ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50);
	ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50);
	ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50);
	ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50);
	ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50);
	ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50);
	ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50);
	ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50);
	ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50);
	ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50);
	ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50);
	ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50);
	ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50);
	ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50);
	ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50);
	if (cur_col != 0)
		printk("\n");
	printk("STACK:");
	for (i = 0; i < STACK_SIZE; i++)
		printk(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8));
	printk("\nSCB count = %d\n", ahc->scb_data->numscbs);
	printk("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
	printk("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
	/* QINFIFO */
	printk("QINFIFO entries: ");
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	qintail = ahc->qinfifonext;
	while (qinpos != qintail) {
		printk("%d ", ahc->qinfifo[qinpos]);
		qinpos++;
	}
	printk("\n");

	printk("Waiting Queue entries: ");
	scb_index = ahc_inb(ahc, WAITING_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printk("\n");

	printk("Disconnected Queue entries: ");
	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printk("\n");

	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	printk("QOUTFIFO entries: ");
	qoutpos = ahc->qoutfifonext;
	i = 0;
	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
		printk("%d ", ahc->qoutfifo[qoutpos]);
		qoutpos++;
	}
	printk("\n");

	printk("Sequencer Free SCB List: ");
	scb_index = ahc_inb(ahc, FREE_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printk("%d ", scb_index);
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printk("\n");

	printk("Sequencer SCB Info: ");
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		ahc_outb(ahc, SCBPTR, i);
		cur_col = printk("\n%3d ", i);

		ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60);
		ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60);
		ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60);
		ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
	}
	printk("\n");

	printk("Pending list: ");
	i = 0;
	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
		if (i++ > 256)
			break;
		cur_col = printk("\n%3d ", scb->hscb->tag);
		ahc_scb_control_print(scb->hscb->control, &cur_col, 60);
		ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60);
		ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60);
		if ((ahc->flags & AHC_PAGESCBS) == 0) {
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
			printk("(");
			ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL),
					      &cur_col, 60);
			ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
			printk(")");
		}
	}
	printk("\n");

	printk("Kernel Free SCB list: ");
	i = 0;
	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
		if (i++ > 256)
			break;
		printk("%d ", scb->hscb->tag);
	}
	printk("\n");

	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ?
15 : 7; for (target = 0; target <= maxtarget; target++) { untagged_q = &ahc->untagged_queues[target]; if (TAILQ_FIRST(untagged_q) == NULL) continue; printk("Untagged Q(%d): ", target); i = 0; TAILQ_FOREACH(scb, untagged_q, links.tqe) { if (i++ > 256) break; printk("%d ", scb->hscb->tag); } printk("\n"); } ahc_platform_dump_card_state(ahc); printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) ahc_unpause(ahc); } /************************* Target Mode ****************************************/ #ifdef AHC_TARGET_MODE cam_status ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, struct ahc_tmode_tstate **tstate, struct ahc_tmode_lstate **lstate, int notfound_failure) { if ((ahc->features & AHC_TARGETMODE) == 0) return (CAM_REQ_INVALID); /* * Handle the 'black hole' device that sucks up * requests to unattached luns on enabled targets. */ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *tstate = NULL; *lstate = ahc->black_hole; } else { u_int max_id; max_id = (ahc->features & AHC_WIDE) ? 
16 : 8; if (ccb->ccb_h.target_id >= max_id) return (CAM_TID_INVALID); if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) return (CAM_LUN_INVALID); *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; *lstate = NULL; if (*tstate != NULL) *lstate = (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; } if (notfound_failure != 0 && *lstate == NULL) return (CAM_PATH_INVALID); return (CAM_REQ_CMP); } void ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_en_lun *cel; cam_status status; u_long s; u_int target; u_int lun; u_int target_mask; u_int our_id; int error; char channel; status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, /*notfound_failure*/FALSE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if (cam_sim_bus(sim) == 0) our_id = ahc->our_id; else our_id = ahc->our_id_b; if (ccb->ccb_h.target_id != our_id) { /* * our_id represents our initiator ID, or * the ID of the first target to have an * enabled lun in target mode. There are * two cases that may preclude enabling a * target id other than our_id. * * o our_id is for an active initiator role. * Since the hardware does not support * reselections to the initiator role at * anything other than our_id, and our_id * is used by the hardware to indicate the * ID to use for both select-out and * reselect-out operations, the only target * ID we can support in this mode is our_id. * * o The MULTARGID feature is not available and * a previous target mode ID has been enabled. */ if ((ahc->features & AHC_MULTIROLE) != 0) { if ((ahc->features & AHC_MULTI_TID) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) { /* * Only allow additional targets if * the initiator role is disabled. * The hardware cannot handle a re-select-in * on the initiator id during a re-select-out * on a different target id. 
*/ status = CAM_TID_INVALID; } else if ((ahc->flags & AHC_INITIATORROLE) != 0 || ahc->enabled_luns > 0) { /* * Only allow our target id to change * if the initiator role is not configured * and there are no enabled luns which * are attached to the currently registered * scsi id. */ status = CAM_TID_INVALID; } } else if ((ahc->features & AHC_MULTI_TID) == 0 && ahc->enabled_luns > 0) { status = CAM_TID_INVALID; } } if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } /* * We now have an id that is valid. * If we aren't in target mode, switch modes. */ if ((ahc->flags & AHC_TARGETROLE) == 0 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { u_long s; ahc_flag saved_flags; printk("Configuring Target Mode\n"); ahc_lock(ahc, &s); if (LIST_FIRST(&ahc->pending_scbs) != NULL) { ccb->ccb_h.status = CAM_BUSY; ahc_unlock(ahc, &s); return; } saved_flags = ahc->flags; ahc->flags |= AHC_TARGETROLE; if ((ahc->features & AHC_MULTIROLE) == 0) ahc->flags &= ~AHC_INITIATORROLE; ahc_pause(ahc); error = ahc_loadseq(ahc); if (error != 0) { /* * Restore original configuration and notify * the caller that we cannot support target mode. * Since the adapter started out in this * configuration, the firmware load will succeed, * so there is no point in checking ahc_loadseq's * return value. */ ahc->flags = saved_flags; (void)ahc_loadseq(ahc); ahc_restart(ahc); ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; return; } ahc_restart(ahc); ahc_unlock(ahc, &s); } cel = &ccb->cel; target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; channel = SIM_CHANNEL(ahc, sim); target_mask = 0x01 << target; if (channel == 'B') target_mask <<= 8; if (cel->enable != 0) { u_int scsiseq; /* Are we already enabled?? */ if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printk("Lun already enabled\n"); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { /* * Don't (yet?) support vendor * specific commands. 
*/ ccb->ccb_h.status = CAM_REQ_INVALID; printk("Non-zero Group Codes\n"); return; } /* * Seems to be okay. * Setup our data structures. */ if (target != CAM_TARGET_WILDCARD && tstate == NULL) { tstate = ahc_alloc_tstate(ahc, target, channel); if (tstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate tstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } } lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } memset(lstate, 0, sizeof(*lstate)); status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { kfree(lstate); xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); ahc_lock(ahc, &s); ahc_pause(ahc); if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = lstate; ahc->enabled_luns++; if ((ahc->features & AHC_MULTI_TID) != 0) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask |= target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } else { u_int our_id; char channel; channel = SIM_CHANNEL(ahc, sim); our_id = SIM_SCSI_ID(ahc, sim); /* * This can only happen if selections * are not enabled */ if (target != our_id) { u_int sblkctl; char cur_channel; int swap; sblkctl = ahc_inb(ahc, SBLKCTL); cur_channel = (sblkctl & SELBUSB) ? 
'B' : 'A'; if ((ahc->features & AHC_TWIN) == 0) cur_channel = 'A'; swap = cur_channel != channel; if (channel == 'A') ahc->our_id = target; else ahc->our_id_b = target; if (swap) ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); ahc_outb(ahc, SCSIID, target); if (swap) ahc_outb(ahc, SBLKCTL, sblkctl); } } } else ahc->black_hole = lstate; /* Allow select-in operations */ if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); } ahc_unpause(ahc); ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printk("Lun now enabled for target mode\n"); } else { struct scb *scb; int i, empty; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } ahc_lock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { struct ccb_hdr *ccbh; ccbh = &scb->io_ctx->ccb_h; if (ccbh->func_code == XPT_CONT_TARGET_IO && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ printk("CTIO pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; ahc_unlock(ahc, &s); return; } } if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printk("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printk("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { ahc_unlock(ahc, &s); return; } xpt_print_path(ccb->ccb_h.path); printk("Target mode disabled\n"); xpt_free_path(lstate->path); kfree(lstate); ahc_pause(ahc); /* Can we clean up the target too? 
*/ if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = NULL; ahc->enabled_luns--; for (empty = 1, i = 0; i < 8; i++) if (tstate->enabled_luns[i] != NULL) { empty = 0; break; } if (empty) { ahc_free_tstate(ahc, target, channel, /*force*/FALSE); if (ahc->features & AHC_MULTI_TID) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask &= ~target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } } } else { ahc->black_hole = NULL; /* * We can't allow selections without * our black hole device. */ empty = TRUE; } if (ahc->enabled_luns == 0) { /* Disallow select-in */ u_int scsiseq; scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq &= ~ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq &= ~ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); if ((ahc->features & AHC_MULTIROLE) == 0) { printk("Configuring Initiator Mode\n"); ahc->flags &= ~AHC_TARGETROLE; ahc->flags |= AHC_INITIATORROLE; /* * Returning to a configuration that * fit previously will always succeed. */ (void)ahc_loadseq(ahc); ahc_restart(ahc); /* * Unpaused. The extra unpause * that follows is harmless. */ } } ahc_unpause(ahc); ahc_unlock(ahc, &s); } } static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) { u_int scsiid_mask; u_int scsiid; if ((ahc->features & AHC_MULTI_TID) == 0) panic("ahc_update_scsiid called on non-multitid unit\n"); /* * Since we will rely on the TARGID mask * for selection enables, ensure that OID * in SCSIID is not set to some other ID * that we don't want to allow selections on. 
*/ if ((ahc->features & AHC_ULTRA2) != 0) scsiid = ahc_inb(ahc, SCSIID_ULTRA2); else scsiid = ahc_inb(ahc, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahc->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIID_ULTRA2, scsiid); else ahc_outb(ahc, SCSIID, scsiid); } static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) { struct target_cmd *cmd; /* * If the card supports auto-access pause, * we can access the card directly regardless * of whether it is paused or not. */ if ((ahc->features & AHC_AUTOPAUSE) != 0) paused = TRUE; ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahc_handle_target_cmd(ahc, cmd) != 0) break; cmd->cmd_valid = 0; ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, ahc->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahc->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. 
*/ if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { if ((ahc->features & AHC_HS_MAILBOX) != 0) { u_int hs_mailbox; hs_mailbox = ahc_inb(ahc, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; ahc_outb(ahc, HS_MAILBOX, hs_mailbox); } else { if (!paused) ahc_pause(ahc); ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext & HOST_TQINPOS); if (!paused) ahc_unpause(ahc); } } } } static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahc, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahc->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. */ if (lstate == NULL) lstate = ahc->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahc->flags |= AHC_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ if (bootverbose) printk("%s: ATIOs exhausted\n", ahc_name(ahc)); return (1); } else ahc->flags &= ~AHC_TQINFIFO_BLOCKED; #if 0 printk("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahc->black_hole ? "(Black Holed)" : ""); #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahc->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags = CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags = 0; } byte++; /* Okay. 
Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. */ atio->cdb_len = 1; printk("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #if 0 printk("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahc->pending_device); #endif ahc->pending_device = lstate; ahc_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif
gpl-2.0
aosp-hybris/android_kernel_goldfish
net/dns_resolver/dns_query.c
8944
4670
/* Upcall routine, designed to work as a key type and working through * /sbin/request-key to contact userspace when handling DNS queries. * * See Documentation/networking/dns_resolver.txt * * Copyright (c) 2007 Igor Mammedov * Author(s): Igor Mammedov (niallain@gmail.com) * Steve French (sfrench@us.ibm.com) * Wang Lei (wang840925@gmail.com) * David Howells (dhowells@redhat.com) * * The upcall wrapper used to make an arbitrary DNS query. * * This function requires the appropriate userspace tool dns.upcall to be * installed and something like the following lines should be added to the * /etc/request-key.conf file: * * create dns_resolver * * /sbin/dns.upcall %k * * For example to use this module to query AFSDB RR: * * create dns_resolver afsdb:* * /sbin/dns.afsdb %k * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/slab.h> #include <linux/dns_resolver.h> #include <linux/err.h> #include <keys/dns_resolver-type.h> #include <keys/user-type.h> #include "internal.h" /** * dns_query - Query the DNS * @type: Query type (or NULL for straight host->IP lookup) * @name: Name to look up * @namelen: Length of name * @options: Request options (or NULL if no options) * @_result: Where to place the returned data. 
* @_expiry: Where to store the result expiry time (or NULL) * * The data will be returned in the pointer at *result, and the caller is * responsible for freeing it. * * The description should be of the form "[<query_type>:]<domain_name>", and * the options need to be appropriate for the query type requested. If no * query_type is given, then the query is a straight hostname to IP address * lookup. * * The DNS resolution lookup is performed by upcalling to userspace by way of * requesting a key of type dns_resolver. * * Returns the size of the result on success, -ve error code otherwise. */ int dns_query(const char *type, const char *name, size_t namelen, const char *options, char **_result, time_t *_expiry) { struct key *rkey; struct user_key_payload *upayload; const struct cred *saved_cred; size_t typelen, desclen; char *desc, *cp; int ret, len; kenter("%s,%*.*s,%zu,%s", type, (int)namelen, (int)namelen, name, namelen, options); if (!name || namelen == 0 || !_result) return -EINVAL; /* construct the query key description as "[<type>:]<name>" */ typelen = 0; desclen = 0; if (type) { typelen = strlen(type); if (typelen < 1) return -EINVAL; desclen += typelen + 1; } if (!namelen) namelen = strlen(name); if (namelen < 3) return -EINVAL; desclen += namelen + 1; desc = kmalloc(desclen, GFP_KERNEL); if (!desc) return -ENOMEM; cp = desc; if (type) { memcpy(cp, type, typelen); cp += typelen; *cp++ = ':'; } memcpy(cp, name, namelen); cp += namelen; *cp = '\0'; if (!options) options = ""; kdebug("call request_key(,%s,%s)", desc, options); /* make the upcall, using special credentials to prevent the use of * add_key() to preinstall malicious redirections */ saved_cred = override_creds(dns_resolver_cache); rkey = request_key(&key_type_dns_resolver, desc, options); revert_creds(saved_cred); kfree(desc); if (IS_ERR(rkey)) { ret = PTR_ERR(rkey); goto out; } down_read(&rkey->sem); rkey->perm |= KEY_USR_VIEW; ret = key_validate(rkey); if (ret < 0) goto put; /* If the DNS server 
gave an error, return that to the caller */ ret = rkey->type_data.x[0]; if (ret) goto put; upayload = rcu_dereference_protected(rkey->payload.data, lockdep_is_held(&rkey->sem)); len = upayload->datalen; ret = -ENOMEM; *_result = kmalloc(len + 1, GFP_KERNEL); if (!*_result) goto put; memcpy(*_result, upayload->data, len + 1); if (_expiry) *_expiry = rkey->expiry; ret = len; put: up_read(&rkey->sem); key_put(rkey); out: kleave(" = %d", ret); return ret; } EXPORT_SYMBOL(dns_query);
gpl-2.0
javilonas/kernel_common
mm/percpu-km.c
11248
2852
/* * mm/percpu-km.c - kernel memory based chunk allocation * * Copyright (C) 2010 SUSE Linux Products GmbH * Copyright (C) 2010 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2. * * Chunks are allocated as a contiguous kernel memory using gfp * allocation. This is to be used on nommu architectures. * * To use percpu-km, * * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig. * * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined. It's * not compatible with PER_CPU_KM. EMBED_FIRST_CHUNK should work * fine. * * - NUMA is not supported. When setting up the first chunk, * @cpu_distance_fn should be NULL or report all CPUs to be nearer * than or at LOCAL_DISTANCE. * * - It's best if the chunk size is power of two multiple of * PAGE_SIZE. Because each chunk is allocated as a contiguous * kernel memory block using alloc_pages(), memory will be wasted if * chunk size is not aligned. percpu-km code will whine about it. */ #if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) #error "contiguous percpu allocation is incompatible with paged first chunk" #endif #include <linux/log2.h> static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) { unsigned int cpu; for_each_possible_cpu(cpu) memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); return 0; } static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size) { /* nada */ } static struct pcpu_chunk *pcpu_create_chunk(void) { const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; struct pcpu_chunk *chunk; struct page *pages; int i; chunk = pcpu_alloc_chunk(); if (!chunk) return NULL; pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages)); if (!pages) { pcpu_free_chunk(chunk); return NULL; } for (i = 0; i < nr_pages; i++) pcpu_set_page_chunk(nth_page(pages, i), chunk); chunk->data = pages; chunk->base_addr = page_address(pages) - pcpu_group_offsets[0]; return chunk; } static void pcpu_destroy_chunk(struct pcpu_chunk *chunk) { const 
int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; if (chunk && chunk->data) __free_pages(chunk->data, order_base_2(nr_pages)); pcpu_free_chunk(chunk); } static struct page *pcpu_addr_to_page(void *addr) { return virt_to_page(addr); } static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai) { size_t nr_pages, alloc_pages; /* all units must be in a single group */ if (ai->nr_groups != 1) { printk(KERN_CRIT "percpu: can't handle more than one groups\n"); return -EINVAL; } nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT; alloc_pages = roundup_pow_of_two(nr_pages); if (alloc_pages > nr_pages) printk(KERN_WARNING "percpu: wasting %zu pages per chunk\n", alloc_pages - nr_pages); return 0; }
gpl-2.0
Shabbypenguin/Photon-Kernel
arch/alpha/kernel/core_polaris.c
13808
4523
/* * linux/arch/alpha/kernel/core_polaris.c * * POLARIS chip-specific code */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_polaris.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <asm/ptrace.h> #include "proto.h" #include "pci_impl.h" /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Given a bus, device, and function number, compute resulting * configuration space address. This is fairly straightforward * on POLARIS, since the chip itself generates Type 0 or Type 1 * cycles automatically depending on the bus number (Bus 0 is * hardwired to Type 0, all others are Type 1. Peer bridges * are not supported). * * All types: * * 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., scsi and ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, u8 *type1) { u8 bus = pbus->number; *type1 = (bus == 0) ? 
0 : 1; *pci_addr = (bus << 16) | (device_fn << 8) | (where) | POLARIS_DENSE_CONFIG_BASE; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," " returning address 0x%p\n" bus, device_fn, where, *pci_addr)); return 0; } static int polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops polaris_pci_ops = { .read = polaris_read_config, .write = polaris_write_config, }; void __init polaris_init_arch(void) { struct pci_controller *hose; /* May need to initialize error reporting (see PCICTL0/1), but * for now assume that the firmware has done the right thing * already. */ #if 0 printk("polaris_init_arch(): trusting firmware for setup\n"); #endif /* * Create our single hose. */ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; hose->sparse_mem_base = 0; hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR; hose->sparse_io_base = 0; hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR; hose->sg_isa = hose->sg_pci = NULL; /* The I/O window is fixed at 2G @ 2G. 
*/ __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; } static inline void polaris_pci_clr_err(void) { *(vusp)POLARIS_W_STATUS; /* Write 1's to settable bits to clear errors */ *(vusp)POLARIS_W_STATUS = 0x7800; mb(); *(vusp)POLARIS_W_STATUS; } void polaris_machine_check(unsigned long vector, unsigned long la_ptr) { /* Clear the error before any reporting. */ mb(); mb(); draina(); polaris_pci_clr_err(); wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "POLARIS", mcheck_expected(0)); }
gpl-2.0
jiangjiali66/linux-xlnx
kernel/power/main.c
497
15521
/* * kernel/power/main.c - PM subsystem core functionality. * * Copyright (c) 2003 Patrick Mochel * Copyright (c) 2003 Open Source Development Lab * * This file is released under the GPLv2 * */ #include <linux/export.h> #include <linux/kobject.h> #include <linux/string.h> #include <linux/resume-trace.h> #include <linux/workqueue.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include "power.h" DEFINE_MUTEX(pm_mutex); #ifdef CONFIG_PM_SLEEP /* Routines for PM-transition notifications */ static BLOCKING_NOTIFIER_HEAD(pm_chain_head); int register_pm_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&pm_chain_head, nb); } EXPORT_SYMBOL_GPL(register_pm_notifier); int unregister_pm_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&pm_chain_head, nb); } EXPORT_SYMBOL_GPL(unregister_pm_notifier); int pm_notifier_call_chain(unsigned long val) { int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL); return notifier_to_errno(ret); } /* If set, devices may be suspended and resumed asynchronously. 
*/ int pm_async_enabled = 1; static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", pm_async_enabled); } static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; if (val > 1) return -EINVAL; pm_async_enabled = val; return n; } power_attr(pm_async); #ifdef CONFIG_PM_DEBUG int pm_test_level = TEST_NONE; static const char * const pm_tests[__TEST_AFTER_LAST] = { [TEST_NONE] = "none", [TEST_CORE] = "core", [TEST_CPUS] = "processors", [TEST_PLATFORM] = "platform", [TEST_DEVICES] = "devices", [TEST_FREEZER] = "freezer", }; static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { char *s = buf; int level; for (level = TEST_FIRST; level <= TEST_MAX; level++) if (pm_tests[level]) { if (level == pm_test_level) s += sprintf(s, "[%s] ", pm_tests[level]); else s += sprintf(s, "%s ", pm_tests[level]); } if (s != buf) /* convert the last space to a newline */ *(s-1) = '\n'; return (s - buf); } static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { const char * const *s; int level; char *p; int len; int error = -EINVAL; p = memchr(buf, '\n', n); len = p ? p - buf : n; lock_system_sleep(); level = TEST_FIRST; for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++) if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) { pm_test_level = level; error = 0; break; } unlock_system_sleep(); return error ? 
error : n; } power_attr(pm_test); #endif /* CONFIG_PM_DEBUG */ #ifdef CONFIG_DEBUG_FS static char *suspend_step_name(enum suspend_stat_step step) { switch (step) { case SUSPEND_FREEZE: return "freeze"; case SUSPEND_PREPARE: return "prepare"; case SUSPEND_SUSPEND: return "suspend"; case SUSPEND_SUSPEND_NOIRQ: return "suspend_noirq"; case SUSPEND_RESUME_NOIRQ: return "resume_noirq"; case SUSPEND_RESUME: return "resume"; default: return ""; } } static int suspend_stats_show(struct seq_file *s, void *unused) { int i, index, last_dev, last_errno, last_step; last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1; last_dev %= REC_FAILED_NUM; last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1; last_errno %= REC_FAILED_NUM; last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; last_step %= REC_FAILED_NUM; seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n" "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n", "success", suspend_stats.success, "fail", suspend_stats.fail, "failed_freeze", suspend_stats.failed_freeze, "failed_prepare", suspend_stats.failed_prepare, "failed_suspend", suspend_stats.failed_suspend, "failed_suspend_late", suspend_stats.failed_suspend_late, "failed_suspend_noirq", suspend_stats.failed_suspend_noirq, "failed_resume", suspend_stats.failed_resume, "failed_resume_early", suspend_stats.failed_resume_early, "failed_resume_noirq", suspend_stats.failed_resume_noirq); seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", suspend_stats.failed_devs[last_dev]); for (i = 1; i < REC_FAILED_NUM; i++) { index = last_dev + REC_FAILED_NUM - i; index %= REC_FAILED_NUM; seq_printf(s, "\t\t\t%-s\n", suspend_stats.failed_devs[index]); } seq_printf(s, " last_failed_errno:\t%-d\n", suspend_stats.errno[last_errno]); for (i = 1; i < REC_FAILED_NUM; i++) { index = last_errno + REC_FAILED_NUM - i; index %= REC_FAILED_NUM; seq_printf(s, "\t\t\t%-d\n", suspend_stats.errno[index]); } seq_printf(s, " last_failed_step:\t%-s\n", suspend_step_name( 
suspend_stats.failed_steps[last_step])); for (i = 1; i < REC_FAILED_NUM; i++) { index = last_step + REC_FAILED_NUM - i; index %= REC_FAILED_NUM; seq_printf(s, "\t\t\t%-s\n", suspend_step_name( suspend_stats.failed_steps[index])); } return 0; } static int suspend_stats_open(struct inode *inode, struct file *file) { return single_open(file, suspend_stats_show, NULL); } static const struct file_operations suspend_stats_operations = { .open = suspend_stats_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init pm_debugfs_init(void) { debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO, NULL, NULL, &suspend_stats_operations); return 0; } late_initcall(pm_debugfs_init); #endif /* CONFIG_DEBUG_FS */ #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_SLEEP_DEBUG /* * pm_print_times: print time taken by devices to suspend and resume. * * show() returns whether printing of suspend and resume times is enabled. * store() accepts 0 or 1. 0 disables printing and 1 enables it. */ bool pm_print_times_enabled; static ssize_t pm_print_times_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", pm_print_times_enabled); } static ssize_t pm_print_times_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; if (val > 1) return -EINVAL; pm_print_times_enabled = !!val; return n; } power_attr(pm_print_times); static inline void pm_print_times_init(void) { pm_print_times_enabled = !!initcall_debug; } #else /* !CONFIG_PP_SLEEP_DEBUG */ static inline void pm_print_times_init(void) {} #endif /* CONFIG_PM_SLEEP_DEBUG */ struct kobject *power_kobj; /** * state - control system sleep states. * * show() returns available sleep state labels, which may be "mem", "standby", * "freeze" and "disk" (hibernation). See Documentation/power/states.txt for a * description of what they mean. 
* * store() accepts one of those strings, translates it into the proper * enumerated value, and initiates a suspend transition. */ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { char *s = buf; #ifdef CONFIG_SUSPEND suspend_state_t i; for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) if (pm_states[i]) s += sprintf(s,"%s ", pm_states[i]); #endif if (hibernation_available()) s += sprintf(s, "disk "); if (s != buf) /* convert the last space to a newline */ *(s-1) = '\n'; return (s - buf); } static suspend_state_t decode_state(const char *buf, size_t n) { #ifdef CONFIG_SUSPEND suspend_state_t state; #endif char *p; int len; p = memchr(buf, '\n', n); len = p ? p - buf : n; /* Check hibernation first. */ if (len == 4 && !strncmp(buf, "disk", len)) return PM_SUSPEND_MAX; #ifdef CONFIG_SUSPEND for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) { const char *label = pm_states[state]; if (label && len == strlen(label) && !strncmp(buf, label, len)) return state; } #endif return PM_SUSPEND_ON; } static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { suspend_state_t state; int error; error = pm_autosleep_lock(); if (error) return error; if (pm_autosleep_state() > PM_SUSPEND_ON) { error = -EBUSY; goto out; } state = decode_state(buf, n); if (state < PM_SUSPEND_MAX) error = pm_suspend(state); else if (state == PM_SUSPEND_MAX) error = hibernate(); else error = -EINVAL; out: pm_autosleep_unlock(); return error ? error : n; } power_attr(state); #ifdef CONFIG_PM_SLEEP /* * The 'wakeup_count' attribute, along with the functions defined in * drivers/base/power/wakeup.c, provides a means by which wakeup events can be * handled in a non-racy way. * * If a wakeup event occurs when the system is in a sleep state, it simply is * woken up. 
In turn, if an event that would wake the system up from a sleep * state occurs when it is undergoing a transition to that sleep state, the * transition should be aborted. Moreover, if such an event occurs when the * system is in the working state, an attempt to start a transition to the * given sleep state should fail during certain period after the detection of * the event. Using the 'state' attribute alone is not sufficient to satisfy * these requirements, because a wakeup event may occur exactly when 'state' * is being written to and may be delivered to user space right before it is * frozen, so the event will remain only partially processed until the system is * woken up by another event. In particular, it won't cause the transition to * a sleep state to be aborted. * * This difficulty may be overcome if user space uses 'wakeup_count' before * writing to 'state'. It first should read from 'wakeup_count' and store * the read value. Then, after carrying out its own preparations for the system * transition to a sleep state, it should write the stored value to * 'wakeup_count'. If that fails, at least one wakeup event has occurred since * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it * is allowed to write to 'state', but the transition will be aborted if there * are any wakeup events detected after 'wakeup_count' was written to. */ static ssize_t wakeup_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { unsigned int val; return pm_get_wakeup_count(&val, true) ? 
sprintf(buf, "%u\n", val) : -EINTR; } static ssize_t wakeup_count_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { unsigned int val; int error; error = pm_autosleep_lock(); if (error) return error; if (pm_autosleep_state() > PM_SUSPEND_ON) { error = -EBUSY; goto out; } error = -EINVAL; if (sscanf(buf, "%u", &val) == 1) { if (pm_save_wakeup_count(val)) error = n; else pm_print_active_wakeup_sources(); } out: pm_autosleep_unlock(); return error; } power_attr(wakeup_count); #ifdef CONFIG_PM_AUTOSLEEP static ssize_t autosleep_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { suspend_state_t state = pm_autosleep_state(); if (state == PM_SUSPEND_ON) return sprintf(buf, "off\n"); #ifdef CONFIG_SUSPEND if (state < PM_SUSPEND_MAX) return sprintf(buf, "%s\n", pm_states[state] ? pm_states[state] : "error"); #endif #ifdef CONFIG_HIBERNATION return sprintf(buf, "disk\n"); #else return sprintf(buf, "error"); #endif } static ssize_t autosleep_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { suspend_state_t state = decode_state(buf, n); int error; if (state == PM_SUSPEND_ON && strcmp(buf, "off") && strcmp(buf, "off\n")) return -EINVAL; error = pm_autosleep_set_state(state); return error ? error : n; } power_attr(autosleep); #endif /* CONFIG_PM_AUTOSLEEP */ #ifdef CONFIG_PM_WAKELOCKS static ssize_t wake_lock_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return pm_show_wakelocks(buf, true); } static ssize_t wake_lock_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { int error = pm_wake_lock(buf); return error ? 
error : n; } power_attr(wake_lock); static ssize_t wake_unlock_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return pm_show_wakelocks(buf, false); } static ssize_t wake_unlock_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { int error = pm_wake_unlock(buf); return error ? error : n; } power_attr(wake_unlock); #endif /* CONFIG_PM_WAKELOCKS */ #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_TRACE int pm_trace_enabled; static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", pm_trace_enabled); } static ssize_t pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { int val; if (sscanf(buf, "%d", &val) == 1) { pm_trace_enabled = !!val; if (pm_trace_enabled) { pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n" "PM: Correct system time has to be restored manually after resume.\n"); } return n; } return -EINVAL; } power_attr(pm_trace); static ssize_t pm_trace_dev_match_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return show_trace_dev_match(buf, PAGE_SIZE); } static ssize_t pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { return -EINVAL; } power_attr(pm_trace_dev_match); #endif /* CONFIG_PM_TRACE */ #ifdef CONFIG_FREEZER static ssize_t pm_freeze_timeout_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", freeze_timeout_msecs); } static ssize_t pm_freeze_timeout_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; freeze_timeout_msecs = val; return n; } power_attr(pm_freeze_timeout); #endif /* CONFIG_FREEZER*/ static struct attribute * g[] = { &state_attr.attr, #ifdef CONFIG_PM_TRACE &pm_trace_attr.attr, &pm_trace_dev_match_attr.attr, #endif #ifdef CONFIG_PM_SLEEP 
&pm_async_attr.attr, &wakeup_count_attr.attr, #ifdef CONFIG_PM_AUTOSLEEP &autosleep_attr.attr, #endif #ifdef CONFIG_PM_WAKELOCKS &wake_lock_attr.attr, &wake_unlock_attr.attr, #endif #ifdef CONFIG_PM_DEBUG &pm_test_attr.attr, #endif #ifdef CONFIG_PM_SLEEP_DEBUG &pm_print_times_attr.attr, #endif #endif #ifdef CONFIG_FREEZER &pm_freeze_timeout_attr.attr, #endif NULL, }; static struct attribute_group attr_group = { .attrs = g, }; struct workqueue_struct *pm_wq; EXPORT_SYMBOL_GPL(pm_wq); static int __init pm_start_workqueue(void) { pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0); return pm_wq ? 0 : -ENOMEM; } static int __init pm_init(void) { int error = pm_start_workqueue(); if (error) return error; hibernate_image_size_init(); hibernate_reserved_size_init(); power_kobj = kobject_create_and_add("power", NULL); if (!power_kobj) return -ENOMEM; error = sysfs_create_group(power_kobj, &attr_group); if (error) return error; pm_print_times_init(); return pm_autosleep_init(); } core_initcall(pm_init);
gpl-2.0
CyanogenMod/htc-kernel-msm7x30
drivers/net/qla3xxx.c
497
108976
/* * QLogic QLA3xxx NIC HBA Driver * Copyright (c) 2003-2006 QLogic Corporation * * See LICENSE.qla3xxx for copyright and licensing details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/module.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/dmapool.h> #include <linux/mempool.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> #include <linux/delay.h> #include <linux/mm.h> #include "qla3xxx.h" #define DRV_NAME "qla3xxx" #define DRV_STRING "QLogic ISP3XXX Network Driver" #define DRV_VERSION "v2.03.00-k5" #define PFX DRV_NAME " " static const char ql3xxx_driver_name[] = DRV_NAME; static const char ql3xxx_driver_version[] = DRV_VERSION; MODULE_AUTHOR("QLogic Corporation"); MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; static int debug = -1; /* defaults above */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static int msi; module_param(msi, int, 0); MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, /* required last entry */ {0,} }; MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); /* * These are the known PHY's which are used */ typedef enum { PHY_TYPE_UNKNOWN = 0, 
PHY_VITESSE_VSC8211, PHY_AGERE_ET1011C, MAX_PHY_DEV_TYPES } PHY_DEVICE_et; typedef struct { PHY_DEVICE_et phyDevice; u32 phyIdOUI; u16 phyIdModel; char *name; } PHY_DEVICE_INFO_t; static const PHY_DEVICE_INFO_t PHY_DEVICES[] = {{PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, }; /* * Caller must take hw_lock. */ static int ql_sem_spinlock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; unsigned int seconds = 3; do { writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); value = readl(&port_regs->CommonRegs.semaphoreReg); if ((value & (sem_mask >> 16)) == sem_bits) return 0; ssleep(1); } while(--seconds); return -1; } static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); readl(&port_regs->CommonRegs.semaphoreReg); } static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); value = readl(&port_regs->CommonRegs.semaphoreReg); return ((value & (sem_mask >> 16)) == sem_bits); } /* * Caller holds hw_lock. 
*/ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) { int i = 0; while (1) { if (!ql_sem_lock(qdev, QL_DRVR_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 1)) { if (i < 10) { ssleep(1); i++; } else { printk(KERN_ERR PFX "%s: Timed out waiting for " "driver lock...\n", qdev->ndev->name); return 0; } } else { printk(KERN_DEBUG PFX "%s: driver lock acquired.\n", qdev->ndev->name); return 1; } } } static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; writel(((ISP_CONTROL_NP_MASK << 16) | page), &port_regs->CommonRegs.ispControlStatus); readl(&port_regs->CommonRegs.ispControlStatus); qdev->current_page = page; } static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem * reg) { u32 value; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); value = readl(reg); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return value; } static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem * reg) { return readl(reg); } static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) { u32 value; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (qdev->current_page != 0) ql_set_register_page(qdev,0); value = readl(reg); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return value; } static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) { if (qdev->current_page != 0) ql_set_register_page(qdev,0); return readl(reg); } static void ql_write_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); writel(value, reg); readl(reg); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return; } static void ql_write_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { writel(value, reg); readl(reg); return; } static void ql_write_nvram_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 
value) { writel(value, reg); readl(reg); udelay(1); return; } static void ql_write_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 0) ql_set_register_page(qdev,0); writel(value, reg); readl(reg); return; } /* * Caller holds hw_lock. Only called during init. */ static void ql_write_page1_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 1) ql_set_register_page(qdev,1); writel(value, reg); readl(reg); return; } /* * Caller holds hw_lock. Only called during init. */ static void ql_write_page2_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 2) ql_set_register_page(qdev,2); writel(value, reg); readl(reg); return; } static void ql_disable_interrupts(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, (ISP_IMR_ENABLE_INT << 16)); } static void ql_enable_interrupts(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, ((0xff << 16) | ISP_IMR_ENABLE_INT)); } static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, struct ql_rcv_buf_cb *lrg_buf_cb) { dma_addr_t map; int err; lrg_buf_cb->next = NULL; if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; } else { qdev->lrg_buf_free_tail->next = lrg_buf_cb; qdev->lrg_buf_free_tail = lrg_buf_cb; } if (!lrg_buf_cb->skb) { lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!lrg_buf_cb->skb)) { printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n", qdev->ndev->name); qdev->lrg_buf_skb_check++; } else { /* * We save some space to copy the ethhdr from first * buffer */ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); map = pci_map_single(qdev->pdev, 
lrg_buf_cb->skb->data, qdev->lrg_buffer_len - QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; qdev->lrg_buf_skb_check++; return; } lrg_buf_cb->buf_phy_addr_low = cpu_to_le32(LS_64BITS(map)); lrg_buf_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map)); pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); pci_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); } } qdev->lrg_buf_free_count++; } static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter *qdev) { struct ql_rcv_buf_cb *lrg_buf_cb; if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) { if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL) qdev->lrg_buf_free_tail = NULL; qdev->lrg_buf_free_count--; } return lrg_buf_cb; } static u32 addrBits = EEPROM_NO_ADDR_BITS; static u32 dataBits = EEPROM_NO_DATA_BITS; static void fm93c56a_deselect(struct ql3_adapter *qdev); static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, unsigned short *value); /* * Caller holds hw_lock. */ static void fm93c56a_select(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); } /* * Caller holds hw_lock. 
*/ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) { int i; u32 mask; u32 dataBit; u32 previousBit; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; /* Clock in a zero, then do the start bit */ ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, ISP_NVRAM_MASK | qdev->eeprom_cmd_data | AUBURN_EEPROM_DO_1); ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, ISP_NVRAM_MASK | qdev-> eeprom_cmd_data | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE); ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, ISP_NVRAM_MASK | qdev-> eeprom_cmd_data | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL); mask = 1 << (FM93C56A_CMD_BITS - 1); /* Force the previous data bit to be different */ previousBit = 0xffff; for (i = 0; i < FM93C56A_CMD_BITS; i++) { dataBit = (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; if (previousBit != dataBit) { /* * If the bit changed, then change the DO state to * match */ ql_write_nvram_reg(qdev, &port_regs->CommonRegs. serialPortInterfaceReg, ISP_NVRAM_MASK | qdev-> eeprom_cmd_data | dataBit); previousBit = dataBit; } ql_write_nvram_reg(qdev, &port_regs->CommonRegs. serialPortInterfaceReg, ISP_NVRAM_MASK | qdev-> eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_RISE); ql_write_nvram_reg(qdev, &port_regs->CommonRegs. serialPortInterfaceReg, ISP_NVRAM_MASK | qdev-> eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_FALL); cmd = cmd << 1; } mask = 1 << (addrBits - 1); /* Force the previous data bit to be different */ previousBit = 0xffff; for (i = 0; i < addrBits; i++) { dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; if (previousBit != dataBit) { /* * If the bit changed, then change the DO state to * match */ ql_write_nvram_reg(qdev, &port_regs->CommonRegs. serialPortInterfaceReg, ISP_NVRAM_MASK | qdev-> eeprom_cmd_data | dataBit); previousBit = dataBit; } ql_write_nvram_reg(qdev, &port_regs->CommonRegs. 
serialPortInterfaceReg, ISP_NVRAM_MASK | qdev-> eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_RISE); ql_write_nvram_reg(qdev, &port_regs->CommonRegs. serialPortInterfaceReg, ISP_NVRAM_MASK | qdev-> eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_FALL); eepromAddr = eepromAddr << 1; } } /* * Caller holds hw_lock. */ static void fm93c56a_deselect(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); } /* * Caller holds hw_lock. */ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) { int i; u32 data = 0; u32 dataBit; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; /* Read the data bits */ /* The first bit is a dummy. Clock right over it. */ for (i = 0; i < dataBits; i++) { ql_write_nvram_reg(qdev, &port_regs->CommonRegs. serialPortInterfaceReg, ISP_NVRAM_MASK | qdev->eeprom_cmd_data | AUBURN_EEPROM_CLK_RISE); ql_write_nvram_reg(qdev, &port_regs->CommonRegs. serialPortInterfaceReg, ISP_NVRAM_MASK | qdev->eeprom_cmd_data | AUBURN_EEPROM_CLK_FALL); dataBit = (ql_read_common_reg (qdev, &port_regs->CommonRegs. serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0; data = (data << 1) | dataBit; } *value = (u16) data; } /* * Caller holds hw_lock. 
*/ static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, unsigned short *value) { fm93c56a_select(qdev); fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); fm93c56a_datain(qdev, value); fm93c56a_deselect(qdev); } static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) { __le16 *p = (__le16 *)ndev->dev_addr; p[0] = cpu_to_le16(addr[0]); p[1] = cpu_to_le16(addr[1]); p[2] = cpu_to_le16(addr[2]); } static int ql_get_nvram_params(struct ql3_adapter *qdev) { u16 *pEEPROMData; u16 checksum = 0; u32 index; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); pEEPROMData = (u16 *) & qdev->nvram_data; qdev->eeprom_cmd_data = 0; if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 10)) { printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n", __func__); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return -1; } for (index = 0; index < EEPROM_SIZE; index++) { eeprom_readword(qdev, index, pEEPROMData); checksum += *pEEPROMData; pEEPROMData++; } ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); if (checksum != 0) { printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n", qdev->ndev->name, checksum); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return -1; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return checksum; } static const u32 PHYAddr[2] = { PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS }; static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 temp; int count = 1000; while (count) { temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); if (!(temp & MAC_MII_STATUS_BSY)) return 0; udelay(10); count--; } return -1; } static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 scanControl; if (qdev->numPorts > 1) { /* Auto scan will cycle through multiple ports */ scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; 
} else { scanControl = MAC_MII_CONTROL_SC; } /* * Scan register 1 of PHY/PETBI, * Set up to scan both devices * The autoscan starts from the first register, completes * the last one before rolling over to the first */ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, PHYAddr[0] | MII_SCAN_REGISTER); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (scanControl) | ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); } static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) { u8 ret; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; /* See if scan mode is enabled before we turn it off */ if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { /* Scan is enabled */ ret = 1; } else { /* Scan is disabled */ ret = 0; } /* * When disabling scan mode you must first change the MII register * address */ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, PHYAddr[0] | MII_SCAN_REGISTER); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | MAC_MII_CONTROL_RC) << 16)); return ret; } static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, u16 regAddr, u16 value, u32 phyAddr) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u8 scanWasEnabled; scanWasEnabled = ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, phyAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); /* Wait for write to complete 9/10/04 SJP */ if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } if (scanWasEnabled) 
ql_mii_enable_scan_mode(qdev); return 0; } static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, u16 * value, u32 phyAddr) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u8 scanWasEnabled; u32 temp; scanWasEnabled = ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, phyAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16)); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); /* Wait for the read to complete */ if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free after issuing command.\n", qdev->ndev->name); return -1; } temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); *value = (u16) temp; if (scanWasEnabled) ql_mii_enable_scan_mode(qdev); return 0; } static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, qdev->PHYAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); /* Wait for write to complete. 
*/ if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } ql_mii_enable_scan_mode(qdev); return 0; } static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) { u32 temp; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, qdev->PHYAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16)); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); /* Wait for the read to complete */ if (ql_wait_for_mii_ready(qdev)) { if (netif_msg_link(qdev)) printk(KERN_WARNING PFX "%s: Timed out waiting for management port to " "get free before issuing command.\n", qdev->ndev->name); return -1; } temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); *value = (u16) temp; ql_mii_enable_scan_mode(qdev); return 0; } static void ql_petbi_reset(struct ql3_adapter *qdev) { ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); } static void ql_petbi_start_neg(struct ql3_adapter *qdev) { u16 reg; /* Enable Auto-negotiation sense */ ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg); reg |= PETBI_TBI_AUTO_SENSE; ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); ql_mii_write_reg(qdev, PETBI_NEG_ADVER, PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); } static void ql_petbi_reset_ex(struct ql3_adapter *qdev) { ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, 
PHYAddr[qdev->mac_index]); } static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) { u16 reg; /* Enable Auto-negotiation sense */ ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, PHYAddr[qdev->mac_index]); reg |= PETBI_TBI_AUTO_SENSE; ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, PHYAddr[qdev->mac_index]); } static void ql_petbi_init(struct ql3_adapter *qdev) { ql_petbi_reset(qdev); ql_petbi_start_neg(qdev); } static void ql_petbi_init_ex(struct ql3_adapter *qdev) { ql_petbi_reset_ex(qdev); ql_petbi_start_neg_ex(qdev); } static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) { u16 reg; if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0) return 0; return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; } static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) { printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name); /* power down device bit 11 = 1 */ ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); /* enable diagnostic mode bit 2 = 1 */ ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); /* 1000MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); /* 1000MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); /* 100MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); /* 100MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); /* 10MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); /* 10MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); /* point to hidden reg 0x2806 */ ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); /* Write 
new PHYAD w/bit 5 set */ ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); /* * Disable diagnostic mode bit 2 = 0 * Power up device bit 11 = 0 * Link up (on) and activity (blink) */ ql_mii_write_reg(qdev, 0x12, 0x840a); ql_mii_write_reg(qdev, 0x00, 0x1140); ql_mii_write_reg(qdev, 0x1c, 0xfaf0); } static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, u16 phyIdReg0, u16 phyIdReg1) { PHY_DEVICE_et result = PHY_TYPE_UNKNOWN; u32 oui; u16 model; int i; if (phyIdReg0 == 0xffff) { return result; } if (phyIdReg1 == 0xffff) { return result; } /* oui is split between two registers */ oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; /* Scan table for this PHY */ for(i = 0; i < MAX_PHY_DEV_TYPES; i++) { if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel)) { result = PHY_DEVICES[i].phyDevice; printk(KERN_INFO "%s: Phy: %s\n", qdev->ndev->name, PHY_DEVICES[i].name); break; } } return result; } static int ql_phy_get_speed(struct ql3_adapter *qdev) { u16 reg; switch(qdev->phyType) { case PHY_AGERE_ET1011C: { if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0) return 0; reg = (reg >> 8) & 3; break; } default: if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) return 0; reg = (((reg & 0x18) >> 3) & 3); } switch(reg) { case 2: return SPEED_1000; case 1: return SPEED_100; case 0: return SPEED_10; default: return -1; } } static int ql_is_full_dup(struct ql3_adapter *qdev) { u16 reg; switch(qdev->phyType) { case PHY_AGERE_ET1011C: { if (ql_mii_read_reg(qdev, 0x1A, &reg)) return 0; return ((reg & 0x0080) && (reg & 0x1000)) != 0; } case PHY_VITESSE_VSC8211: default: { if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) return 0; return (reg & PHY_AUX_DUPLEX_STAT) != 0; } } } static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) { u16 reg; if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0) return 0; return (reg & PHY_NEG_PAUSE) != 0; } static int PHY_Setup(struct 
ql3_adapter *qdev) { u16 reg1; u16 reg2; bool agereAddrChangeNeeded = false; u32 miiAddr = 0; int err; /* Determine the PHY we are using by reading the ID's */ err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1); if(err != 0) { printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", qdev->ndev->name); return err; } err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2); if(err != 0) { printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", qdev->ndev->name); return err; } /* Check if we have a Agere PHY */ if ((reg1 == 0xffff) || (reg2 == 0xffff)) { /* Determine which MII address we should be using determined by the index of the card */ if (qdev->mac_index == 0) { miiAddr = MII_AGERE_ADDR_1; } else { miiAddr = MII_AGERE_ADDR_2; } err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); if(err != 0) { printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", qdev->ndev->name); return err; } err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr); if(err != 0) { printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", qdev->ndev->name); return err; } /* We need to remember to initialize the Agere PHY */ agereAddrChangeNeeded = true; } /* Determine the particular PHY we have on board to apply PHY specific initializations */ qdev->phyType = getPhyType(qdev, reg1, reg2); if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { /* need this here so address gets changed */ phyAgereSpecificInit(qdev, miiAddr); } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name); return -EIO; } return 0; } /* * Caller holds hw_lock. 
 */
/*
 * Enable (enable != 0) or disable the port MAC.  Each write sets the
 * PE bit and, in the upper 16 bits, its companion bit — presumably a
 * per-bit write mask selecting which low bits take effect (same
 * pattern as every config-register write below); TODO confirm against
 * the chip manual.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	/* Ports 0 and 1 have separate MAC config registers. */
	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 * Assert (enable != 0) or deassert the MAC config soft-reset bit.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 * Set (enable != 0) or clear gigabit mode (GM bit) in the MAC config.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 * Set (enable != 0) or clear full duplex (FD bit) in the MAC config.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
/* Enable/disable both TX (TF) and RX (RF) pause frames together. */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 * Return non-zero when this port's serdes/media status bit (SM0/SM1)
 * indicates a fiber (optical) link rather than copper.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Return non-zero when auto-negotiation is enabled in the PHY control
 * register (register 0x00, bit 0x1000).  NOTE(review): the MII read's
 * return value is not checked, so a failed read leaves reg
 * uninitialized — long-standing behavior, flagged but not changed here.
 */
static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
/* Return 1 when the port-status AC bit says auto-negotiation finished. */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",
			       qdev->ndev->name);
		return 1;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Auto-Negotiate incomplete.\n",
			       qdev->ndev->name);
		return 0;
	}
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 *  (dispatching to the fiber or copper helper as appropriate).
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

/* Return non-zero when the port reports an auto-negotiation error (AE bit). */
static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

/* Fiber links are always 1000 Mb/s; otherwise ask the copper PHY. */
static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

/* Fiber links are always full duplex; otherwise ask the copper PHY. */
static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 * Return non-zero when the latched link-down indication for this port
 * is set in ispControlStatus.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
/* Clear the latched link-down indication for this port; returns 0, or 1
 * for an unexpected mac_index. */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 * Return 1 when this function (adapter instance) is the link master
 * for its port — i.e. the other function's enable bit is NOT set.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}

/* Soft-reset the copper PHY at PHYAddr[mac_index]. */
static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

/*
 * Program the PHY advertisement registers from the NVRAM port
 * configuration (speed/duplex/pause) and restart auto-negotiation.
 */
static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if(qdev->phyType == PHY_AGERE_ET1011C) {
		/* turn off external loopback */
		ql_mii_write_reg(qdev, 0x13, 0x0000);
	}

	if(qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/*  Some HBA's in the field are set to 0 and they need to
	    be reinterpreted with a default value */
	if(portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if(portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if(portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		reg |= 1;
	}

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	/* Restart auto-negotiation, preserving other control bits. */
	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

/* Full copper-PHY bring-up: reset, identify/init, start negotiation. */
static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
/* Return LS_UP/LS_DOWN from the port-status UP bit for this port. */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
	}

	return linkState;
}

/*
 * Bring up the port's PHY (fiber or copper) under the PHY GIO
 * hardware semaphore.  Returns 0 on success, -1 if the semaphore
 * could not be taken.
 */
static int ql_port_start(struct ql3_adapter *qdev)
{
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
		       qdev->ndev->name);
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

/*
 * Called once auto-negotiation completes: if it succeeded and we are
 * link master, program the MAC (speed/duplex/pause) under soft reset,
 * enable it, and mark the link up.  On a remote negotiation error the
 * port is restarted via ql_port_start().  Returns 0 on success, -1 on
 * semaphore/restart failure.
 */
static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",
				       qdev->ndev->
				       name);
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed
					(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup
					    (qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause
					 (qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",
				       qdev->ndev->
					       name);
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev)
			       ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",
				       qdev->ndev->
				       name);
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on it's own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if(ql_port_start(qdev))	{/* Restart port */
				return -1;
			} else
				return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

/*
 * Periodic (timer-driven) link state machine.  Tracks transitions
 * between LS_DOWN and LS_UP, finishing auto-negotiation on the way up
 * and re-arming the adapter timer on every pass.
 */
static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Reset in progress, skip processing link "
			       "state.\n", qdev->ndev->name);

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 1 second (HZ) interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			ql_port_start(qdev);
		}
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX "%s: Link is up.\n",
				       qdev->ndev->name);
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX "%s: Link is down.\n",
				       qdev->ndev->name);
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 1 second (HZ) interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 * Record in flags whether this function owns (masters) the port link.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER,&qdev->flags);
	else
		clear_bit(QL_LINK_MASTER,&qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 * Enable MII scan mode and, if we master the port, (re)initialize the
 * fiber PETBI or copper PHY as appropriate.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset so that the
 * management interface clock speed can be set properly.  It would be better if
 * we had a way to disable MDC until after the PHY is out of reset, but we
 * don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
			&port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

/* ethtool "supported" mask: fiber vs copper feature sets. */
static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	u32 supported;

	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
		    | SUPPORTED_Autoneg;
	} else {
		supported = SUPPORTED_10baseT_Half
		    | SUPPORTED_10baseT_Full
		    | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full
		    | SUPPORTED_1000baseT_Half
		    | SUPPORTED_1000baseT_Full
		    | SUPPORTED_Autoneg | SUPPORTED_TP;
	}

	return supported;
}

static int ql_get_auto_cfg_status(struct ql3_adapter 
*qdev)
{
	int status;
	unsigned long hw_flags;

	/* PHY access requires hw_lock plus the PHY GIO hw semaphore. */
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

/* Locked wrapper around ql_get_link_speed() for ethtool. */
static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

/* Locked wrapper around ql_is_link_full_dup() for ethtool. */
static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

/* ethtool_ops.get_settings: report port type, modes, speed, duplex. */
static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ecmd->speed = ql_get_speed(qdev);
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

/* ethtool_ops.get_drvinfo: driver identification strings. */
static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

/* ethtool msglevel accessors (netif_msg_* bitmask). */
static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

/* ethtool_ops.get_pauseparam: read TF/RF bits from the MAC config reg. */
static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	u32 reg;
	if(qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg  = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};

/*
 * Walk the large-buffer free list and (re)attach a DMA-mapped skb to
 * every entry that lost its skb (allocation failed earlier).  Returns
 * 1 once lrg_buf_skb_check reaches zero, 0 otherwise.
 */
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
							   qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				printk(KERN_DEBUG PFX
				       "%s: Failed netdev_alloc_skb().\n",
				       qdev->ndev->name);
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if(err) {
					printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
					       qdev->ndev->name, 
err);
					/* Undo the alloc; entry retried later. */
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}

				lrg_buf_cb->buf_phy_addr_low =
				    cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
				    cpu_to_le32(MS_64BITS(map));
				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				pci_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
 * Push released small buffers back to hardware in batches (the index
 * advances once per 8 released buffers, written when >= 16 pending).
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();	/* order queue updates before the doorbell write */
		writel(qdev->small_buf_q_producer_index,
			&port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 * Refill the large-buffer queue from the free list, 8 buffers per
 * queue entry, then advance the producer index doorbell.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();	/* order queue updates before the doorbell write */
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
			&port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

/*
 * Transmit-completion handler: unmap the head segment and any fragment
 * pages, account stats, free the skb and return the tx slot.
 */
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	int retval = 0;

	if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_WARNING "Frame short but, frame was padded and sent.\n");
	}
	
	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/*  Check the transmit response flags for any errors */
	if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");

		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if(tx_cb->seg_count == 0) {
		printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

/* Consume one small rx buffer (wrapping index) and count the release. */
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

/* Consume and return the next large rx buffer (wrapping index). */
static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;

	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return(lrg_buf_cb);
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  
The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to the buffer two.  Buffer two is the sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of it's data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	lrg_buf_cb2->skb = NULL;

	/* On 3022 the (unused) header buffer goes back to hardware too. */
	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

/*
 * IP completion path: like ql_process_mac_rx_intr() but on 3022 the
 * ethernet header is copied from buffer one into headroom of buffer
 * two, and on 3032 the hardware checksum result may be honored.
 */
static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		/* Header length: plain ethernet, or VLAN-tagged. */
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb2->ip_summed = CHECKSUM_NONE;
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second. This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
			(IB_IP_IOCB_RSP_3032_ICE |
			 IB_IP_IOCB_RSP_3032_CE)) {
			printk(KERN_ERR
			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
			       __func__,
			       ((checksum &
				IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
				"UDP"),checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
				(checksum & IB_IP_IOCB_RSP_3032_UDP &&
				!(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += length;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

/*
 * Drain up to work_to_do completion-queue entries, dispatching tx and
 * rx completions; returns the number of entries processed.
 */
static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. 
 */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < work_to_do)) {

		net_rsp = qdev->rsp_current;
		rmb();	/* read the entry only after seeing the new producer */
		/*
		 * Fix 4032 chip undocumented "feature" where bit-8 is set if the
		 * inbound completion is for a VLAN.
		 */
		if (qdev->device_id == QL3032_DEVICE_ID)
			net_rsp->opcode &= 0x7f;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default:
			{
				u32 *tmp = (u32 *) net_rsp;
				printk(KERN_ERR PFX
				       "%s: Hit default case, not "
				       "handled!\n"
				       "	dropping the packet, opcode = "
				       "%x.\n",
				       ndev->name, net_rsp->opcode);
				printk(KERN_ERR PFX
				       "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
				       (unsigned long int)tmp[0],
				       (unsigned long int)tmp[1],
				       (unsigned long int)tmp[2],
				       (unsigned long int)tmp[3]);
			}
		}

		/* Advance the consumer, wrapping to the queue start. */
		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}

		work_done = *tx_cleaned + *rx_cleaned;
	}

	return work_done;
}

/*
 * NAPI poll: clean completions up to budget; when done, replenish the
 * rx buffer queues, acknowledge the consumer index and re-enable
 * interrupts under hw_lock.
 */
static int ql_poll(struct napi_struct *napi, int budget)
{
	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
	int rx_cleaned = 0, tx_cleaned = 0;
	unsigned long hw_flags;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);

	if (tx_cleaned + rx_cleaned != budget) {
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		__napi_complete(napi);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
			    &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);
	}
	return tx_cleaned + rx_cleaned;
}

/*
 * Interrupt handler: fatal errors and resets are handed off to the
 * reset workqueue; completion interrupts are masked and deferred to
 * NAPI; anything else is not ours (IRQ_NONE).
 */
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{

	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	port_regs = qdev->mem_map_registers;

	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE,&qdev->flags) ;

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var =
			    ql_read_page0_reg_l(qdev,
					      &port_regs->PortFatalErrStatus);
			printk(KERN_WARNING PFX
			       "%s: Resetting chip. PortFatalErrStatus "
			       "register = 0x%x\n", ndev->name, var);
			set_bit(QL_RESET_START,&qdev->flags) ;
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
			printk(KERN_ERR PFX
			       "%s: Another function issued a reset to the "
			       "chip. ISR value = %x.\n", ndev->name, value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(napi_schedule_prep(&qdev->napi))) {
			__napi_schedule(&qdev->napi);
		}
	} else {
		return IRQ_NONE;
	}

	return IRQ_RETVAL(handled);
}

/*
 * Get the total number of segments needed for the
 * given number of fragments.  This is necessary because
 * outbound address lists (OAL) will be used when more than
 * two frags are given.  Each address list has 5 addr/len
 * pairs.  The 5th pair in each AOL is used to point to
 * the next AOL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
*/ static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) { if (qdev->device_id == QL3022_DEVICE_ID) return 1; switch(frags) { case 0: return 1; /* just the skb->data seg */ case 1: return 2; /* skb->data + 1 frag */ case 2: return 3; /* skb->data + 2 frags */ case 3: return 5; /* skb->data + 1 frag + 1 AOL containting 2 frags */ case 4: return 6; case 5: return 7; case 6: return 8; case 7: return 10; case 8: return 11; case 9: return 12; case 10: return 13; case 11: return 15; case 12: return 16; case 13: return 17; case 14: return 18; case 15: return 20; case 16: return 21; case 17: return 22; case 18: return 23; } return -1; } static void ql_hw_csum_setup(const struct sk_buff *skb, struct ob_mac_iocb_req *mac_iocb_ptr) { const struct iphdr *ip = ip_hdr(skb); mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); mac_iocb_ptr->ip_hdr_len = ip->ihl; if (ip->protocol == IPPROTO_TCP) { mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | OB_3032MAC_IOCB_REQ_IC; } else { mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | OB_3032MAC_IOCB_REQ_IC; } } /* * Map the buffers for this transmit. This will return * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. */ static int ql_send_map(struct ql3_adapter *qdev, struct ob_mac_iocb_req *mac_iocb_ptr, struct ql_tx_buf_cb *tx_cb, struct sk_buff *skb) { struct oal *oal; struct oal_entry *oal_entry; int len = skb_headlen(skb); dma_addr_t map; int err; int completed_segs, i; int seg_cnt, seg = 0; int frag_cnt = (int)skb_shinfo(skb)->nr_frags; seg_cnt = tx_cb->seg_count; /* * Map the skb buffer first. 
*/ map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); return NETDEV_TX_BUSY; } oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(len); pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); pci_unmap_len_set(&tx_cb->map[seg], maplen, len); seg++; if (seg_cnt == 1) { /* Terminate the last segment. */ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); } else { oal = tx_cb->oal; for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; oal_entry++; if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */ (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ (seg == 12 && seg_cnt > 13) || /* but necessary. */ (seg == 17 && seg_cnt > 18)) { /* Continuation entry points to outbound address list. 
*/ map = pci_map_single(qdev->pdev, oal, sizeof(struct oal), PCI_DMA_TODEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", qdev->ndev->name, err); goto map_error; } oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(sizeof(struct oal) | OAL_CONT_ENTRY); pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); pci_unmap_len_set(&tx_cb->map[seg], maplen, sizeof(struct oal)); oal_entry = (struct oal_entry *)oal; oal++; seg++; } map = pci_map_page(qdev->pdev, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", qdev->ndev->name, err); goto map_error; } oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(frag->size); pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); pci_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); } /* Terminate the last segment. */ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); } return NETDEV_TX_OK; map_error: /* A PCI mapping failed and now we will need to back out * We need to traverse through the oal's and associated pages which * have been mapped and now we must unmap them to clean up properly */ seg = 1; oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal = tx_cb->oal; for (i=0; i<completed_segs; i++,seg++) { oal_entry++; if((seg == 2 && seg_cnt > 3) || /* Check for continuation */ (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ (seg == 12 && seg_cnt > 13) || /* but necessary. 
*/ (seg == 17 && seg_cnt > 18)) { pci_unmap_single(qdev->pdev, pci_unmap_addr(&tx_cb->map[seg], mapaddr), pci_unmap_len(&tx_cb->map[seg], maplen), PCI_DMA_TODEVICE); oal++; seg++; } pci_unmap_page(qdev->pdev, pci_unmap_addr(&tx_cb->map[seg], mapaddr), pci_unmap_len(&tx_cb->map[seg], maplen), PCI_DMA_TODEVICE); } pci_unmap_single(qdev->pdev, pci_unmap_addr(&tx_cb->map[0], mapaddr), pci_unmap_addr(&tx_cb->map[0], maplen), PCI_DMA_TODEVICE); return NETDEV_TX_BUSY; } /* * The difference between 3022 and 3032 sends: * 3022 only supports a simple single segment transmission. * 3032 supports checksumming and scatter/gather lists (fragments). * The 3032 supports sglists by using the 3 addr/len pairs (ALP) * in the IOCB plus a chain of outbound address lists (OAL) that * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) * will used to point to an OAL when more ALP entries are required. * The IOCB is always the top of the chain followed by one or more * OALs (when necessary). */ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) { struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; struct ql_tx_buf_cb *tx_cb; u32 tot_len = skb->len; struct ob_mac_iocb_req *mac_iocb_ptr; if (unlikely(atomic_read(&qdev->tx_count) < 2)) { return NETDEV_TX_BUSY; } tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; if((tx_cb->seg_count = ql_get_seg_count(qdev, (skb_shinfo(skb)->nr_frags))) == -1) { printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); return NETDEV_TX_OK; } mac_iocb_ptr = tx_cb->queue_entry; memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); mac_iocb_ptr->opcode = qdev->mac_ob_opcode; mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; mac_iocb_ptr->flags |= qdev->mb_bit_mask; mac_iocb_ptr->transaction_id = qdev->req_producer_index; mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); tx_cb->skb = skb; if (qdev->device_id == QL3032_DEVICE_ID 
&& skb->ip_summed == CHECKSUM_PARTIAL) ql_hw_csum_setup(skb, mac_iocb_ptr); if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) { printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__); return NETDEV_TX_BUSY; } wmb(); qdev->req_producer_index++; if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) qdev->req_producer_index = 0; wmb(); ql_write_common_reg_l(qdev, &port_regs->CommonRegs.reqQProducerIndex, qdev->req_producer_index); if (netif_msg_tx_queued(qdev)) printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", ndev->name, qdev->req_producer_index, skb->len); atomic_dec(&qdev->tx_count); return NETDEV_TX_OK; } static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) { qdev->req_q_size = (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); qdev->req_q_virt_addr = pci_alloc_consistent(qdev->pdev, (size_t) qdev->req_q_size, &qdev->req_q_phy_addr); if ((qdev->req_q_virt_addr == NULL) || LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { printk(KERN_ERR PFX "%s: reqQ failed.\n", qdev->ndev->name); return -ENOMEM; } qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); qdev->rsp_q_virt_addr = pci_alloc_consistent(qdev->pdev, (size_t) qdev->rsp_q_size, &qdev->rsp_q_phy_addr); if ((qdev->rsp_q_virt_addr == NULL) || LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { printk(KERN_ERR PFX "%s: rspQ allocation failed\n", qdev->ndev->name); pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, qdev->req_q_virt_addr, qdev->req_q_phy_addr); return -ENOMEM; } set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); return 0; } static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) { if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) { printk(KERN_INFO PFX "%s: Already done.\n", qdev->ndev->name); return; } pci_free_consistent(qdev->pdev, qdev->req_q_size, qdev->req_q_virt_addr, qdev->req_q_phy_addr); qdev->req_q_virt_addr = NULL; pci_free_consistent(qdev->pdev, qdev->rsp_q_size, qdev->rsp_q_virt_addr, 
qdev->rsp_q_phy_addr); qdev->rsp_q_virt_addr = NULL; clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); } static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) { /* Create Large Buffer Queue */ qdev->lrg_buf_q_size = qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); if (qdev->lrg_buf_q_size < PAGE_SIZE) qdev->lrg_buf_q_alloc_size = PAGE_SIZE; else qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL); if (qdev->lrg_buf == NULL) { printk(KERN_ERR PFX "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name); return -ENOMEM; } qdev->lrg_buf_q_alloc_virt_addr = pci_alloc_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, &qdev->lrg_buf_q_alloc_phy_addr); if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { printk(KERN_ERR PFX "%s: lBufQ failed\n", qdev->ndev->name); return -ENOMEM; } qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; /* Create Small Buffer Queue */ qdev->small_buf_q_size = NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); if (qdev->small_buf_q_size < PAGE_SIZE) qdev->small_buf_q_alloc_size = PAGE_SIZE; else qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; qdev->small_buf_q_alloc_virt_addr = pci_alloc_consistent(qdev->pdev, qdev->small_buf_q_alloc_size, &qdev->small_buf_q_alloc_phy_addr); if (qdev->small_buf_q_alloc_virt_addr == NULL) { printk(KERN_ERR PFX "%s: Small Buffer Queue allocation failed.\n", qdev->ndev->name); pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, qdev->lrg_buf_q_alloc_phy_addr); return -ENOMEM; } qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); return 0; } static void ql_free_buffer_queues(struct ql3_adapter *qdev) { if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) { printk(KERN_INFO PFX "%s: 
Already done.\n", qdev->ndev->name); return; } if(qdev->lrg_buf) kfree(qdev->lrg_buf); pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, qdev->lrg_buf_q_alloc_phy_addr); qdev->lrg_buf_q_virt_addr = NULL; pci_free_consistent(qdev->pdev, qdev->small_buf_q_alloc_size, qdev->small_buf_q_alloc_virt_addr, qdev->small_buf_q_alloc_phy_addr); qdev->small_buf_q_virt_addr = NULL; clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); } static int ql_alloc_small_buffers(struct ql3_adapter *qdev) { int i; struct bufq_addr_element *small_buf_q_entry; /* Currently we allocate on one of memory and use it for smallbuffers */ qdev->small_buf_total_size = (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * QL_SMALL_BUFFER_SIZE); qdev->small_buf_virt_addr = pci_alloc_consistent(qdev->pdev, qdev->small_buf_total_size, &qdev->small_buf_phy_addr); if (qdev->small_buf_virt_addr == NULL) { printk(KERN_ERR PFX "%s: Failed to get small buffer memory.\n", qdev->ndev->name); return -ENOMEM; } qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); small_buf_q_entry = qdev->small_buf_q_virt_addr; /* Initialize the small buffer queue. 
*/ for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { small_buf_q_entry->addr_high = cpu_to_le32(qdev->small_buf_phy_addr_high); small_buf_q_entry->addr_low = cpu_to_le32(qdev->small_buf_phy_addr_low + (i * QL_SMALL_BUFFER_SIZE)); small_buf_q_entry++; } qdev->small_buf_index = 0; set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags); return 0; } static void ql_free_small_buffers(struct ql3_adapter *qdev) { if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) { printk(KERN_INFO PFX "%s: Already done.\n", qdev->ndev->name); return; } if (qdev->small_buf_virt_addr != NULL) { pci_free_consistent(qdev->pdev, qdev->small_buf_total_size, qdev->small_buf_virt_addr, qdev->small_buf_phy_addr); qdev->small_buf_virt_addr = NULL; } } static void ql_free_large_buffers(struct ql3_adapter *qdev) { int i = 0; struct ql_rcv_buf_cb *lrg_buf_cb; for (i = 0; i < qdev->num_large_buffers; i++) { lrg_buf_cb = &qdev->lrg_buf[i]; if (lrg_buf_cb->skb) { dev_kfree_skb(lrg_buf_cb->skb); pci_unmap_single(qdev->pdev, pci_unmap_addr(lrg_buf_cb, mapaddr), pci_unmap_len(lrg_buf_cb, maplen), PCI_DMA_FROMDEVICE); memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); } else { break; } } } static void ql_init_large_buffers(struct ql3_adapter *qdev) { int i; struct ql_rcv_buf_cb *lrg_buf_cb; struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; for (i = 0; i < qdev->num_large_buffers; i++) { lrg_buf_cb = &qdev->lrg_buf[i]; buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; buf_addr_ele++; } qdev->lrg_buf_index = 0; qdev->lrg_buf_skb_check = 0; } static int ql_alloc_large_buffers(struct ql3_adapter *qdev) { int i; struct ql_rcv_buf_cb *lrg_buf_cb; struct sk_buff *skb; dma_addr_t map; int err; for (i = 0; i < qdev->num_large_buffers; i++) { skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!skb)) { /* Better luck next round */ printk(KERN_ERR PFX "%s: large buff alloc failed, " "for %d bytes at 
index %d.\n",
			       qdev->ndev->name,
			       qdev->lrg_buffer_len * 2, i);
			/* NOTE(review): the "* 2" overstates the actual
			 * netdev_alloc_skb() request size — verify intent. */
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		} else {
			lrg_buf_cb = &qdev->lrg_buf[i];
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
			lrg_buf_cb->index = i;
			lrg_buf_cb->skb = skb;
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);

			err = pci_dma_mapping_error(qdev->pdev, map);
			if(err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				/* Frees the skbs mapped so far; the current
				 * skb is included since lrg_buf_cb->skb was
				 * already set above. */
				ql_free_large_buffers(qdev);
				return -ENOMEM;
			}

			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
		}
	}
	return 0;
}

/*
 * Free the per-slot OAL scratch buffers of the transmit free list.
 */
static void ql_free_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[0];
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		if (tx_cb->oal) {
			kfree(tx_cb->oal);
			tx_cb->oal = NULL;
		}
		tx_cb++;
	}
}

/*
 * Build the transmit free list: bind each control block to its request
 * ring entry and allocate its OAL scratch area.  Returns 0 on success,
 * -1 on allocation failure (caller frees via ql_free_send_free_list).
 */
static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	struct ob_mac_iocb_req *req_q_curr =
					qdev->req_q_virt_addr;

	/* Create free list of transmit buffers */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {

		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr;
		req_q_curr++;
		/* 512 bytes is enough for the longest possible OAL chain
		 * — presumably sized from ql_get_seg_count()'s maximum;
		 * TODO confirm. */
		tx_cb->oal = kmalloc(512, GFP_KERNEL);
		if (tx_cb->oal == NULL)
			return -1;
	}
	return 0;
}

/*
 * Top-level memory setup: sizes buffers from the MTU, allocates the
 * shadow-register page, the req/rsp rings, the buffer queues and the
 * small/large receive buffers.  Returns 0 or -ENOMEM.
 */
static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	/* Only the two exact MTU sizes are supported. */
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		/*
		 * Bigger buffers, so less of them.
		 */
		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		printk(KERN_ERR PFX
		       "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
	    (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;

	/*
	 * First allocate a page of shared memory and use it for shadow
	 * locations of Network Request Queue Consumer Address Register and
	 * Network Completion Queue Producer Index Register
	 */
	qdev->shadow_reg_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->shadow_reg_phy_addr);

	if (qdev->shadow_reg_virt_addr != NULL) {
		/* Request-queue consumer shadow lives at offset 0,
		 * response-queue producer shadow at offset 8. */
		qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
		qdev->req_consumer_index_phy_addr_high =
		    MS_64BITS(qdev->shadow_reg_phy_addr);
		qdev->req_consumer_index_phy_addr_low =
		    LS_64BITS(qdev->shadow_reg_phy_addr);

		qdev->prsp_producer_index =
		    (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
		qdev->rsp_producer_index_phy_addr_high =
		    qdev->req_consumer_index_phy_addr_high;
		qdev->rsp_producer_index_phy_addr_low =
		    qdev->req_consumer_index_phy_addr_low + 8;
	} else {
		printk(KERN_ERR PFX
		       "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_net_req_rsp_queues failed.\n",
		       qdev->ndev->name);
		goto err_req_rsp;
	}

	if (ql_alloc_buffer_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_buffer_queues failed.\n",
		       qdev->ndev->name);
		goto err_buffer_queues;
	}

	if (ql_alloc_small_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	/* NOTE(review): if large-buffer allocation fails, the error path
	 * below never calls ql_free_small_buffers(), which looks like a
	 * leak of the small-buffer DMA block — verify. */
	if (ql_alloc_large_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	/* Initialize the large buffer queue. 
 */
	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))
		goto err_free_list;

	qdev->rsp_current = qdev->rsp_q_virt_addr;

	return 0;
	/* Error unwind: each label releases everything allocated before
	 * the failure point (small/large buffers are covered by
	 * ql_free_buffer_queues only partially — see NOTE above). */
err_free_list:
	ql_free_send_free_list(qdev);
err_small_buffers:
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->shadow_reg_virt_addr,
			    qdev->shadow_reg_phy_addr);

	return -ENOMEM;
}

/*
 * Release everything ql_alloc_mem_resources() set up, in reverse order.
 * Each helper is individually idempotent, so this is safe to call on a
 * partially initialized adapter.
 */
static void ql_free_mem_resources(struct ql3_adapter *qdev)
{
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
	if (qdev->shadow_reg_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->shadow_reg_virt_addr,
				    qdev->shadow_reg_phy_addr);
		qdev->shadow_reg_virt_addr = NULL;
	}
}

/*
 * Program the chip's local-RAM layout (buflet pools and hash tables)
 * from NVRAM values.  Serialized against the other function via the
 * DDR RAM hardware semaphore.  Returns 0 on success, -1 if the
 * semaphore could not be taken.
 */
static int ql_init_misc_registers(struct ql3_adapter *qdev)
{
	struct ql3xxx_local_ram_registers __iomem *local_ram =
	    (void __iomem *)qdev->mem_map_registers;

	if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE |
			 (qdev->mac_index) * 2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);

	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);

	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}

/*
 * Bring the adapter hardware fully up: PHY out of reset, queue base
 * addresses/lengths, buffer-queue producer indices, one-time chip
 * configuration, MAC address programming and function enable.
 * Caller holds hw_lock (it is dropped/retaken around the config-
 * complete polling loop below).  Returns 0 on success, -1 on failure.
 */
static int ql_adapter_initialize(struct ql3_adapter *qdev)
{
	u32 value;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
	    (void __iomem *)port_regs;
	u32 delay = 10;
	int status = 0;
	unsigned long hw_flags = 0;

	if(ql_mii_setup(qdev))
		return -1;

	/* Bring out PHY out of reset */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_WE |
			     (ISP_SERIAL_PORT_IF_WE << 16)));
	/* Give the PHY time to come out of reset. */
	mdelay(100);
	qdev->port_link_state = LS_DOWN;
	netif_carrier_off(qdev->ndev);

	/* V2 chip fix for ARS-39168.
 */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_SDE |
			     (ISP_SERIAL_PORT_IF_SDE << 16)));

	/* Request Queue Registers */
	/* Zero the shadow consumer index before telling the chip where
	 * to DMA it. */
	*((u32 *) (qdev->preq_consumer_index)) = 0;
	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
	qdev->req_producer_index = 0;

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrHigh,
			   qdev->req_consumer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrLow,
			   qdev->req_consumer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrHigh,
			   MS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrLow,
			   LS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);

	/* Response Queue Registers */
	/* NOTE(review): this stores 16 bits into a location declared as
	 * __le32 (prsp_producer_index) — presumably a legacy cast;
	 * verify against the shadow-register layout. */
	*((__le16 *) (qdev->prsp_producer_index)) = 0;
	qdev->rsp_consumer_index = 0;
	qdev->rsp_current = qdev->rsp_q_virt_addr;

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrHigh,
			   qdev->rsp_producer_index_phy_addr_high);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrLow,
			   qdev->rsp_producer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrHigh,
			   MS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrLow,
			   LS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);

	/* Large Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrHigh,
			   MS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrLow,
			   LS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQLength,
			   qdev->num_lbufq_entries);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeBufferLength,
			   qdev->lrg_buffer_len);

	/* Small Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrHigh,
			   MS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrLow,
			   LS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength,
			   NUM_SBUFQ_ENTRIES);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallBufferLength,
			   QL_SMALL_BUFFER_SIZE);

	/* Both buffer queues start completely full (producer = last
	 * entry); release counters batch producer-index updates. */
	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
	    (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
	qdev->small_buf_index = 0;
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_free_count = 0;
	qdev->lrg_buf_free_head = NULL;
	qdev->lrg_buf_free_tail = NULL;

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);

	/*
	 * Find out if the chip has already been initialized.  If it has, then
	 * we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if(ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		/* External HW config is protected by the flash semaphore. */
		if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE |
				 (qdev->mac_index) * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (qdev->mac_index)
		ql_write_page0_reg(qdev,
				   &port_regs->mac1MaxFrameLengthReg,
				   qdev->max_frame_size);
	else
		ql_write_page0_reg(qdev,
				   &port_regs->mac0MaxFrameLengthReg,
				   qdev->max_frame_size);

	/* PHY/GIO access is shared with the other function. */
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		status = -1;
		goto out;
	}

	PHY_Setup(qdev);
	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));

	/* Poll (up to 10 x 500ms) for the chip to report Initialization
	 * Complete; hw_lock is released across each sleep. */
	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		msleep(500);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	} while (--delay);

	if (delay == 0) {
		printk(KERN_ERR PFX
		       "%s: Hw Initialization timeout.\n", qdev->ndev->name);
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value = (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
			 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
			 QL3032_PORT_CONTROL_ET);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value = (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
			 PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}

out:
	return status;
}

/*
 * Caller holds hw_lock.
 *
 * Soft-reset the chip, escalating to a force soft reset if the firmware
 * does not acknowledge within 5 seconds.  Returns 0 on success, 1 if
 * even the forced reset timed out.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	int status = 0;
	u16 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	printk(KERN_DEBUG PFX
	       "%s: Issue soft reset to chip.\n",
	       qdev->ndev->name);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/* Wait up to 5 seconds for the reset to complete.
 */
	printk(KERN_DEBUG PFX
	       "%s: Wait 10 milliseconds for reset to complete.\n",
	       qdev->ndev->name);

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = 5;
	do {
		value =
		    ql_read_common_reg(qdev,
				       &port_regs->CommonRegs.ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		printk(KERN_DEBUG PFX
		       "ql_adapter_reset: clearing RI after reset.\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Soft reset timed out: Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done
		 */
		max_wait_time = 5;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0) {
				break;
			}
			ssleep(1);
		} while ((--max_wait_time));
	}
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}

/*
 * Derive this function's identity from ispControlStatus: MAC index,
 * outbound IOCB opcode, mailbox bit mask, PHY address and whether the
 * link is optical (per portStatus), plus the port count from NVRAM.
 */
static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		/* SCSI functions are handled by a different driver. */
		printk(KERN_DEBUG PFX
		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
		       qdev->ndev->name, value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
}

/*
 * Log adapter identification (model, PCI geometry, MAC address) at
 * probe time.
 */
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	printk(KERN_INFO PFX
	       "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
	       DRV_NAME, qdev->index, qdev->chip_rev_id,
	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
	       qdev->pci_slot);
	printk(KERN_INFO PFX
	       "%s Interface.\n",
	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ?
"OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	printk(KERN_INFO PFX
	       "Bus interface is %s %s.\n",
	       ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
	       ((qdev->pci_x) ? "PCI-X" : "PCI"));

	printk(KERN_INFO PFX
	       "mem IO base address adjusted = 0x%p\n",
	       qdev->mem_map_registers);
	printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);

	if (netif_msg_probe(qdev))
		printk(KERN_INFO PFX
		       "%s: MAC address %pM\n",
		       ndev->name, ndev->dev_addr);
}

/*
 * Tear the adapter down: stop traffic, release the IRQ (and MSI),
 * stop the timer and NAPI, optionally soft-reset the chip under
 * hw_lock/driver lock, then free all memory resources.
 * Returns 0 on success, -1 if the driver lock could not be taken.
 */
static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	napi_disable(&qdev->napi);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			if ((soft_reset = ql_adapter_reset(qdev))) {
				printk(KERN_ERR PFX
				       "%s: ql_adapter_reset(%d) FAILED!\n",
				       ndev->name, qdev->index);
			}
			printk(KERN_ERR PFX
				"%s: Releaseing driver lock via chip reset.\n",ndev->name);
		} else {
			printk(KERN_ERR PFX
			       "%s: Could not acquire driver lock to do "
			       "reset!\n", ndev->name);
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}

/*
 * Bring the adapter up: allocate memory resources, enable MSI if
 * requested (falling back to legacy interrupts), request the IRQ,
 * initialize the hardware under hw_lock/driver lock, then start the
 * watchdog timer, NAPI and interrupts.  Returns 0 or a negative errno.
 */
static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		printk(KERN_ERR PFX
			"%s Unable to  allocate buffers.\n", ndev->name);
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			printk(KERN_ERR PFX
			       "%s: User requested MSI, but MSI failed to "
			       "initialize.  Continuing without MSI.\n",
			       qdev->ndev->name);
			qdev->msi = 0;
		} else {
			printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			/* MSI interrupts are never shared. */
			irq_flags &= ~IRQF_SHARED;
		}
	}

	if ((err = request_irq(qdev->pdev->irq,
			       ql3xxx_isr,
			       irq_flags, ndev->name, ndev))) {
		printk(KERN_ERR PFX
		       "%s: Failed to reserve interrupt %d already in use.\n",
		       ndev->name, qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if ((err = ql_wait_for_drvr_lock(qdev))) {
		if ((err = ql_adapter_initialize(qdev))) {
			printk(KERN_ERR PFX
			       "%s: Unable to initialize adapter.\n",
			       ndev->name);
			goto err_init;
		}
		printk(KERN_ERR PFX
				"%s: Releaseing driver lock.\n",ndev->name);
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		printk(KERN_ERR PFX
		       "%s: Could not aquire driver lock.\n",
		       ndev->name);
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	napi_enable(&qdev->napi);
	ql_enable_interrupts(qdev);
	return 0;

	/* Error unwind: undo hardware init, locks, IRQ and MSI. */
err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n",
		       qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}

/*
 * Down-then-up cycle used by the reset path; closes the device for
 * good if the cycle fails.  Returns 0 on success, -1 on failure.
 */
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) {
		printk(KERN_ERR PFX
				"%s: Driver up/down cycle failed, "
				"closing device\n",qdev->ndev->name);
		rtnl_lock();
		dev_close(qdev->ndev);
		rtnl_unlock();
		return -1;
	}
	return 0;
}

static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
*/ while (!test_bit(QL_ADAPTER_UP,&qdev->flags)) msleep(50); ql_adapter_down(qdev,QL_DO_RESET); return 0; } static int ql3xxx_open(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); return (ql_adapter_up(qdev)); } static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) { struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; struct sockaddr *addr = p; unsigned long hw_flags; if (netif_running(ndev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); spin_lock_irqsave(&qdev->hw_lock, hw_flags); /* Program lower 32 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, ((ndev->dev_addr[2] << 24) | (ndev-> dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); /* Program top 16 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } static void ql3xxx_tx_timeout(struct net_device *ndev) { struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name); /* * Stop the queues, we've got a problem. */ netif_stop_queue(ndev); /* * Wake up the worker to process this event. 
*/ queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); } static void ql_reset_work(struct work_struct *work) { struct ql3_adapter *qdev = container_of(work, struct ql3_adapter, reset_work.work); struct net_device *ndev = qdev->ndev; u32 value; struct ql_tx_buf_cb *tx_cb; int max_wait_time, i; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; unsigned long hw_flags; if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START),&qdev->flags)) { clear_bit(QL_LINK_MASTER,&qdev->flags); /* * Loop through the active list and return the skb. */ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { int j; tx_cb = &qdev->tx_buf[i]; if (tx_cb->skb) { printk(KERN_DEBUG PFX "%s: Freeing lost SKB.\n", qdev->ndev->name); pci_unmap_single(qdev->pdev, pci_unmap_addr(&tx_cb->map[0], mapaddr), pci_unmap_len(&tx_cb->map[0], maplen), PCI_DMA_TODEVICE); for(j=1;j<tx_cb->seg_count;j++) { pci_unmap_page(qdev->pdev, pci_unmap_addr(&tx_cb->map[j],mapaddr), pci_unmap_len(&tx_cb->map[j],maplen), PCI_DMA_TODEVICE); } dev_kfree_skb(tx_cb->skb); tx_cb->skb = NULL; } } printk(KERN_ERR PFX "%s: Clearing NRI after reset.\n", qdev->ndev->name); spin_lock_irqsave(&qdev->hw_lock, hw_flags); ql_write_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus, ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); /* * Wait the for Soft Reset to Complete. */ max_wait_time = 10; do { value = ql_read_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus); if ((value & ISP_CONTROL_SR) == 0) { printk(KERN_DEBUG PFX "%s: reset completed.\n", qdev->ndev->name); break; } if (value & ISP_CONTROL_RI) { printk(KERN_DEBUG PFX "%s: clearing NRI after reset.\n", qdev->ndev->name); ql_write_common_reg(qdev, &port_regs-> CommonRegs. 
ispControlStatus, ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); ssleep(1); spin_lock_irqsave(&qdev->hw_lock, hw_flags); } while (--max_wait_time); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); if (value & ISP_CONTROL_SR) { /* * Set the reset flags and clear the board again. * Nothing else to do... */ printk(KERN_ERR PFX "%s: Timed out waiting for reset to " "complete.\n", ndev->name); printk(KERN_ERR PFX "%s: Do a reset.\n", ndev->name); clear_bit(QL_RESET_PER_SCSI,&qdev->flags); clear_bit(QL_RESET_START,&qdev->flags); ql_cycle_adapter(qdev,QL_DO_RESET); return; } clear_bit(QL_RESET_ACTIVE,&qdev->flags); clear_bit(QL_RESET_PER_SCSI,&qdev->flags); clear_bit(QL_RESET_START,&qdev->flags); ql_cycle_adapter(qdev,QL_NO_RESET); } } static void ql_tx_timeout_work(struct work_struct *work) { struct ql3_adapter *qdev = container_of(work, struct ql3_adapter, tx_timeout_work.work); ql_cycle_adapter(qdev, QL_DO_RESET); } static void ql_get_board_info(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); if (value & PORT_STATUS_64) qdev->pci_width = 64; else qdev->pci_width = 32; if (value & PORT_STATUS_X) qdev->pci_x = 1; else qdev->pci_x = 0; qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); } static void ql3xxx_timer(unsigned long ptr) { struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); } static const struct net_device_ops ql3xxx_netdev_ops = { .ndo_open = ql3xxx_open, .ndo_start_xmit = ql3xxx_send, .ndo_stop = ql3xxx_close, .ndo_set_multicast_list = NULL, /* not allowed on NIC side */ .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ql3xxx_set_mac_address, .ndo_tx_timeout = ql3xxx_tx_timeout, }; static int __devinit 
ql3xxx_probe(struct pci_dev *pdev, const struct pci_device_id *pci_entry) { struct net_device *ndev = NULL; struct ql3_adapter *qdev = NULL; static int cards_found = 0; int uninitialized_var(pci_using_dac), err; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR PFX "%s cannot enable PCI device\n", pci_name(pdev)); goto err_out; } err = pci_request_regions(pdev, DRV_NAME); if (err) { printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", pci_name(pdev)); goto err_out_disable_pdev; } pci_set_master(pdev); if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { pci_using_dac = 1; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { pci_using_dac = 0; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); } if (err) { printk(KERN_ERR PFX "%s no usable DMA configuration\n", pci_name(pdev)); goto err_out_free_regions; } ndev = alloc_etherdev(sizeof(struct ql3_adapter)); if (!ndev) { printk(KERN_ERR PFX "%s could not alloc etherdev\n", pci_name(pdev)); err = -ENOMEM; goto err_out_free_regions; } SET_NETDEV_DEV(ndev, &pdev->dev); pci_set_drvdata(pdev, ndev); qdev = netdev_priv(ndev); qdev->index = cards_found; qdev->ndev = ndev; qdev->pdev = pdev; qdev->device_id = pci_entry->device; qdev->port_link_state = LS_DOWN; if (msi) qdev->msi = 1; qdev->msg_enable = netif_msg_init(debug, default_msg); if (pci_using_dac) ndev->features |= NETIF_F_HIGHDMA; if (qdev->device_id == QL3032_DEVICE_ID) ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); if (!qdev->mem_map_registers) { printk(KERN_ERR PFX "%s: cannot map device registers\n", pci_name(pdev)); err = -EIO; goto err_out_free_ndev; } spin_lock_init(&qdev->adapter_lock); spin_lock_init(&qdev->hw_lock); /* Set driver entry points */ ndev->netdev_ops = &ql3xxx_netdev_ops; SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); ndev->watchdog_timeo = 5 * HZ; netif_napi_add(ndev, &qdev->napi, ql_poll, 64); ndev->irq = 
pdev->irq; /* make sure the EEPROM is good */ if (ql_get_nvram_params(qdev)) { printk(KERN_ALERT PFX "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n", qdev->index); err = -EIO; goto err_out_iounmap; } ql_set_mac_info(qdev); /* Validate and set parameters */ if (qdev->mac_index) { ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); } else { ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); } memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; /* Record PCI bus information. */ ql_get_board_info(qdev); /* * Set the Maximum Memory Read Byte Count value. We do this to handle * jumbo frames. */ if (qdev->pci_x) { pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); } err = register_netdev(ndev); if (err) { printk(KERN_ERR PFX "%s: cannot register net device\n", pci_name(pdev)); goto err_out_iounmap; } /* we're going to reset, so assume we have no link for now */ netif_carrier_off(ndev); netif_stop_queue(ndev); qdev->workqueue = create_singlethread_workqueue(ndev->name); INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); init_timer(&qdev->adapter_timer); qdev->adapter_timer.function = ql3xxx_timer; qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ qdev->adapter_timer.data = (unsigned long)qdev; if(!cards_found) { printk(KERN_ALERT PFX "%s\n", DRV_STRING); printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n", DRV_NAME, DRV_VERSION); } ql_display_dev_info(ndev); cards_found++; return 0; err_out_iounmap: iounmap(qdev->mem_map_registers); err_out_free_ndev: free_netdev(ndev); err_out_free_regions: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); err_out: return err; } static 
void __devexit ql3xxx_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql3_adapter *qdev = netdev_priv(ndev); unregister_netdev(ndev); qdev = netdev_priv(ndev); ql_disable_interrupts(qdev); if (qdev->workqueue) { cancel_delayed_work(&qdev->reset_work); cancel_delayed_work(&qdev->tx_timeout_work); destroy_workqueue(qdev->workqueue); qdev->workqueue = NULL; } iounmap(qdev->mem_map_registers); pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); free_netdev(ndev); } static struct pci_driver ql3xxx_driver = { .name = DRV_NAME, .id_table = ql3xxx_pci_tbl, .probe = ql3xxx_probe, .remove = __devexit_p(ql3xxx_remove), }; static int __init ql3xxx_init_module(void) { return pci_register_driver(&ql3xxx_driver); } static void __exit ql3xxx_exit(void) { pci_unregister_driver(&ql3xxx_driver); } module_init(ql3xxx_init_module); module_exit(ql3xxx_exit);
gpl-2.0
gongwan33/hiveboard_linux_with_sonix291_uvcdriver
arch/arm/mach-omap1/pm.c
497
19611
/* * linux/arch/arm/mach-omap1/pm.c * * OMAP Power Management Routines * * Original code for the SA11x0: * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com> * * Modified for the PXA250 by Nicolas Pitre: * Copyright (c) 2002 Monta Vista Software, Inc. * * Modified for the OMAP1510 by David Singleton: * Copyright (c) 2002 Monta Vista Software, Inc. * * Cleanup 2004 for OMAP1510/1610 by Dirk Behme <dirk.behme@de.bosch.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/suspend.h> #include <linux/sched.h> #include <linux/proc_fs.h> #include <linux/interrupt.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/atomic.h> #include <asm/mach/time.h> #include <asm/mach/irq.h> #include <mach/cpu.h> #include <mach/irqs.h> #include <mach/clock.h> #include <mach/sram.h> #include <mach/tc.h> #include <mach/mux.h> #include <mach/dma.h> #include <mach/dmtimer.h> #include "pm.h" static unsigned int arm_sleep_save[ARM_SLEEP_SAVE_SIZE]; static unsigned short dsp_sleep_save[DSP_SLEEP_SAVE_SIZE]; static unsigned short ulpd_sleep_save[ULPD_SLEEP_SAVE_SIZE]; static unsigned int mpui730_sleep_save[MPUI730_SLEEP_SAVE_SIZE]; static unsigned int mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_SIZE]; static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE]; #ifdef CONFIG_OMAP_32K_TIMER static unsigned short enable_dyn_sleep = 1; static ssize_t idle_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%hu\n", enable_dyn_sleep); } static ssize_t idle_store(struct kobject *kobj, struct kobj_attribute *attr, const char * buf, size_t n) { unsigned short value; if (sscanf(buf, "%hu", &value) != 1 || (value != 0 && value != 1)) { printk(KERN_ERR "idle_sleep_store: Invalid value\n"); return -EINVAL; } enable_dyn_sleep = value; return n; } static struct kobj_attribute sleep_while_idle_attr = __ATTR(sleep_while_idle, 0644, idle_show, idle_store); #endif static void (*omap_sram_suspend)(unsigned long r0, unsigned long r1) = NULL; /* * Let's power down on idle, but only if we are really * idle, because once we start down the path of * going idle we continue to do idle even if we get * a clock tick interrupt . . 
*/ void omap1_pm_idle(void) { extern __u32 arm_idlect1_mask; __u32 use_idlect1 = arm_idlect1_mask; int do_sleep = 0; local_irq_disable(); local_fiq_disable(); if (need_resched()) { local_fiq_enable(); local_irq_enable(); return; } #ifdef CONFIG_OMAP_MPU_TIMER #warning Enable 32kHz OS timer in order to allow sleep states in idle use_idlect1 = use_idlect1 & ~(1 << 9); #else while (enable_dyn_sleep) { #ifdef CONFIG_CBUS_TAHVO_USB extern int vbus_active; /* Clock requirements? */ if (vbus_active) break; #endif do_sleep = 1; break; } #endif #ifdef CONFIG_OMAP_DM_TIMER use_idlect1 = omap_dm_timer_modify_idlect_mask(use_idlect1); #endif if (omap_dma_running()) use_idlect1 &= ~(1 << 6); /* We should be able to remove the do_sleep variable and multiple * tests above as soon as drivers, timer and DMA code have been fixed. * Even the sleep block count should become obsolete. */ if ((use_idlect1 != ~0) || !do_sleep) { __u32 saved_idlect1 = omap_readl(ARM_IDLECT1); if (cpu_is_omap15xx()) use_idlect1 &= OMAP1510_BIG_SLEEP_REQUEST; else use_idlect1 &= OMAP1610_IDLECT1_SLEEP_VAL; omap_writel(use_idlect1, ARM_IDLECT1); __asm__ volatile ("mcr p15, 0, r0, c7, c0, 4"); omap_writel(saved_idlect1, ARM_IDLECT1); local_fiq_enable(); local_irq_enable(); return; } omap_sram_suspend(omap_readl(ARM_IDLECT1), omap_readl(ARM_IDLECT2)); local_fiq_enable(); local_irq_enable(); } /* * Configuration of the wakeup event is board specific. For the * moment we put it into this helper function. Later it may move * to board specific files. */ static void omap_pm_wakeup_setup(void) { u32 level1_wake = 0; u32 level2_wake = OMAP_IRQ_BIT(INT_UART2); /* * Turn off all interrupts except GPIO bank 1, L1-2nd level cascade, * and the L2 wakeup interrupts: keypad and UART2. Note that the * drivers must still separately call omap_set_gpio_wakeup() to * wake up to a GPIO interrupt. 
*/ if (cpu_is_omap730()) level1_wake = OMAP_IRQ_BIT(INT_730_GPIO_BANK1) | OMAP_IRQ_BIT(INT_730_IH2_IRQ); else if (cpu_is_omap15xx()) level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) | OMAP_IRQ_BIT(INT_1510_IH2_IRQ); else if (cpu_is_omap16xx()) level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) | OMAP_IRQ_BIT(INT_1610_IH2_IRQ); omap_writel(~level1_wake, OMAP_IH1_MIR); if (cpu_is_omap730()) { omap_writel(~level2_wake, OMAP_IH2_0_MIR); omap_writel(~(OMAP_IRQ_BIT(INT_730_WAKE_UP_REQ) | OMAP_IRQ_BIT(INT_730_MPUIO_KEYPAD)), OMAP_IH2_1_MIR); } else if (cpu_is_omap15xx()) { level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD); omap_writel(~level2_wake, OMAP_IH2_MIR); } else if (cpu_is_omap16xx()) { level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD); omap_writel(~level2_wake, OMAP_IH2_0_MIR); /* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */ omap_writel(~OMAP_IRQ_BIT(INT_1610_WAKE_UP_REQ), OMAP_IH2_1_MIR); omap_writel(~0x0, OMAP_IH2_2_MIR); omap_writel(~0x0, OMAP_IH2_3_MIR); } /* New IRQ agreement, recalculate in cascade order */ omap_writel(1, OMAP_IH2_CONTROL); omap_writel(1, OMAP_IH1_CONTROL); } #define EN_DSPCK 13 /* ARM_CKCTL */ #define EN_APICK 6 /* ARM_IDLECT2 */ #define DSP_EN 1 /* ARM_RSTCT1 */ void omap1_pm_suspend(void) { unsigned long arg0 = 0, arg1 = 0; printk(KERN_INFO "PM: OMAP%x is trying to enter deep sleep...\n", omap_rev()); omap_serial_wake_trigger(1); if (!cpu_is_omap15xx()) omap_writew(0xffff, ULPD_SOFT_DISABLE_REQ_REG); /* * Step 1: turn off interrupts (FIXME: NOTE: already disabled) */ local_irq_disable(); local_fiq_disable(); /* * Step 2: save registers * * The omap is a strange/beautiful device. The caches, memory * and register state are preserved across power saves. * We have to save and restore very little register state to * idle the omap. * * Save interrupt, MPUI, ARM and UPLD control registers. 
*/ if (cpu_is_omap730()) { MPUI730_SAVE(OMAP_IH1_MIR); MPUI730_SAVE(OMAP_IH2_0_MIR); MPUI730_SAVE(OMAP_IH2_1_MIR); MPUI730_SAVE(MPUI_CTRL); MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI730_SAVE(MPUI_DSP_API_CONFIG); MPUI730_SAVE(EMIFS_CONFIG); MPUI730_SAVE(EMIFF_SDRAM_CONFIG); } else if (cpu_is_omap15xx()) { MPUI1510_SAVE(OMAP_IH1_MIR); MPUI1510_SAVE(OMAP_IH2_MIR); MPUI1510_SAVE(MPUI_CTRL); MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI1510_SAVE(MPUI_DSP_API_CONFIG); MPUI1510_SAVE(EMIFS_CONFIG); MPUI1510_SAVE(EMIFF_SDRAM_CONFIG); } else if (cpu_is_omap16xx()) { MPUI1610_SAVE(OMAP_IH1_MIR); MPUI1610_SAVE(OMAP_IH2_0_MIR); MPUI1610_SAVE(OMAP_IH2_1_MIR); MPUI1610_SAVE(OMAP_IH2_2_MIR); MPUI1610_SAVE(OMAP_IH2_3_MIR); MPUI1610_SAVE(MPUI_CTRL); MPUI1610_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI1610_SAVE(MPUI_DSP_API_CONFIG); MPUI1610_SAVE(EMIFS_CONFIG); MPUI1610_SAVE(EMIFF_SDRAM_CONFIG); } ARM_SAVE(ARM_CKCTL); ARM_SAVE(ARM_IDLECT1); ARM_SAVE(ARM_IDLECT2); if (!(cpu_is_omap15xx())) ARM_SAVE(ARM_IDLECT3); ARM_SAVE(ARM_EWUPCT); ARM_SAVE(ARM_RSTCT1); ARM_SAVE(ARM_RSTCT2); ARM_SAVE(ARM_SYSST); ULPD_SAVE(ULPD_CLOCK_CTRL); ULPD_SAVE(ULPD_STATUS_REQ); /* (Step 3 removed - we now allow deep sleep by default) */ /* * Step 4: OMAP DSP Shutdown */ /* stop DSP */ omap_writew(omap_readw(ARM_RSTCT1) & ~(1 << DSP_EN), ARM_RSTCT1); /* shut down dsp_ck */ if (!cpu_is_omap730()) omap_writew(omap_readw(ARM_CKCTL) & ~(1 << EN_DSPCK), ARM_CKCTL); /* temporarily enabling api_ck to access DSP registers */ omap_writew(omap_readw(ARM_IDLECT2) | 1 << EN_APICK, ARM_IDLECT2); /* save DSP registers */ DSP_SAVE(DSP_IDLECT2); /* Stop all DSP domain clocks */ __raw_writew(0, DSP_IDLECT2); /* * Step 5: Wakeup Event Setup */ omap_pm_wakeup_setup(); /* * Step 6: ARM and Traffic controller shutdown */ /* disable ARM watchdog */ omap_writel(0x00F5, OMAP_WDT_TIMER_MODE); omap_writel(0x00A0, OMAP_WDT_TIMER_MODE); /* * Step 6b: ARM and Traffic controller shutdown * * Step 6 continues here. 
Prepare jump to power management * assembly code in internal SRAM. * * Since the omap_cpu_suspend routine has been copied to * SRAM, we'll do an indirect procedure call to it and pass the * contents of arm_idlect1 and arm_idlect2 so it can restore * them when it wakes up and it will return. */ arg0 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT1]; arg1 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT2]; /* * Step 6c: ARM and Traffic controller shutdown * * Jump to assembly code. The processor will stay there * until wake up. */ omap_sram_suspend(arg0, arg1); /* * If we are here, processor is woken up! */ /* * Restore DSP clocks */ /* again temporarily enabling api_ck to access DSP registers */ omap_writew(omap_readw(ARM_IDLECT2) | 1 << EN_APICK, ARM_IDLECT2); /* Restore DSP domain clocks */ DSP_RESTORE(DSP_IDLECT2); /* * Restore ARM state, except ARM_IDLECT1/2 which omap_cpu_suspend did */ if (!(cpu_is_omap15xx())) ARM_RESTORE(ARM_IDLECT3); ARM_RESTORE(ARM_CKCTL); ARM_RESTORE(ARM_EWUPCT); ARM_RESTORE(ARM_RSTCT1); ARM_RESTORE(ARM_RSTCT2); ARM_RESTORE(ARM_SYSST); ULPD_RESTORE(ULPD_CLOCK_CTRL); ULPD_RESTORE(ULPD_STATUS_REQ); if (cpu_is_omap730()) { MPUI730_RESTORE(EMIFS_CONFIG); MPUI730_RESTORE(EMIFF_SDRAM_CONFIG); MPUI730_RESTORE(OMAP_IH1_MIR); MPUI730_RESTORE(OMAP_IH2_0_MIR); MPUI730_RESTORE(OMAP_IH2_1_MIR); } else if (cpu_is_omap15xx()) { MPUI1510_RESTORE(MPUI_CTRL); MPUI1510_RESTORE(MPUI_DSP_BOOT_CONFIG); MPUI1510_RESTORE(MPUI_DSP_API_CONFIG); MPUI1510_RESTORE(EMIFS_CONFIG); MPUI1510_RESTORE(EMIFF_SDRAM_CONFIG); MPUI1510_RESTORE(OMAP_IH1_MIR); MPUI1510_RESTORE(OMAP_IH2_MIR); } else if (cpu_is_omap16xx()) { MPUI1610_RESTORE(MPUI_CTRL); MPUI1610_RESTORE(MPUI_DSP_BOOT_CONFIG); MPUI1610_RESTORE(MPUI_DSP_API_CONFIG); MPUI1610_RESTORE(EMIFS_CONFIG); MPUI1610_RESTORE(EMIFF_SDRAM_CONFIG); MPUI1610_RESTORE(OMAP_IH1_MIR); MPUI1610_RESTORE(OMAP_IH2_0_MIR); MPUI1610_RESTORE(OMAP_IH2_1_MIR); MPUI1610_RESTORE(OMAP_IH2_2_MIR); MPUI1610_RESTORE(OMAP_IH2_3_MIR); } if (!cpu_is_omap15xx()) 
omap_writew(0, ULPD_SOFT_DISABLE_REQ_REG); /* * Re-enable interrupts */ local_irq_enable(); local_fiq_enable(); omap_serial_wake_trigger(0); printk(KERN_INFO "PM: OMAP%x is re-starting from deep sleep...\n", omap_rev()); } #if defined(DEBUG) && defined(CONFIG_PROC_FS) static int g_read_completed; /* * Read system PM registers for debugging */ static int omap_pm_read_proc( char *page_buffer, char **my_first_byte, off_t virtual_start, int length, int *eof, void *data) { int my_buffer_offset = 0; char * const my_base = page_buffer; ARM_SAVE(ARM_CKCTL); ARM_SAVE(ARM_IDLECT1); ARM_SAVE(ARM_IDLECT2); if (!(cpu_is_omap15xx())) ARM_SAVE(ARM_IDLECT3); ARM_SAVE(ARM_EWUPCT); ARM_SAVE(ARM_RSTCT1); ARM_SAVE(ARM_RSTCT2); ARM_SAVE(ARM_SYSST); ULPD_SAVE(ULPD_IT_STATUS); ULPD_SAVE(ULPD_CLOCK_CTRL); ULPD_SAVE(ULPD_SOFT_REQ); ULPD_SAVE(ULPD_STATUS_REQ); ULPD_SAVE(ULPD_DPLL_CTRL); ULPD_SAVE(ULPD_POWER_CTRL); if (cpu_is_omap730()) { MPUI730_SAVE(MPUI_CTRL); MPUI730_SAVE(MPUI_DSP_STATUS); MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI730_SAVE(MPUI_DSP_API_CONFIG); MPUI730_SAVE(EMIFF_SDRAM_CONFIG); MPUI730_SAVE(EMIFS_CONFIG); } else if (cpu_is_omap15xx()) { MPUI1510_SAVE(MPUI_CTRL); MPUI1510_SAVE(MPUI_DSP_STATUS); MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI1510_SAVE(MPUI_DSP_API_CONFIG); MPUI1510_SAVE(EMIFF_SDRAM_CONFIG); MPUI1510_SAVE(EMIFS_CONFIG); } else if (cpu_is_omap16xx()) { MPUI1610_SAVE(MPUI_CTRL); MPUI1610_SAVE(MPUI_DSP_STATUS); MPUI1610_SAVE(MPUI_DSP_BOOT_CONFIG); MPUI1610_SAVE(MPUI_DSP_API_CONFIG); MPUI1610_SAVE(EMIFF_SDRAM_CONFIG); MPUI1610_SAVE(EMIFS_CONFIG); } if (virtual_start == 0) { g_read_completed = 0; my_buffer_offset += sprintf(my_base + my_buffer_offset, "ARM_CKCTL_REG: 0x%-8x \n" "ARM_IDLECT1_REG: 0x%-8x \n" "ARM_IDLECT2_REG: 0x%-8x \n" "ARM_IDLECT3_REG: 0x%-8x \n" "ARM_EWUPCT_REG: 0x%-8x \n" "ARM_RSTCT1_REG: 0x%-8x \n" "ARM_RSTCT2_REG: 0x%-8x \n" "ARM_SYSST_REG: 0x%-8x \n" "ULPD_IT_STATUS_REG: 0x%-4x \n" "ULPD_CLOCK_CTRL_REG: 0x%-4x \n" "ULPD_SOFT_REQ_REG: 0x%-4x \n" 
"ULPD_DPLL_CTRL_REG: 0x%-4x \n" "ULPD_STATUS_REQ_REG: 0x%-4x \n" "ULPD_POWER_CTRL_REG: 0x%-4x \n", ARM_SHOW(ARM_CKCTL), ARM_SHOW(ARM_IDLECT1), ARM_SHOW(ARM_IDLECT2), ARM_SHOW(ARM_IDLECT3), ARM_SHOW(ARM_EWUPCT), ARM_SHOW(ARM_RSTCT1), ARM_SHOW(ARM_RSTCT2), ARM_SHOW(ARM_SYSST), ULPD_SHOW(ULPD_IT_STATUS), ULPD_SHOW(ULPD_CLOCK_CTRL), ULPD_SHOW(ULPD_SOFT_REQ), ULPD_SHOW(ULPD_DPLL_CTRL), ULPD_SHOW(ULPD_STATUS_REQ), ULPD_SHOW(ULPD_POWER_CTRL)); if (cpu_is_omap730()) { my_buffer_offset += sprintf(my_base + my_buffer_offset, "MPUI730_CTRL_REG 0x%-8x \n" "MPUI730_DSP_STATUS_REG: 0x%-8x \n" "MPUI730_DSP_BOOT_CONFIG_REG: 0x%-8x \n" "MPUI730_DSP_API_CONFIG_REG: 0x%-8x \n" "MPUI730_SDRAM_CONFIG_REG: 0x%-8x \n" "MPUI730_EMIFS_CONFIG_REG: 0x%-8x \n", MPUI730_SHOW(MPUI_CTRL), MPUI730_SHOW(MPUI_DSP_STATUS), MPUI730_SHOW(MPUI_DSP_BOOT_CONFIG), MPUI730_SHOW(MPUI_DSP_API_CONFIG), MPUI730_SHOW(EMIFF_SDRAM_CONFIG), MPUI730_SHOW(EMIFS_CONFIG)); } else if (cpu_is_omap15xx()) { my_buffer_offset += sprintf(my_base + my_buffer_offset, "MPUI1510_CTRL_REG 0x%-8x \n" "MPUI1510_DSP_STATUS_REG: 0x%-8x \n" "MPUI1510_DSP_BOOT_CONFIG_REG: 0x%-8x \n" "MPUI1510_DSP_API_CONFIG_REG: 0x%-8x \n" "MPUI1510_SDRAM_CONFIG_REG: 0x%-8x \n" "MPUI1510_EMIFS_CONFIG_REG: 0x%-8x \n", MPUI1510_SHOW(MPUI_CTRL), MPUI1510_SHOW(MPUI_DSP_STATUS), MPUI1510_SHOW(MPUI_DSP_BOOT_CONFIG), MPUI1510_SHOW(MPUI_DSP_API_CONFIG), MPUI1510_SHOW(EMIFF_SDRAM_CONFIG), MPUI1510_SHOW(EMIFS_CONFIG)); } else if (cpu_is_omap16xx()) { my_buffer_offset += sprintf(my_base + my_buffer_offset, "MPUI1610_CTRL_REG 0x%-8x \n" "MPUI1610_DSP_STATUS_REG: 0x%-8x \n" "MPUI1610_DSP_BOOT_CONFIG_REG: 0x%-8x \n" "MPUI1610_DSP_API_CONFIG_REG: 0x%-8x \n" "MPUI1610_SDRAM_CONFIG_REG: 0x%-8x \n" "MPUI1610_EMIFS_CONFIG_REG: 0x%-8x \n", MPUI1610_SHOW(MPUI_CTRL), MPUI1610_SHOW(MPUI_DSP_STATUS), MPUI1610_SHOW(MPUI_DSP_BOOT_CONFIG), MPUI1610_SHOW(MPUI_DSP_API_CONFIG), MPUI1610_SHOW(EMIFF_SDRAM_CONFIG), MPUI1610_SHOW(EMIFS_CONFIG)); } g_read_completed++; } else if 
(g_read_completed >= 1) { *eof = 1; return 0; } g_read_completed++; *my_first_byte = page_buffer; return my_buffer_offset; } static void omap_pm_init_proc(void) { struct proc_dir_entry *entry; entry = create_proc_read_entry("driver/omap_pm", S_IWUSR | S_IRUGO, NULL, omap_pm_read_proc, NULL); } #endif /* DEBUG && CONFIG_PROC_FS */ static void (*saved_idle)(void) = NULL; /* * omap_pm_prepare - Do preliminary suspend work. * */ static int omap_pm_prepare(void) { /* We cannot sleep in idle until we have resumed */ saved_idle = pm_idle; pm_idle = NULL; return 0; } /* * omap_pm_enter - Actually enter a sleep state. * @state: State we're entering. * */ static int omap_pm_enter(suspend_state_t state) { switch (state) { case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: omap1_pm_suspend(); break; default: return -EINVAL; } return 0; } /** * omap_pm_finish - Finish up suspend sequence. * * This is called after we wake back up (or if entering the sleep state * failed). */ static void omap_pm_finish(void) { pm_idle = saved_idle; } static irqreturn_t omap_wakeup_interrupt(int irq, void *dev) { return IRQ_HANDLED; } static struct irqaction omap_wakeup_irq = { .name = "peripheral wakeup", .flags = IRQF_DISABLED, .handler = omap_wakeup_interrupt }; static struct platform_suspend_ops omap_pm_ops ={ .prepare = omap_pm_prepare, .enter = omap_pm_enter, .finish = omap_pm_finish, .valid = suspend_valid_only_mem, }; static int __init omap_pm_init(void) { #ifdef CONFIG_OMAP_32K_TIMER int error; #endif printk("Power Management for TI OMAP.\n"); /* * We copy the assembler sleep/wakeup routines to SRAM. * These routines need to be in SRAM as that's the only * memory the MPU can see when it wakes up. 
*/ if (cpu_is_omap730()) { omap_sram_suspend = omap_sram_push(omap730_cpu_suspend, omap730_cpu_suspend_sz); } else if (cpu_is_omap15xx()) { omap_sram_suspend = omap_sram_push(omap1510_cpu_suspend, omap1510_cpu_suspend_sz); } else if (cpu_is_omap16xx()) { omap_sram_suspend = omap_sram_push(omap1610_cpu_suspend, omap1610_cpu_suspend_sz); } if (omap_sram_suspend == NULL) { printk(KERN_ERR "PM not initialized: Missing SRAM support\n"); return -ENODEV; } pm_idle = omap1_pm_idle; if (cpu_is_omap730()) setup_irq(INT_730_WAKE_UP_REQ, &omap_wakeup_irq); else if (cpu_is_omap16xx()) setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq); /* Program new power ramp-up time * (0 for most boards since we don't lower voltage when in deep sleep) */ omap_writew(ULPD_SETUP_ANALOG_CELL_3_VAL, ULPD_SETUP_ANALOG_CELL_3); /* Setup ULPD POWER_CTRL_REG - enter deep sleep whenever possible */ omap_writew(ULPD_POWER_CTRL_REG_VAL, ULPD_POWER_CTRL); /* Configure IDLECT3 */ if (cpu_is_omap730()) omap_writel(OMAP730_IDLECT3_VAL, OMAP730_IDLECT3); else if (cpu_is_omap16xx()) omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3); suspend_set_ops(&omap_pm_ops); #if defined(DEBUG) && defined(CONFIG_PROC_FS) omap_pm_init_proc(); #endif #ifdef CONFIG_OMAP_32K_TIMER error = sysfs_create_file(power_kobj, &sleep_while_idle_attr.attr); if (error) printk(KERN_ERR "sysfs_create_file failed: %d\n", error); #endif if (cpu_is_omap16xx()) { /* configure LOW_PWR pin */ omap_cfg_reg(T20_1610_LOW_PWR); } return 0; } __initcall(omap_pm_init);
gpl-2.0
tzanussi/linux-yocto-micro-3.19
drivers/mfd/twl4030-irq.c
753
20704
/* * twl4030-irq.c - TWL4030/TPS659x0 irq support * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * Modifications to defer interrupt handling to a kernel thread: * Copyright (C) 2006 MontaVista Software, Inc. * * Based on tlv320aic23.c: * Copyright (c) by Kai Svahn <kai.svahn@nokia.com> * * Code cleanup and modifications to IRQ handler. * by syed khasim <x0khasim@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/export.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/irqdomain.h> #include <linux/i2c/twl.h> #include "twl-core.h" /* * TWL4030 IRQ handling has two stages in hardware, and thus in software. * The Primary Interrupt Handler (PIH) stage exposes status bits saying * which Secondary Interrupt Handler (SIH) stage is raising an interrupt. * SIH modules are more traditional IRQ components, which support per-IRQ * enable/disable and trigger controls; they do most of the work. * * These chips are designed to support IRQ handling from two different * I2C masters. Each has a dedicated IRQ line, and dedicated IRQ status * and mask registers in the PIH and SIH modules. * * We set up IRQs starting at a platform-specified base, always starting * with PIH and the SIH for PWR_INT and then usually adding GPIO: * base + 0 .. 
base + 7 PIH * base + 8 .. base + 15 SIH for PWR_INT * base + 16 .. base + 33 SIH for GPIO */ #define TWL4030_CORE_NR_IRQS 8 #define TWL4030_PWR_NR_IRQS 8 /* PIH register offsets */ #define REG_PIH_ISR_P1 0x01 #define REG_PIH_ISR_P2 0x02 #define REG_PIH_SIR 0x03 /* for testing */ /* Linux could (eventually) use either IRQ line */ static int irq_line; struct sih { char name[8]; u8 module; /* module id */ u8 control_offset; /* for SIH_CTRL */ bool set_cor; u8 bits; /* valid in isr/imr */ u8 bytes_ixr; /* bytelen of ISR/IMR/SIR */ u8 edr_offset; u8 bytes_edr; /* bytelen of EDR */ u8 irq_lines; /* number of supported irq lines */ /* SIR ignored -- set interrupt, for testing only */ struct sih_irq_data { u8 isr_offset; u8 imr_offset; } mask[2]; /* + 2 bytes padding */ }; static const struct sih *sih_modules; static int nr_sih_modules; #define SIH_INITIALIZER(modname, nbits) \ .module = TWL4030_MODULE_ ## modname, \ .control_offset = TWL4030_ ## modname ## _SIH_CTRL, \ .bits = nbits, \ .bytes_ixr = DIV_ROUND_UP(nbits, 8), \ .edr_offset = TWL4030_ ## modname ## _EDR, \ .bytes_edr = DIV_ROUND_UP((2*(nbits)), 8), \ .irq_lines = 2, \ .mask = { { \ .isr_offset = TWL4030_ ## modname ## _ISR1, \ .imr_offset = TWL4030_ ## modname ## _IMR1, \ }, \ { \ .isr_offset = TWL4030_ ## modname ## _ISR2, \ .imr_offset = TWL4030_ ## modname ## _IMR2, \ }, }, /* register naming policies are inconsistent ... */ #define TWL4030_INT_PWR_EDR TWL4030_INT_PWR_EDR1 #define TWL4030_MODULE_KEYPAD_KEYP TWL4030_MODULE_KEYPAD #define TWL4030_MODULE_INT_PWR TWL4030_MODULE_INT /* * Order in this table matches order in PIH_ISR. That is, * BIT(n) in PIH_ISR is sih_modules[n]. 
*/ /* sih_modules_twl4030 is used both in twl4030 and twl5030 */ static const struct sih sih_modules_twl4030[6] = { [0] = { .name = "gpio", .module = TWL4030_MODULE_GPIO, .control_offset = REG_GPIO_SIH_CTRL, .set_cor = true, .bits = TWL4030_GPIO_MAX, .bytes_ixr = 3, /* Note: *all* of these IRQs default to no-trigger */ .edr_offset = REG_GPIO_EDR1, .bytes_edr = 5, .irq_lines = 2, .mask = { { .isr_offset = REG_GPIO_ISR1A, .imr_offset = REG_GPIO_IMR1A, }, { .isr_offset = REG_GPIO_ISR1B, .imr_offset = REG_GPIO_IMR1B, }, }, }, [1] = { .name = "keypad", .set_cor = true, SIH_INITIALIZER(KEYPAD_KEYP, 4) }, [2] = { .name = "bci", .module = TWL4030_MODULE_INTERRUPTS, .control_offset = TWL4030_INTERRUPTS_BCISIHCTRL, .set_cor = true, .bits = 12, .bytes_ixr = 2, .edr_offset = TWL4030_INTERRUPTS_BCIEDR1, /* Note: most of these IRQs default to no-trigger */ .bytes_edr = 3, .irq_lines = 2, .mask = { { .isr_offset = TWL4030_INTERRUPTS_BCIISR1A, .imr_offset = TWL4030_INTERRUPTS_BCIIMR1A, }, { .isr_offset = TWL4030_INTERRUPTS_BCIISR1B, .imr_offset = TWL4030_INTERRUPTS_BCIIMR1B, }, }, }, [3] = { .name = "madc", SIH_INITIALIZER(MADC, 4) }, [4] = { /* USB doesn't use the same SIH organization */ .name = "usb", }, [5] = { .name = "power", .set_cor = true, SIH_INITIALIZER(INT_PWR, 8) }, /* there are no SIH modules #6 or #7 ... 
*/ }; static const struct sih sih_modules_twl5031[8] = { [0] = { .name = "gpio", .module = TWL4030_MODULE_GPIO, .control_offset = REG_GPIO_SIH_CTRL, .set_cor = true, .bits = TWL4030_GPIO_MAX, .bytes_ixr = 3, /* Note: *all* of these IRQs default to no-trigger */ .edr_offset = REG_GPIO_EDR1, .bytes_edr = 5, .irq_lines = 2, .mask = { { .isr_offset = REG_GPIO_ISR1A, .imr_offset = REG_GPIO_IMR1A, }, { .isr_offset = REG_GPIO_ISR1B, .imr_offset = REG_GPIO_IMR1B, }, }, }, [1] = { .name = "keypad", .set_cor = true, SIH_INITIALIZER(KEYPAD_KEYP, 4) }, [2] = { .name = "bci", .module = TWL5031_MODULE_INTERRUPTS, .control_offset = TWL5031_INTERRUPTS_BCISIHCTRL, .bits = 7, .bytes_ixr = 1, .edr_offset = TWL5031_INTERRUPTS_BCIEDR1, /* Note: most of these IRQs default to no-trigger */ .bytes_edr = 2, .irq_lines = 2, .mask = { { .isr_offset = TWL5031_INTERRUPTS_BCIISR1, .imr_offset = TWL5031_INTERRUPTS_BCIIMR1, }, { .isr_offset = TWL5031_INTERRUPTS_BCIISR2, .imr_offset = TWL5031_INTERRUPTS_BCIIMR2, }, }, }, [3] = { .name = "madc", SIH_INITIALIZER(MADC, 4) }, [4] = { /* USB doesn't use the same SIH organization */ .name = "usb", }, [5] = { .name = "power", .set_cor = true, SIH_INITIALIZER(INT_PWR, 8) }, [6] = { /* * ECI/DBI doesn't use the same SIH organization. * For example, it supports only one interrupt output line. * That is, the interrupts are seen on both INT1 and INT2 lines. 
*/ .name = "eci_dbi", .module = TWL5031_MODULE_ACCESSORY, .bits = 9, .bytes_ixr = 2, .irq_lines = 1, .mask = { { .isr_offset = TWL5031_ACIIDR_LSB, .imr_offset = TWL5031_ACIIMR_LSB, }, }, }, [7] = { /* Audio accessory */ .name = "audio", .module = TWL5031_MODULE_ACCESSORY, .control_offset = TWL5031_ACCSIHCTRL, .bits = 2, .bytes_ixr = 1, .edr_offset = TWL5031_ACCEDR1, /* Note: most of these IRQs default to no-trigger */ .bytes_edr = 1, .irq_lines = 2, .mask = { { .isr_offset = TWL5031_ACCISR1, .imr_offset = TWL5031_ACCIMR1, }, { .isr_offset = TWL5031_ACCISR2, .imr_offset = TWL5031_ACCIMR2, }, }, }, }; #undef TWL4030_MODULE_KEYPAD_KEYP #undef TWL4030_MODULE_INT_PWR #undef TWL4030_INT_PWR_EDR /*----------------------------------------------------------------------*/ static unsigned twl4030_irq_base; /* * handle_twl4030_pih() is the desc->handle method for the twl4030 interrupt. * This is a chained interrupt, so there is no desc->action method for it. * Now we need to query the interrupt controller in the twl4030 to determine * which module is generating the interrupt request. However, we can't do i2c * transactions in interrupt context, so we must defer that work to a kernel * thread. All we do here is acknowledge and mask the interrupt and wakeup * the kernel thread. */ static irqreturn_t handle_twl4030_pih(int irq, void *devid) { irqreturn_t ret; u8 pih_isr; ret = twl_i2c_read_u8(TWL_MODULE_PIH, &pih_isr, REG_PIH_ISR_P1); if (ret) { pr_warn("twl4030: I2C error %d reading PIH ISR\n", ret); return IRQ_NONE; } while (pih_isr) { unsigned long pending = __ffs(pih_isr); unsigned int irq; pih_isr &= ~BIT(pending); irq = pending + twl4030_irq_base; handle_nested_irq(irq); } return IRQ_HANDLED; } /*----------------------------------------------------------------------*/ /* * twl4030_init_sih_modules() ... start from a known state where no * IRQs will be coming in, and where we can quickly enable them then * handle them as they arrive. Mask all IRQs: maybe init SIH_CTRL. 
* * NOTE: we don't touch EDR registers here; they stay with hardware * defaults or whatever the last value was. Note that when both EDR * bits for an IRQ are clear, that's as if its IMR bit is set... */ static int twl4030_init_sih_modules(unsigned line) { const struct sih *sih; u8 buf[4]; int i; int status; /* line 0 == int1_n signal; line 1 == int2_n signal */ if (line > 1) return -EINVAL; irq_line = line; /* disable all interrupts on our line */ memset(buf, 0xff, sizeof(buf)); sih = sih_modules; for (i = 0; i < nr_sih_modules; i++, sih++) { /* skip USB -- it's funky */ if (!sih->bytes_ixr) continue; /* Not all the SIH modules support multiple interrupt lines */ if (sih->irq_lines <= line) continue; status = twl_i2c_write(sih->module, buf, sih->mask[line].imr_offset, sih->bytes_ixr); if (status < 0) pr_err("twl4030: err %d initializing %s %s\n", status, sih->name, "IMR"); /* * Maybe disable "exclusive" mode; buffer second pending irq; * set Clear-On-Read (COR) bit. * * NOTE that sometimes COR polarity is documented as being * inverted: for MADC, COR=1 means "clear on write". * And for PWR_INT it's not documented... */ if (sih->set_cor) { status = twl_i2c_write_u8(sih->module, TWL4030_SIH_CTRL_COR_MASK, sih->control_offset); if (status < 0) pr_err("twl4030: err %d initializing %s %s\n", status, sih->name, "SIH_CTRL"); } } sih = sih_modules; for (i = 0; i < nr_sih_modules; i++, sih++) { u8 rxbuf[4]; int j; /* skip USB */ if (!sih->bytes_ixr) continue; /* Not all the SIH modules support multiple interrupt lines */ if (sih->irq_lines <= line) continue; /* * Clear pending interrupt status. Either the read was * enough, or we need to write those bits. Repeat, in * case an IRQ is pending (PENDDIS=0) ... that's not * uncommon with PWR_INT.PWRON. 
*/ for (j = 0; j < 2; j++) { status = twl_i2c_read(sih->module, rxbuf, sih->mask[line].isr_offset, sih->bytes_ixr); if (status < 0) pr_warn("twl4030: err %d initializing %s %s\n", status, sih->name, "ISR"); if (!sih->set_cor) { status = twl_i2c_write(sih->module, buf, sih->mask[line].isr_offset, sih->bytes_ixr); if (status < 0) pr_warn("twl4030: write failed: %d\n", status); } /* * else COR=1 means read sufficed. * (for most SIH modules...) */ } } return 0; } static inline void activate_irq(int irq) { #ifdef CONFIG_ARM /* * ARM requires an extra step to clear IRQ_NOREQUEST, which it * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE. */ set_irq_flags(irq, IRQF_VALID); #else /* same effect on other architectures */ irq_set_noprobe(irq); #endif } /*----------------------------------------------------------------------*/ struct sih_agent { int irq_base; const struct sih *sih; u32 imr; bool imr_change_pending; u32 edge_change; struct mutex irq_lock; char *irq_name; }; /*----------------------------------------------------------------------*/ /* * All irq_chip methods get issued from code holding irq_desc[irq].lock, * which can't perform the underlying I2C operations (because they sleep). * So we must hand them off to a thread (workqueue) and cope with asynch * completion, potentially including some re-ordering, of these requests. 
*/ static void twl4030_sih_mask(struct irq_data *data) { struct sih_agent *agent = irq_data_get_irq_chip_data(data); agent->imr |= BIT(data->irq - agent->irq_base); agent->imr_change_pending = true; } static void twl4030_sih_unmask(struct irq_data *data) { struct sih_agent *agent = irq_data_get_irq_chip_data(data); agent->imr &= ~BIT(data->irq - agent->irq_base); agent->imr_change_pending = true; } static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger) { struct sih_agent *agent = irq_data_get_irq_chip_data(data); if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) return -EINVAL; if (irqd_get_trigger_type(data) != trigger) agent->edge_change |= BIT(data->irq - agent->irq_base); return 0; } static void twl4030_sih_bus_lock(struct irq_data *data) { struct sih_agent *agent = irq_data_get_irq_chip_data(data); mutex_lock(&agent->irq_lock); } static void twl4030_sih_bus_sync_unlock(struct irq_data *data) { struct sih_agent *agent = irq_data_get_irq_chip_data(data); const struct sih *sih = agent->sih; int status; if (agent->imr_change_pending) { union { u32 word; u8 bytes[4]; } imr; /* byte[0] gets overwritten as we write ... */ imr.word = cpu_to_le32(agent->imr); agent->imr_change_pending = false; /* write the whole mask ... simpler than subsetting it */ status = twl_i2c_write(sih->module, imr.bytes, sih->mask[irq_line].imr_offset, sih->bytes_ixr); if (status) pr_err("twl4030: %s, %s --> %d\n", __func__, "write", status); } if (agent->edge_change) { u32 edge_change; u8 bytes[6]; edge_change = agent->edge_change; agent->edge_change = 0; /* * Read, reserving first byte for write scratch. Yes, this * could be cached for some speedup ... but be careful about * any processor on the other IRQ line, EDR registers are * shared. 
*/ status = twl_i2c_read(sih->module, bytes, sih->edr_offset, sih->bytes_edr); if (status) { pr_err("twl4030: %s, %s --> %d\n", __func__, "read", status); return; } /* Modify only the bits we know must change */ while (edge_change) { int i = fls(edge_change) - 1; int byte = i >> 2; int off = (i & 0x3) * 2; unsigned int type; bytes[byte] &= ~(0x03 << off); type = irq_get_trigger_type(i + agent->irq_base); if (type & IRQ_TYPE_EDGE_RISING) bytes[byte] |= BIT(off + 1); if (type & IRQ_TYPE_EDGE_FALLING) bytes[byte] |= BIT(off + 0); edge_change &= ~BIT(i); } /* Write */ status = twl_i2c_write(sih->module, bytes, sih->edr_offset, sih->bytes_edr); if (status) pr_err("twl4030: %s, %s --> %d\n", __func__, "write", status); } mutex_unlock(&agent->irq_lock); } static struct irq_chip twl4030_sih_irq_chip = { .name = "twl4030", .irq_mask = twl4030_sih_mask, .irq_unmask = twl4030_sih_unmask, .irq_set_type = twl4030_sih_set_type, .irq_bus_lock = twl4030_sih_bus_lock, .irq_bus_sync_unlock = twl4030_sih_bus_sync_unlock, .flags = IRQCHIP_SKIP_SET_WAKE, }; /*----------------------------------------------------------------------*/ static inline int sih_read_isr(const struct sih *sih) { int status; union { u8 bytes[4]; u32 word; } isr; /* FIXME need retry-on-error ... */ isr.word = 0; status = twl_i2c_read(sih->module, isr.bytes, sih->mask[irq_line].isr_offset, sih->bytes_ixr); return (status < 0) ? status : le32_to_cpu(isr.word); } /* * Generic handler for SIH interrupts ... we "know" this is called * in task context, with IRQs enabled. 
*/ static irqreturn_t handle_twl4030_sih(int irq, void *data) { struct sih_agent *agent = irq_get_handler_data(irq); const struct sih *sih = agent->sih; int isr; /* reading ISR acks the IRQs, using clear-on-read mode */ isr = sih_read_isr(sih); if (isr < 0) { pr_err("twl4030: %s SIH, read ISR error %d\n", sih->name, isr); /* REVISIT: recover; eventually mask it all, etc */ return IRQ_HANDLED; } while (isr) { irq = fls(isr); irq--; isr &= ~BIT(irq); if (irq < sih->bits) handle_nested_irq(agent->irq_base + irq); else pr_err("twl4030: %s SIH, invalid ISR bit %d\n", sih->name, irq); } return IRQ_HANDLED; } /* returns the first IRQ used by this SIH bank, or negative errno */ int twl4030_sih_setup(struct device *dev, int module, int irq_base) { int sih_mod; const struct sih *sih = NULL; struct sih_agent *agent; int i, irq; int status = -EINVAL; /* only support modules with standard clear-on-read for now */ for (sih_mod = 0, sih = sih_modules; sih_mod < nr_sih_modules; sih_mod++, sih++) { if (sih->module == module && sih->set_cor) { status = 0; break; } } if (status < 0) return status; agent = kzalloc(sizeof(*agent), GFP_KERNEL); if (!agent) return -ENOMEM; agent->irq_base = irq_base; agent->sih = sih; agent->imr = ~0; mutex_init(&agent->irq_lock); for (i = 0; i < sih->bits; i++) { irq = irq_base + i; irq_set_chip_data(irq, agent); irq_set_chip_and_handler(irq, &twl4030_sih_irq_chip, handle_edge_irq); irq_set_nested_thread(irq, 1); activate_irq(irq); } /* replace generic PIH handler (handle_simple_irq) */ irq = sih_mod + twl4030_irq_base; irq_set_handler_data(irq, agent); agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name); status = request_threaded_irq(irq, NULL, handle_twl4030_sih, IRQF_EARLY_RESUME, agent->irq_name ?: sih->name, NULL); dev_info(dev, "%s (irq %d) chaining IRQs %d..%d\n", sih->name, irq, irq_base, irq_base + i - 1); return status < 0 ? status : irq_base; } /* FIXME need a call to reverse twl4030_sih_setup() ... 
*/ /*----------------------------------------------------------------------*/ /* FIXME pass in which interrupt line we'll use ... */ #define twl_irq_line 0 int twl4030_init_irq(struct device *dev, int irq_num) { static struct irq_chip twl4030_irq_chip; int status, i; int irq_base, irq_end, nr_irqs; struct device_node *node = dev->of_node; /* * TWL core and pwr interrupts must be contiguous because * the hwirqs numbers are defined contiguously from 1 to 15. * Create only one domain for both. */ nr_irqs = TWL4030_PWR_NR_IRQS + TWL4030_CORE_NR_IRQS; irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0); if (IS_ERR_VALUE(irq_base)) { dev_err(dev, "Fail to allocate IRQ descs\n"); return irq_base; } irq_domain_add_legacy(node, nr_irqs, irq_base, 0, &irq_domain_simple_ops, NULL); irq_end = irq_base + TWL4030_CORE_NR_IRQS; /* * Mask and clear all TWL4030 interrupts since initially we do * not have any TWL4030 module interrupt handlers present */ status = twl4030_init_sih_modules(twl_irq_line); if (status < 0) return status; twl4030_irq_base = irq_base; /* * Install an irq handler for each of the SIH modules; * clone dummy irq_chip since PIH can't *do* anything */ twl4030_irq_chip = dummy_irq_chip; twl4030_irq_chip.name = "twl4030"; twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; for (i = irq_base; i < irq_end; i++) { irq_set_chip_and_handler(i, &twl4030_irq_chip, handle_simple_irq); irq_set_nested_thread(i, 1); activate_irq(i); } dev_info(dev, "%s (irq %d) chaining IRQs %d..%d\n", "PIH", irq_num, irq_base, irq_end); /* ... and the PWR_INT module ... 
*/ status = twl4030_sih_setup(dev, TWL4030_MODULE_INT, irq_end); if (status < 0) { dev_err(dev, "sih_setup PWR INT --> %d\n", status); goto fail; } /* install an irq handler to demultiplex the TWL4030 interrupt */ status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, IRQF_ONESHOT, "TWL4030-PIH", NULL); if (status < 0) { dev_err(dev, "could not claim irq%d: %d\n", irq_num, status); goto fail_rqirq; } enable_irq_wake(irq_num); return irq_base; fail_rqirq: /* clean up twl4030_sih_setup */ fail: for (i = irq_base; i < irq_end; i++) { irq_set_nested_thread(i, 0); irq_set_chip_and_handler(i, NULL, NULL); } return status; } int twl4030_exit_irq(void) { /* FIXME undo twl_init_irq() */ if (twl4030_irq_base) { pr_err("twl4030: can't yet clean up IRQs?\n"); return -ENOSYS; } return 0; } int twl4030_init_chip_irq(const char *chip) { if (!strcmp(chip, "twl5031")) { sih_modules = sih_modules_twl5031; nr_sih_modules = ARRAY_SIZE(sih_modules_twl5031); } else { sih_modules = sih_modules_twl4030; nr_sih_modules = ARRAY_SIZE(sih_modules_twl4030); } return 0; }
gpl-2.0
blackwing182/htc-kernel-msm7x30-3.0
drivers/tty/serial/atmel_serial.c
753
45961
/* * Driver for Atmel AT91 / AT32 Serial ports * Copyright (C) 2003 Rick Bronson * * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd. * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * DMA support added by Chip Coldwell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/tty.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/clk.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/tty_flip.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/atmel_pdc.h> #include <linux/atmel_serial.h> #include <linux/uaccess.h> #include <asm/io.h> #include <asm/ioctls.h> #include <asm/mach/serial_at91.h> #include <mach/board.h> #ifdef CONFIG_ARM #include <mach/cpu.h> #include <mach/gpio.h> #endif #define PDC_BUFFER_SIZE 512 /* Revisit: We should calculate this based on the actual port settings */ #define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */ #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/serial_core.h> static void atmel_start_rx(struct uart_port *port); static void atmel_stop_rx(struct uart_port *port); #ifdef CONFIG_SERIAL_ATMEL_TTYAT /* Use device name ttyAT, major 204 and 
minor 154-169. This is necessary if we * should coexist with the 8250 driver, such as if we have an external 16C550 * UART. */ #define SERIAL_ATMEL_MAJOR 204 #define MINOR_START 154 #define ATMEL_DEVICENAME "ttyAT" #else /* Use device name ttyS, major 4, minor 64-68. This is the usual serial port * name, but it is legally reserved for the 8250 driver. */ #define SERIAL_ATMEL_MAJOR TTY_MAJOR #define MINOR_START 64 #define ATMEL_DEVICENAME "ttyS" #endif #define ATMEL_ISR_PASS_LIMIT 256 /* UART registers. CR is write-only, hence no GET macro */ #define UART_PUT_CR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_CR) #define UART_GET_MR(port) __raw_readl((port)->membase + ATMEL_US_MR) #define UART_PUT_MR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_MR) #define UART_PUT_IER(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IER) #define UART_PUT_IDR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IDR) #define UART_GET_IMR(port) __raw_readl((port)->membase + ATMEL_US_IMR) #define UART_GET_CSR(port) __raw_readl((port)->membase + ATMEL_US_CSR) #define UART_GET_CHAR(port) __raw_readl((port)->membase + ATMEL_US_RHR) #define UART_PUT_CHAR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_THR) #define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR) #define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR) #define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR) #define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR) /* PDC registers */ #define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR) #define UART_GET_PTSR(port) __raw_readl((port)->membase + ATMEL_PDC_PTSR) #define UART_PUT_RPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RPR) #define UART_GET_RPR(port) __raw_readl((port)->membase + ATMEL_PDC_RPR) #define UART_PUT_RCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RCR) #define UART_PUT_RNPR(port,v) __raw_writel(v, (port)->membase + 
ATMEL_PDC_RNPR)
#define UART_PUT_RNCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_RNCR)
#define UART_PUT_TPR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_TPR)
#define UART_PUT_TCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_TCR)
#define UART_GET_TCR(port)	__raw_readl((port)->membase + ATMEL_PDC_TCR)

/* Optional board-specific hooks, installed by atmel_register_uart_fns(). */
static int (*atmel_open_hook)(struct uart_port *);
static void (*atmel_close_hook)(struct uart_port *);

/* One DMA-able buffer handed to the PDC (Peripheral DMA Controller). */
struct atmel_dma_buffer {
	unsigned char	*buf;		/* CPU-visible buffer */
	dma_addr_t	dma_addr;	/* bus address of buf */
	unsigned int	dma_size;	/* mapped size, in bytes */
	unsigned int	ofs;		/* bytes already consumed/queued */
};

/* Status + character pair stored in the PIO receive ring buffer. */
struct atmel_uart_char {
	u16		status;
	u16		ch;
};

/* Must stay a power of two: head/tail are masked with RINGSIZE - 1. */
#define ATMEL_SERIAL_RINGSIZE 1024

/*
 * We wrap our port structure around the generic uart_port.
 */
struct atmel_uart_port {
	struct uart_port	uart;		/* uart */
	struct clk		*clk;		/* uart clock */
	int			may_wakeup;	/* cached value of device_may_wakeup
						   for times we need to disable it */
	u32			backup_imr;	/* IMR saved during suspend */
	int			break_active;	/* break being received */

	short			use_dma_rx;	/* enable PDC receiver */
	short			pdc_rx_idx;	/* current PDC RX buffer */
	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */

	short			use_dma_tx;	/* enable PDC transmitter */
	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */

	struct tasklet_struct	tasklet;	/* bottom half, see atmel_tasklet_func() */
	unsigned int		irq_status;	/* CSR latched by the irq handler */
	unsigned int		irq_status_prev; /* previous CSR, for edge detection */

	struct circ_buf		rx_ring;	/* PIO receive ring (atmel_uart_char) */

	struct serial_rs485	rs485;		/* rs485 settings */
	unsigned int		tx_done_mask;	/* CSR bits meaning "tx done" in this mode */
};

static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];

#ifdef SUPPORT_SYSRQ
static struct console atmel_console;
#endif

/* Convert a generic uart_port back to our wrapping structure. */
static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
	return container_of(uart, struct atmel_uart_port, uart);
}

#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_dma_rx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_dma_rx;
}

static bool atmel_use_dma_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_dma_tx;
}
#else
/* Without PDC support the driver always uses PIO. */
static bool atmel_use_dma_rx(struct uart_port *port)
{
	return false;
}

static bool atmel_use_dma_tx(struct uart_port *port)
{
	return false;
}
#endif

/* Enable or disable the rs485 support */
void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int mode;

	/*
	 * NOTE(review): reached from the TIOCSRS485 ioctl (process context)
	 * while port->lock is also taken by the tasklet (softirq); a plain
	 * spin_lock here looks insufficient against softirq preemption on
	 * the same CPU — confirm against the lock's other users.
	 */
	spin_lock(&port->lock);

	/* Disable interrupts */
	UART_PUT_IDR(port, atmel_port->tx_done_mask);

	mode = UART_GET_MR(port);

	/* Resetting serial mode to RS232 (0x0) */
	mode &= ~ATMEL_US_USMODE;

	atmel_port->rs485 = *rs485conf;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		/* RS485 completion is signalled by TXEMPTY, not TXRDY/ENDTX */
		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
		if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
			UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
		if (atmel_use_dma_tx(port))
			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
				ATMEL_US_TXBUFE;
		else
			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}

	UART_PUT_MR(port, mode);

	/* Enable interrupts */
	UART_PUT_IER(port, atmel_port->tx_done_mask);

	spin_unlock(&port->lock);
}

/*
 * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
 */
static u_int atmel_tx_empty(struct uart_port *port)
{
	return (UART_GET_CSR(port) & ATMEL_US_TXEMPTY) ? TIOCSER_TEMT : 0;
}

/*
 * Set state of the modem control output lines
 */
static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
	unsigned int control = 0;
	unsigned int mode;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

#ifdef CONFIG_ARCH_AT91RM9200
	if (cpu_is_at91rm9200()) {
		/*
		 * AT91RM9200 Errata #39: RTS0 is not internally connected
		 * to PA21. We need to drive the pin manually.
		 */
		if (port->mapbase == AT91RM9200_BASE_US0) {
			if (mctrl & TIOCM_RTS)
				at91_set_gpio_value(AT91_PIN_PA21, 0);
			else
				at91_set_gpio_value(AT91_PIN_PA21, 1);
		}
	}
#endif

	/* RTS/DTR are driven via enable/disable command bits in CR */
	if (mctrl & TIOCM_RTS)
		control |= ATMEL_US_RTSEN;
	else
		control |= ATMEL_US_RTSDIS;

	if (mctrl & TIOCM_DTR)
		control |= ATMEL_US_DTREN;
	else
		control |= ATMEL_US_DTRDIS;

	UART_PUT_CR(port, control);

	/* Local loopback mode? */
	mode = UART_GET_MR(port) & ~ATMEL_US_CHMODE;
	if (mctrl & TIOCM_LOOP)
		mode |= ATMEL_US_CHMODE_LOC_LOOP;
	else
		mode |= ATMEL_US_CHMODE_NORMAL;

	/* Resetting serial mode to RS232 (0x0) */
	mode &= ~ATMEL_US_USMODE;

	if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
			UART_PUT_TTGR(port,
					atmel_port->rs485.delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
	}
	UART_PUT_MR(port, mode);
}

/*
 * Get state of the modem control input lines
 */
static u_int atmel_get_mctrl(struct uart_port *port)
{
	unsigned int status, ret = 0;

	status = UART_GET_CSR(port);

	/*
	 * The control signals are active low.
	 */
	if (!(status & ATMEL_US_DCD))
		ret |= TIOCM_CD;
	if (!(status & ATMEL_US_CTS))
		ret |= TIOCM_CTS;
	if (!(status & ATMEL_US_DSR))
		ret |= TIOCM_DSR;
	if (!(status & ATMEL_US_RI))
		ret |= TIOCM_RI;

	return ret;
}

/*
 * Stop transmitting.
 */
static void atmel_stop_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_dma_tx(port)) {
		/* disable PDC transmit */
		UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
	}
	/* Disable interrupts */
	UART_PUT_IDR(port, atmel_port->tx_done_mask);

	/* RS485 is half-duplex: re-enable the receiver once TX stops */
	if (atmel_port->rs485.flags & SER_RS485_ENABLED)
		atmel_start_rx(port);
}

/*
 * Start transmitting.
 */
static void atmel_start_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_dma_tx(port)) {
		if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
			/* The transmitter is already running.  Yes, we
			   really need this.*/
			return;

		/* RS485 is half-duplex: mute the receiver while sending */
		if (atmel_port->rs485.flags & SER_RS485_ENABLED)
			atmel_stop_rx(port);

		/* re-enable PDC transmit */
		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
	}
	/* Enable interrupts */
	UART_PUT_IER(port, atmel_port->tx_done_mask);
}

/*
 * start receiving - port is in process of being opened.
 */
static void atmel_start_rx(struct uart_port *port)
{
	UART_PUT_CR(port, ATMEL_US_RSTSTA);  /* reset status and receiver */

	if (atmel_use_dma_rx(port)) {
		/* enable PDC controller */
		UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
			port->read_status_mask);
		UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
	} else {
		UART_PUT_IER(port, ATMEL_US_RXRDY);
	}
}

/*
 * Stop receiving - port is in process of being closed.
 */
static void atmel_stop_rx(struct uart_port *port)
{
	if (atmel_use_dma_rx(port)) {
		/* disable PDC receive */
		UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
		UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
			port->read_status_mask);
	} else {
		UART_PUT_IDR(port, ATMEL_US_RXRDY);
	}
}

/*
 * Enable modem status interrupts
 */
static void atmel_enable_ms(struct uart_port *port)
{
	UART_PUT_IER(port, ATMEL_US_RIIC | ATMEL_US_DSRIC
			| ATMEL_US_DCDIC | ATMEL_US_CTSIC);
}

/*
 * Control the transmission of a break signal
 */
static void atmel_break_ctl(struct uart_port *port, int break_state)
{
	if (break_state != 0)
		UART_PUT_CR(port, ATMEL_US_STTBRK);	/* start break */
	else
		UART_PUT_CR(port, ATMEL_US_STPBRK);	/* stop break */
}

/*
 * Stores the incoming character in the ring buffer
 */
static void
atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
		     unsigned int ch)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *ring = &atmel_port->rx_ring;
	struct atmel_uart_char *c;

	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
		/* Buffer overflow, ignore char */
		return;

	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
	c->status = status;
	c->ch = ch;

	/* Make sure the character is stored before we update head.
	 */
	smp_wmb();

	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
}

/*
 * Deal with parity, framing and overrun errors.
 */
static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
{
	/* clear error */
	UART_PUT_CR(port, ATMEL_US_RSTSTA);

	if (status & ATMEL_US_RXBRK) {
		/* ignore side-effect */
		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
		port->icount.brk++;
	}
	if (status & ATMEL_US_PARE)
		port->icount.parity++;
	if (status & ATMEL_US_FRAME)
		port->icount.frame++;
	if (status & ATMEL_US_OVRE)
		port->icount.overrun++;
}

/*
 * Characters received (called from interrupt handler)
 */
static void atmel_rx_chars(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, ch;

	status = UART_GET_CSR(port);
	while (status & ATMEL_US_RXRDY) {
		ch = UART_GET_CHAR(port);

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
			     || atmel_port->break_active)) {

			/* clear error */
			UART_PUT_CR(port, ATMEL_US_RSTSTA);

			if (status & ATMEL_US_RXBRK
			    && !atmel_port->break_active) {
				/* start of break: watch for its end */
				atmel_port->break_active = 1;
				UART_PUT_IER(port, ATMEL_US_RXBRK);
			} else {
				/*
				 * This is either the end-of-break
				 * condition or we've received at
				 * least one character without RXBRK
				 * being set. In both cases, the next
				 * RXBRK will indicate start-of-break.
				 */
				UART_PUT_IDR(port, ATMEL_US_RXBRK);
				status &= ~ATMEL_US_RXBRK;
				atmel_port->break_active = 0;
			}
		}

		atmel_buffer_rx_char(port, status, ch);
		status = UART_GET_CSR(port);
	}

	/* characters are pushed to the tty from the tasklet */
	tasklet_schedule(&atmel_port->tasklet);
}

/*
 * Transmit characters (called from tasklet with TXRDY interrupt
 * disabled)
 */
static void atmel_tx_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* send any pending high-priority character (e.g. XON/XOFF) first */
	if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
		UART_PUT_CHAR(port, port->x_char);
		port->icount.tx++;
		port->x_char = 0;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
		return;

	while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
		UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit))
		/* Enable interrupts */
		UART_PUT_IER(port, atmel_port->tx_done_mask);
}

/*
 * receive interrupt handler.
 */
static void
atmel_handle_receive(struct uart_port *port, unsigned int pending)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_dma_rx(port)) {
		/*
		 * PDC receive. Just schedule the tasklet and let it
		 * figure out the details.
		 *
		 * TODO: We're not handling error flags correctly at
		 * the moment.
		 */
		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
			UART_PUT_IDR(port, (ATMEL_US_ENDRX
						| ATMEL_US_TIMEOUT));
			tasklet_schedule(&atmel_port->tasklet);
		}

		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
				ATMEL_US_FRAME | ATMEL_US_PARE))
			atmel_pdc_rxerr(port, pending);
	}

	/* Interrupt receive */
	if (pending & ATMEL_US_RXRDY)
		atmel_rx_chars(port);
	else if (pending & ATMEL_US_RXBRK) {
		/*
		 * End of break detected. If it came along with a
		 * character, atmel_rx_chars will handle it.
		 */
		UART_PUT_CR(port, ATMEL_US_RSTSTA);
		UART_PUT_IDR(port, ATMEL_US_RXBRK);
		atmel_port->break_active = 0;
	}
}

/*
 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
 */
static void
atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (pending & atmel_port->tx_done_mask) {
		/* Either PDC or interrupt transmission */
		UART_PUT_IDR(port, atmel_port->tx_done_mask);
		tasklet_schedule(&atmel_port->tasklet);
	}
}

/*
 * status flags interrupt handler.
 */
static void
atmel_handle_status(struct uart_port *port, unsigned int pending,
		    unsigned int status)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
				| ATMEL_US_CTSIC)) {
		/* latch CSR for the tasklet, which does the real work */
		atmel_port->irq_status = status;
		tasklet_schedule(&atmel_port->tasklet);
	}
}

/*
 * Interrupt handler
 */
static irqreturn_t atmel_interrupt(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned int status, pending, pass_counter = 0;

	do {
		status = UART_GET_CSR(port);
		pending = status & UART_GET_IMR(port);
		if (!pending)
			break;

		atmel_handle_receive(port, pending);
		atmel_handle_status(port, pending, status);
		atmel_handle_transmit(port, pending);
	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);

	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
 */
static void atmel_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *xmit = &port->state->xmit;
	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
	int count;

	/* nothing left to transmit?  */
	if (UART_GET_TCR(port))
		return;

	/* account for the bytes consumed by the previous transfer */
	xmit->tail += pdc->ofs;
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += pdc->ofs;
	pdc->ofs = 0;

	/* more to transmit - setup next transfer */

	/* disable PDC transmit */
	UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
		dma_sync_single_for_device(port->dev,
					   pdc->dma_addr,
					   pdc->dma_size,
					   DMA_TO_DEVICE);

		/* contiguous run only; any wrap is handled next time round */
		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
		pdc->ofs = count;

		UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
		UART_PUT_TCR(port, count);
		/* re-enable PDC transmit */
		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
		/* Enable interrupts */
		UART_PUT_IER(port, atmel_port->tx_done_mask);
	} else {
		if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
			/* DMA done, stop TX, start RX for RS485 */
			atmel_start_rx(port);
		}
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}

/* Push characters buffered by atmel_rx_chars() into the tty layer. */
static void atmel_rx_from_ring(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *ring = &atmel_port->rx_ring;
	unsigned int flg;
	unsigned int status;

	while (ring->head != ring->tail) {
		struct atmel_uart_char c;

		/* Make sure c is loaded after head.
		 */
		smp_rmb();

		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);

		port->icount.rx++;
		status = c.status;
		flg = TTY_NORMAL;

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
			if (status & ATMEL_US_RXBRK) {
				/* ignore side-effect */
				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);

				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			}
			if (status & ATMEL_US_PARE)
				port->icount.parity++;
			if (status & ATMEL_US_FRAME)
				port->icount.frame++;
			if (status & ATMEL_US_OVRE)
				port->icount.overrun++;

			status &= port->read_status_mask;

			if (status & ATMEL_US_RXBRK)
				flg = TTY_BREAK;
			else if (status & ATMEL_US_PARE)
				flg = TTY_PARITY;
			else if (status & ATMEL_US_FRAME)
				flg = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(port, c.ch))
			continue;

		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
	}

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(port->state->port.tty);
	spin_lock(&port->lock);
}

/* Drain the two PDC receive buffers into the tty layer. */
static void atmel_rx_from_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct tty_struct *tty = port->state->port.tty;
	struct atmel_dma_buffer *pdc;
	int rx_idx = atmel_port->pdc_rx_idx;
	unsigned int head;
	unsigned int tail;
	unsigned int count;

	do {
		/* Reset the UART timeout early so that we don't miss one */
		UART_PUT_CR(port, ATMEL_US_STTTO);

		pdc = &atmel_port->pdc_rx[rx_idx];
		head = UART_GET_RPR(port) - pdc->dma_addr;
		tail = pdc->ofs;

		/* If the PDC has switched buffers, RPR won't contain
		 * any address within the current buffer. Since head
		 * is unsigned, we just need a one-way comparison to
		 * find out.
		 *
		 * In this case, we just need to consume the entire
		 * buffer and resubmit it for DMA. This will clear the
		 * ENDRX bit as well, so that we can safely re-enable
		 * all interrupts below.
		 */
		head = min(head, pdc->dma_size);

		if (likely(head != tail)) {
			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
					pdc->dma_size, DMA_FROM_DEVICE);

			/*
			 * head will only wrap around when we recycle
			 * the DMA buffer, and when that happens, we
			 * explicitly set tail to 0. So head will
			 * always be greater than tail.
			 */
			count = head - tail;

			tty_insert_flip_string(tty, pdc->buf + pdc->ofs,
						count);

			dma_sync_single_for_device(port->dev, pdc->dma_addr,
					pdc->dma_size, DMA_FROM_DEVICE);

			port->icount.rx += count;
			pdc->ofs = head;
		}

		/*
		 * If the current buffer is full, we need to check if
		 * the next one contains any additional data.
		 */
		if (head >= pdc->dma_size) {
			pdc->ofs = 0;
			/* hand the drained buffer back as the "next" one */
			UART_PUT_RNPR(port, pdc->dma_addr);
			UART_PUT_RNCR(port, pdc->dma_size);

			rx_idx = !rx_idx;
			atmel_port->pdc_rx_idx = rx_idx;
		}
	} while (head >= pdc->dma_size);

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tty);
	spin_lock(&port->lock);

	UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}

/*
 * tasklet handling tty stuff outside the interrupt handler.
 */
static void atmel_tasklet_func(unsigned long data)
{
	struct uart_port *port = (struct uart_port *)data;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status;
	unsigned int status_change;

	/* The interrupt handler does not take the lock */
	spin_lock(&port->lock);

	if (atmel_use_dma_tx(port))
		atmel_tx_dma(port);
	else
		atmel_tx_chars(port);

	/* modem-status lines: compare against the previously seen CSR */
	status = atmel_port->irq_status;
	status_change = status ^ atmel_port->irq_status_prev;

	if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
				| ATMEL_US_DCD | ATMEL_US_CTS)) {
		/* TODO: All reads to CSR will clear these interrupts!
*/ if (status_change & ATMEL_US_RI) port->icount.rng++; if (status_change & ATMEL_US_DSR) port->icount.dsr++; if (status_change & ATMEL_US_DCD) uart_handle_dcd_change(port, !(status & ATMEL_US_DCD)); if (status_change & ATMEL_US_CTS) uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); wake_up_interruptible(&port->state->port.delta_msr_wait); atmel_port->irq_status_prev = status; } if (atmel_use_dma_rx(port)) atmel_rx_from_dma(port); else atmel_rx_from_ring(port); spin_unlock(&port->lock); } /* * Perform initialization and enable port for reception */ static int atmel_startup(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); struct tty_struct *tty = port->state->port.tty; int retval; /* * Ensure that no interrupts are enabled otherwise when * request_irq() is called we could get stuck trying to * handle an unexpected interrupt */ UART_PUT_IDR(port, -1); /* * Allocate the IRQ */ retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED, tty ? tty->name : "atmel_serial", port); if (retval) { printk("atmel_serial: atmel_startup - Can't get irq\n"); return retval; } /* * Initialize DMA (if necessary) */ if (atmel_use_dma_rx(port)) { int i; for (i = 0; i < 2; i++) { struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL); if (pdc->buf == NULL) { if (i != 0) { dma_unmap_single(port->dev, atmel_port->pdc_rx[0].dma_addr, PDC_BUFFER_SIZE, DMA_FROM_DEVICE); kfree(atmel_port->pdc_rx[0].buf); } free_irq(port->irq, port); return -ENOMEM; } pdc->dma_addr = dma_map_single(port->dev, pdc->buf, PDC_BUFFER_SIZE, DMA_FROM_DEVICE); pdc->dma_size = PDC_BUFFER_SIZE; pdc->ofs = 0; } atmel_port->pdc_rx_idx = 0; UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr); UART_PUT_RCR(port, PDC_BUFFER_SIZE); UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr); UART_PUT_RNCR(port, PDC_BUFFER_SIZE); } if (atmel_use_dma_tx(port)) { struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; struct circ_buf *xmit = 
&port->state->xmit; pdc->buf = xmit->buf; pdc->dma_addr = dma_map_single(port->dev, pdc->buf, UART_XMIT_SIZE, DMA_TO_DEVICE); pdc->dma_size = UART_XMIT_SIZE; pdc->ofs = 0; } /* * If there is a specific "open" function (to register * control line interrupts) */ if (atmel_open_hook) { retval = atmel_open_hook(port); if (retval) { free_irq(port->irq, port); return retval; } } /* Save current CSR for comparison in atmel_tasklet_func() */ atmel_port->irq_status_prev = UART_GET_CSR(port); atmel_port->irq_status = atmel_port->irq_status_prev; /* * Finally, enable the serial port */ UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); /* enable xmit & rcvr */ UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); if (atmel_use_dma_rx(port)) { /* set UART timeout */ UART_PUT_RTOR(port, PDC_RX_TIMEOUT); UART_PUT_CR(port, ATMEL_US_STTTO); UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); /* enable PDC controller */ UART_PUT_PTCR(port, ATMEL_PDC_RXTEN); } else { /* enable receive only */ UART_PUT_IER(port, ATMEL_US_RXRDY); } return 0; } /* * Disable the port */ static void atmel_shutdown(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); /* * Ensure everything is stopped. */ atmel_stop_rx(port); atmel_stop_tx(port); /* * Shut-down the DMA. */ if (atmel_use_dma_rx(port)) { int i; for (i = 0; i < 2; i++) { struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; dma_unmap_single(port->dev, pdc->dma_addr, pdc->dma_size, DMA_FROM_DEVICE); kfree(pdc->buf); } } if (atmel_use_dma_tx(port)) { struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; dma_unmap_single(port->dev, pdc->dma_addr, pdc->dma_size, DMA_TO_DEVICE); } /* * Disable all interrupts, port and break condition. 
	 */
	UART_PUT_CR(port, ATMEL_US_RSTSTA);
	UART_PUT_IDR(port, -1);

	/*
	 * Free the interrupt
	 */
	free_irq(port->irq, port);

	/*
	 * If there is a specific "close" function (to unregister
	 * control line interrupts)
	 */
	if (atmel_close_hook)
		atmel_close_hook(port);
}

/*
 * Flush any TX data submitted for DMA. Called when the TX circular
 * buffer is reset.
 */
static void atmel_flush_buffer(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_dma_tx(port)) {
		UART_PUT_TCR(port, 0);
		atmel_port->pdc_tx.ofs = 0;
	}
}

/*
 * Power / Clock management.
 */
static void atmel_serial_pm(struct uart_port *port, unsigned int state,
			    unsigned int oldstate)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	switch (state) {
	case 0:
		/*
		 * Enable the peripheral clock for this serial port.
		 * This is called on uart_open() or a resume event.
		 */
		clk_enable(atmel_port->clk);

		/* re-enable interrupts if we disabled some on suspend */
		UART_PUT_IER(port, atmel_port->backup_imr);
		break;
	case 3:
		/* Back up the interrupt mask and disable all interrupts */
		atmel_port->backup_imr = UART_GET_IMR(port);
		UART_PUT_IDR(port, -1);

		/*
		 * Disable the peripheral clock for this serial port.
		 * This is called on uart_close() or a suspend event.
		 */
		clk_disable(atmel_port->clk);
		break;
	default:
		printk(KERN_ERR "atmel_serial: unknown pm %d\n", state);
	}
}

/*
 * Change the port parameters
 */
static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
			      struct ktermios *old)
{
	unsigned long flags;
	unsigned int mode, imr, quot, baud;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* Get current mode register */
	mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
					| ATMEL_US_NBSTOP | ATMEL_US_PAR
					| ATMEL_US_USMODE);

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
	quot = uart_get_divisor(port, baud);

	if (quot > 65535) {	/* BRGR is 16-bit, so switch to slower clock */
		quot /= 8;
		mode |= ATMEL_US_USCLKS_MCK_DIV8;
	}

	/* byte size */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		mode |= ATMEL_US_CHRL_5;
		break;
	case CS6:
		mode |= ATMEL_US_CHRL_6;
		break;
	case CS7:
		mode |= ATMEL_US_CHRL_7;
		break;
	default:
		mode |= ATMEL_US_CHRL_8;
		break;
	}

	/* stop bits */
	if (termios->c_cflag & CSTOPB)
		mode |= ATMEL_US_NBSTOP_2;

	/* parity */
	if (termios->c_cflag & PARENB) {
		/* Mark or Space parity */
		if (termios->c_cflag & CMSPAR) {
			if (termios->c_cflag & PARODD)
				mode |= ATMEL_US_PAR_MARK;
			else
				mode |= ATMEL_US_PAR_SPACE;
		} else if (termios->c_cflag & PARODD)
			mode |= ATMEL_US_PAR_ODD;
		else
			mode |= ATMEL_US_PAR_EVEN;
	} else
		mode |= ATMEL_US_PAR_NONE;

	/* hardware handshake (RTS/CTS) */
	if (termios->c_cflag & CRTSCTS)
		mode |= ATMEL_US_USMODE_HWHS;
	else
		mode |= ATMEL_US_USMODE_NORMAL;

	spin_lock_irqsave(&port->lock, flags);

	port->read_status_mask = ATMEL_US_OVRE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= ATMEL_US_RXBRK;

	if (atmel_use_dma_rx(port))
		/* need to enable error interrupts */
		UART_PUT_IER(port, port->read_status_mask);

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= (ATMEL_US_FRAME |
				ATMEL_US_PARE);
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= ATMEL_US_RXBRK;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= ATMEL_US_OVRE;
	}
	/* TODO: Ignore all characters if CREAD is set.*/

	/* update the per-port timeout */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * save/disable interrupts. The tty layer will ensure that the
	 * transmitter is empty if requested by the caller, so there's
	 * no need to wait for it here.
	 */
	imr = UART_GET_IMR(port);
	UART_PUT_IDR(port, -1);

	/* disable receiver and transmitter */
	UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);

	/* Resetting serial mode to RS232 (0x0) */
	mode &= ~ATMEL_US_USMODE;

	if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
			UART_PUT_TTGR(port,
					atmel_port->rs485.delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
	}

	/* set the parity, stop bits and data size */
	UART_PUT_MR(port, mode);

	/* set the baud rate */
	UART_PUT_BRGR(port, quot);
	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);

	/* restore interrupts */
	UART_PUT_IER(port, imr);

	/* CTS flow-control and modem-status interrupts */
	if (UART_ENABLE_MS(port, termios->c_cflag))
		port->ops->enable_ms(port);

	spin_unlock_irqrestore(&port->lock, flags);
}

/* Track line-discipline changes: enable HARDPPS DCD latching for N_PPS. */
static void atmel_set_ldisc(struct uart_port *port, int new)
{
	int line = port->line;

	if (line >= port->state->port.tty->driver->num)
		return;

	if (port->state->port.tty->ldisc->ops->num == N_PPS) {
		port->flags |= UPF_HARDPPS_CD;
		atmel_enable_ms(port);
	} else {
		port->flags &= ~UPF_HARDPPS_CD;
	}
}

/*
 * Return string describing the specified port
 */
static const char *atmel_type(struct uart_port *port)
{
	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
}

/*
 * Release the memory region(s) being used by 'port'.
 */
static void atmel_release_port(struct uart_port *port)
{
	struct platform_device *pdev = to_platform_device(port->dev);
	int size = pdev->resource[0].end - pdev->resource[0].start + 1;

	release_mem_region(port->mapbase, size);

	if (port->flags & UPF_IOREMAP) {
		iounmap(port->membase);
		port->membase = NULL;
	}
}

/*
 * Request the memory region(s) being used by 'port'.
 */
static int atmel_request_port(struct uart_port *port)
{
	struct platform_device *pdev = to_platform_device(port->dev);
	int size = pdev->resource[0].end - pdev->resource[0].start + 1;

	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
		return -EBUSY;

	if (port->flags & UPF_IOREMAP) {
		port->membase = ioremap(port->mapbase, size);
		if (port->membase == NULL) {
			release_mem_region(port->mapbase, size);
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Configure/autoconfigure the port.
 */
static void atmel_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_ATMEL;
		atmel_request_port(port);
	}
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 */
static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
		ret = -EINVAL;
	if (port->irq != ser->irq)
		ret = -EINVAL;
	if (ser->io_type != SERIAL_IO_MEM)
		ret = -EINVAL;
	if (port->uartclk / 16 != ser->baud_base)
		ret = -EINVAL;
	if ((void *)port->mapbase != ser->iomem_base)
		ret = -EINVAL;
	if (port->iobase != ser->port)
		ret = -EINVAL;
	if (ser->hub6 != 0)
		ret = -EINVAL;
	return ret;
}

#ifdef CONFIG_CONSOLE_POLL
/* Busy-wait for one received character (polled I/O, e.g. kgdb). */
static int atmel_poll_get_char(struct uart_port *port)
{
	while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
		cpu_relax();

	return UART_GET_CHAR(port);
}

/* Busy-wait until the transmitter accepts ch (polled I/O, e.g. kgdb). */
static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
{
	while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
		cpu_relax();

	UART_PUT_CHAR(port, ch);
}
#endif

/* Handle the RS485 get/set ioctls; anything else is -ENOIOCTLCMD. */
static int
atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
{
	struct serial_rs485 rs485conf;

	switch (cmd) {
	case TIOCSRS485:
		if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
					sizeof(rs485conf)))
			return -EFAULT;

		atmel_config_rs485(port, &rs485conf);
		break;

	case TIOCGRS485:
		if (copy_to_user((struct serial_rs485 *) arg,
					&(to_atmel_uart_port(port)->rs485),
					sizeof(rs485conf)))
			return -EFAULT;
		break;

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

static struct uart_ops atmel_pops = {
	.tx_empty	= atmel_tx_empty,
	.set_mctrl	= atmel_set_mctrl,
	.get_mctrl	= atmel_get_mctrl,
	.stop_tx	= atmel_stop_tx,
	.start_tx	= atmel_start_tx,
	.stop_rx	= atmel_stop_rx,
	.enable_ms	= atmel_enable_ms,
	.break_ctl	= atmel_break_ctl,
	.startup	= atmel_startup,
	.shutdown	= atmel_shutdown,
	.flush_buffer	= atmel_flush_buffer,
	.set_termios	= atmel_set_termios,
	.set_ldisc	= atmel_set_ldisc,
	.type		= atmel_type,
	.release_port	= atmel_release_port,
	.request_port	= atmel_request_port,
	.config_port	= atmel_config_port,
	.verify_port	= atmel_verify_port,
	.pm		= atmel_serial_pm,
	.ioctl		= atmel_ioctl,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= atmel_poll_get_char,
	.poll_put_char	= atmel_poll_put_char,
#endif
};

/*
 * Configure the port from the platform device resource info.
 */
static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
				      struct platform_device *pdev)
{
	struct uart_port *port = &atmel_port->uart;
	struct atmel_uart_data *data = pdev->dev.platform_data;

	port->iotype	= UPIO_MEM;
	port->flags	= UPF_BOOT_AUTOCONF;
	port->ops	= &atmel_pops;
	port->fifosize	= 1;
	port->line	= data->num;
	port->dev	= &pdev->dev;
	port->mapbase	= pdev->resource[0].start;
	port->irq	= pdev->resource[1].start;

	tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
			(unsigned long)port);

	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));

	if (data->regs)
		/* Already mapped by setup code */
		port->membase = data->regs;
	else {
		port->flags	|= UPF_IOREMAP;
		port->membase	= NULL;
	}

	/* for console, the clock could already be configured */
	if (!atmel_port->clk) {
		atmel_port->clk = clk_get(&pdev->dev, "usart");
		clk_enable(atmel_port->clk);
		port->uartclk = clk_get_rate(atmel_port->clk);
		clk_disable(atmel_port->clk);
		/* only enable clock when USART is in use */
	}

	atmel_port->use_dma_rx	= data->use_dma_rx;
	atmel_port->use_dma_tx	= data->use_dma_tx;
	atmel_port->rs485	= data->rs485;
	/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
	if (atmel_port->rs485.flags & SER_RS485_ENABLED)
		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
	else if (atmel_use_dma_tx(port)) {
		port->fifosize = PDC_BUFFER_SIZE;
		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
	} else {
		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}
}

/*
 * Register board-specific modem-control line handlers.
 */
void __init atmel_register_uart_fns(struct atmel_port_fns *fns)
{
	if (fns->enable_ms)
		atmel_pops.enable_ms = fns->enable_ms;
	if (fns->get_mctrl)
		atmel_pops.get_mctrl = fns->get_mctrl;
	if (fns->set_mctrl)
		atmel_pops.set_mctrl = fns->set_mctrl;
	atmel_open_hook		= fns->open;
	atmel_close_hook	= fns->close;
	atmel_pops.pm		= fns->pm;
	atmel_pops.set_wake	= fns->set_wake;
}

#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
/* Busy-wait until the transmitter is ready, then send ch. */
static void atmel_console_putchar(struct uart_port *port, int ch)
{
	while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
		cpu_relax();
	UART_PUT_CHAR(port, ch);
}

/*
 * Interrupts are disabled on entering
 */
static void atmel_console_write(struct console *co, const char *s, u_int count)
{
	struct uart_port *port = &atmel_ports[co->index].uart;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, imr;
	unsigned int pdc_tx;

	/*
	 * First, save IMR and then disable interrupts
	 */
	imr = UART_GET_IMR(port);
	UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);

	/* Store PDC transmit status and disable it */
	pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
	UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);

	uart_console_write(port, s, count, atmel_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore IMR
	 */
	do {
		status = UART_GET_CSR(port);
	} while (!(status & ATMEL_US_TXRDY));

	/* Restore PDC transmit status */
	if (pdc_tx)
		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);

	/* set interrupts back the way they were */
	UART_PUT_IER(port, imr);
}

/*
 * If the port was already initialised (eg, by a boot loader),
 * try to determine the current setup.
 */
static void __init atmel_console_get_options(struct uart_port *port, int *baud,
					     int *parity, int *bits)
{
	unsigned int mr, quot;

	/*
	 * If the baud rate generator isn't running, the port wasn't
	 * initialized by the boot loader.
	 */
	quot = UART_GET_BRGR(port) & ATMEL_US_CD;
	if (!quot)
		return;

	mr = UART_GET_MR(port) & ATMEL_US_CHRL;
	if (mr == ATMEL_US_CHRL_8)
		*bits = 8;
	else
		*bits = 7;

	mr = UART_GET_MR(port) & ATMEL_US_PAR;
	if (mr == ATMEL_US_PAR_EVEN)
		*parity = 'e';
	else if (mr == ATMEL_US_PAR_ODD)
		*parity = 'o';

	/*
	 * The serial core only rounds down when matching this to a
	 * supported baud rate. Make sure we don't end up slightly
	 * lower than one of those, as it would make us fall through
	 * to a much lower baud rate than we really want.
	 */
	*baud = port->uartclk / (16 * (quot - 1));
}

/* console->setup: bring the port up and apply (or probe) line settings. */
static int __init atmel_console_setup(struct console *co, char *options)
{
	struct uart_port *port = &atmel_ports[co->index].uart;
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (port->membase == NULL) {
		/* Port not initialized yet - delay setup */
		return -ENODEV;
	}

	clk_enable(atmel_ports[co->index].clk);

	UART_PUT_IDR(port, -1);
	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		atmel_console_get_options(port, &baud, &parity, &bits);

	return uart_set_options(port, co, baud, parity, bits, flow);
}

static struct uart_driver atmel_uart;

static struct console atmel_console = {
	.name		= ATMEL_DEVICENAME,
	.write		= atmel_console_write,
	.device		= uart_console_device,
	.setup		= atmel_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &atmel_uart,
};

#define ATMEL_CONSOLE_DEVICE	(&atmel_console)

/*
 * Early console initialization (before VM subsystem initialized).
 */
static int __init atmel_console_init(void)
{
	if (atmel_default_console_device) {
		add_preferred_console(ATMEL_DEVICENAME,
				      atmel_default_console_device->id, NULL);
		atmel_init_port(&atmel_ports[atmel_default_console_device->id],
				atmel_default_console_device);
		register_console(&atmel_console);
	}

	return 0;
}

console_initcall(atmel_console_init);

/*
 * Late console initialization.
*/ static int __init atmel_late_console_init(void) { if (atmel_default_console_device && !(atmel_console.flags & CON_ENABLED)) register_console(&atmel_console); return 0; } core_initcall(atmel_late_console_init); static inline bool atmel_is_console_port(struct uart_port *port) { return port->cons && port->cons->index == port->line; } #else #define ATMEL_CONSOLE_DEVICE NULL static inline bool atmel_is_console_port(struct uart_port *port) { return false; } #endif static struct uart_driver atmel_uart = { .owner = THIS_MODULE, .driver_name = "atmel_serial", .dev_name = ATMEL_DEVICENAME, .major = SERIAL_ATMEL_MAJOR, .minor = MINOR_START, .nr = ATMEL_MAX_UART, .cons = ATMEL_CONSOLE_DEVICE, }; #ifdef CONFIG_PM static bool atmel_serial_clk_will_stop(void) { #ifdef CONFIG_ARCH_AT91 return at91_suspend_entering_slow_clock(); #else return false; #endif } static int atmel_serial_suspend(struct platform_device *pdev, pm_message_t state) { struct uart_port *port = platform_get_drvdata(pdev); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (atmel_is_console_port(port) && console_suspend_enabled) { /* Drain the TX shifter */ while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY)) cpu_relax(); } /* we can not wake up if we're running on slow clock */ atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); if (atmel_serial_clk_will_stop()) device_set_wakeup_enable(&pdev->dev, 0); uart_suspend_port(&atmel_uart, port); return 0; } static int atmel_serial_resume(struct platform_device *pdev) { struct uart_port *port = platform_get_drvdata(pdev); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); uart_resume_port(&atmel_uart, port); device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); return 0; } #else #define atmel_serial_suspend NULL #define atmel_serial_resume NULL #endif static int __devinit atmel_serial_probe(struct platform_device *pdev) { struct atmel_uart_port *port; struct atmel_uart_data *pdata = pdev->dev.platform_data; void *data; int ret; 
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1)); port = &atmel_ports[pdata->num]; port->backup_imr = 0; atmel_init_port(port, pdev); if (!atmel_use_dma_rx(&port->uart)) { ret = -ENOMEM; data = kmalloc(sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL); if (!data) goto err_alloc_ring; port->rx_ring.buf = data; } ret = uart_add_one_port(&atmel_uart, &port->uart); if (ret) goto err_add_port; #ifdef CONFIG_SERIAL_ATMEL_CONSOLE if (atmel_is_console_port(&port->uart) && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) { /* * The serial core enabled the clock for us, so undo * the clk_enable() in atmel_console_setup() */ clk_disable(port->clk); } #endif device_init_wakeup(&pdev->dev, 1); platform_set_drvdata(pdev, port); if (port->rs485.flags & SER_RS485_ENABLED) { UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL); UART_PUT_CR(&port->uart, ATMEL_US_RTSEN); } return 0; err_add_port: kfree(port->rx_ring.buf); port->rx_ring.buf = NULL; err_alloc_ring: if (!atmel_is_console_port(&port->uart)) { clk_put(port->clk); port->clk = NULL; } return ret; } static int __devexit atmel_serial_remove(struct platform_device *pdev) { struct uart_port *port = platform_get_drvdata(pdev); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); int ret = 0; device_init_wakeup(&pdev->dev, 0); platform_set_drvdata(pdev, NULL); ret = uart_remove_one_port(&atmel_uart, port); tasklet_kill(&atmel_port->tasklet); kfree(atmel_port->rx_ring.buf); /* "port" is allocated statically, so we shouldn't free it */ clk_put(atmel_port->clk); return ret; } static struct platform_driver atmel_serial_driver = { .probe = atmel_serial_probe, .remove = __devexit_p(atmel_serial_remove), .suspend = atmel_serial_suspend, .resume = atmel_serial_resume, .driver = { .name = "atmel_usart", .owner = THIS_MODULE, }, }; static int __init atmel_serial_init(void) { int ret; ret = uart_register_driver(&atmel_uart); if (ret) return ret; ret = platform_driver_register(&atmel_serial_driver); if 
(ret) uart_unregister_driver(&atmel_uart); return ret; } static void __exit atmel_serial_exit(void) { platform_driver_unregister(&atmel_serial_driver); uart_unregister_driver(&atmel_uart); } module_init(atmel_serial_init); module_exit(atmel_serial_exit); MODULE_AUTHOR("Rick Bronson"); MODULE_DESCRIPTION("Atmel AT91 / AT32 serial port driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:atmel_usart");
gpl-2.0
s05427226/linux
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
1265
6706
/*
 * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/list_sort.h>

#include <linux/interval_tree_generic.h>
#include "usnic_uiom_interval_tree.h"

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

/*
 * Allocate a node for [start, end]; on failure set err = -ENOMEM and
 * jump to the caller-supplied err_out label.  Every user of this macro
 * must therefore provide an err_out cleanup path.
 */
#define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out)	\
		do {							\
			node = usnic_uiom_interval_node_alloc(start,	\
					end, ref_cnt, flags);		\
				if (!node) {				\
					err = -ENOMEM;			\
					goto err_out;			\
				}					\
		} while (0)

#define MARK_FOR_ADD(node, list) (list_add_tail(&node->link, list))

/* Allocate a node and queue it on @list in one step (shares err_out). */
#define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err,	\
				err_out, list)				\
				do {					\
					MAKE_NODE(node, start, end,	\
						ref_cnt, flags, err,	\
						err_out);		\
					MARK_FOR_ADD(node, list);	\
				} while (0)

#define FLAGS_EQUAL(flags1, flags2, mask)				\
	(((flags1) & (mask)) == ((flags2) & (mask)))

/*
 * Allocate and initialize an interval node covering [start, last].
 * GFP_ATOMIC: callers may hold locks — TODO confirm against callers.
 */
static struct usnic_uiom_interval_node*
usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt,
				int flags)
{
	struct usnic_uiom_interval_node *interval = kzalloc(sizeof(*interval),
								GFP_ATOMIC);
	if (!interval)
		return NULL;

	interval->start = start;
	interval->last = last;
	interval->flags = flags;
	interval->ref_cnt = ref_cnt;

	return interval;
}

/* list_sort() comparator: ascending by interval start. */
static int interval_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct usnic_uiom_interval_node *node_a, *node_b;

	node_a = list_entry(a, struct usnic_uiom_interval_node, link);
	node_b = list_entry(b, struct usnic_uiom_interval_node, link);

	/* long to int */
	if (node_a->start < node_b->start)
		return -1;
	else if (node_a->start > node_b->start)
		return 1;

	return 0;
}

/*
 * Collect every tree node overlapping [start, last] into @list,
 * sorted by start.  The nodes stay in the tree; only their ->link
 * list heads are borrowed.
 */
static void
find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
					unsigned long last,
					struct list_head *list)
{
	struct usnic_uiom_interval_node *node;

	INIT_LIST_HEAD(list);

	for (node = usnic_uiom_interval_tree_iter_first(root, start, last);
		node;
		node = usnic_uiom_interval_tree_iter_next(node, start, last))
		list_add_tail(&node->link, list);

	list_sort(NULL, list, interval_cmp);
}

/*
 * Compute the sub-ranges of [start, last] that are NOT already covered
 * (with matching flags under flag_mask) by intervals in @root, and
 * return them as freshly allocated nodes on @diff_set.  On allocation
 * failure, frees everything queued so far and returns -ENOMEM.
 * Caller owns the nodes on diff_set (see usnic_uiom_put_interval_set).
 */
int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last,
					int flags, int flag_mask,
					struct rb_root *root,
					struct list_head *diff_set)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	int err = 0;
	long int pivot = start;
	LIST_HEAD(intersection_set);

	INIT_LIST_HEAD(diff_set);

	find_intervals_intersection_sorted(root, start, last,
						&intersection_set);

	list_for_each_entry(interval, &intersection_set, link) {
		if (pivot < interval->start) {
			/* Gap before this interval: it belongs to the diff. */
			MAKE_NODE_AND_APPEND(tmp, pivot, interval->start - 1,
						1, flags, err, err_out,
						diff_set);
			pivot = interval->start;
		}

		/*
		 * Invariant: Set [start, pivot] is either in diff_set or root,
		 * but not in both.
		 */

		if (pivot > interval->last) {
			continue;
		} else if (pivot <= interval->last &&
				FLAGS_EQUAL(interval->flags, flags,
				flag_mask)) {
			pivot = interval->last + 1;
		}
	}

	/* Trailing gap after the last overlapping interval. */
	if (pivot <= last)
		MAKE_NODE_AND_APPEND(tmp, pivot, last, 1, flags, err, err_out,
					diff_set);

	return 0;

err_out:
	list_for_each_entry_safe(interval, tmp, diff_set, link) {
		list_del(&interval->link);
		kfree(interval);
	}

	return err;
}

/* Free a list of interval nodes produced by the functions above. */
void usnic_uiom_put_interval_set(struct list_head *intervals)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	list_for_each_entry_safe(interval, tmp, intervals, link)
		kfree(interval);
}

/*
 * Insert [start, last] with @flags into the tree, splitting and
 * merging against existing intervals: overlapped pieces get their
 * ref_cnt bumped and flags OR-ed; uncovered pieces are created with
 * ref_cnt 1.  The original overlapping nodes are replaced wholesale.
 * Returns 0 or -ENOMEM (tree unchanged on failure).
 */
int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start,
				unsigned long last, int flags)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long istart, ilast;
	int iref_cnt, iflags;
	unsigned long lpivot = start;
	int err = 0;
	LIST_HEAD(to_add);
	LIST_HEAD(intersection_set);

	find_intervals_intersection_sorted(root, start, last,
						&intersection_set);

	list_for_each_entry(interval, &intersection_set, link) {
		/*
		 * Invariant - lpivot is the left edge of next interval to be
		 * inserted
		 */
		istart = interval->start;
		ilast = interval->last;
		iref_cnt = interval->ref_cnt;
		iflags = interval->flags;

		if (istart < lpivot) {
			/* Existing interval sticks out to the left: keep it as-is. */
			MAKE_NODE_AND_APPEND(tmp, istart, lpivot - 1, iref_cnt,
						iflags, err, err_out, &to_add);
		} else if (istart > lpivot) {
			/* Uncovered gap before the existing interval. */
			MAKE_NODE_AND_APPEND(tmp, lpivot, istart - 1, 1,
						flags, err, err_out, &to_add);
			lpivot = istart;
		} else {
			lpivot = istart;
		}

		if (ilast > last) {
			/* Overlapped part merges; the overhang keeps old refs. */
			MAKE_NODE_AND_APPEND(tmp, lpivot, last, iref_cnt + 1,
						iflags | flags, err, err_out,
						&to_add);
			MAKE_NODE_AND_APPEND(tmp, last + 1, ilast, iref_cnt,
						iflags, err, err_out, &to_add);
		} else {
			MAKE_NODE_AND_APPEND(tmp, lpivot, ilast, iref_cnt + 1,
						iflags | flags, err, err_out,
						&to_add);
		}

		lpivot = ilast + 1;
	}

	/* Remainder past the last overlapping interval. */
	if (lpivot <= last)
		MAKE_NODE_AND_APPEND(tmp, lpivot, last, 1, flags, err, err_out,
					&to_add);

	/* Swap: drop the old overlapping nodes, insert the rebuilt set. */
	list_for_each_entry_safe(interval, tmp, &intersection_set, link) {
		usnic_uiom_interval_tree_remove(interval, root);
		kfree(interval);
	}

	list_for_each_entry(interval, &to_add, link)
		usnic_uiom_interval_tree_insert(interval, root);

	return 0;

err_out:
	list_for_each_entry_safe(interval, tmp, &to_add, link)
		kfree(interval);

	return err;
}

/*
 * Drop one reference from every interval overlapping [start, last];
 * intervals reaching ref_cnt 0 are unlinked from the tree and handed
 * back on @removed for the caller to free.
 */
void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start,
				unsigned long last, struct list_head *removed)
{
	struct usnic_uiom_interval_node *interval;

	for (interval = usnic_uiom_interval_tree_iter_first(root, start, last);
			interval;
			interval = usnic_uiom_interval_tree_iter_next(interval,
									start,
									last)) {
		if (--interval->ref_cnt == 0)
			list_add_tail(&interval->link, removed);
	}

	list_for_each_entry(interval, removed, link)
		usnic_uiom_interval_tree_remove(interval, root);
}

INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb,
			unsigned long, __subtree_last,
			START, LAST, , usnic_uiom_interval_tree)
gpl-2.0
PrasannaC/Galaxy_Fit_s5670-msm7x27_kernel
sound/pcmcia/vx/vxp_ops.c
1777
15841
/*
 * Driver for Digigram VXpocket soundcards
 *
 * lowlevel routines for VXpocket soundcards
 *
 * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <sound/core.h>
#include <asm/io.h>
#include "vxpocket.h"

/* I/O offset of each logical register relative to the card's base port. */
static int vxp_reg_offset[VX_REG_MAX] = {
	[VX_ICR]	= 0x00,		// ICR
	[VX_CVR]	= 0x01,		// CVR
	[VX_ISR]	= 0x02,		// ISR
	[VX_IVR]	= 0x03,		// IVR
	[VX_RXH]	= 0x05,		// RXH
	[VX_RXM]	= 0x06,		// RXM
	[VX_RXL]	= 0x07,		// RXL
	[VX_DMA]	= 0x04,		// DMA
	[VX_CDSP]	= 0x08,		// CDSP
	[VX_LOFREQ]	= 0x09,		// LFREQ
	[VX_HIFREQ]	= 0x0a,		// HFREQ
	[VX_DATA]	= 0x0b,		// DATA
	[VX_MICRO]	= 0x0c,		// MICRO
	[VX_DIALOG]	= 0x0d,		// DIALOG
	[VX_CSUER]	= 0x0e,		// CSUER
	[VX_RUER]	= 0x0f,		// RUER
};

/* Translate a logical register index to its absolute I/O port address. */
static inline unsigned long vxp_reg_addr(struct vx_core *_chip, int reg)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
	return chip->port + vxp_reg_offset[reg];
}

/*
 * snd_vx_inb - read a byte from the register
 * @offset: register offset
 */
static unsigned char vxp_inb(struct vx_core *chip, int offset)
{
	return inb(vxp_reg_addr(chip, offset));
}

/*
 * snd_vx_outb - write a byte on the register
 * @offset: the register offset
 * @val: the value to write
 */
static void vxp_outb(struct vx_core *chip, int offset, unsigned char val)
{
	outb(val, vxp_reg_addr(chip, offset));
}

/*
 * redefine macros to call directly
 */
#undef vx_inb
#define vx_inb(chip,reg)	vxp_inb((struct vx_core *)(chip), VX_##reg)
#undef vx_outb
#define vx_outb(chip,reg,val)	vxp_outb((struct vx_core *)(chip), VX_##reg,val)

/*
 * vx_check_magic - check the magic word on xilinx
 *
 * returns zero if a magic word is detected, or a negative error code.
 * Polls CDSP every 10ms for up to 200ms (HZ/5 jiffies).
 */
static int vx_check_magic(struct vx_core *chip)
{
	unsigned long end_time = jiffies + HZ / 5;
	int c;
	do {
		c = vx_inb(chip, CDSP);
		if (c == CDSP_MAGIC)
			return 0;
		msleep(10);
	} while (time_after_eq(end_time, jiffies));
	snd_printk(KERN_ERR "cannot find xilinx magic word (%x)\n", c);
	return -EIO;
}

/*
 * vx_reset_dsp - reset the DSP
 */

#define XX_DSP_RESET_WAIT_TIME		2	/* ms */

static void vxp_reset_dsp(struct vx_core *_chip)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;

	/* set the reset dsp bit to 1 */
	vx_outb(chip, CDSP, chip->regCDSP | VXP_CDSP_DSP_RESET_MASK);
	vx_inb(chip, CDSP);	/* dummy read flushes the write to the bus */
	mdelay(XX_DSP_RESET_WAIT_TIME);
	/* reset the bit */
	chip->regCDSP &= ~VXP_CDSP_DSP_RESET_MASK;
	vx_outb(chip, CDSP, chip->regCDSP);
	vx_inb(chip, CDSP);
	mdelay(XX_DSP_RESET_WAIT_TIME);
}

/*
 * reset codec bit
 */
static void vxp_reset_codec(struct vx_core *_chip)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;

	/* Set the reset CODEC bit to 1. */
	vx_outb(chip, CDSP, chip->regCDSP | VXP_CDSP_CODEC_RESET_MASK);
	vx_inb(chip, CDSP);
	msleep(10);
	/* Set the reset CODEC bit to 0. */
	chip->regCDSP &= ~VXP_CDSP_CODEC_RESET_MASK;
	vx_outb(chip, CDSP, chip->regCDSP);
	vx_inb(chip, CDSP);
	msleep(1);
}

/*
 * vx_load_xilinx_binary - load the xilinx binary image
 * the binary image is the binary array converted from the bitstream file.
 *
 * Every byte written to TXL is echoed back on RXL and verified; a
 * mismatch is logged but does not abort the download.  On any handshake
 * timeout the saved CSUER/RUER registers are restored and -EIO returned.
 */
static int vxp_load_xilinx_binary(struct vx_core *_chip, const struct firmware *fw)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
	unsigned int i;
	int c;
	int regCSUER, regRUER;
	const unsigned char *image;
	unsigned char data;

	/* Switch to programmation mode */
	chip->regDIALOG |= VXP_DLG_XILINX_REPROG_MASK;
	vx_outb(chip, DIALOG, chip->regDIALOG);

	/* Save register CSUER and RUER */
	regCSUER = vx_inb(chip, CSUER);
	regRUER = vx_inb(chip, RUER);

	/* reset HF0 and HF1 */
	vx_outb(chip, ICR, 0);

	/* Wait for answer HF2 equal to 1 */
	snd_printdd(KERN_DEBUG "check ISR_HF2\n");
	if (vx_check_isr(_chip, ISR_HF2, ISR_HF2, 20) < 0)
		goto _error;

	/* set HF1 for loading xilinx binary */
	vx_outb(chip, ICR, ICR_HF1);
	image = fw->data;
	for (i = 0; i < fw->size; i++, image++) {
		data = *image;
		if (vx_wait_isr_bit(_chip, ISR_TX_EMPTY) < 0)
			goto _error;
		vx_outb(chip, TXL, data);
		/* wait for reading */
		if (vx_wait_for_rx_full(_chip) < 0)
			goto _error;
		c = vx_inb(chip, RXL);
		if (c != (int)data)
			snd_printk(KERN_ERR "vxpocket: load xilinx mismatch at %d: 0x%x != 0x%x\n", i, c, (int)data);
	}

	/* reset HF1 */
	vx_outb(chip, ICR, 0);

	/* wait for HF3 */
	if (vx_check_isr(_chip, ISR_HF3, ISR_HF3, 20) < 0)
		goto _error;

	/* read the number of bytes received */
	if (vx_wait_for_rx_full(_chip) < 0)
		goto _error;

	c = (int)vx_inb(chip, RXH) << 16;
	c |= (int)vx_inb(chip, RXM) << 8;
	c |= vx_inb(chip, RXL);

	/* NOTE(review): %Zx is a deprecated size_t specifier; modern kernels use %zx */
	snd_printdd(KERN_DEBUG "xilinx: dsp size received 0x%x, orig 0x%Zx\n", c, fw->size);

	vx_outb(chip, ICR, ICR_HF0);

	/* TEMPO 250ms : wait until Xilinx is downloaded */
	msleep(300);

	/* test magical word */
	if (vx_check_magic(_chip) < 0)
		goto _error;

	/* Restore register 0x0E and 0x0F (thus replacing COR and FCSR) */
	vx_outb(chip, CSUER, regCSUER);
	vx_outb(chip, RUER, regRUER);

	/* Reset the Xilinx's signal enabling IO access */
	chip->regDIALOG |= VXP_DLG_XILINX_REPROG_MASK;
	vx_outb(chip, DIALOG, chip->regDIALOG);
	vx_inb(chip, DIALOG);
	msleep(10);
	chip->regDIALOG &= ~VXP_DLG_XILINX_REPROG_MASK;
	vx_outb(chip, DIALOG, chip->regDIALOG);
	vx_inb(chip, DIALOG);

	/* Reset of the Codec */
	vxp_reset_codec(_chip);
	vx_reset_dsp(_chip);

	return 0;

 _error:
	vx_outb(chip, CSUER, regCSUER);
	vx_outb(chip, RUER, regRUER);
	chip->regDIALOG &= ~VXP_DLG_XILINX_REPROG_MASK;
	vx_outb(chip, DIALOG, chip->regDIALOG);
	return -EIO;
}

/*
 * vxp_load_dsp - load_dsp callback
 * Dispatches the four firmware stages: 0 = xilinx boot, 1 = xilinx
 * image, 2 = DSP boot, 3 = DSP image.
 */
static int vxp_load_dsp(struct vx_core *vx, int index, const struct firmware *fw)
{
	int err;

	switch (index) {
	case 0:
		/* xilinx boot */
		if ((err = vx_check_magic(vx)) < 0)
			return err;
		if ((err = snd_vx_load_boot_image(vx, fw)) < 0)
			return err;
		return 0;
	case 1:
		/* xilinx image */
		return vxp_load_xilinx_binary(vx, fw);
	case 2:
		/* DSP boot */
		return snd_vx_dsp_boot(vx, fw);
	case 3:
		/* DSP image */
		return snd_vx_dsp_load(vx, fw);
	default:
		snd_BUG();
		return -EINVAL;
	}
}

/*
 * vx_test_and_ack - test and acknowledge interrupt
 *
 * called from irq hander, too
 *
 * spinlock held!
 */
static int vxp_test_and_ack(struct vx_core *_chip)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;

	/* not booted yet? */
	if (! (_chip->chip_status & VX_STAT_XILINX_LOADED))
		return -ENXIO;

	if (! (vx_inb(chip, DIALOG) & VXP_DLG_MEMIRQ_MASK))
		return -EIO;

	/* ok, interrupts generated, now ack it */
	/* set ACQUIT bit up and down */
	vx_outb(chip, DIALOG, chip->regDIALOG | VXP_DLG_ACK_MEMIRQ_MASK);
	/* useless read just to spend some time and maintain
	 * the ACQUIT signal up for a while ( a bus cycle )
	 */
	vx_inb(chip, DIALOG);
	vx_outb(chip, DIALOG, chip->regDIALOG & ~VXP_DLG_ACK_MEMIRQ_MASK);

	return 0;
}

/*
 * vx_validate_irq - enable/disable IRQ
 */
static void vxp_validate_irq(struct vx_core *_chip, int enable)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;

	/* Set the interrupt enable bit to 1 in CDSP register */
	if (enable)
		chip->regCDSP |= VXP_CDSP_VALID_IRQ_MASK;
	else
		chip->regCDSP &= ~VXP_CDSP_VALID_IRQ_MASK;
	vx_outb(chip, CDSP, chip->regCDSP);
}

/*
 * vx_setup_pseudo_dma - set up the pseudo dma read/write mode.
 * @do_write: 0 = read, 1 = set up for DMA write
 */
static void vx_setup_pseudo_dma(struct vx_core *_chip, int do_write)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;

	/* Interrupt mode and HREQ pin enabled for host transmit / receive data transfers */
	vx_outb(chip, ICR, do_write ? ICR_TREQ : ICR_RREQ);

	/* Reset the pseudo-dma register */
	vx_inb(chip, ISR);
	vx_outb(chip, ISR, 0);

	/* Select DMA in read/write transfer mode and in 16-bit accesses */
	chip->regDIALOG |= VXP_DLG_DMA16_SEL_MASK;
	chip->regDIALOG |= do_write ? VXP_DLG_DMAWRITE_SEL_MASK : VXP_DLG_DMAREAD_SEL_MASK;
	vx_outb(chip, DIALOG, chip->regDIALOG);
}

/*
 * vx_release_pseudo_dma - disable the pseudo-DMA mode
 */
static void vx_release_pseudo_dma(struct vx_core *_chip)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;

	/* Disable DMA and 16-bit accesses */
	chip->regDIALOG &= ~(VXP_DLG_DMAWRITE_SEL_MASK|
			     VXP_DLG_DMAREAD_SEL_MASK|
			     VXP_DLG_DMA16_SEL_MASK);
	vx_outb(chip, DIALOG, chip->regDIALOG);
	/* HREQ pin disabled. */
	vx_outb(chip, ICR, 0);
}

/*
 * vx_pseudo_dma_write - write bulk data on pseudo-DMA mode
 * @count: data length to transfer in bytes
 *
 * data size must be aligned to 6 bytes to ensure the 24bit alignment on DSP.
 * NB: call with a certain lock!
 *
 * The ring buffer may wrap: the first loop handles the tail up to
 * buffer_bytes, the second the remainder from the buffer start.
 */
static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
			  struct vx_pipe *pipe, int count)
{
	long port = vxp_reg_addr(chip, VX_DMA);
	int offset = pipe->hw_ptr;
	unsigned short *addr = (unsigned short *)(runtime->dma_area + offset);

	vx_setup_pseudo_dma(chip, 1);
	if (offset + count > pipe->buffer_bytes) {
		int length = pipe->buffer_bytes - offset;
		count -= length;
		length >>= 1; /* in 16bit words */
		/* Transfer using pseudo-dma. */
		while (length-- > 0) {
			outw(cpu_to_le16(*addr), port);
			addr++;
		}
		addr = (unsigned short *)runtime->dma_area;
		pipe->hw_ptr = 0;
	}
	pipe->hw_ptr += count;
	count >>= 1; /* in 16bit words */
	/* Transfer using pseudo-dma. */
	while (count-- > 0) {
		outw(cpu_to_le16(*addr), port);
		addr++;
	}
	vx_release_pseudo_dma(chip);
}

/*
 * vx_pseudo_dma_read - read bulk data on pseudo DMA mode
 * @offset: buffer offset in bytes
 * @count: data length to transfer in bytes
 *
 * the read length must be aligned to 6 bytes, as well as write.
 * NB: call with a certain lock!
 *
 * The last 16-bit word is deliberately read AFTER turning off the
 * DMA-read select (see the `count-- > 1` loop bound), then 16-bit
 * access mode and HREQ are disabled — presumably a hardware handshake
 * requirement; do not "fix" the loop bound to > 0.
 */
static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
			 struct vx_pipe *pipe, int count)
{
	struct snd_vxpocket *pchip = (struct snd_vxpocket *)chip;
	long port = vxp_reg_addr(chip, VX_DMA);
	int offset = pipe->hw_ptr;
	unsigned short *addr = (unsigned short *)(runtime->dma_area + offset);

	if (snd_BUG_ON(count % 2))
		return;
	vx_setup_pseudo_dma(chip, 0);
	if (offset + count > pipe->buffer_bytes) {
		int length = pipe->buffer_bytes - offset;
		count -= length;
		length >>= 1; /* in 16bit words */
		/* Transfer using pseudo-dma. */
		while (length-- > 0)
			*addr++ = le16_to_cpu(inw(port));
		addr = (unsigned short *)runtime->dma_area;
		pipe->hw_ptr = 0;
	}
	pipe->hw_ptr += count;
	count >>= 1; /* in 16bit words */
	/* Transfer using pseudo-dma. */
	while (count-- > 1)
		*addr++ = le16_to_cpu(inw(port));
	/* Disable DMA */
	pchip->regDIALOG &= ~VXP_DLG_DMAREAD_SEL_MASK;
	vx_outb(chip, DIALOG, pchip->regDIALOG);
	/* Read the last word (16 bits) */
	*addr = le16_to_cpu(inw(port));
	/* Disable 16-bit accesses */
	pchip->regDIALOG &= ~VXP_DLG_DMA16_SEL_MASK;
	vx_outb(chip, DIALOG, pchip->regDIALOG);
	/* HREQ pin disabled. */
	vx_outb(chip, ICR, 0);
}

/*
 * write a codec data (24bit)
 */
static void vxp_write_codec_reg(struct vx_core *chip, int codec, unsigned int data)
{
	int i;

	/* Activate access to the corresponding codec register */
	if (! codec)
		vx_inb(chip, LOFREQ);
	else
		vx_inb(chip, CODEC2);

	/* We have to send 24 bits (3 x 8 bits). Start with most signif. Bit */
	for (i = 0; i < 24; i++, data <<= 1)
		vx_outb(chip, DATA, ((data & 0x800000) ? VX_DATA_CODEC_MASK : 0));
	/* Terminate access to codec registers */
	vx_inb(chip, HIFREQ);
}

/*
 * vx_set_mic_boost - set mic boost level (on vxp440 only)
 * @boost: 0 = 20dB, 1 = +38dB
 */
void vx_set_mic_boost(struct vx_core *chip, int boost)
{
	struct snd_vxpocket *pchip = (struct snd_vxpocket *)chip;
	unsigned long flags;

	if (chip->chip_status & VX_STAT_IS_STALE)
		return;

	spin_lock_irqsave(&chip->lock, flags);
	if (pchip->regCDSP & P24_CDSP_MICS_SEL_MASK) {
		if (boost) {
			/* boost: 38 dB */
			pchip->regCDSP &= ~P24_CDSP_MIC20_SEL_MASK;
			pchip->regCDSP |=  P24_CDSP_MIC38_SEL_MASK;
		} else {
			/* minimum value: 20 dB */
			pchip->regCDSP |=  P24_CDSP_MIC20_SEL_MASK;
			pchip->regCDSP &= ~P24_CDSP_MIC38_SEL_MASK;
		}
		vx_outb(chip, CDSP, pchip->regCDSP);
	}
	spin_unlock_irqrestore(&chip->lock, flags);
}

/*
 * remap the linear value (0-8) to the actual value (0-15)
 */
static int vx_compute_mic_level(int level)
{
	switch (level) {
	case 5: level = 6 ; break;
	case 6: level = 8 ; break;
	case 7: level = 11; break;
	case 8: level = 15; break;
	default: break ;
	}
	return level;
}

/*
 * vx_set_mic_level - set mic level (on vxpocket only)
 * @level: the mic level = 0 - 8 (max)
 */
void vx_set_mic_level(struct vx_core *chip, int level)
{
	struct snd_vxpocket *pchip = (struct snd_vxpocket *)chip;
	unsigned long flags;

	if (chip->chip_status & VX_STAT_IS_STALE)
		return;

	spin_lock_irqsave(&chip->lock, flags);
	if (pchip->regCDSP & VXP_CDSP_MIC_SEL_MASK) {
		level = vx_compute_mic_level(level);
		vx_outb(chip, MICRO, level);
	}
	spin_unlock_irqrestore(&chip->lock, flags);
}

/*
 * change the input audio source
 */
static void vxp_change_audio_source(struct vx_core *_chip, int src)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;

	switch (src) {
	case VX_AUDIO_SRC_DIGITAL:
		chip->regCDSP |= VXP_CDSP_DATAIN_SEL_MASK;
		vx_outb(chip, CDSP, chip->regCDSP);
		break;
	case VX_AUDIO_SRC_LINE:
		chip->regCDSP &= ~VXP_CDSP_DATAIN_SEL_MASK;
		if (_chip->type == VX_TYPE_VXP440)
			chip->regCDSP &= ~P24_CDSP_MICS_SEL_MASK;
		else
			chip->regCDSP &= ~VXP_CDSP_MIC_SEL_MASK;
		vx_outb(chip, CDSP, chip->regCDSP);
		break;
	case VX_AUDIO_SRC_MIC:
		chip->regCDSP &= ~VXP_CDSP_DATAIN_SEL_MASK;
		/* reset mic levels */
		if (_chip->type == VX_TYPE_VXP440) {
			chip->regCDSP &= ~P24_CDSP_MICS_SEL_MASK;
			if (chip->mic_level)
				chip->regCDSP |= P24_CDSP_MIC38_SEL_MASK;
			else
				chip->regCDSP |= P24_CDSP_MIC20_SEL_MASK;
			vx_outb(chip, CDSP, chip->regCDSP);
		} else {
			chip->regCDSP |= VXP_CDSP_MIC_SEL_MASK;
			vx_outb(chip, CDSP, chip->regCDSP);
			vx_outb(chip, MICRO, vx_compute_mic_level(chip->mic_level));
		}
		break;
	}
}

/*
 * change the clock source
 * source = INTERNAL_QUARTZ or UER_SYNC
 */
static void vxp_set_clock_source(struct vx_core *_chip, int source)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;

	if (source == INTERNAL_QUARTZ)
		chip->regCDSP &= ~VXP_CDSP_CLOCKIN_SEL_MASK;
	else
		chip->regCDSP |= VXP_CDSP_CLOCKIN_SEL_MASK;
	vx_outb(chip, CDSP, chip->regCDSP);
}

/*
 * reset the board
 * Only clears the cached shadow registers; the actual register writes
 * happen in the subsequent per-feature setup calls.
 */
static void vxp_reset_board(struct vx_core *_chip, int cold_reset)
{
	struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;

	chip->regCDSP = 0;
	chip->regDIALOG = 0;
}

/*
 * callbacks
 */
/* exported */
struct snd_vx_ops snd_vxpocket_ops = {
	.in8 = vxp_inb,
	.out8 = vxp_outb,
	.test_and_ack = vxp_test_and_ack,
	.validate_irq = vxp_validate_irq,
	.write_codec = vxp_write_codec_reg,
	.reset_codec = vxp_reset_codec,
	.change_audio_source = vxp_change_audio_source,
	.set_clock_source = vxp_set_clock_source,
	.load_dsp = vxp_load_dsp,
	.add_controls = vxp_add_mic_controls,
	.reset_dsp = vxp_reset_dsp,
	.reset_board = vxp_reset_board,
	.dma_write = vxp_dma_write,
	.dma_read = vxp_dma_read,
};
gpl-2.0
CyanogenMod/android_kernel_samsung_trlte
drivers/media/dvb-frontends/isl6421.c
3569
4526
/*
 * isl6421.h - driver for lnb supply and control ic ISL6421
 *
 * Copyright (C) 2006 Andrew de Quincey
 * Copyright (C) 2006 Oliver Endriss
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
 *
 *
 * the project's page is at http://www.linuxtv.org
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>

#include "dvb_frontend.h"
#include "isl6421.h"

/* Per-device state: one cached config byte plus the i2c routing info. */
struct isl6421 {
	u8			config;		/* shadow of the chip's config register */
	u8			override_or;	/* bits the board forces to '1' */
	u8			override_and;	/* bits the board forces to '0' (inverted mask) */
	struct i2c_adapter	*i2c;
	u8			i2c_addr;
};

/*
 * Apply the board override masks to the cached config byte and push it
 * to the chip in a single-byte i2c write.  Returns 0 on success, -EIO
 * when the transfer is not acknowledged.
 */
static int isl6421_write_config(struct isl6421 *state)
{
	struct i2c_msg msg = {
		.addr	= state->i2c_addr,
		.flags	= 0,
		.buf	= &state->config,
		.len	= sizeof(state->config),
	};

	state->config |= state->override_or;
	state->config &= state->override_and;

	return (i2c_transfer(state->i2c, &msg, 1) == 1) ? 0 : -EIO;
}

/* set_voltage callback: select 13V/18V LNB supply or switch it off. */
static int isl6421_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
	struct isl6421 *state = (struct isl6421 *) fe->sec_priv;

	/* Start from "output disabled" and re-enable as requested. */
	state->config &= ~(ISL6421_VSEL1 | ISL6421_EN1);

	if (voltage == SEC_VOLTAGE_13)
		state->config |= ISL6421_EN1;
	else if (voltage == SEC_VOLTAGE_18)
		state->config |= (ISL6421_EN1 | ISL6421_VSEL1);
	else if (voltage != SEC_VOLTAGE_OFF)
		return -EINVAL;

	return isl6421_write_config(state);
}

/* enable_high_lnb_voltage callback: toggle the long-line compensation bit. */
static int isl6421_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg)
{
	struct isl6421 *state = (struct isl6421 *) fe->sec_priv;

	if (arg)
		state->config |= ISL6421_LLC1;
	else
		state->config &= ~ISL6421_LLC1;

	return isl6421_write_config(state);
}

/* set_tone callback (only wired up when override_tone was requested). */
static int isl6421_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
	struct isl6421 *state = (struct isl6421 *) fe->sec_priv;

	if (tone == SEC_TONE_ON)
		state->config |= ISL6421_ENT1;
	else if (tone == SEC_TONE_OFF)
		state->config &= ~ISL6421_ENT1;
	else
		return -EINVAL;

	return isl6421_write_config(state);
}

/* Teardown: cut LNB power, then release the private state. */
static void isl6421_release(struct dvb_frontend *fe)
{
	/* power off */
	isl6421_set_voltage(fe, SEC_VOLTAGE_OFF);

	/* free */
	kfree(fe->sec_priv);
	fe->sec_priv = NULL;
}

/*
 * Attach an ISL6421 to @fe.  Probes the chip by writing the powered-off
 * config; if that write is not ACKed, the state is freed and NULL is
 * returned.  On success the frontend's sec ops are overridden and @fe
 * is returned.  Ownership of the state moves to fe->sec_priv (freed by
 * isl6421_release).
 */
struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 i2c_addr,
		   u8 override_set, u8 override_clear, bool override_tone)
{
	struct isl6421 *state = kmalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return NULL;

	/* default configuration */
	state->config = ISL6421_ISEL1;
	state->i2c = i2c;
	state->i2c_addr = i2c_addr;

	/* bits which should be forced to '1' */
	state->override_or = override_set;

	/* bits which should be forced to '0' */
	state->override_and = ~override_clear;

	fe->sec_priv = state;

	/* detect if it is present or not */
	if (isl6421_set_voltage(fe, SEC_VOLTAGE_OFF) != 0) {
		kfree(state);
		fe->sec_priv = NULL;
		return NULL;
	}

	/* install release callback */
	fe->ops.release_sec = isl6421_release;

	/* override frontend ops */
	fe->ops.set_voltage = isl6421_set_voltage;
	fe->ops.enable_high_lnb_voltage = isl6421_enable_high_lnb_voltage;
	if (override_tone)
		fe->ops.set_tone = isl6421_set_tone;

	return fe;
}
EXPORT_SYMBOL(isl6421_attach);

MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6421");
MODULE_AUTHOR("Andrew de Quincey & Oliver Endriss");
MODULE_LICENSE("GPL");
gpl-2.0
zlatinski/omap-android-drm-kms
lib/plist.c
4593
4839
/*
 * lib/plist.c
 *
 * Descending-priority-sorted double-linked list
 *
 * (C) 2002-2003 Intel Corp
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
 *
 * 2001-2005 (c) MontaVista Software, Inc.
 * Daniel Walker <dwalker@mvista.com>
 *
 * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
 *
 * Simplifications of the original code by
 * Oleg Nesterov <oleg@tv-sign.ru>
 *
 * Licensed under the FSF's GNU Public License v2 or later.
 *
 * Based on simple lists (include/linux/list.h).
 *
 * This file contains the add / del functions which are considered to
 * be too large to inline. See include/linux/plist.h for further
 * information.
 */

#include <linux/bug.h>
#include <linux/plist.h>
#include <linux/spinlock.h>

#ifdef CONFIG_DEBUG_PI_LIST

/* Head used only by the module_init self-test below. */
static struct plist_head test_head;

/*
 * Verify that p and n are properly cross-linked neighbours
 * (n->prev == p and p->next == n); t is the list top, printed
 * for context when the check trips.
 */
static void plist_check_prev_next(struct list_head *t, struct list_head *p,
				  struct list_head *n)
{
	WARN(n->prev != p || p->next != n,
	     "top: %p, n: %p, p: %p\n"
	     "prev: %p, n: %p, p: %p\n"
	     "next: %p, n: %p, p: %p\n",
	     t, t->next, t->prev,
	     p, p->next, p->prev,
	     n, n->next, n->prev);
}

/* Walk one circular list and check every prev/next pair on it. */
static void plist_check_list(struct list_head *top)
{
	struct list_head *prev = top, *next = top->next;

	plist_check_prev_next(top, prev, next);
	while (next != top) {
		prev = next;
		next = prev->next;
		plist_check_prev_next(top, prev, next);
	}
}

/*
 * Check both lists that make up a plist: the prio_list (one entry per
 * distinct priority, anchored at the first node) and the node_list
 * (every node, anchored at the head).
 */
static void plist_check_head(struct plist_head *head)
{
	if (!plist_head_empty(head))
		plist_check_list(&plist_first(head)->prio_list);
	plist_check_list(&head->node_list);
}

#else
# define plist_check_head(h)	do { } while (0)
#endif

/**
 * plist_add - add @node to @head
 *
 * @node:	&struct plist_node pointer
 * @head:	&struct plist_head pointer
 *
 * Inserts @node into the node_list ordered by ascending ->prio, and
 * links it on the prio_list only when it is the first node of its
 * priority.
 */
void plist_add(struct plist_node *node, struct plist_head *head)
{
	struct plist_node *first, *iter, *prev = NULL;
	struct list_head *node_next = &head->node_list;

	plist_check_head(head);
	WARN_ON(!plist_node_empty(node));
	WARN_ON(!list_empty(&node->prio_list));

	if (plist_head_empty(head))
		goto ins_node;

	first = iter = plist_first(head);

	/*
	 * Walk the prio_list (one iteration per distinct priority) until
	 * we find the first entry with a higher prio; node goes right
	 * before that entry's position in the node_list.
	 */
	do {
		if (node->prio < iter->prio) {
			node_next = &iter->node_list;
			break;
		}

		prev = iter;
		iter = list_entry(iter->prio_list.next,
				  struct plist_node, prio_list);
	} while (iter != first);

	/*
	 * Only the first node of each priority sits on the prio_list;
	 * if prev already has node's prio, node is a duplicate and
	 * stays off the prio_list.
	 */
	if (!prev || prev->prio != node->prio)
		list_add_tail(&node->prio_list, &iter->prio_list);
ins_node:
	list_add_tail(&node->node_list, node_next);

	plist_check_head(head);
}

/**
 * plist_del - Remove a @node from plist.
 *
 * @node:	&struct plist_node pointer - entry to be removed
 * @head:	&struct plist_head pointer - list head
 *
 * If @node was its priority's representative on the prio_list, the
 * next node of the same priority (if any) is promoted in its place.
 */
void plist_del(struct plist_node *node, struct plist_head *head)
{
	plist_check_head(head);

	if (!list_empty(&node->prio_list)) {
		if (node->node_list.next != &head->node_list) {
			struct plist_node *next;

			next = list_entry(node->node_list.next,
					  struct plist_node, node_list);

			/* add the next plist_node into prio_list */
			if (list_empty(&next->prio_list))
				list_add(&next->prio_list, &node->prio_list);
		}
		list_del_init(&node->prio_list);
	}

	list_del_init(&node->node_list);

	plist_check_head(head);
}

#ifdef CONFIG_DEBUG_PI_LIST
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/init.h>

static struct plist_node __initdata test_node[241];

/*
 * Verify plist invariants after each operation of the self-test:
 * exactly nr_expect nodes present, node_list sorted by prio, and the
 * prio_list holding exactly one node per distinct priority, in order.
 */
static void __init plist_test_check(int nr_expect)
{
	struct plist_node *first, *prio_pos, *node_pos;

	if (plist_head_empty(&test_head)) {
		BUG_ON(nr_expect != 0);
		return;
	}

	prio_pos = first = plist_first(&test_head);
	plist_for_each(node_pos, &test_head) {
		if (nr_expect-- < 0)
			break;
		if (node_pos == first)
			continue;
		if (node_pos->prio == prio_pos->prio) {
			/* duplicate prio: must not be on the prio_list */
			BUG_ON(!list_empty(&node_pos->prio_list));
			continue;
		}

		BUG_ON(prio_pos->prio > node_pos->prio);
		BUG_ON(prio_pos->prio_list.next != &node_pos->prio_list);
		prio_pos = node_pos;
	}

	BUG_ON(nr_expect != 0);
	/* prio_list is circular: the last entry must link back to first */
	BUG_ON(prio_pos->prio_list.next != &first->prio_list);
}

/*
 * Pseudo-random add/del stress test, seeded from the clock; checks
 * all invariants after every single operation.
 */
static int __init plist_test(void)
{
	int nr_expect = 0, i, loop;
	unsigned int r = local_clock();

	printk(KERN_INFO "start plist test\n");
	plist_head_init(&test_head);
	for (i = 0; i < ARRAY_SIZE(test_node); i++)
		plist_node_init(test_node + i, 0);

	for (loop = 0; loop < 1000; loop++) {
		/* simple LCG-style scramble for the next "random" value */
		r = r * 193939 % 47629;
		i = r % ARRAY_SIZE(test_node);
		if (plist_node_empty(test_node + i)) {
			r = r * 193939 % 47629;
			test_node[i].prio = r % 99;
			plist_add(test_node + i, &test_head);
			nr_expect++;
		} else {
			plist_del(test_node + i, &test_head);
			nr_expect--;
		}
		plist_test_check(nr_expect);
	}

	/* drain whatever is left, re-checking after each removal */
	for (i = 0; i < ARRAY_SIZE(test_node); i++) {
		if (plist_node_empty(test_node + i))
			continue;
		plist_del(test_node + i, &test_head);
		nr_expect--;
		plist_test_check(nr_expect);
	}

	printk(KERN_INFO "end plist test\n");
	return 0;
}

module_init(plist_test);

#endif
gpl-2.0
WildfireDEV/android_kernel_htc_m7
arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
4593
4062
/*
 * PQ2 ADS-style PCI interrupt controller
 *
 * Copyright 2007 Freescale Semiconductor, Inc.
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Loosely based on mpc82xx ADS support by Vitaly Bordug <vbordug@ru.mvista.com>
 * Copyright (c) 2006 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/cpm2.h>

#include "pq2.h"

/* Serializes read-modify-write access to the PIC mask register. */
static DEFINE_RAW_SPINLOCK(pci_pic_lock);

struct pq2ads_pci_pic {
	struct device_node *node;
	struct irq_domain *host;

	/* Memory-mapped controller registers: status, then mask. */
	struct {
		u32 stat;
		u32 mask;
	} __iomem *regs;
};

#define NUM_IRQS 32

/*
 * Disable a source by setting its mask bit.  Hardware bit order is the
 * reverse of the hwirq numbering, hence NUM_IRQS - hwirq - 1.
 */
static void pq2ads_pci_mask_irq(struct irq_data *d)
{
	struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
	int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;

	if (irq != -1) {
		unsigned long flags;
		raw_spin_lock_irqsave(&pci_pic_lock, flags);

		setbits32(&priv->regs->mask, 1 << irq);
		/* flush the mask write before interrupts are re-enabled */
		mb();

		raw_spin_unlock_irqrestore(&pci_pic_lock, flags);
	}
}

/* Enable a source by clearing its mask bit (same bit mapping as above). */
static void pq2ads_pci_unmask_irq(struct irq_data *d)
{
	struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
	int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;

	if (irq != -1) {
		unsigned long flags;

		raw_spin_lock_irqsave(&pci_pic_lock, flags);
		clrbits32(&priv->regs->mask, 1 << irq);
		raw_spin_unlock_irqrestore(&pci_pic_lock, flags);
	}
}

static struct irq_chip pq2ads_pci_ic = {
	.name = "PQ2 ADS PCI",
	.irq_mask = pq2ads_pci_mask_irq,
	.irq_mask_ack = pq2ads_pci_mask_irq,
	.irq_ack = pq2ads_pci_mask_irq,
	.irq_unmask = pq2ads_pci_unmask_irq,
	.irq_enable = pq2ads_pci_unmask_irq,
	.irq_disable = pq2ads_pci_mask_irq
};

/*
 * Chained handler on the parent interrupt: repeatedly read the pending
 * (status & ~mask) bits and dispatch each set bit, MSB first, until no
 * unmasked source remains pending.
 */
static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
{
	struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc);
	u32 stat, mask, pend;
	int bit;

	for (;;) {
		stat = in_be32(&priv->regs->stat);
		mask = in_be32(&priv->regs->mask);

		pend = stat & ~mask;

		if (!pend)
			break;

		for (bit = 0; pend != 0; ++bit, pend <<= 1) {
			if (pend & 0x80000000) {
				int virq = irq_linear_revmap(priv->host, bit);
				generic_handle_irq(virq);
			}
		}
	}
}

/* Bind a virq to this chip: level-triggered, chip data = controller priv. */
static int pci_pic_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops pci_pic_host_ops = {
	.map = pci_pic_host_map,
};

/*
 * Probe the "fsl,pq2ads-pci-pic" node, map its registers, mask all
 * sources, and install the chained demux handler on the parent IRQ.
 *
 * Returns 0 on success, -ENODEV if the node/IRQ/registers are missing,
 * or -ENOMEM on allocation failure.
 */
int __init pq2ads_pci_init_irq(void)
{
	struct pq2ads_pci_pic *priv;
	struct irq_domain *host;
	struct device_node *np;
	int ret = -ENODEV;
	int irq;

	np = of_find_compatible_node(NULL, NULL, "fsl,pq2ads-pci-pic");
	if (!np) {
		printk(KERN_ERR "No pci pic node in device tree.\n");
		of_node_put(np);
		goto out;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (irq == NO_IRQ) {
		printk(KERN_ERR "No interrupt in pci pic node.\n");
		of_node_put(np);
		goto out;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		of_node_put(np);
		ret = -ENOMEM;
		goto out_unmap_irq;
	}

	/* PCI interrupt controller registers: status and mask */
	priv->regs = of_iomap(np, 0);
	if (!priv->regs) {
		printk(KERN_ERR "Cannot map PCI PIC registers.\n");
		goto out_free_priv;
	}

	/* mask all PCI interrupts */
	out_be32(&priv->regs->mask, ~0);
	mb();

	host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv);
	if (!host) {
		ret = -ENOMEM;
		goto out_unmap_regs;
	}

	priv->host = host;
	irq_set_handler_data(irq, priv);
	irq_set_chained_handler(irq, pq2ads_pci_irq_demux);

	of_node_put(np);
	return 0;

out_unmap_regs:
	iounmap(priv->regs);
out_free_priv:
	/*
	 * priv comes from kzalloc(), so it must be released with kfree();
	 * the old code wrongly called free_bootmem() here, which does not
	 * match the slab allocator and corrupts bootmem accounting.
	 */
	kfree(priv);
	of_node_put(np);
out_unmap_irq:
	irq_dispose_mapping(irq);
out:
	return ret;
}
gpl-2.0
jaluma/Kernel-6.0
arch/powerpc/sysdev/qe_lib/qe_ic.c
4593
11518
/*
 * arch/powerpc/sysdev/qe_lib/qe_ic.c
 *
 * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * QUICC ENGINE Interrupt Controller
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/qe_ic.h>

#include "qe_ic.h"

/* Serializes read-modify-write access to the QEIC mask registers. */
static DEFINE_RAW_SPINLOCK(qe_ic_lock);

/*
 * Per-source register description, indexed by hardware interrupt number:
 * the mask bit and which mask register it lives in (mask/mask_reg), plus
 * the priority field code and its priority register (pri_code/pri_reg).
 * Sources not listed here have .mask == 0 and are rejected as reserved
 * by qe_ic_host_map().
 */
static struct qe_ic_info qe_ic_info[] = {
	[1] = { .mask = 0x00008000, .mask_reg = QEIC_CIMR, .pri_code = 0, .pri_reg = QEIC_CIPWCC, },
	[2] = { .mask = 0x00004000, .mask_reg = QEIC_CIMR, .pri_code = 1, .pri_reg = QEIC_CIPWCC, },
	[3] = { .mask = 0x00002000, .mask_reg = QEIC_CIMR, .pri_code = 2, .pri_reg = QEIC_CIPWCC, },
	[10] = { .mask = 0x00000040, .mask_reg = QEIC_CIMR, .pri_code = 1, .pri_reg = QEIC_CIPZCC, },
	[11] = { .mask = 0x00000020, .mask_reg = QEIC_CIMR, .pri_code = 2, .pri_reg = QEIC_CIPZCC, },
	[12] = { .mask = 0x00000010, .mask_reg = QEIC_CIMR, .pri_code = 3, .pri_reg = QEIC_CIPZCC, },
	[13] = { .mask = 0x00000008, .mask_reg = QEIC_CIMR, .pri_code = 4, .pri_reg = QEIC_CIPZCC, },
	[14] = { .mask = 0x00000004, .mask_reg = QEIC_CIMR, .pri_code = 5, .pri_reg = QEIC_CIPZCC, },
	[15] = { .mask = 0x00000002, .mask_reg = QEIC_CIMR, .pri_code = 6, .pri_reg = QEIC_CIPZCC, },
	[20] = { .mask = 0x10000000, .mask_reg = QEIC_CRIMR, .pri_code = 3, .pri_reg = QEIC_CIPRTA, },
	[25] = { .mask = 0x00800000, .mask_reg = QEIC_CRIMR, .pri_code = 0, .pri_reg = QEIC_CIPRTB, },
	[26] = { .mask = 0x00400000, .mask_reg = QEIC_CRIMR, .pri_code = 1, .pri_reg = QEIC_CIPRTB, },
	[27] = { .mask = 0x00200000, .mask_reg = QEIC_CRIMR, .pri_code = 2, .pri_reg = QEIC_CIPRTB, },
	[28] = { .mask = 0x00100000, .mask_reg = QEIC_CRIMR, .pri_code = 3, .pri_reg = QEIC_CIPRTB, },
	[32] = { .mask = 0x80000000, .mask_reg = QEIC_CIMR, .pri_code = 0, .pri_reg = QEIC_CIPXCC, },
	[33] = { .mask = 0x40000000, .mask_reg = QEIC_CIMR, .pri_code = 1, .pri_reg = QEIC_CIPXCC, },
	[34] = { .mask = 0x20000000, .mask_reg = QEIC_CIMR, .pri_code = 2, .pri_reg = QEIC_CIPXCC, },
	[35] = { .mask = 0x10000000, .mask_reg = QEIC_CIMR, .pri_code = 3, .pri_reg = QEIC_CIPXCC, },
	[36] = { .mask = 0x08000000, .mask_reg = QEIC_CIMR, .pri_code = 4, .pri_reg = QEIC_CIPXCC, },
	[40] = { .mask = 0x00800000, .mask_reg = QEIC_CIMR, .pri_code = 0, .pri_reg = QEIC_CIPYCC, },
	[41] = { .mask = 0x00400000, .mask_reg = QEIC_CIMR, .pri_code = 1, .pri_reg = QEIC_CIPYCC, },
	[42] = { .mask = 0x00200000, .mask_reg = QEIC_CIMR, .pri_code = 2, .pri_reg = QEIC_CIPYCC, },
	[43] = { .mask = 0x00100000, .mask_reg = QEIC_CIMR, .pri_code = 3, .pri_reg = QEIC_CIPYCC, },
};

/* Big-endian register read; reg is a byte offset from base. */
static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/* Big-endian register write; reg is a byte offset from base. */
static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
			       u32 value)
{
	out_be32(base + (reg >> 2), value);
}

/* Recover the controller instance stashed as chip data on a virq. */
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
	return irq_get_chip_data(virq);
}

static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}

/* Enable a source: set its bit in the corresponding mask register. */
static void qe_ic_unmask_irq(struct irq_data *d)
{
	struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&qe_ic_lock, flags);

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
		    temp | qe_ic_info[src].mask);

	raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}

/* Disable a source: clear its bit in the corresponding mask register. */
static void qe_ic_mask_irq(struct irq_data *d)
{
	struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&qe_ic_lock, flags);

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
		    temp & ~qe_ic_info[src].mask);

	/* Flush the above write before enabling interrupts; otherwise,
	 * spurious interrupts will sometimes happen.  To be 100% sure
	 * that the write has reached the device before interrupts are
	 * enabled, the mask register would have to be read back; however,
	 * this is not required for correctness, only to avoid wasting
	 * time on a large number of spurious interrupts.  In testing,
	 * a sync reduced the observed spurious interrupts to zero.
	 */
	mb();

	raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}

static struct irq_chip qe_ic_irq_chip = {
	.name = "QEIC",
	.irq_unmask = qe_ic_unmask_irq,
	.irq_mask = qe_ic_mask_irq,
	.irq_mask_ack = qe_ic_mask_irq,
};

/* Domain matcher: exact device-node match, unless the domain has no node. */
static int qe_ic_host_match(struct irq_domain *h, struct device_node *node)
{
	/* Exact match, unless qe_ic node is NULL */
	return h->of_node == NULL || h->of_node == node;
}

/*
 * Bind a virq to this controller: reject reserved sources (no mask bit
 * in qe_ic_info), then install the level-triggered QEIC chip.
 */
static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct qe_ic *qe_ic = h->host_data;
	struct irq_chip *chip;

	if (qe_ic_info[hw].mask == 0) {
		printk(KERN_ERR "Can't map reserved IRQ\n");
		return -EINVAL;
	}
	/* Default chip */
	chip = &qe_ic->hc_irq;

	irq_set_chip_data(virq, qe_ic);
	irq_set_status_flags(virq, IRQ_LEVEL);

	irq_set_chip_and_handler(virq, chip, handle_level_irq);

	return 0;
}

static struct irq_domain_ops qe_ic_host_ops = {
	.match = qe_ic_host_match,
	.map = qe_ic_host_map,
	.xlate = irq_domain_xlate_onetwocell,
};

/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{
	int irq;

	BUG_ON(qe_ic == NULL);

	/* get the interrupt source vector: top 6 bits of CIVEC. */
	irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;

	if (irq == 0)
		return NO_IRQ;

	return irq_linear_revmap(qe_ic->irqhost, irq);
}

/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{
	int irq;

	BUG_ON(qe_ic == NULL);

	/* get the interrupt source vector: top 6 bits of CHIVEC. */
	irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;

	if (irq == 0)
		return NO_IRQ;

	return irq_linear_revmap(qe_ic->irqhost, irq);
}

/*
 * Probe-time setup: allocate the controller state, create its linear
 * irq domain, map its registers, program CICR according to the
 * QE_IC_SPREADMODE_*/QE_IC_HIGH_SIGNAL flags, and install the chained
 * handlers on the parent low/high interrupts.  Errors are reported by
 * returning early; the function has no return value.
 */
void __init qe_ic_init(struct device_node *node, unsigned int flags,
		       void (*low_handler)(unsigned int irq,
					   struct irq_desc *desc),
		       void (*high_handler)(unsigned int irq,
					    struct irq_desc *desc))
{
	struct qe_ic *qe_ic;
	struct resource res;
	u32 temp = 0, ret, high_active = 0;

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		return;

	qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
	if (qe_ic == NULL)
		return;

	qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
					       &qe_ic_host_ops, qe_ic);
	if (qe_ic->irqhost == NULL) {
		kfree(qe_ic);
		return;
	}

	qe_ic->regs = ioremap(res.start, resource_size(&res));

	qe_ic->hc_irq = qe_ic_irq_chip;

	qe_ic->virq_high = irq_of_parse_and_map(node, 0);
	qe_ic->virq_low = irq_of_parse_and_map(node, 1);

	if (qe_ic->virq_low == NO_IRQ) {
		printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
		/*
		 * NOTE(review): this path frees qe_ic but does not iounmap
		 * qe_ic->regs, remove the irq domain, or dispose the parsed
		 * virq_high mapping -- resources leak on this error path.
		 */
		kfree(qe_ic);
		return;
	}

	/* default priority scheme is grouped.  If spread mode is
	 * required, configure cicr accordingly.
	 */
	if (flags & QE_IC_SPREADMODE_GRP_W)
		temp |= CICR_GWCC;
	if (flags & QE_IC_SPREADMODE_GRP_X)
		temp |= CICR_GXCC;
	if (flags & QE_IC_SPREADMODE_GRP_Y)
		temp |= CICR_GYCC;
	if (flags & QE_IC_SPREADMODE_GRP_Z)
		temp |= CICR_GZCC;
	if (flags & QE_IC_SPREADMODE_GRP_RISCA)
		temp |= CICR_GRTA;
	if (flags & QE_IC_SPREADMODE_GRP_RISCB)
		temp |= CICR_GRTB;

	/* choose destination signal for highest priority interrupt */
	if (flags & QE_IC_HIGH_SIGNAL) {
		temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
		/* NOTE(review): high_active is set here but never read. */
		high_active = 1;
	}

	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);

	irq_set_handler_data(qe_ic->virq_low, qe_ic);
	irq_set_chained_handler(qe_ic->virq_low, low_handler);

	if (qe_ic->virq_high != NO_IRQ &&
	    qe_ic->virq_high != qe_ic->virq_low) {
		irq_set_handler_data(qe_ic->virq_high, qe_ic);
		irq_set_chained_handler(qe_ic->virq_high, high_handler);
	}
}

/*
 * Program CICR so that @virq's source is the highest-priority one, and
 * route it to the high or low signal depending on @high.
 */
void qe_ic_set_highest_priority(unsigned int virq, int high)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp = 0;

	temp = qe_ic_read(qe_ic->regs, QEIC_CICR);

	temp &= ~CICR_HP_MASK;
	temp |= src << CICR_HP_SHIFT;

	temp &= ~CICR_HPIT_MASK;
	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;

	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
}

/* Set Priority level within its group, from 1 to 8 */
int qe_ic_set_priority(unsigned int virq, unsigned int priority)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	if (priority > 8 || priority == 0)
		return -EINVAL;
	if (src > 127)
		return -EINVAL;
	if (qe_ic_info[src].pri_reg == 0)
		return -EINVAL;

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);

	/* priorities 1-3 live in the upper half of the register,
	 * 4-8 in the lower half; each field is 3 bits wide. */
	if (priority < 4) {
		temp &= ~(0x7 << (32 - priority * 3));
		temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
	} else {
		temp &= ~(0x7 << (24 - priority * 3));
		temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
	}

	qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);

	return 0;
}

/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp, control_reg = QEIC_CICNR, shift = 0;

	if (priority > 2 || priority == 0)
		return -EINVAL;

	/* pick the signal-select field matching the source's group */
	switch (qe_ic_info[src].pri_reg) {
	case QEIC_CIPZCC:
		shift = CICNR_ZCC1T_SHIFT;
		break;
	case QEIC_CIPWCC:
		shift = CICNR_WCC1T_SHIFT;
		break;
	case QEIC_CIPYCC:
		shift = CICNR_YCC1T_SHIFT;
		break;
	case QEIC_CIPXCC:
		shift = CICNR_XCC1T_SHIFT;
		break;
	case QEIC_CIPRTA:
		shift = CRICR_RTA1T_SHIFT;
		control_reg = QEIC_CRICR;
		break;
	case QEIC_CIPRTB:
		shift = CRICR_RTB1T_SHIFT;
		control_reg = QEIC_CRICR;
		break;
	default:
		return -EINVAL;
	}

	shift += (2 - priority) * 2;
	temp = qe_ic_read(qe_ic->regs, control_reg);
	temp &= ~(SIGNAL_MASK << shift);
	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
	qe_ic_write(qe_ic->regs, control_reg, temp);

	return 0;
}

static struct bus_type qe_ic_subsys = {
	.name = "qe_ic",
	.dev_name = "qe_ic",
};

static struct device device_qe_ic = {
	.id = 0,
	.bus = &qe_ic_subsys,
};

/* Expose a minimal qe_ic subsystem/device pair under sysfs. */
static int __init init_qe_ic_sysfs(void)
{
	int rc;

	printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");

	rc = subsys_system_register(&qe_ic_subsys, NULL);
	if (rc) {
		printk(KERN_ERR "Failed registering qe_ic sys class\n");
		return -ENODEV;
	}
	rc = device_register(&device_qe_ic);
	if (rc) {
		printk(KERN_ERR "Failed registering qe_ic sys device\n");
		return -ENODEV;
	}
	return 0;
}

subsys_initcall(init_qe_ic_sysfs);
gpl-2.0
PyYoshi/android_kernel_huawei_hw01e_caf
arch/sparc/kernel/power.c
4849
1551
/* power.c: Power management driver. * * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/reboot.h> #include <linux/of_device.h> #include <asm/prom.h> #include <asm/io.h> static void __iomem *power_reg; static irqreturn_t power_handler(int irq, void *dev_id) { orderly_poweroff(true); /* FIXME: Check registers for status... */ return IRQ_HANDLED; } static int __devinit has_button_interrupt(unsigned int irq, struct device_node *dp) { if (irq == 0xffffffff) return 0; if (!of_find_property(dp, "button", NULL)) return 0; return 1; } static int __devinit power_probe(struct platform_device *op) { struct resource *res = &op->resource[0]; unsigned int irq = op->archdata.irqs[0]; power_reg = of_ioremap(res, 0, 0x4, "power"); printk(KERN_INFO "%s: Control reg at %llx\n", op->dev.of_node->name, res->start); if (has_button_interrupt(irq, op->dev.of_node)) { if (request_irq(irq, power_handler, 0, "power", NULL) < 0) printk(KERN_ERR "power: Cannot setup IRQ handler.\n"); } return 0; } static const struct of_device_id power_match[] = { { .name = "power", }, {}, }; static struct platform_driver power_driver = { .probe = power_probe, .driver = { .name = "power", .owner = THIS_MODULE, .of_match_table = power_match, }, }; static int __init power_init(void) { return platform_driver_register(&power_driver); } device_initcall(power_init);
gpl-2.0
JSkernel/G_pro2_msm8974_JSKernel
drivers/media/video/uvc/uvc_ctrl.c
4849
49852
/* * uvc_ctrl.c -- USB Video Class driver - Controls * * Copyright (C) 2005-2010 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/videodev2.h> #include <linux/vmalloc.h> #include <linux/wait.h> #include <linux/atomic.h> #include "uvcvideo.h" #define UVC_CTRL_DATA_CURRENT 0 #define UVC_CTRL_DATA_BACKUP 1 #define UVC_CTRL_DATA_MIN 2 #define UVC_CTRL_DATA_MAX 3 #define UVC_CTRL_DATA_RES 4 #define UVC_CTRL_DATA_DEF 5 #define UVC_CTRL_DATA_LAST 6 /* ------------------------------------------------------------------------ * Controls */ static struct uvc_control_info uvc_ctrls[] = { { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_BRIGHTNESS_CONTROL, .index = 0, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_CONTRAST_CONTROL, .index = 1, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_HUE_CONTROL, .index = 2, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_SATURATION_CONTROL, .index = 3, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_SHARPNESS_CONTROL, .index = 4, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = 
UVC_PU_GAMMA_CONTROL, .index = 5, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL, .index = 6, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL, .index = 7, .size = 4, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL, .index = 8, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_GAIN_CONTROL, .index = 9, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL, .index = 10, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_HUE_AUTO_CONTROL, .index = 11, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL, .index = 12, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL, .index = 13, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_DIGITAL_MULTIPLIER_CONTROL, .index = 14, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | 
UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL, .index = 15, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_ANALOG_VIDEO_STANDARD_CONTROL, .index = 16, .size = 1, .flags = UVC_CTRL_FLAG_GET_CUR, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_ANALOG_LOCK_STATUS_CONTROL, .index = 17, .size = 1, .flags = UVC_CTRL_FLAG_GET_CUR, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_SCANNING_MODE_CONTROL, .index = 0, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_AE_MODE_CONTROL, .index = 1, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_AE_PRIORITY_CONTROL, .index = 2, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL, .index = 3, .size = 4, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL, .index = 4, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL, .index = 5, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_FOCUS_RELATIVE_CONTROL, .index = 6, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_IRIS_ABSOLUTE_CONTROL, 
.index = 7, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_IRIS_RELATIVE_CONTROL, .index = 8, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL, .index = 9, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ZOOM_RELATIVE_CONTROL, .index = 10, .size = 3, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL, .index = 11, .size = 8, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PANTILT_RELATIVE_CONTROL, .index = 12, .size = 4, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ROLL_ABSOLUTE_CONTROL, .index = 13, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ROLL_RELATIVE_CONTROL, .index = 14, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_FOCUS_AUTO_CONTROL, .index = 17, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PRIVACY_CONTROL, .index = 18, 
.size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, }; static struct uvc_menu_info power_line_frequency_controls[] = { { 0, "Disabled" }, { 1, "50 Hz" }, { 2, "60 Hz" }, }; static struct uvc_menu_info exposure_auto_controls[] = { { 2, "Auto Mode" }, { 1, "Manual Mode" }, { 4, "Shutter Priority Mode" }, { 8, "Aperture Priority Mode" }, }; static __s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, __u8 query, const __u8 *data) { __s8 zoom = (__s8)data[0]; switch (query) { case UVC_GET_CUR: return (zoom == 0) ? 0 : (zoom > 0 ? data[2] : -data[2]); case UVC_GET_MIN: case UVC_GET_MAX: case UVC_GET_RES: case UVC_GET_DEF: default: return data[2]; } } static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping, __s32 value, __u8 *data) { data[0] = value == 0 ? 0 : (value > 0) ? 1 : 0xff; data[2] = min((int)abs(value), 0xff); } static struct uvc_control_mapping uvc_ctrl_mappings[] = { { .id = V4L2_CID_BRIGHTNESS, .name = "Brightness", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_BRIGHTNESS_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_CONTRAST, .name = "Contrast", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_CONTRAST_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_HUE, .name = "Hue", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_HUE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_SATURATION, .name = "Saturation", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_SATURATION_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_SHARPNESS, .name = "Sharpness", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_SHARPNESS_CONTROL, .size 
= 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_GAMMA, .name = "Gamma", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_GAMMA_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_BACKLIGHT_COMPENSATION, .name = "Backlight Compensation", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_GAIN, .name = "Gain", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_GAIN_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_POWER_LINE_FREQUENCY, .name = "Power Line Frequency", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL, .size = 2, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_MENU, .data_type = UVC_CTRL_DATA_TYPE_ENUM, .menu_info = power_line_frequency_controls, .menu_count = ARRAY_SIZE(power_line_frequency_controls), }, { .id = V4L2_CID_HUE_AUTO, .name = "Hue, Auto", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_HUE_AUTO_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, }, { .id = V4L2_CID_EXPOSURE_AUTO, .name = "Exposure, Auto", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_AE_MODE_CONTROL, .size = 4, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_MENU, .data_type = UVC_CTRL_DATA_TYPE_BITMASK, .menu_info = exposure_auto_controls, .menu_count = ARRAY_SIZE(exposure_auto_controls), }, { .id = V4L2_CID_EXPOSURE_AUTO_PRIORITY, .name = "Exposure, Auto Priority", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_AE_PRIORITY_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, }, { .id = V4L2_CID_EXPOSURE_ABSOLUTE, .name = "Exposure 
(Absolute)", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL, .size = 32, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_AUTO_WHITE_BALANCE, .name = "White Balance Temperature, Auto", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, }, { .id = V4L2_CID_WHITE_BALANCE_TEMPERATURE, .name = "White Balance Temperature", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_AUTO_WHITE_BALANCE, .name = "White Balance Component, Auto", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, }, { .id = V4L2_CID_BLUE_BALANCE, .name = "White Balance Blue Component", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_RED_BALANCE, .name = "White Balance Red Component", .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL, .size = 16, .offset = 16, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_FOCUS_ABSOLUTE, .name = "Focus (absolute)", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_FOCUS_AUTO, .name = "Focus, Auto", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_FOCUS_AUTO_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = 
UVC_CTRL_DATA_TYPE_BOOLEAN, }, { .id = V4L2_CID_IRIS_ABSOLUTE, .name = "Iris, Absolute", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_IRIS_ABSOLUTE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_IRIS_RELATIVE, .name = "Iris, Relative", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_IRIS_RELATIVE_CONTROL, .size = 8, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_ZOOM_ABSOLUTE, .name = "Zoom, Absolute", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_ZOOM_CONTINUOUS, .name = "Zoom, Continuous", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ZOOM_RELATIVE_CONTROL, .size = 0, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, .get = uvc_ctrl_get_zoom, .set = uvc_ctrl_set_zoom, }, { .id = V4L2_CID_PAN_ABSOLUTE, .name = "Pan (Absolute)", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL, .size = 32, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_TILT_ABSOLUTE, .name = "Tilt (Absolute)", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL, .size = 32, .offset = 32, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_PRIVACY, .name = "Privacy", .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PRIVACY_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, }, }; /* ------------------------------------------------------------------------ * Utility functions */ static inline __u8 *uvc_ctrl_data(struct uvc_control *ctrl, int id) { return ctrl->uvc_data + id * ctrl->info.size; } static inline int uvc_test_bit(const __u8 
*data, int bit) { return (data[bit >> 3] >> (bit & 7)) & 1; } static inline void uvc_clear_bit(__u8 *data, int bit) { data[bit >> 3] &= ~(1 << (bit & 7)); } /* Extract the bit string specified by mapping->offset and mapping->size * from the little-endian data stored at 'data' and return the result as * a signed 32bit integer. Sign extension will be performed if the mapping * references a signed data type. */ static __s32 uvc_get_le_value(struct uvc_control_mapping *mapping, __u8 query, const __u8 *data) { int bits = mapping->size; int offset = mapping->offset; __s32 value = 0; __u8 mask; data += offset / 8; offset &= 7; mask = ((1LL << bits) - 1) << offset; for (; bits > 0; data++) { __u8 byte = *data & mask; value |= offset > 0 ? (byte >> offset) : (byte << (-offset)); bits -= 8 - (offset > 0 ? offset : 0); offset -= 8; mask = (1 << bits) - 1; } /* Sign-extend the value if needed. */ if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) value |= -(value & (1 << (mapping->size - 1))); return value; } /* Set the bit string specified by mapping->offset and mapping->size * in the little-endian data stored at 'data' to the value 'value'. */ static void uvc_set_le_value(struct uvc_control_mapping *mapping, __s32 value, __u8 *data) { int bits = mapping->size; int offset = mapping->offset; __u8 mask; /* According to the v4l2 spec, writing any value to a button control * should result in the action belonging to the button control being * triggered. UVC devices however want to see a 1 written -> override * value. */ if (mapping->v4l2_type == V4L2_CTRL_TYPE_BUTTON) value = -1; data += offset / 8; offset &= 7; for (; bits > 0; data++) { mask = ((1LL << bits) - 1) << offset; *data = (*data & ~mask) | ((value << offset) & mask); value >>= offset ? 
offset : 8; bits -= 8 - offset; offset = 0; } } /* ------------------------------------------------------------------------ * Terminal and unit management */ static const __u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING; static const __u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA; static const __u8 uvc_media_transport_input_guid[16] = UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT; static int uvc_entity_match_guid(const struct uvc_entity *entity, const __u8 guid[16]) { switch (UVC_ENTITY_TYPE(entity)) { case UVC_ITT_CAMERA: return memcmp(uvc_camera_guid, guid, 16) == 0; case UVC_ITT_MEDIA_TRANSPORT_INPUT: return memcmp(uvc_media_transport_input_guid, guid, 16) == 0; case UVC_VC_PROCESSING_UNIT: return memcmp(uvc_processing_guid, guid, 16) == 0; case UVC_VC_EXTENSION_UNIT: return memcmp(entity->extension.guidExtensionCode, guid, 16) == 0; default: return 0; } } /* ------------------------------------------------------------------------ * UVC Controls */ static void __uvc_find_control(struct uvc_entity *entity, __u32 v4l2_id, struct uvc_control_mapping **mapping, struct uvc_control **control, int next) { struct uvc_control *ctrl; struct uvc_control_mapping *map; unsigned int i; if (entity == NULL) return; for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (!ctrl->initialized) continue; list_for_each_entry(map, &ctrl->info.mappings, list) { if ((map->id == v4l2_id) && !next) { *control = ctrl; *mapping = map; return; } if ((*mapping == NULL || (*mapping)->id > map->id) && (map->id > v4l2_id) && next) { *control = ctrl; *mapping = map; } } } } static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, __u32 v4l2_id, struct uvc_control_mapping **mapping) { struct uvc_control *ctrl = NULL; struct uvc_entity *entity; int next = v4l2_id & V4L2_CTRL_FLAG_NEXT_CTRL; *mapping = NULL; /* Mask the query flags. */ v4l2_id &= V4L2_CTRL_ID_MASK; /* Find the control. 
*/ list_for_each_entry(entity, &chain->entities, chain) { __uvc_find_control(entity, v4l2_id, mapping, &ctrl, next); if (ctrl && !next) return ctrl; } if (ctrl == NULL && !next) uvc_trace(UVC_TRACE_CONTROL, "Control 0x%08x not found.\n", v4l2_id); return ctrl; } static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain, struct uvc_control *ctrl) { int ret; if (ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF) { ret = uvc_query_ctrl(chain->dev, UVC_GET_DEF, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF), ctrl->info.size); if (ret < 0) return ret; } if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN) { ret = uvc_query_ctrl(chain->dev, UVC_GET_MIN, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN), ctrl->info.size); if (ret < 0) return ret; } if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX) { ret = uvc_query_ctrl(chain->dev, UVC_GET_MAX, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX), ctrl->info.size); if (ret < 0) return ret; } if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { ret = uvc_query_ctrl(chain->dev, UVC_GET_RES, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES), ctrl->info.size); if (ret < 0) { if (UVC_ENTITY_TYPE(ctrl->entity) != UVC_VC_EXTENSION_UNIT) return ret; /* GET_RES is mandatory for XU controls, but some * cameras still choke on it. Ignore errors and set the * resolution value to zero. */ uvc_warn_once(chain->dev, UVC_WARN_XU_GET_RES, "UVC non compliance - GET_RES failed on " "an XU control. 
Enabling workaround.\n"); memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES), 0, ctrl->info.size); } } ctrl->cached = 1; return 0; } int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, struct v4l2_queryctrl *v4l2_ctrl) { struct uvc_control *ctrl; struct uvc_control_mapping *mapping; struct uvc_menu_info *menu; unsigned int i; int ret; ret = mutex_lock_interruptible(&chain->ctrl_mutex); if (ret < 0) return -ERESTARTSYS; ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping); if (ctrl == NULL) { ret = -EINVAL; goto done; } memset(v4l2_ctrl, 0, sizeof *v4l2_ctrl); v4l2_ctrl->id = mapping->id; v4l2_ctrl->type = mapping->v4l2_type; strlcpy(v4l2_ctrl->name, mapping->name, sizeof v4l2_ctrl->name); v4l2_ctrl->flags = 0; if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY; if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR)) v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; if (!ctrl->cached) { ret = uvc_ctrl_populate_cache(chain, ctrl); if (ret < 0) goto done; } if (ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF) { v4l2_ctrl->default_value = mapping->get(mapping, UVC_GET_DEF, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF)); } switch (mapping->v4l2_type) { case V4L2_CTRL_TYPE_MENU: v4l2_ctrl->minimum = 0; v4l2_ctrl->maximum = mapping->menu_count - 1; v4l2_ctrl->step = 1; menu = mapping->menu_info; for (i = 0; i < mapping->menu_count; ++i, ++menu) { if (menu->value == v4l2_ctrl->default_value) { v4l2_ctrl->default_value = i; break; } } goto done; case V4L2_CTRL_TYPE_BOOLEAN: v4l2_ctrl->minimum = 0; v4l2_ctrl->maximum = 1; v4l2_ctrl->step = 1; goto done; case V4L2_CTRL_TYPE_BUTTON: v4l2_ctrl->minimum = 0; v4l2_ctrl->maximum = 0; v4l2_ctrl->step = 0; goto done; default: break; } if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN) v4l2_ctrl->minimum = mapping->get(mapping, UVC_GET_MIN, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN)); if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX) v4l2_ctrl->maximum = mapping->get(mapping, UVC_GET_MAX, uvc_ctrl_data(ctrl, 
UVC_CTRL_DATA_MAX)); if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES)); done: mutex_unlock(&chain->ctrl_mutex); return ret; } /* * Mapping V4L2 controls to UVC controls can be straighforward if done well. * Most of the UVC controls exist in V4L2, and can be mapped directly. Some * must be grouped (for instance the Red Balance, Blue Balance and Do White * Balance V4L2 controls use the White Balance Component UVC control) or * otherwise translated. The approach we take here is to use a translation * table for the controls that can be mapped directly, and handle the others * manually. */ int uvc_query_v4l2_menu(struct uvc_video_chain *chain, struct v4l2_querymenu *query_menu) { struct uvc_menu_info *menu_info; struct uvc_control_mapping *mapping; struct uvc_control *ctrl; u32 index = query_menu->index; u32 id = query_menu->id; int ret; memset(query_menu, 0, sizeof(*query_menu)); query_menu->id = id; query_menu->index = index; ret = mutex_lock_interruptible(&chain->ctrl_mutex); if (ret < 0) return -ERESTARTSYS; ctrl = uvc_find_control(chain, query_menu->id, &mapping); if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) { ret = -EINVAL; goto done; } if (query_menu->index >= mapping->menu_count) { ret = -EINVAL; goto done; } menu_info = &mapping->menu_info[query_menu->index]; if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK && (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) { s32 bitmap; if (!ctrl->cached) { ret = uvc_ctrl_populate_cache(chain, ctrl); if (ret < 0) goto done; } bitmap = mapping->get(mapping, UVC_GET_RES, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES)); if (!(bitmap & menu_info->value)) { ret = -EINVAL; goto done; } } strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name); done: mutex_unlock(&chain->ctrl_mutex); return ret; } /* -------------------------------------------------------------------------- * Control transactions * * To make extended set 
operations as atomic as the hardware allows, controls * are handled using begin/commit/rollback operations. * * At the beginning of a set request, uvc_ctrl_begin should be called to * initialize the request. This function acquires the control lock. * * When setting a control, the new value is stored in the control data field * at position UVC_CTRL_DATA_CURRENT. The control is then marked as dirty for * later processing. If the UVC and V4L2 control sizes differ, the current * value is loaded from the hardware before storing the new value in the data * field. * * After processing all controls in the transaction, uvc_ctrl_commit or * uvc_ctrl_rollback must be called to apply the pending changes to the * hardware or revert them. When applying changes, all controls marked as * dirty will be modified in the UVC device, and the dirty flag will be * cleared. When reverting controls, the control data field * UVC_CTRL_DATA_CURRENT is reverted to its previous value * (UVC_CTRL_DATA_BACKUP) for all dirty controls. Both functions release the * control lock. */ int uvc_ctrl_begin(struct uvc_video_chain *chain) { return mutex_lock_interruptible(&chain->ctrl_mutex) ? -ERESTARTSYS : 0; } static int uvc_ctrl_commit_entity(struct uvc_device *dev, struct uvc_entity *entity, int rollback) { struct uvc_control *ctrl; unsigned int i; int ret; if (entity == NULL) return 0; for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (!ctrl->initialized) continue; /* Reset the loaded flag for auto-update controls that were * marked as loaded in uvc_ctrl_get/uvc_ctrl_set to prevent * uvc_ctrl_get from using the cached value. 
*/ if (ctrl->info.flags & UVC_CTRL_FLAG_AUTO_UPDATE) ctrl->loaded = 0; if (!ctrl->dirty) continue; if (!rollback) ret = uvc_query_ctrl(dev, UVC_SET_CUR, ctrl->entity->id, dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), ctrl->info.size); else ret = 0; if (rollback || ret < 0) memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP), ctrl->info.size); ctrl->dirty = 0; if (ret < 0) return ret; } return 0; } int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback) { struct uvc_entity *entity; int ret = 0; /* Find the control. */ list_for_each_entry(entity, &chain->entities, chain) { ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback); if (ret < 0) goto done; } done: mutex_unlock(&chain->ctrl_mutex); return ret; } int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl) { struct uvc_control *ctrl; struct uvc_control_mapping *mapping; struct uvc_menu_info *menu; unsigned int i; int ret; ctrl = uvc_find_control(chain, xctrl->id, &mapping); if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0) return -EINVAL; if (!ctrl->loaded) { ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), ctrl->info.size); if (ret < 0) return ret; ctrl->loaded = 1; } xctrl->value = mapping->get(mapping, UVC_GET_CUR, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT)); if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) { menu = mapping->menu_info; for (i = 0; i < mapping->menu_count; ++i, ++menu) { if (menu->value == xctrl->value) { xctrl->value = i; break; } } } return 0; } int uvc_ctrl_set(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl) { struct uvc_control *ctrl; struct uvc_control_mapping *mapping; s32 value; u32 step; s32 min; s32 max; int ret; ctrl = uvc_find_control(chain, xctrl->id, &mapping); if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) == 0) return 
-EINVAL; /* Clamp out of range values. */ switch (mapping->v4l2_type) { case V4L2_CTRL_TYPE_INTEGER: if (!ctrl->cached) { ret = uvc_ctrl_populate_cache(chain, ctrl); if (ret < 0) return ret; } min = mapping->get(mapping, UVC_GET_MIN, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN)); max = mapping->get(mapping, UVC_GET_MAX, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX)); step = mapping->get(mapping, UVC_GET_RES, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES)); if (step == 0) step = 1; xctrl->value = min + (xctrl->value - min + step/2) / step * step; xctrl->value = clamp(xctrl->value, min, max); value = xctrl->value; break; case V4L2_CTRL_TYPE_BOOLEAN: xctrl->value = clamp(xctrl->value, 0, 1); value = xctrl->value; break; case V4L2_CTRL_TYPE_MENU: if (xctrl->value < 0 || xctrl->value >= mapping->menu_count) return -ERANGE; value = mapping->menu_info[xctrl->value].value; /* Valid menu indices are reported by the GET_RES request for * UVC controls that support it. */ if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK && (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) { if (!ctrl->cached) { ret = uvc_ctrl_populate_cache(chain, ctrl); if (ret < 0) return ret; } step = mapping->get(mapping, UVC_GET_RES, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES)); if (!(step & value)) return -ERANGE; } break; default: value = xctrl->value; break; } /* If the mapping doesn't span the whole UVC control, the current value * needs to be loaded from the device to perform the read-modify-write * operation. */ if (!ctrl->loaded && (ctrl->info.size * 8) != mapping->size) { if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0) { memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), 0, ctrl->info.size); } else { ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), ctrl->info.size); if (ret < 0) return ret; } ctrl->loaded = 1; } /* Backup the current value in case we need to rollback later. 
*/ if (!ctrl->dirty) { memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP), uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), ctrl->info.size); } mapping->set(mapping, value, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT)); ctrl->dirty = 1; ctrl->modified = 1; return 0; } /* -------------------------------------------------------------------------- * Dynamic controls */ static void uvc_ctrl_fixup_xu_info(struct uvc_device *dev, const struct uvc_control *ctrl, struct uvc_control_info *info) { struct uvc_ctrl_fixup { struct usb_device_id id; u8 entity; u8 selector; u8 flags; }; static const struct uvc_ctrl_fixup fixups[] = { { { USB_DEVICE(0x046d, 0x08c2) }, 9, 1, UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_AUTO_UPDATE }, { { USB_DEVICE(0x046d, 0x08cc) }, 9, 1, UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_AUTO_UPDATE }, { { USB_DEVICE(0x046d, 0x0994) }, 9, 1, UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_AUTO_UPDATE }, }; unsigned int i; for (i = 0; i < ARRAY_SIZE(fixups); ++i) { if (!usb_match_one_id(dev->intf, &fixups[i].id)) continue; if (fixups[i].entity == ctrl->entity->id && fixups[i].selector == info->selector) { info->flags = fixups[i].flags; return; } } } /* * Query control information (size and flags) for XU controls. 
*/ static int uvc_ctrl_fill_xu_info(struct uvc_device *dev, const struct uvc_control *ctrl, struct uvc_control_info *info) { u8 *data; int ret; data = kmalloc(2, GFP_KERNEL); if (data == NULL) return -ENOMEM; memcpy(info->entity, ctrl->entity->extension.guidExtensionCode, sizeof(info->entity)); info->index = ctrl->index; info->selector = ctrl->index + 1; /* Query and verify the control length (GET_LEN) */ ret = uvc_query_ctrl(dev, UVC_GET_LEN, ctrl->entity->id, dev->intfnum, info->selector, data, 2); if (ret < 0) { uvc_trace(UVC_TRACE_CONTROL, "GET_LEN failed on control %pUl/%u (%d).\n", info->entity, info->selector, ret); goto done; } info->size = le16_to_cpup((__le16 *)data); /* Query the control information (GET_INFO) */ ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum, info->selector, data, 1); if (ret < 0) { uvc_trace(UVC_TRACE_CONTROL, "GET_INFO failed on control %pUl/%u (%d).\n", info->entity, info->selector, ret); goto done; } info->flags = UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF | (data[0] & UVC_CONTROL_CAP_GET ? UVC_CTRL_FLAG_GET_CUR : 0) | (data[0] & UVC_CONTROL_CAP_SET ? UVC_CTRL_FLAG_SET_CUR : 0) | (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ? UVC_CTRL_FLAG_AUTO_UPDATE : 0); uvc_ctrl_fixup_xu_info(dev, ctrl, info); uvc_trace(UVC_TRACE_CONTROL, "XU control %pUl/%u queried: len %u, " "flags { get %u set %u auto %u }.\n", info->entity, info->selector, info->size, (info->flags & UVC_CTRL_FLAG_GET_CUR) ? 1 : 0, (info->flags & UVC_CTRL_FLAG_SET_CUR) ? 1 : 0, (info->flags & UVC_CTRL_FLAG_AUTO_UPDATE) ? 
1 : 0); done: kfree(data); return ret; } static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl, const struct uvc_control_info *info); static int uvc_ctrl_init_xu_ctrl(struct uvc_device *dev, struct uvc_control *ctrl) { struct uvc_control_info info; int ret; if (ctrl->initialized) return 0; ret = uvc_ctrl_fill_xu_info(dev, ctrl, &info); if (ret < 0) return ret; ret = uvc_ctrl_add_info(dev, ctrl, &info); if (ret < 0) uvc_trace(UVC_TRACE_CONTROL, "Failed to initialize control " "%pUl/%u on device %s entity %u\n", info.entity, info.selector, dev->udev->devpath, ctrl->entity->id); return ret; } int uvc_xu_ctrl_query(struct uvc_video_chain *chain, struct uvc_xu_control_query *xqry) { struct uvc_entity *entity; struct uvc_control *ctrl; unsigned int i, found = 0; __u32 reqflags; __u16 size; __u8 *data = NULL; int ret; /* Find the extension unit. */ list_for_each_entry(entity, &chain->entities, chain) { if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT && entity->id == xqry->unit) break; } if (entity->id != xqry->unit) { uvc_trace(UVC_TRACE_CONTROL, "Extension unit %u not found.\n", xqry->unit); return -ENOENT; } /* Find the control and perform delayed initialization if needed. 
*/ for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (ctrl->index == xqry->selector - 1) { found = 1; break; } } if (!found) { uvc_trace(UVC_TRACE_CONTROL, "Control %pUl/%u not found.\n", entity->extension.guidExtensionCode, xqry->selector); return -ENOENT; } if (mutex_lock_interruptible(&chain->ctrl_mutex)) return -ERESTARTSYS; ret = uvc_ctrl_init_xu_ctrl(chain->dev, ctrl); if (ret < 0) { ret = -ENOENT; goto done; } /* Validate the required buffer size and flags for the request */ reqflags = 0; size = ctrl->info.size; switch (xqry->query) { case UVC_GET_CUR: reqflags = UVC_CTRL_FLAG_GET_CUR; break; case UVC_GET_MIN: reqflags = UVC_CTRL_FLAG_GET_MIN; break; case UVC_GET_MAX: reqflags = UVC_CTRL_FLAG_GET_MAX; break; case UVC_GET_DEF: reqflags = UVC_CTRL_FLAG_GET_DEF; break; case UVC_GET_RES: reqflags = UVC_CTRL_FLAG_GET_RES; break; case UVC_SET_CUR: reqflags = UVC_CTRL_FLAG_SET_CUR; break; case UVC_GET_LEN: size = 2; break; case UVC_GET_INFO: size = 1; break; default: ret = -EINVAL; goto done; } if (size != xqry->size) { ret = -ENOBUFS; goto done; } if (reqflags && !(ctrl->info.flags & reqflags)) { ret = -EBADRQC; goto done; } data = kmalloc(size, GFP_KERNEL); if (data == NULL) { ret = -ENOMEM; goto done; } if (xqry->query == UVC_SET_CUR && copy_from_user(data, xqry->data, size)) { ret = -EFAULT; goto done; } ret = uvc_query_ctrl(chain->dev, xqry->query, xqry->unit, chain->dev->intfnum, xqry->selector, data, size); if (ret < 0) goto done; if (xqry->query != UVC_SET_CUR && copy_to_user(xqry->data, data, size)) ret = -EFAULT; done: kfree(data); mutex_unlock(&chain->ctrl_mutex); return ret; } /* -------------------------------------------------------------------------- * Suspend/resume */ /* * Restore control values after resume, skipping controls that haven't been * changed. * * TODO * - Don't restore modified controls that are back to their default value. * - Handle restore order (Auto-Exposure Mode should be restored before * Exposure Time). 
*/ int uvc_ctrl_resume_device(struct uvc_device *dev) { struct uvc_control *ctrl; struct uvc_entity *entity; unsigned int i; int ret; /* Walk the entities list and restore controls when possible. */ list_for_each_entry(entity, &dev->entities, list) { for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (!ctrl->initialized || !ctrl->modified || (ctrl->info.flags & UVC_CTRL_FLAG_RESTORE) == 0) continue; printk(KERN_INFO "restoring control %pUl/%u/%u\n", ctrl->info.entity, ctrl->info.index, ctrl->info.selector); ctrl->dirty = 1; } ret = uvc_ctrl_commit_entity(dev, entity, 0); if (ret < 0) return ret; } return 0; } /* -------------------------------------------------------------------------- * Control and mapping handling */ /* * Add control information to a given control. */ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl, const struct uvc_control_info *info) { int ret = 0; memcpy(&ctrl->info, info, sizeof(*info)); INIT_LIST_HEAD(&ctrl->info.mappings); /* Allocate an array to save control values (cur, def, max, etc.) */ ctrl->uvc_data = kzalloc(ctrl->info.size * UVC_CTRL_DATA_LAST + 1, GFP_KERNEL); if (ctrl->uvc_data == NULL) { ret = -ENOMEM; goto done; } ctrl->initialized = 1; uvc_trace(UVC_TRACE_CONTROL, "Added control %pUl/%u to device %s " "entity %u\n", ctrl->info.entity, ctrl->info.selector, dev->udev->devpath, ctrl->entity->id); done: if (ret < 0) kfree(ctrl->uvc_data); return ret; } /* * Add a control mapping to a given control. */ static int __uvc_ctrl_add_mapping(struct uvc_device *dev, struct uvc_control *ctrl, const struct uvc_control_mapping *mapping) { struct uvc_control_mapping *map; unsigned int size; /* Most mappings come from static kernel data and need to be duplicated. * Mappings that come from userspace will be unnecessarily duplicated, * this could be optimized. 
*/ map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL); if (map == NULL) return -ENOMEM; size = sizeof(*mapping->menu_info) * mapping->menu_count; map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL); if (map->menu_info == NULL) { kfree(map); return -ENOMEM; } if (map->get == NULL) map->get = uvc_get_le_value; if (map->set == NULL) map->set = uvc_set_le_value; map->ctrl = &ctrl->info; list_add_tail(&map->list, &ctrl->info.mappings); uvc_trace(UVC_TRACE_CONTROL, "Adding mapping '%s' to control %pUl/%u.\n", map->name, ctrl->info.entity, ctrl->info.selector); return 0; } int uvc_ctrl_add_mapping(struct uvc_video_chain *chain, const struct uvc_control_mapping *mapping) { struct uvc_device *dev = chain->dev; struct uvc_control_mapping *map; struct uvc_entity *entity; struct uvc_control *ctrl; int found = 0; int ret; if (mapping->id & ~V4L2_CTRL_ID_MASK) { uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', control " "id 0x%08x is invalid.\n", mapping->name, mapping->id); return -EINVAL; } /* Search for the matching (GUID/CS) control on the current chain */ list_for_each_entry(entity, &chain->entities, chain) { unsigned int i; if (UVC_ENTITY_TYPE(entity) != UVC_VC_EXTENSION_UNIT || !uvc_entity_match_guid(entity, mapping->entity)) continue; for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (ctrl->index == mapping->selector - 1) { found = 1; break; } } if (found) break; } if (!found) return -ENOENT; if (mutex_lock_interruptible(&chain->ctrl_mutex)) return -ERESTARTSYS; /* Perform delayed initialization of XU controls */ ret = uvc_ctrl_init_xu_ctrl(dev, ctrl); if (ret < 0) { ret = -ENOENT; goto done; } list_for_each_entry(map, &ctrl->info.mappings, list) { if (mapping->id == map->id) { uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', " "control id 0x%08x already exists.\n", mapping->name, mapping->id); ret = -EEXIST; goto done; } } /* Prevent excess memory consumption */ if (atomic_inc_return(&dev->nmappings) > 
UVC_MAX_CONTROL_MAPPINGS) { atomic_dec(&dev->nmappings); uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', maximum " "mappings count (%u) exceeded.\n", mapping->name, UVC_MAX_CONTROL_MAPPINGS); ret = -ENOMEM; goto done; } ret = __uvc_ctrl_add_mapping(dev, ctrl, mapping); if (ret < 0) atomic_dec(&dev->nmappings); done: mutex_unlock(&chain->ctrl_mutex); return ret; } /* * Prune an entity of its bogus controls using a blacklist. Bogus controls * are currently the ones that crash the camera or unconditionally return an * error when queried. */ static void uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity) { struct uvc_ctrl_blacklist { struct usb_device_id id; u8 index; }; static const struct uvc_ctrl_blacklist processing_blacklist[] = { { { USB_DEVICE(0x13d3, 0x509b) }, 9 }, /* Gain */ { { USB_DEVICE(0x1c4f, 0x3000) }, 6 }, /* WB Temperature */ { { USB_DEVICE(0x5986, 0x0241) }, 2 }, /* Hue */ }; static const struct uvc_ctrl_blacklist camera_blacklist[] = { { { USB_DEVICE(0x06f8, 0x3005) }, 9 }, /* Zoom, Absolute */ }; const struct uvc_ctrl_blacklist *blacklist; unsigned int size; unsigned int count; unsigned int i; u8 *controls; switch (UVC_ENTITY_TYPE(entity)) { case UVC_VC_PROCESSING_UNIT: blacklist = processing_blacklist; count = ARRAY_SIZE(processing_blacklist); controls = entity->processing.bmControls; size = entity->processing.bControlSize; break; case UVC_ITT_CAMERA: blacklist = camera_blacklist; count = ARRAY_SIZE(camera_blacklist); controls = entity->camera.bmControls; size = entity->camera.bControlSize; break; default: return; } for (i = 0; i < count; ++i) { if (!usb_match_one_id(dev->intf, &blacklist[i].id)) continue; if (blacklist[i].index >= 8 * size || !uvc_test_bit(controls, blacklist[i].index)) continue; uvc_trace(UVC_TRACE_CONTROL, "%u/%u control is black listed, " "removing it.\n", entity->id, blacklist[i].index); uvc_clear_bit(controls, blacklist[i].index); } } /* * Add control information and hardcoded stock control 
mappings to the given * device. */ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl) { const struct uvc_control_info *info = uvc_ctrls; const struct uvc_control_info *iend = info + ARRAY_SIZE(uvc_ctrls); const struct uvc_control_mapping *mapping = uvc_ctrl_mappings; const struct uvc_control_mapping *mend = mapping + ARRAY_SIZE(uvc_ctrl_mappings); /* XU controls initialization requires querying the device for control * information. As some buggy UVC devices will crash when queried * repeatedly in a tight loop, delay XU controls initialization until * first use. */ if (UVC_ENTITY_TYPE(ctrl->entity) == UVC_VC_EXTENSION_UNIT) return; for (; info < iend; ++info) { if (uvc_entity_match_guid(ctrl->entity, info->entity) && ctrl->index == info->index) { uvc_ctrl_add_info(dev, ctrl, info); break; } } if (!ctrl->initialized) return; for (; mapping < mend; ++mapping) { if (uvc_entity_match_guid(ctrl->entity, mapping->entity) && ctrl->info.selector == mapping->selector) __uvc_ctrl_add_mapping(dev, ctrl, mapping); } } /* * Initialize device controls. 
*/ int uvc_ctrl_init_device(struct uvc_device *dev) { struct uvc_entity *entity; unsigned int i; /* Walk the entities list and instantiate controls */ list_for_each_entry(entity, &dev->entities, list) { struct uvc_control *ctrl; unsigned int bControlSize = 0, ncontrols = 0; __u8 *bmControls = NULL; if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT) { bmControls = entity->extension.bmControls; bControlSize = entity->extension.bControlSize; } else if (UVC_ENTITY_TYPE(entity) == UVC_VC_PROCESSING_UNIT) { bmControls = entity->processing.bmControls; bControlSize = entity->processing.bControlSize; } else if (UVC_ENTITY_TYPE(entity) == UVC_ITT_CAMERA) { bmControls = entity->camera.bmControls; bControlSize = entity->camera.bControlSize; } /* Remove bogus/blacklisted controls */ uvc_ctrl_prune_entity(dev, entity); /* Count supported controls and allocate the controls array */ for (i = 0; i < bControlSize; ++i) ncontrols += hweight8(bmControls[i]); if (ncontrols == 0) continue; entity->controls = kcalloc(ncontrols, sizeof(*ctrl), GFP_KERNEL); if (entity->controls == NULL) return -ENOMEM; entity->ncontrols = ncontrols; /* Initialize all supported controls */ ctrl = entity->controls; for (i = 0; i < bControlSize * 8; ++i) { if (uvc_test_bit(bmControls, i) == 0) continue; ctrl->entity = entity; ctrl->index = i; uvc_ctrl_init_ctrl(dev, ctrl); ctrl++; } } return 0; } /* * Cleanup device controls. */ static void uvc_ctrl_cleanup_mappings(struct uvc_device *dev, struct uvc_control *ctrl) { struct uvc_control_mapping *mapping, *nm; list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) { list_del(&mapping->list); kfree(mapping->menu_info); kfree(mapping); } } void uvc_ctrl_cleanup_device(struct uvc_device *dev) { struct uvc_entity *entity; unsigned int i; /* Free controls and control mappings for all entities. 
*/ list_for_each_entry(entity, &dev->entities, list) { for (i = 0; i < entity->ncontrols; ++i) { struct uvc_control *ctrl = &entity->controls[i]; if (!ctrl->initialized) continue; uvc_ctrl_cleanup_mappings(dev, ctrl); kfree(ctrl->uvc_data); } kfree(entity->controls); } }
gpl-2.0
rahulkodinya/drm
arch/sparc/kernel/pci_schizo.c
4849
48989
/* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support. * * Copyright (C) 2001, 2002, 2003, 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/interrupt.h> #include <linux/of_device.h> #include <asm/iommu.h> #include <asm/irq.h> #include <asm/pstate.h> #include <asm/prom.h> #include <asm/upa.h> #include "pci_impl.h" #include "iommu_common.h" #define DRIVER_NAME "schizo" #define PFX DRIVER_NAME ": " /* This is a convention that at least Excalibur and Merlin * follow. I suppose the SCHIZO used in Starcat and friends * will do similar. * * The only way I could see this changing is if the newlink * block requires more space in Schizo's address space than * they predicted, thus requiring an address space reorg when * the newer Schizo is taped out. */ /* Streaming buffer control register. */ #define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */ #define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */ #define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */ #define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ #define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */ /* IOMMU control register. 
*/ #define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */ #define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */ #define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */ #define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */ #define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */ #define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ #define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */ #define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */ #define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */ #define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */ #define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ /* Schizo config space address format is nearly identical to * that of PSYCHO: * * 32 24 23 16 15 11 10 8 7 2 1 0 * --------------------------------------------------------- * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 | * --------------------------------------------------------- */ #define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space) #define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \ (((unsigned long)(BUS) << 16) | \ ((unsigned long)(DEVFN) << 8) | \ ((unsigned long)(REG))) static void 
*schizo_pci_config_mkaddr(struct pci_pbm_info *pbm, unsigned char bus, unsigned int devfn, int where) { if (!pbm) return NULL; bus -= pbm->pci_first_busno; return (void *) (SCHIZO_CONFIG_BASE(pbm) | SCHIZO_CONFIG_ENCODE(bus, devfn, where)); } /* SCHIZO error handling support. */ enum schizo_error_type { UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR }; static DEFINE_SPINLOCK(stc_buf_lock); static unsigned long stc_error_buf[128]; static unsigned long stc_tag_buf[16]; static unsigned long stc_line_buf[16]; #define SCHIZO_UE_INO 0x30 /* Uncorrectable ECC error */ #define SCHIZO_CE_INO 0x31 /* Correctable ECC error */ #define SCHIZO_PCIERR_A_INO 0x32 /* PBM A PCI bus error */ #define SCHIZO_PCIERR_B_INO 0x33 /* PBM B PCI bus error */ #define SCHIZO_SERR_INO 0x34 /* Safari interface error */ #define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */ #define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */ #define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */ #define SCHIZO_STCERR_WRITE 0x2UL #define SCHIZO_STCERR_READ 0x1UL #define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL #define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL #define SCHIZO_STCTAG_VALID 0x8000000000000000UL #define SCHIZO_STCTAG_READ 0x4000000000000000UL #define SCHIZO_STCLINE_LINDX 0x0000000007800000UL #define SCHIZO_STCLINE_SPTR 0x000000000007e000UL #define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL #define SCHIZO_STCLINE_EPTR 0x000000000000003fUL #define SCHIZO_STCLINE_VALID 0x0000000000600000UL #define SCHIZO_STCLINE_FOFN 0x0000000000180000UL static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm, enum schizo_error_type type) { struct strbuf *strbuf = &pbm->stc; unsigned long regbase = pbm->pbm_regs; unsigned long err_base, tag_base, line_base; u64 control; int i; err_base = regbase + SCHIZO_STC_ERR; tag_base = regbase + SCHIZO_STC_TAG; line_base = regbase + SCHIZO_STC_LINE; spin_lock(&stc_buf_lock); /* This is __REALLY__ dangerous. 
When we put the * streaming buffer into diagnostic mode to probe * it's tags and error status, we _must_ clear all * of the line tag valid bits before re-enabling * the streaming buffer. If any dirty data lives * in the STC when we do this, we will end up * invalidating it before it has a chance to reach * main memory. */ control = upa_readq(strbuf->strbuf_control); upa_writeq((control | SCHIZO_STRBUF_CTRL_DENAB), strbuf->strbuf_control); for (i = 0; i < 128; i++) { unsigned long val; val = upa_readq(err_base + (i * 8UL)); upa_writeq(0UL, err_base + (i * 8UL)); stc_error_buf[i] = val; } for (i = 0; i < 16; i++) { stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL)); stc_line_buf[i] = upa_readq(line_base + (i * 8UL)); upa_writeq(0UL, tag_base + (i * 8UL)); upa_writeq(0UL, line_base + (i * 8UL)); } /* OK, state is logged, exit diagnostic mode. */ upa_writeq(control, strbuf->strbuf_control); for (i = 0; i < 16; i++) { int j, saw_error, first, last; saw_error = 0; first = i * 8; last = first + 8; for (j = first; j < last; j++) { unsigned long errval = stc_error_buf[j]; if (errval != 0) { saw_error++; printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n", pbm->name, j, (errval & SCHIZO_STCERR_WRITE) ? 1 : 0, (errval & SCHIZO_STCERR_READ) ? 1 : 0); } } if (saw_error != 0) { unsigned long tagval = stc_tag_buf[i]; unsigned long lineval = stc_line_buf[i]; printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n", pbm->name, i, ((tagval & SCHIZO_STCTAG_PPN) >> 19UL), (tagval & SCHIZO_STCTAG_VPN), ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0), ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0)); /* XXX Should spit out per-bank error information... -DaveM */ printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)" "V(%d)FOFN(%d)]\n", pbm->name, i, ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL), ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL), ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL), ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL), ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0), ((lineval & SCHIZO_STCLINE_FOFN) ? 
1 : 0)); } } spin_unlock(&stc_buf_lock); } /* IOMMU is per-PBM in Schizo, so interrogate both for anonymous * controller level errors. */ #define SCHIZO_IOMMU_TAG 0xa580UL #define SCHIZO_IOMMU_DATA 0xa600UL #define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL #define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL #define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL #define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL #define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL #define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL #define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL #define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL #define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL #define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm, enum schizo_error_type type) { struct iommu *iommu = pbm->iommu; unsigned long iommu_tag[16]; unsigned long iommu_data[16]; unsigned long flags; u64 control; int i; spin_lock_irqsave(&iommu->lock, flags); control = upa_readq(iommu->iommu_control); if (control & SCHIZO_IOMMU_CTRL_XLTEERR) { unsigned long base; char *type_string; /* Clear the error encountered bit. */ control &= ~SCHIZO_IOMMU_CTRL_XLTEERR; upa_writeq(control, iommu->iommu_control); switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) { case 0: type_string = "Protection Error"; break; case 1: type_string = "Invalid Error"; break; case 2: type_string = "TimeOut Error"; break; case 3: default: type_string = "ECC Error"; break; } printk("%s: IOMMU Error, type[%s]\n", pbm->name, type_string); /* Put the IOMMU into diagnostic mode and probe * it's TLB for entries with error status. * * It is very possible for another DVMA to occur * while we do this probe, and corrupt the system * further. But we are so screwed at this point * that we are likely to crash hard anyways, so * get as much diagnostic information to the * console as we can. 
*/ upa_writeq(control | SCHIZO_IOMMU_CTRL_DENAB, iommu->iommu_control); base = pbm->pbm_regs; for (i = 0; i < 16; i++) { iommu_tag[i] = upa_readq(base + SCHIZO_IOMMU_TAG + (i * 8UL)); iommu_data[i] = upa_readq(base + SCHIZO_IOMMU_DATA + (i * 8UL)); /* Now clear out the entry. */ upa_writeq(0, base + SCHIZO_IOMMU_TAG + (i * 8UL)); upa_writeq(0, base + SCHIZO_IOMMU_DATA + (i * 8UL)); } /* Leave diagnostic mode. */ upa_writeq(control, iommu->iommu_control); for (i = 0; i < 16; i++) { unsigned long tag, data; tag = iommu_tag[i]; if (!(tag & SCHIZO_IOMMU_TAG_ERR)) continue; data = iommu_data[i]; switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) { case 0: type_string = "Protection Error"; break; case 1: type_string = "Invalid Error"; break; case 2: type_string = "TimeOut Error"; break; case 3: default: type_string = "ECC Error"; break; } printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) " "sz(%dK) vpg(%08lx)]\n", pbm->name, i, type_string, (int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL), ((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0), ((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0), ((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8), (tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT); printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n", pbm->name, i, ((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0), ((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0), (data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT); } } if (pbm->stc.strbuf_enabled) __schizo_check_stc_error_pbm(pbm, type); spin_unlock_irqrestore(&iommu->lock, flags); } static void schizo_check_iommu_error(struct pci_pbm_info *pbm, enum schizo_error_type type) { schizo_check_iommu_error_pbm(pbm, type); if (pbm->sibling) schizo_check_iommu_error_pbm(pbm->sibling, type); } /* Uncorrectable ECC error status gathering. 
*/ #define SCHIZO_UE_AFSR 0x10030UL #define SCHIZO_UE_AFAR 0x10038UL #define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL /* Safari */ #define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL /* Safari/Tomatillo */ #define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL /* Safari */ #define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL /* Safari */ #define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL /* Safari/Tomatillo */ #define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL /* Safari */ #define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL /* Safari */ #define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL /* Safari/Tomatillo */ #define SCHIZO_UEAFSR_AID 0x000000001f000000UL /* Safari/Tomatillo */ #define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL /* Safari */ #define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL /* Safari */ #define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL /* Safari */ #define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL /* Safari */ #define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL /* Safari */ static irqreturn_t schizo_ue_intr(int irq, void *dev_id) { struct pci_pbm_info *pbm = dev_id; unsigned long afsr_reg = pbm->controller_regs + SCHIZO_UE_AFSR; unsigned long afar_reg = pbm->controller_regs + SCHIZO_UE_AFAR; unsigned long afsr, afar, error_bits; int reported, limit; /* Latch uncorrectable error status. */ afar = upa_readq(afar_reg); /* If either of the error pending bits are set in the * AFSR, the error status is being actively updated by * the hardware and we must re-read to get a clean value. */ limit = 1000; do { afsr = upa_readq(afsr_reg); } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit); /* Clear the primary/secondary error status bits. */ error_bits = afsr & (SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR | SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA); if (!error_bits) return IRQ_NONE; upa_writeq(error_bits, afsr_reg); /* Log the error. */ printk("%s: Uncorrectable Error, primary error type[%s]\n", pbm->name, (((error_bits & SCHIZO_UEAFSR_PPIO) ? 
"PIO" : ((error_bits & SCHIZO_UEAFSR_PDRD) ? "DMA Read" : ((error_bits & SCHIZO_UEAFSR_PDWR) ? "DMA Write" : "???"))))); printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n", pbm->name, (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL, (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL, (afsr & SCHIZO_UEAFSR_AID) >> 24UL); printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n", pbm->name, (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0, (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0, (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL, (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL, (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL); printk("%s: UE AFAR [%016lx]\n", pbm->name, afar); printk("%s: UE Secondary errors [", pbm->name); reported = 0; if (afsr & SCHIZO_UEAFSR_SPIO) { reported++; printk("(PIO)"); } if (afsr & SCHIZO_UEAFSR_SDMA) { reported++; printk("(DMA)"); } if (!reported) printk("(none)"); printk("]\n"); /* Interrogate IOMMU for error status. */ schizo_check_iommu_error(pbm, UE_ERR); return IRQ_HANDLED; } #define SCHIZO_CE_AFSR 0x10040UL #define SCHIZO_CE_AFAR 0x10048UL #define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL #define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL #define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL #define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL #define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL #define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL #define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL #define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL #define SCHIZO_CEAFSR_AID 0x000000001f000000UL #define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL #define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL #define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL #define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL #define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL static irqreturn_t schizo_ce_intr(int irq, void *dev_id) { struct pci_pbm_info *pbm = dev_id; unsigned long afsr_reg = pbm->controller_regs + SCHIZO_CE_AFSR; unsigned long afar_reg = pbm->controller_regs + SCHIZO_CE_AFAR; unsigned long afsr, afar, 
error_bits; int reported, limit; /* Latch error status. */ afar = upa_readq(afar_reg); /* If either of the error pending bits are set in the * AFSR, the error status is being actively updated by * the hardware and we must re-read to get a clean value. */ limit = 1000; do { afsr = upa_readq(afsr_reg); } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit); /* Clear primary/secondary error status bits. */ error_bits = afsr & (SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR | SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA); if (!error_bits) return IRQ_NONE; upa_writeq(error_bits, afsr_reg); /* Log the error. */ printk("%s: Correctable Error, primary error type[%s]\n", pbm->name, (((error_bits & SCHIZO_CEAFSR_PPIO) ? "PIO" : ((error_bits & SCHIZO_CEAFSR_PDRD) ? "DMA Read" : ((error_bits & SCHIZO_CEAFSR_PDWR) ? "DMA Write" : "???"))))); /* XXX Use syndrome and afar to print out module string just like * XXX UDB CE trap handler does... -DaveM */ printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n", pbm->name, (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL, (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL, (afsr & SCHIZO_UEAFSR_AID) >> 24UL); printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n", pbm->name, (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0, (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 
1 : 0, (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL, (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL, (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL); printk("%s: CE AFAR [%016lx]\n", pbm->name, afar); printk("%s: CE Secondary errors [", pbm->name); reported = 0; if (afsr & SCHIZO_CEAFSR_SPIO) { reported++; printk("(PIO)"); } if (afsr & SCHIZO_CEAFSR_SDMA) { reported++; printk("(DMA)"); } if (!reported) printk("(none)"); printk("]\n"); return IRQ_HANDLED; } #define SCHIZO_PCI_AFSR 0x2010UL #define SCHIZO_PCI_AFAR 0x2018UL #define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL /* Schizo */ #define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_STA 0x0100000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL /* Schizo */ #define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_IO 0x0000000010000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCI_CTRL (0x2000UL) #define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */ #define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */ #define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */ #define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */ #define 
SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */ #define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL) /* Safari */ #define SCHIZO_PCICTRL_SERR (1UL << 34UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_PCISPD (1UL << 33UL) /* Safari */ #define SCHIZO_PCICTRL_MRM_PREF (1UL << 30UL) /* Tomatillo */ #define SCHIZO_PCICTRL_RDO_PREF (1UL << 29UL) /* Tomatillo */ #define SCHIZO_PCICTRL_RDL_PREF (1UL << 28UL) /* Tomatillo */ #define SCHIZO_PCICTRL_PTO (3UL << 24UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_PTO_SHIFT 24UL #define SCHIZO_PCICTRL_TRWSW (7UL << 21UL) /* Tomatillo */ #define SCHIZO_PCICTRL_F_TGT_A (1UL << 20UL) /* Tomatillo */ #define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */ #define SCHIZO_PCICTRL_F_TGT_RT (1UL << 19UL) /* Tomatillo */ #define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL) /* Safari */ #define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */ #define SCHIZO_PCICTRL_EEN (1UL << 17UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_PARK (1UL << 16UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_PCIRST (1UL << 8UL) /* Safari */ #define SCHIZO_PCICTRL_ARB_S (0x3fUL << 0UL) /* Safari */ #define SCHIZO_PCICTRL_ARB_T (0xffUL << 0UL) /* Tomatillo */ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) { unsigned long csr_reg, csr, csr_error_bits; irqreturn_t ret = IRQ_NONE; u16 stat; csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL; csr = upa_readq(csr_reg); csr_error_bits = csr & (SCHIZO_PCICTRL_BUS_UNUS | SCHIZO_PCICTRL_TTO_ERR | SCHIZO_PCICTRL_RTRY_ERR | SCHIZO_PCICTRL_DTO_ERR | SCHIZO_PCICTRL_SBH_ERR | SCHIZO_PCICTRL_SERR); if (csr_error_bits) { /* Clear the errors. */ upa_writeq(csr, csr_reg); /* Log 'em. 
*/ if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS) printk("%s: Bus unusable error asserted.\n", pbm->name); if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR) printk("%s: PCI TRDY# timeout error asserted.\n", pbm->name); if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR) printk("%s: PCI excessive retry error asserted.\n", pbm->name); if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR) printk("%s: PCI discard timeout error asserted.\n", pbm->name); if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR) printk("%s: PCI streaming byte hole error asserted.\n", pbm->name); if (csr_error_bits & SCHIZO_PCICTRL_SERR) printk("%s: PCI SERR signal asserted.\n", pbm->name); ret = IRQ_HANDLED; } pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat); if (stat & (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_SIG_SYSTEM_ERROR)) { printk("%s: PCI bus error, PCI_STATUS[%04x]\n", pbm->name, stat); pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff); ret = IRQ_HANDLED; } return ret; } static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id) { struct pci_pbm_info *pbm = dev_id; unsigned long afsr_reg, afar_reg, base; unsigned long afsr, afar, error_bits; int reported; base = pbm->pbm_regs; afsr_reg = base + SCHIZO_PCI_AFSR; afar_reg = base + SCHIZO_PCI_AFAR; /* Latch error status. */ afar = upa_readq(afar_reg); afsr = upa_readq(afsr_reg); /* Clear primary/secondary error status bits. */ error_bits = afsr & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS | SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS); if (!error_bits) return schizo_pcierr_intr_other(pbm); upa_writeq(error_bits, afsr_reg); /* Log the error. */ printk("%s: PCI Error, primary error type[%s]\n", pbm->name, (((error_bits & SCHIZO_PCIAFSR_PMA) ? 
"Master Abort" : ((error_bits & SCHIZO_PCIAFSR_PTA) ? "Target Abort" : ((error_bits & SCHIZO_PCIAFSR_PRTRY) ? "Excessive Retries" : ((error_bits & SCHIZO_PCIAFSR_PPERR) ? "Parity Error" : ((error_bits & SCHIZO_PCIAFSR_PTTO) ? "Timeout" : ((error_bits & SCHIZO_PCIAFSR_PUNUS) ? "Bus Unusable" : "???")))))))); printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n", pbm->name, (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL, (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0, ((afsr & SCHIZO_PCIAFSR_CFG) ? "Config" : ((afsr & SCHIZO_PCIAFSR_MEM) ? "Memory" : ((afsr & SCHIZO_PCIAFSR_IO) ? "I/O" : "???")))); printk("%s: PCI AFAR [%016lx]\n", pbm->name, afar); printk("%s: PCI Secondary errors [", pbm->name); reported = 0; if (afsr & SCHIZO_PCIAFSR_SMA) { reported++; printk("(Master Abort)"); } if (afsr & SCHIZO_PCIAFSR_STA) { reported++; printk("(Target Abort)"); } if (afsr & SCHIZO_PCIAFSR_SRTRY) { reported++; printk("(Excessive Retries)"); } if (afsr & SCHIZO_PCIAFSR_SPERR) { reported++; printk("(Parity Error)"); } if (afsr & SCHIZO_PCIAFSR_STTO) { reported++; printk("(Timeout)"); } if (afsr & SCHIZO_PCIAFSR_SUNUS) { reported++; printk("(Bus Unusable)"); } if (!reported) printk("(none)"); printk("]\n"); /* For the error types shown, scan PBM's PCI bus for devices * which have logged that error type. */ /* If we see a Target Abort, this could be the result of an * IOMMU translation error of some sort. It is extremely * useful to log this information as usually it indicates * a bug in the IOMMU support code or a PCI device driver. */ if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) { schizo_check_iommu_error(pbm, PCI_ERR); pci_scan_for_target_abort(pbm, pbm->pci_bus); } if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA)) pci_scan_for_master_abort(pbm, pbm->pci_bus); /* For excessive retries, PSYCHO/PBM will abort the device * and there is no way to specifically check for excessive * retries in the config space status registers. 
So what * we hope is that we'll catch it via the master/target * abort events. */ if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR)) pci_scan_for_parity_error(pbm, pbm->pci_bus); return IRQ_HANDLED; } #define SCHIZO_SAFARI_ERRLOG 0x10018UL #define SAFARI_ERRLOG_ERROUT 0x8000000000000000UL #define BUS_ERROR_BADCMD 0x4000000000000000UL /* Schizo/Tomatillo */ #define BUS_ERROR_SSMDIS 0x2000000000000000UL /* Safari */ #define BUS_ERROR_BADMA 0x1000000000000000UL /* Safari */ #define BUS_ERROR_BADMB 0x0800000000000000UL /* Safari */ #define BUS_ERROR_BADMC 0x0400000000000000UL /* Safari */ #define BUS_ERROR_SNOOP_GR 0x0000000000200000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_PCI 0x0000000000100000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_RD 0x0000000000080000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_RDS 0x0000000000020000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_RDSA 0x0000000000010000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_OWN 0x0000000000008000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_RDO 0x0000000000004000UL /* Tomatillo */ #define BUS_ERROR_CPU1PS 0x0000000000002000UL /* Safari */ #define BUS_ERROR_WDATA_PERR 0x0000000000002000UL /* Tomatillo */ #define BUS_ERROR_CPU1PB 0x0000000000001000UL /* Safari */ #define BUS_ERROR_CTRL_PERR 0x0000000000001000UL /* Tomatillo */ #define BUS_ERROR_CPU0PS 0x0000000000000800UL /* Safari */ #define BUS_ERROR_SNOOP_ERR 0x0000000000000800UL /* Tomatillo */ #define BUS_ERROR_CPU0PB 0x0000000000000400UL /* Safari */ #define BUS_ERROR_JBUS_ILL_B 0x0000000000000400UL /* Tomatillo */ #define BUS_ERROR_CIQTO 0x0000000000000200UL /* Safari */ #define BUS_ERROR_LPQTO 0x0000000000000100UL /* Safari */ #define BUS_ERROR_JBUS_ILL_C 0x0000000000000100UL /* Tomatillo */ #define BUS_ERROR_SFPQTO 0x0000000000000080UL /* Safari */ #define BUS_ERROR_UFPQTO 0x0000000000000040UL /* Safari */ #define BUS_ERROR_RD_PERR 0x0000000000000040UL /* Tomatillo */ #define BUS_ERROR_APERR 0x0000000000000020UL /* Safari/Tomatillo */ #define 
BUS_ERROR_UNMAP		0x0000000000000010UL /* Safari/Tomatillo */
#define BUS_ERROR_BUSERR	0x0000000000000004UL /* Safari/Tomatillo */
#define BUS_ERROR_TIMEOUT	0x0000000000000002UL /* Safari/Tomatillo */
#define BUS_ERROR_ILL		0x0000000000000001UL /* Safari */

/* We only expect UNMAP errors here.  The rest of the Safari errors
 * are marked fatal and thus cause a system reset.
 */
static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id)
{
	struct pci_pbm_info *pbm = dev_id;
	u64 errlog;

	/* Read the error log, then acknowledge the interrupt by clearing
	 * the ERROUT bit while preserving the remaining log contents.
	 */
	errlog = upa_readq(pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);
	upa_writeq(errlog & ~(SAFARI_ERRLOG_ERROUT),
		   pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);

	/* Anything other than an UNMAP error should have been routed as a
	 * fatal reset; log it and bail.
	 */
	if (!(errlog & BUS_ERROR_UNMAP)) {
		printk("%s: Unexpected Safari/JBUS error interrupt, errlog[%016llx]\n",
		       pbm->name, errlog);
		return IRQ_HANDLED;
	}

	/* UNMAP errors usually indicate a bad DMA translation; dump the
	 * IOMMU diagnostic state for both PBMs.
	 */
	printk("%s: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n",
	       pbm->name);
	schizo_check_iommu_error(pbm, SAFARI_ERR);

	return IRQ_HANDLED;
}

/* Nearly identical to PSYCHO equivalents... */
#define SCHIZO_ECC_CTRL		0x10020UL
#define SCHIZO_ECCCTRL_EE	0x8000000000000000UL /* Enable ECC Checking */
#define SCHIZO_ECCCTRL_UE	0x4000000000000000UL /* Enable UE Interrupts */
#define SCHIZO_ECCCTRL_CE	0x2000000000000000UL /* Enable CE Interrupts */

#define SCHIZO_SAFARI_ERRCTRL	0x10008UL
#define SCHIZO_SAFERRCTRL_EN	0x8000000000000000UL
#define SCHIZO_SAFARI_IRQCTRL	0x10010UL
#define SCHIZO_SAFIRQCTRL_EN	0x8000000000000000UL

/* Return non-zero if the interrupt with the given INO is routed through
 * this PBM (per its ino_bitmap), zero otherwise.
 */
static int pbm_routes_this_ino(struct pci_pbm_info *pbm, u32 ino)
{
	ino &= IMAP_INO;
	if (pbm->ino_bitmap & (1UL << ino))
		return 1;
	return 0;
}

/* How the Tomatillo IRQs are routed around is pure guesswork here.
 *
 * All the Tomatillo devices I see in prtconf dumps seem to have only
 * a single PCI bus unit attached to it.  It would seem they are separate
 * devices because their PortID (ie. JBUS ID) values are all different
 * and thus the registers are mapped to totally different locations.
* * However, two Tomatillo's look "similar" in that the only difference * in their PortID is the lowest bit. * * So if we were to ignore this lower bit, it certainly looks like two * PCI bus units of the same Tomatillo. I still have not really * figured this out... */ static void tomatillo_register_error_handlers(struct pci_pbm_info *pbm) { struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node); u64 tmp, err_mask, err_no_mask; int err; /* Tomatillo IRQ property layout is: * 0: PCIERR * 1: UE ERR * 2: CE ERR * 3: SERR * 4: POWER FAIL? */ if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) { err = request_irq(op->archdata.irqs[1], schizo_ue_intr, 0, "TOMATILLO_UE", pbm); if (err) printk(KERN_WARNING "%s: Could not register UE, " "err=%d\n", pbm->name, err); } if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) { err = request_irq(op->archdata.irqs[2], schizo_ce_intr, 0, "TOMATILLO_CE", pbm); if (err) printk(KERN_WARNING "%s: Could not register CE, " "err=%d\n", pbm->name, err); } err = 0; if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) { err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, "TOMATILLO_PCIERR", pbm); } else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) { err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, "TOMATILLO_PCIERR", pbm); } if (err) printk(KERN_WARNING "%s: Could not register PCIERR, " "err=%d\n", pbm->name, err); if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) { err = request_irq(op->archdata.irqs[3], schizo_safarierr_intr, 0, "TOMATILLO_SERR", pbm); if (err) printk(KERN_WARNING "%s: Could not register SERR, " "err=%d\n", pbm->name, err); } /* Enable UE and CE interrupts for controller. */ upa_writeq((SCHIZO_ECCCTRL_EE | SCHIZO_ECCCTRL_UE | SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL); /* Enable PCI Error interrupts and clear error * bits. 
*/ err_mask = (SCHIZO_PCICTRL_BUS_UNUS | SCHIZO_PCICTRL_TTO_ERR | SCHIZO_PCICTRL_RTRY_ERR | SCHIZO_PCICTRL_SERR | SCHIZO_PCICTRL_EEN); err_no_mask = SCHIZO_PCICTRL_DTO_ERR; tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL); tmp |= err_mask; tmp &= ~err_no_mask; upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL); err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | SCHIZO_PCIAFSR_STTO); upa_writeq(err_mask, pbm->pbm_regs + SCHIZO_PCI_AFSR); err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR | BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD | BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA | BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO | BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR | BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B | BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR | BUS_ERROR_APERR | BUS_ERROR_UNMAP | BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT); upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask), pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL); upa_writeq((SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)), pbm->controller_regs + SCHIZO_SAFARI_IRQCTRL); } static void schizo_register_error_handlers(struct pci_pbm_info *pbm) { struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node); u64 tmp, err_mask, err_no_mask; int err; /* Schizo IRQ property layout is: * 0: PCIERR * 1: UE ERR * 2: CE ERR * 3: SERR * 4: POWER FAIL? 
*/ if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) { err = request_irq(op->archdata.irqs[1], schizo_ue_intr, 0, "SCHIZO_UE", pbm); if (err) printk(KERN_WARNING "%s: Could not register UE, " "err=%d\n", pbm->name, err); } if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) { err = request_irq(op->archdata.irqs[2], schizo_ce_intr, 0, "SCHIZO_CE", pbm); if (err) printk(KERN_WARNING "%s: Could not register CE, " "err=%d\n", pbm->name, err); } err = 0; if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) { err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, "SCHIZO_PCIERR", pbm); } else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) { err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, "SCHIZO_PCIERR", pbm); } if (err) printk(KERN_WARNING "%s: Could not register PCIERR, " "err=%d\n", pbm->name, err); if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) { err = request_irq(op->archdata.irqs[3], schizo_safarierr_intr, 0, "SCHIZO_SERR", pbm); if (err) printk(KERN_WARNING "%s: Could not register SERR, " "err=%d\n", pbm->name, err); } /* Enable UE and CE interrupts for controller. */ upa_writeq((SCHIZO_ECCCTRL_EE | SCHIZO_ECCCTRL_UE | SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL); err_mask = (SCHIZO_PCICTRL_BUS_UNUS | SCHIZO_PCICTRL_ESLCK | SCHIZO_PCICTRL_TTO_ERR | SCHIZO_PCICTRL_RTRY_ERR | SCHIZO_PCICTRL_SBH_ERR | SCHIZO_PCICTRL_SERR | SCHIZO_PCICTRL_EEN); err_no_mask = (SCHIZO_PCICTRL_DTO_ERR | SCHIZO_PCICTRL_SBH_INT); /* Enable PCI Error interrupts and clear error * bits for each PBM. 
*/ tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL); tmp |= err_mask; tmp &= ~err_no_mask; upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL); upa_writeq((SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS | SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS), pbm->pbm_regs + SCHIZO_PCI_AFSR); /* Make all Safari error conditions fatal except unmapped * errors which we make generate interrupts. */ err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS | BUS_ERROR_BADMA | BUS_ERROR_BADMB | BUS_ERROR_BADMC | BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB | BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB | BUS_ERROR_CIQTO | BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO | BUS_ERROR_UFPQTO | BUS_ERROR_APERR | BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT | BUS_ERROR_ILL); #if 1 /* XXX Something wrong with some Excalibur systems * XXX Sun is shipping. The behavior on a 2-cpu * XXX machine is that both CPU1 parity error bits * XXX are set and are immediately set again when * XXX their error status bits are cleared. Just * XXX ignore them for now. -DaveM */ err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB | BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB); #endif upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask), pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL); } static void pbm_config_busmastering(struct pci_pbm_info *pbm) { u8 *addr; /* Set cache-line size to 64 bytes, this is actually * a nop but I do it for completeness. */ addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno, 0, PCI_CACHE_LINE_SIZE); pci_config_write8(addr, 64 / sizeof(u32)); /* Set PBM latency timer to 64 PCI clocks. 
*/ addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno, 0, PCI_LATENCY_TIMER); pci_config_write8(addr, 64); } static void __devinit schizo_scan_bus(struct pci_pbm_info *pbm, struct device *parent) { pbm_config_busmastering(pbm); pbm->is_66mhz_capable = (of_find_property(pbm->op->dev.of_node, "66mhz-capable", NULL) != NULL); pbm->pci_bus = pci_scan_one_pbm(pbm, parent); if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) tomatillo_register_error_handlers(pbm); else schizo_register_error_handlers(pbm); } #define SCHIZO_STRBUF_CONTROL (0x02800UL) #define SCHIZO_STRBUF_FLUSH (0x02808UL) #define SCHIZO_STRBUF_FSYNC (0x02810UL) #define SCHIZO_STRBUF_CTXFLUSH (0x02818UL) #define SCHIZO_STRBUF_CTXMATCH (0x10000UL) static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm) { unsigned long base = pbm->pbm_regs; u64 control; if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { /* TOMATILLO lacks streaming cache. */ return; } /* SCHIZO has context flushing. */ pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL; pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH; pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC; pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH; pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH; pbm->stc.strbuf_flushflag = (volatile unsigned long *) ((((unsigned long)&pbm->stc.__flushflag_buf[0]) + 63UL) & ~63UL); pbm->stc.strbuf_flushflag_pa = (unsigned long) __pa(pbm->stc.strbuf_flushflag); /* Turn off LRU locking and diag mode, enable the * streaming buffer and leave the rerun-disable * setting however OBP set it. 
*/ control = upa_readq(pbm->stc.strbuf_control); control &= ~(SCHIZO_STRBUF_CTRL_LPTR | SCHIZO_STRBUF_CTRL_LENAB | SCHIZO_STRBUF_CTRL_DENAB); control |= SCHIZO_STRBUF_CTRL_ENAB; upa_writeq(control, pbm->stc.strbuf_control); pbm->stc.strbuf_enabled = 1; } #define SCHIZO_IOMMU_CONTROL (0x00200UL) #define SCHIZO_IOMMU_TSBBASE (0x00208UL) #define SCHIZO_IOMMU_FLUSH (0x00210UL) #define SCHIZO_IOMMU_CTXFLUSH (0x00218UL) static int schizo_pbm_iommu_init(struct pci_pbm_info *pbm) { static const u32 vdma_default[] = { 0xc0000000, 0x40000000 }; unsigned long i, tagbase, database; struct iommu *iommu = pbm->iommu; int tsbsize, err; const u32 *vdma; u32 dma_mask; u64 control; vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL); if (!vdma) vdma = vdma_default; dma_mask = vdma[0]; switch (vdma[1]) { case 0x20000000: dma_mask |= 0x1fffffff; tsbsize = 64; break; case 0x40000000: dma_mask |= 0x3fffffff; tsbsize = 128; break; case 0x80000000: dma_mask |= 0x7fffffff; tsbsize = 128; break; default: printk(KERN_ERR PFX "Strange virtual-dma size.\n"); return -EINVAL; } /* Register addresses, SCHIZO has iommu ctx flushing. */ iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL; iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE; iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH; iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL); iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH; /* We use the main control/status register of SCHIZO as the write * completion register. */ iommu->write_complete_reg = pbm->controller_regs + 0x10000UL; /* * Invalidate TLB Entries. 
*/ control = upa_readq(iommu->iommu_control); control |= SCHIZO_IOMMU_CTRL_DENAB; upa_writeq(control, iommu->iommu_control); tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA; for (i = 0; i < 16; i++) { upa_writeq(0, pbm->pbm_regs + tagbase + (i * 8UL)); upa_writeq(0, pbm->pbm_regs + database + (i * 8UL)); } /* Leave diag mode enabled for full-flushing done * in pci_iommu.c */ err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask, pbm->numa_node); if (err) { printk(KERN_ERR PFX "iommu_table_init() fails with %d\n", err); return err; } upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); control = upa_readq(iommu->iommu_control); control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ); switch (tsbsize) { case 64: control |= SCHIZO_IOMMU_TSBSZ_64K; break; case 128: control |= SCHIZO_IOMMU_TSBSZ_128K; break; } control |= SCHIZO_IOMMU_CTRL_ENAB; upa_writeq(control, iommu->iommu_control); return 0; } #define SCHIZO_PCI_IRQ_RETRY (0x1a00UL) #define SCHIZO_IRQ_RETRY_INF 0xffUL #define SCHIZO_PCI_DIAG (0x2020UL) #define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors (Schizo) */ #define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration (Schizo) */ #define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity (Schizo/Tomatillo) */ #define TOMATILLO_PCI_IOC_CSR (0x2248UL) #define TOMATILLO_IOC_PART_WPENAB 0x0000000000080000UL #define 
TOMATILLO_IOC_RDMULT_PENAB 0x0000000000040000UL #define TOMATILLO_IOC_RDONE_PENAB 0x0000000000020000UL #define TOMATILLO_IOC_RDLINE_PENAB 0x0000000000010000UL #define TOMATILLO_IOC_RDMULT_PLEN 0x000000000000c000UL #define TOMATILLO_IOC_RDMULT_PLEN_SHIFT 14UL #define TOMATILLO_IOC_RDONE_PLEN 0x0000000000003000UL #define TOMATILLO_IOC_RDONE_PLEN_SHIFT 12UL #define TOMATILLO_IOC_RDLINE_PLEN 0x0000000000000c00UL #define TOMATILLO_IOC_RDLINE_PLEN_SHIFT 10UL #define TOMATILLO_IOC_PREF_OFF 0x00000000000003f8UL #define TOMATILLO_IOC_PREF_OFF_SHIFT 3UL #define TOMATILLO_IOC_RDMULT_CPENAB 0x0000000000000004UL #define TOMATILLO_IOC_RDONE_CPENAB 0x0000000000000002UL #define TOMATILLO_IOC_RDLINE_CPENAB 0x0000000000000001UL #define TOMATILLO_PCI_IOC_TDIAG (0x2250UL) #define TOMATILLO_PCI_IOC_DDIAG (0x2290UL) static void schizo_pbm_hw_init(struct pci_pbm_info *pbm) { u64 tmp; upa_writeq(5, pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY); tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL); /* Enable arbiter for all PCI slots. */ tmp |= 0xff; if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && pbm->chip_version >= 0x2) tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT; if (!of_find_property(pbm->op->dev.of_node, "no-bus-parking", NULL)) tmp |= SCHIZO_PCICTRL_PARK; else tmp &= ~SCHIZO_PCICTRL_PARK; if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && pbm->chip_version <= 0x1) tmp |= SCHIZO_PCICTRL_DTO_INT; else tmp &= ~SCHIZO_PCICTRL_DTO_INT; if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) tmp |= (SCHIZO_PCICTRL_MRM_PREF | SCHIZO_PCICTRL_RDO_PREF | SCHIZO_PCICTRL_RDL_PREF); upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL); tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_DIAG); tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB | SCHIZO_PCIDIAG_D_RETRY | SCHIZO_PCIDIAG_D_INTSYNC); upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_DIAG); if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { /* Clear prefetch lengths to workaround a bug in * Jalapeno... 
*/ tmp = (TOMATILLO_IOC_PART_WPENAB | (1 << TOMATILLO_IOC_PREF_OFF_SHIFT) | TOMATILLO_IOC_RDMULT_CPENAB | TOMATILLO_IOC_RDONE_CPENAB | TOMATILLO_IOC_RDLINE_CPENAB); upa_writeq(tmp, pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR); } } static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op, u32 portid, int chip_type) { const struct linux_prom64_registers *regs; struct device_node *dp = op->dev.of_node; const char *chipset_name; int err; switch (chip_type) { case PBM_CHIP_TYPE_TOMATILLO: chipset_name = "TOMATILLO"; break; case PBM_CHIP_TYPE_SCHIZO_PLUS: chipset_name = "SCHIZO+"; break; case PBM_CHIP_TYPE_SCHIZO: default: chipset_name = "SCHIZO"; break; } /* For SCHIZO, three OBP regs: * 1) PBM controller regs * 2) Schizo front-end controller regs (same for both PBMs) * 3) PBM PCI config space * * For TOMATILLO, four OBP regs: * 1) PBM controller regs * 2) Tomatillo front-end controller regs * 3) PBM PCI config space * 4) Ichip regs */ regs = of_get_property(dp, "reg", NULL); pbm->next = pci_pbm_root; pci_pbm_root = pbm; pbm->numa_node = -1; pbm->pci_ops = &sun4u_pci_ops; pbm->config_space_reg_bits = 8; pbm->index = pci_num_pbms++; pbm->portid = portid; pbm->op = op; pbm->chip_type = chip_type; pbm->chip_version = of_getintprop_default(dp, "version#", 0); pbm->chip_revision = of_getintprop_default(dp, "module-version#", 0); pbm->pbm_regs = regs[0].phys_addr; pbm->controller_regs = regs[1].phys_addr - 0x10000UL; if (chip_type == PBM_CHIP_TYPE_TOMATILLO) pbm->sync_reg = regs[3].phys_addr + 0x1a18UL; pbm->name = dp->full_name; printk("%s: %s PCI Bus Module ver[%x:%x]\n", pbm->name, chipset_name, pbm->chip_version, pbm->chip_revision); schizo_pbm_hw_init(pbm); pci_determine_mem_io_space(pbm); pci_get_pbm_props(pbm); err = schizo_pbm_iommu_init(pbm); if (err) return err; schizo_pbm_strbuf_init(pbm); schizo_scan_bus(pbm, &op->dev); return 0; } static inline int portid_compare(u32 x, u32 y, int chip_type) { if (chip_type == PBM_CHIP_TYPE_TOMATILLO) { 
if (x == (y ^ 1)) return 1; return 0; } return (x == y); } static struct pci_pbm_info * __devinit schizo_find_sibling(u32 portid, int chip_type) { struct pci_pbm_info *pbm; for (pbm = pci_pbm_root; pbm; pbm = pbm->next) { if (portid_compare(pbm->portid, portid, chip_type)) return pbm; } return NULL; } static int __devinit __schizo_init(struct platform_device *op, unsigned long chip_type) { struct device_node *dp = op->dev.of_node; struct pci_pbm_info *pbm; struct iommu *iommu; u32 portid; int err; portid = of_getintprop_default(dp, "portid", 0xff); err = -ENOMEM; pbm = kzalloc(sizeof(*pbm), GFP_KERNEL); if (!pbm) { printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n"); goto out_err; } pbm->sibling = schizo_find_sibling(portid, chip_type); iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL); if (!iommu) { printk(KERN_ERR PFX "Cannot allocate PBM A iommu.\n"); goto out_free_pbm; } pbm->iommu = iommu; if (schizo_pbm_init(pbm, op, portid, chip_type)) goto out_free_iommu; if (pbm->sibling) pbm->sibling->sibling = pbm; dev_set_drvdata(&op->dev, pbm); return 0; out_free_iommu: kfree(pbm->iommu); out_free_pbm: kfree(pbm); out_err: return err; } static const struct of_device_id schizo_match[]; static int __devinit schizo_probe(struct platform_device *op) { const struct of_device_id *match; match = of_match_device(schizo_match, &op->dev); if (!match) return -EINVAL; return __schizo_init(op, (unsigned long)match->data); } /* The ordering of this table is very important. Some Tomatillo * nodes announce that they are compatible with both pci108e,a801 * and pci108e,8001. So list the chips in reverse chronological * order. 
*/ static const struct of_device_id schizo_match[] = { { .name = "pci", .compatible = "pci108e,a801", .data = (void *) PBM_CHIP_TYPE_TOMATILLO, }, { .name = "pci", .compatible = "pci108e,8002", .data = (void *) PBM_CHIP_TYPE_SCHIZO_PLUS, }, { .name = "pci", .compatible = "pci108e,8001", .data = (void *) PBM_CHIP_TYPE_SCHIZO, }, {}, }; static struct platform_driver schizo_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = schizo_match, }, .probe = schizo_probe, }; static int __init schizo_init(void) { return platform_driver_register(&schizo_driver); } subsys_initcall(schizo_init);
gpl-2.0
ubports/android_kernel_oneplus_one
arch/x86/kernel/apb_timer.c
5617
11449
/* * apb_timer.c: Driver for Langwell APB timers * * (C) Copyright 2009 Intel Corporation * Author: Jacob Pan (jacob.jun.pan@intel.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * * Note: * Langwell is the south complex of Intel Moorestown MID platform. There are * eight external timers in total that can be used by the operating system. * The timer information, such as frequency and addresses, is provided to the * OS via SFI tables. * Timer interrupts are routed via FW/HW emulated IOAPIC independently via * individual redirection table entries (RTE). * Unlike HPET, there is no master counter, therefore one of the timers are * used as clocksource. The overall allocation looks like: * - timer 0 - NR_CPUs for per cpu timer * - one timer for clocksource * - one timer for watchdog driver. * It is also worth notice that APB timer does not support true one-shot mode, * free-running mode will be used here to emulate one-shot mode. * APB timer can also be used as broadcast timer along with per cpu local APIC * timer, but by default APB timer has higher rating than local APIC timers. 
*/ #include <linux/delay.h> #include <linux/dw_apb_timer.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/pm.h> #include <linux/sfi.h> #include <linux/interrupt.h> #include <linux/cpu.h> #include <linux/irq.h> #include <asm/fixmap.h> #include <asm/apb_timer.h> #include <asm/mrst.h> #include <asm/time.h> #define APBT_CLOCKEVENT_RATING 110 #define APBT_CLOCKSOURCE_RATING 250 #define APBT_CLOCKEVENT0_NUM (0) #define APBT_CLOCKSOURCE_NUM (2) static phys_addr_t apbt_address; static int apb_timer_block_enabled; static void __iomem *apbt_virt_address; /* * Common DW APB timer info */ static unsigned long apbt_freq; struct apbt_dev { struct dw_apb_clock_event_device *timer; unsigned int num; int cpu; unsigned int irq; char name[10]; }; static struct dw_apb_clocksource *clocksource_apbt; static inline void __iomem *adev_virt_addr(struct apbt_dev *adev) { return apbt_virt_address + adev->num * APBTMRS_REG_SIZE; } static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev); #ifdef CONFIG_SMP static unsigned int apbt_num_timers_used; #endif static inline void apbt_set_mapping(void) { struct sfi_timer_table_entry *mtmr; int phy_cs_timer_id = 0; if (apbt_virt_address) { pr_debug("APBT base already mapped\n"); return; } mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); if (mtmr == NULL) { printk(KERN_ERR "Failed to get MTMR %d from SFI\n", APBT_CLOCKEVENT0_NUM); return; } apbt_address = (phys_addr_t)mtmr->phys_addr; if (!apbt_address) { printk(KERN_WARNING "No timer base from SFI, use default\n"); apbt_address = APBT_DEFAULT_BASE; } apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE); if (!apbt_virt_address) { pr_debug("Failed mapping APBT phy address at %lu\n",\ (unsigned long)apbt_address); goto panic_noapbt; } apbt_freq = mtmr->freq_hz; sfi_free_mtmr(mtmr); /* Now figure out the physical timer id for clocksource device */ mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM); if (mtmr == NULL) goto panic_noapbt; /* Now figure out the physical 
timer id */ pr_debug("Use timer %d for clocksource\n", (int)(mtmr->phys_addr & 0xff) / APBTMRS_REG_SIZE); phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) / APBTMRS_REG_SIZE; clocksource_apbt = dw_apb_clocksource_init(APBT_CLOCKSOURCE_RATING, "apbt0", apbt_virt_address + phy_cs_timer_id * APBTMRS_REG_SIZE, apbt_freq); return; panic_noapbt: panic("Failed to setup APB system timer\n"); } static inline void apbt_clear_mapping(void) { iounmap(apbt_virt_address); apbt_virt_address = NULL; } /* * APBT timer interrupt enable / disable */ static inline int is_apbt_capable(void) { return apbt_virt_address ? 1 : 0; } static int __init apbt_clockevent_register(void) { struct sfi_timer_table_entry *mtmr; struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev); mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); if (mtmr == NULL) { printk(KERN_ERR "Failed to get MTMR %d from SFI\n", APBT_CLOCKEVENT0_NUM); return -ENODEV; } adev->num = smp_processor_id(); adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0", mrst_timer_options == MRST_TIMER_LAPIC_APBT ? APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING, adev_virt_addr(adev), 0, apbt_freq); /* Firmware does EOI handling for us. 
*/ adev->timer->eoi = NULL; if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { global_clock_event = &adev->timer->ced; printk(KERN_DEBUG "%s clockevent registered as global\n", global_clock_event->name); } dw_apb_clockevent_register(adev->timer); sfi_free_mtmr(mtmr); return 0; } #ifdef CONFIG_SMP static void apbt_setup_irq(struct apbt_dev *adev) { /* timer0 irq has been setup early */ if (adev->irq == 0) return; irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT); irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); /* APB timer irqs are set up as mp_irqs, timer is edge type */ __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge"); } /* Should be called with per cpu */ void apbt_setup_secondary_clock(void) { struct apbt_dev *adev; int cpu; /* Don't register boot CPU clockevent */ cpu = smp_processor_id(); if (!cpu) return; adev = &__get_cpu_var(cpu_apbt_dev); if (!adev->timer) { adev->timer = dw_apb_clockevent_init(cpu, adev->name, APBT_CLOCKEVENT_RATING, adev_virt_addr(adev), adev->irq, apbt_freq); adev->timer->eoi = NULL; } else { dw_apb_clockevent_resume(adev->timer); } printk(KERN_INFO "Registering CPU %d clockevent device %s, cpu %08x\n", cpu, adev->name, adev->cpu); apbt_setup_irq(adev); dw_apb_clockevent_register(adev->timer); return; } /* * this notify handler process CPU hotplug events. in case of S0i3, nonboot * cpus are disabled/enabled frequently, for performance reasons, we keep the * per cpu timer irq registered so that we do need to do free_irq/request_irq. * * TODO: it might be more reliable to directly disable percpu clockevent device * without the notifier chain. currently, cpu 0 may get interrupts from other * cpu timers during the offline process due to the ordering of notification. * the extra interrupt is harmless. 
*/ static int apbt_cpuhp_notify(struct notifier_block *n, unsigned long action, void *hcpu) { unsigned long cpu = (unsigned long)hcpu; struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu); switch (action & 0xf) { case CPU_DEAD: dw_apb_clockevent_pause(adev->timer); if (system_state == SYSTEM_RUNNING) { pr_debug("skipping APBT CPU %lu offline\n", cpu); } else if (adev) { pr_debug("APBT clockevent for cpu %lu offline\n", cpu); dw_apb_clockevent_stop(adev->timer); } break; default: pr_debug("APBT notified %lu, no action\n", action); } return NOTIFY_OK; } static __init int apbt_late_init(void) { if (mrst_timer_options == MRST_TIMER_LAPIC_APBT || !apb_timer_block_enabled) return 0; /* This notifier should be called after workqueue is ready */ hotcpu_notifier(apbt_cpuhp_notify, -20); return 0; } fs_initcall(apbt_late_init); #else void apbt_setup_secondary_clock(void) {} #endif /* CONFIG_SMP */ static int apbt_clocksource_register(void) { u64 start, now; cycle_t t1; /* Start the counter, use timer 2 as source, timer 0/1 for event */ dw_apb_clocksource_start(clocksource_apbt); /* Verify whether apbt counter works */ t1 = dw_apb_clocksource_read(clocksource_apbt); rdtscll(start); /* * We don't know the TSC frequency yet, but waiting for * 200000 TSC cycles is safe: * 4 GHz == 50us * 1 GHz == 200us */ do { rep_nop(); rdtscll(now); } while ((now - start) < 200000UL); /* APBT is the only always on clocksource, it has to work! */ if (t1 == dw_apb_clocksource_read(clocksource_apbt)) panic("APBT counter not counting. APBT disabled\n"); dw_apb_clocksource_register(clocksource_apbt); return 0; } /* * Early setup the APBT timer, only use timer 0 for booting then switch to * per CPU timer if possible. * returns 1 if per cpu apbt is setup * returns 0 if no per cpu apbt is chosen * panic if set up failed, this is the only platform timer on Moorestown. 
*/ void __init apbt_time_init(void) { #ifdef CONFIG_SMP int i; struct sfi_timer_table_entry *p_mtmr; unsigned int percpu_timer; struct apbt_dev *adev; #endif if (apb_timer_block_enabled) return; apbt_set_mapping(); if (!apbt_virt_address) goto out_noapbt; /* * Read the frequency and check for a sane value, for ESL model * we extend the possible clock range to allow time scaling. */ if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) { pr_debug("APBT has invalid freq 0x%lx\n", apbt_freq); goto out_noapbt; } if (apbt_clocksource_register()) { pr_debug("APBT has failed to register clocksource\n"); goto out_noapbt; } if (!apbt_clockevent_register()) apb_timer_block_enabled = 1; else { pr_debug("APBT has failed to register clockevent\n"); goto out_noapbt; } #ifdef CONFIG_SMP /* kernel cmdline disable apb timer, so we will use lapic timers */ if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { printk(KERN_INFO "apbt: disabled per cpu timer\n"); return; } pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus()); if (num_possible_cpus() <= sfi_mtimer_num) { percpu_timer = 1; apbt_num_timers_used = num_possible_cpus(); } else { percpu_timer = 0; apbt_num_timers_used = 1; } pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used); /* here we set up per CPU timer data structure */ for (i = 0; i < apbt_num_timers_used; i++) { adev = &per_cpu(cpu_apbt_dev, i); adev->num = i; adev->cpu = i; p_mtmr = sfi_get_mtmr(i); if (p_mtmr) adev->irq = p_mtmr->irq; else printk(KERN_ERR "Failed to get timer for cpu %d\n", i); snprintf(adev->name, sizeof(adev->name) - 1, "apbt%d", i); } #endif return; out_noapbt: apbt_clear_mapping(); apb_timer_block_enabled = 0; panic("failed to enable APB timer\n"); } /* called before apb_timer_enable, use early map */ unsigned long apbt_quick_calibrate(void) { int i, scale; u64 old, new; cycle_t t1, t2; unsigned long khz = 0; u32 loop, shift; apbt_set_mapping(); dw_apb_clocksource_start(clocksource_apbt); /* check if the timer can 
count down, otherwise return */ old = dw_apb_clocksource_read(clocksource_apbt); i = 10000; while (--i) { if (old != dw_apb_clocksource_read(clocksource_apbt)) break; } if (!i) goto failed; /* count 16 ms */ loop = (apbt_freq / 1000) << 4; /* restart the timer to ensure it won't get to 0 in the calibration */ dw_apb_clocksource_start(clocksource_apbt); old = dw_apb_clocksource_read(clocksource_apbt); old += loop; t1 = __native_read_tsc(); do { new = dw_apb_clocksource_read(clocksource_apbt); } while (new < old); t2 = __native_read_tsc(); shift = 5; if (unlikely(loop >> shift == 0)) { printk(KERN_INFO "APBT TSC calibration failed, not enough resolution\n"); return 0; } scale = (int)div_u64((t2 - t1), loop >> shift); khz = (scale * (apbt_freq / 1000)) >> shift; printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz); return khz; failed: return 0; }
gpl-2.0
BobZmotion/android_kernel_3.4.x
drivers/platform/x86/dell-wmi-aio.c
8177
4322
/* * WMI hotkeys support for Dell All-In-One series * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <acpi/acpi_drivers.h> #include <linux/acpi.h> #include <linux/string.h> MODULE_DESCRIPTION("WMI hotkeys driver for Dell All-In-One series"); MODULE_LICENSE("GPL"); #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4" #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8" static const char *dell_wmi_aio_guids[] = { EVENT_GUID1, EVENT_GUID2, NULL }; MODULE_ALIAS("wmi:"EVENT_GUID1); MODULE_ALIAS("wmi:"EVENT_GUID2); static const struct key_entry dell_wmi_aio_keymap[] = { { KE_KEY, 0xc0, { KEY_VOLUMEUP } }, { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } }, { KE_END, 0 } }; static struct input_dev *dell_wmi_aio_input_dev; static void dell_wmi_aio_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; acpi_status status; status = wmi_get_event_data(value, &response); if (status != AE_OK) { pr_info("bad event status 0x%x\n", status); return; } obj = (union acpi_object *)response.pointer; if (obj) { unsigned int scancode; switch (obj->type) { case 
ACPI_TYPE_INTEGER: /* Most All-In-One correctly return integer scancode */ scancode = obj->integer.value; sparse_keymap_report_event(dell_wmi_aio_input_dev, scancode, 1, true); break; case ACPI_TYPE_BUFFER: /* Broken machines return the scancode in a buffer */ if (obj->buffer.pointer && obj->buffer.length > 0) { scancode = obj->buffer.pointer[0]; sparse_keymap_report_event( dell_wmi_aio_input_dev, scancode, 1, true); } break; } } kfree(obj); } static int __init dell_wmi_aio_input_setup(void) { int err; dell_wmi_aio_input_dev = input_allocate_device(); if (!dell_wmi_aio_input_dev) return -ENOMEM; dell_wmi_aio_input_dev->name = "Dell AIO WMI hotkeys"; dell_wmi_aio_input_dev->phys = "wmi/input0"; dell_wmi_aio_input_dev->id.bustype = BUS_HOST; err = sparse_keymap_setup(dell_wmi_aio_input_dev, dell_wmi_aio_keymap, NULL); if (err) { pr_err("Unable to setup input device keymap\n"); goto err_free_dev; } err = input_register_device(dell_wmi_aio_input_dev); if (err) { pr_info("Unable to register input device\n"); goto err_free_keymap; } return 0; err_free_keymap: sparse_keymap_free(dell_wmi_aio_input_dev); err_free_dev: input_free_device(dell_wmi_aio_input_dev); return err; } static const char *dell_wmi_aio_find(void) { int i; for (i = 0; dell_wmi_aio_guids[i] != NULL; i++) if (wmi_has_guid(dell_wmi_aio_guids[i])) return dell_wmi_aio_guids[i]; return NULL; } static int __init dell_wmi_aio_init(void) { int err; const char *guid; guid = dell_wmi_aio_find(); if (!guid) { pr_warn("No known WMI GUID found\n"); return -ENXIO; } err = dell_wmi_aio_input_setup(); if (err) return err; err = wmi_install_notify_handler(guid, dell_wmi_aio_notify, NULL); if (err) { pr_err("Unable to register notify handler - %d\n", err); sparse_keymap_free(dell_wmi_aio_input_dev); input_unregister_device(dell_wmi_aio_input_dev); return err; } return 0; } static void __exit dell_wmi_aio_exit(void) { const char *guid; guid = dell_wmi_aio_find(); wmi_remove_notify_handler(guid); 
sparse_keymap_free(dell_wmi_aio_input_dev); input_unregister_device(dell_wmi_aio_input_dev); } module_init(dell_wmi_aio_init); module_exit(dell_wmi_aio_exit);
gpl-2.0
GZR-Kernels/fusebox_kernel_lge_g3
sound/pci/emu10k1/emu10k1_synth.c
8689
3269
/*
 * Copyright (C) 2000 Takashi Iwai <tiwai@suse.de>
 *
 * Routines for control of EMU10K1 WaveTable synth
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "emu10k1_synth_local.h"
#include <linux/init.h>
#include <linux/module.h>

MODULE_AUTHOR("Takashi Iwai");
MODULE_DESCRIPTION("Routines for control of EMU10K1 WaveTable synth");
MODULE_LICENSE("GPL");

/*
 * create a new hardware dependent device for Emu10k1
 *
 * Allocates an emux synth instance, wires it to the emu10k1 hardware
 * described in the sequencer-device argument block, registers it with
 * the emux core and publishes it to the voice allocator under
 * hw->voice_lock.  Returns 0 on success or a negative errno.
 */
static int snd_emu10k1_synth_new_device(struct snd_seq_device *dev)
{
	struct snd_emux *emux;
	struct snd_emu10k1 *hw;
	struct snd_emu10k1_synth_arg *arg;
	unsigned long flags;
	int err;

	arg = SNDRV_SEQ_DEVICE_ARGPTR(dev);
	if (arg == NULL)
		return -EINVAL;

	if (arg->seq_ports <= 0)
		return 0; /* nothing */
	/* clamp the requested voice count to the hardware range [1, 64] */
	if (arg->max_voices < 1)
		arg->max_voices = 1;
	else if (arg->max_voices > 64)
		arg->max_voices = 64;

	if (snd_emux_new(&emux) < 0)
		return -ENOMEM;

	snd_emu10k1_ops_setup(emux);
	hw = arg->hwptr;
	emux->hw = hw;
	emux->max_voices = arg->max_voices;
	emux->num_ports = arg->seq_ports;
	emux->pitch_shift = -501;
	emux->memhdr = hw->memhdr;
	/* maximum two ports */
	emux->midi_ports = arg->seq_ports < 2 ? arg->seq_ports : 2;
	/* audigy has two external midis */
	emux->midi_devidx = hw->audigy ? 2 : 1;
	emux->linear_panning = 0;
	emux->hwdep_idx = 2; /* FIXED */

	/* propagate the real error from snd_emux_register() instead of
	 * collapsing every failure into -ENOMEM */
	err = snd_emux_register(emux, dev->card, arg->index, "Emu10k1");
	if (err < 0) {
		snd_emux_free(emux);
		return err;
	}

	spin_lock_irqsave(&hw->voice_lock, flags);
	hw->synth = emux;
	hw->get_synth_voice = snd_emu10k1_synth_get_voice;
	spin_unlock_irqrestore(&hw->voice_lock, flags);

	dev->driver_data = emux;

	return 0;
}

/*
 * Tear down the synth instance created above: detach it from the
 * hardware under voice_lock, then free the emux object.
 */
static int snd_emu10k1_synth_delete_device(struct snd_seq_device *dev)
{
	struct snd_emux *emux;
	struct snd_emu10k1 *hw;
	unsigned long flags;

	if (dev->driver_data == NULL)
		return 0; /* not registered actually */

	emux = dev->driver_data;
	hw = emux->hw;
	spin_lock_irqsave(&hw->voice_lock, flags);
	hw->synth = NULL;
	hw->get_synth_voice = NULL;
	spin_unlock_irqrestore(&hw->voice_lock, flags);

	snd_emux_free(emux);
	return 0;
}

/*
 * INIT part
 */

static int __init alsa_emu10k1_synth_init(void)
{
	static struct snd_seq_dev_ops ops = {
		snd_emu10k1_synth_new_device,
		snd_emu10k1_synth_delete_device,
	};

	return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH,
					      &ops,
					      sizeof(struct snd_emu10k1_synth_arg));
}

static void __exit alsa_emu10k1_synth_exit(void)
{
	snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH);
}

module_init(alsa_emu10k1_synth_init)
module_exit(alsa_emu10k1_synth_exit)
gpl-2.0
htc-mirror/ruby-ics-crc-3.0.16-fd362fb
net/802/p8023.c
14065
1687
/* * NET3: 802.3 data link hooks used for IPX 802.3 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * 802.3 isn't really a protocol data link layer. Some old IPX stuff * uses it however. Note that there is only one 802.3 protocol layer * in the system. We don't currently support different protocols * running raw 802.3 on different devices. Thankfully nobody else * has done anything like the old IPX. */ #include <linux/in.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/datalink.h> #include <net/p8022.h> /* * Place an 802.3 header on a packet. The driver will do the mac * addresses, we just need to give it the buffer length. */ static int p8023_request(struct datalink_proto *dl, struct sk_buff *skb, unsigned char *dest_node) { struct net_device *dev = skb->dev; dev_hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); return dev_queue_xmit(skb); } /* * Create an 802.3 client. Note there can be only one 802.3 client */ struct datalink_proto *make_8023_client(void) { struct datalink_proto *proto = kmalloc(sizeof(*proto), GFP_ATOMIC); if (proto) { proto->header_length = 0; proto->request = p8023_request; } return proto; } /* * Destroy the 802.3 client. */ void destroy_8023_client(struct datalink_proto *dl) { kfree(dl); } EXPORT_SYMBOL(destroy_8023_client); EXPORT_SYMBOL(make_8023_client); MODULE_LICENSE("GPL");
gpl-2.0
tim-yang/linux-3.8
fs/logfs/dir.c
242
21235
/* * fs/logfs/dir.c - directory-related code * * As should be obvious for Linux kernel code, license is GPLv2 * * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org> */ #include "logfs.h" #include <linux/slab.h> /* * Atomic dir operations * * Directory operations are by default not atomic. Dentries and Inodes are * created/removed/altered in separate operations. Therefore we need to do * a small amount of journaling. * * Create, link, mkdir, mknod and symlink all share the same function to do * the work: __logfs_create. This function works in two atomic steps: * 1. allocate inode (remember in journal) * 2. allocate dentry (clear journal) * * As we can only get interrupted between the two, when the inode we just * created is simply stored in the anchor. On next mount, if we were * interrupted, we delete the inode. From a users point of view the * operation never happened. * * Unlink and rmdir also share the same function: unlink. Again, this * function works in two atomic steps * 1. remove dentry (remember inode in journal) * 2. unlink inode (clear journal) * * And again, on the next mount, if we were interrupted, we delete the inode. * From a users point of view the operation succeeded. * * Rename is the real pain to deal with, harder than all the other methods * combined. Depending on the circumstances we can run into three cases. * A "target rename" where the target dentry already existed, a "local * rename" where both parent directories are identical or a "cross-directory * rename" in the remaining case. * * Local rename is atomic, as the old dentry is simply rewritten with a new * name. * * Cross-directory rename works in two steps, similar to __logfs_create and * logfs_unlink: * 1. Write new dentry (remember old dentry in journal) * 2. Remove old dentry (clear journal) * * Here we remember a dentry instead of an inode. On next mount, if we were * interrupted, we delete the dentry. From a users point of view, the * operation succeeded. 
* * Target rename works in three atomic steps: * 1. Attach old inode to new dentry (remember old dentry and new inode) * 2. Remove old dentry (still remember the new inode) * 3. Remove victim inode * * Here we remember both an inode an a dentry. If we get interrupted * between steps 1 and 2, we delete both the dentry and the inode. If * we get interrupted between steps 2 and 3, we delete just the inode. * In either case, the remaining objects are deleted on next mount. From * a users point of view, the operation succeeded. */ static int write_dir(struct inode *dir, struct logfs_disk_dentry *dd, loff_t pos) { return logfs_inode_write(dir, dd, sizeof(*dd), pos, WF_LOCK, NULL); } static int write_inode(struct inode *inode) { return __logfs_write_inode(inode, NULL, WF_LOCK); } static s64 dir_seek_data(struct inode *inode, s64 pos) { s64 new_pos = logfs_seek_data(inode, pos); return max(pos, new_pos - 1); } static int beyond_eof(struct inode *inode, loff_t bix) { loff_t pos = bix << inode->i_sb->s_blocksize_bits; return pos >= i_size_read(inode); } /* * Prime value was chosen to be roughly 256 + 26. r5 hash uses 11, * so short names (len <= 9) don't even occupy the complete 32bit name * space. A prime >256 ensures short names quickly spread the 32bit * name space. Add about 26 for the estimated amount of information * of each character and pick a prime nearby, preferably a bit-sparse * one. */ static u32 hash_32(const char *s, int len, u32 seed) { u32 hash = seed; int i; for (i = 0; i < len; i++) hash = hash * 293 + s[i]; return hash; } /* * We have to satisfy several conflicting requirements here. Small * directories should stay fairly compact and not require too many * indirect blocks. The number of possible locations for a given hash * should be small to make lookup() fast. And we should try hard not * to overflow the 32bit name space or nfs and 32bit host systems will * be unhappy. * * So we use the following scheme. 
First we reduce the hash to 0..15 * and try a direct block. If that is occupied we reduce the hash to * 16..255 and try an indirect block. Same for 2x and 3x indirect * blocks. Lastly we reduce the hash to 0x800_0000 .. 0xffff_ffff, * but use buckets containing eight entries instead of a single one. * * Using 16 entries should allow for a reasonable amount of hash * collisions, so the 32bit name space can be packed fairly tight * before overflowing. Oh and currently we don't overflow but return * and error. * * How likely are collisions? Doing the appropriate math is beyond me * and the Bronstein textbook. But running a test program to brute * force collisions for a couple of days showed that on average the * first collision occurs after 598M entries, with 290M being the * smallest result. Obviously 21 entries could already cause a * collision if all entries are carefully chosen. */ static pgoff_t hash_index(u32 hash, int round) { u32 i0_blocks = I0_BLOCKS; u32 i1_blocks = I1_BLOCKS; u32 i2_blocks = I2_BLOCKS; u32 i3_blocks = I3_BLOCKS; switch (round) { case 0: return hash % i0_blocks; case 1: return i0_blocks + hash % (i1_blocks - i0_blocks); case 2: return i1_blocks + hash % (i2_blocks - i1_blocks); case 3: return i2_blocks + hash % (i3_blocks - i2_blocks); case 4 ... 
	19:
		/* Final round group: buckets of 16 slots spread over the
		 * remaining name space (tail of hash_index(), whose head is
		 * on the previous line of this chunk). */
		return i3_blocks + 16 * (hash % (((1<<31) - i3_blocks) / 16))
			+ round - 4;
	}
	BUG();
}

/*
 * Find the on-disk dentry page for @dentry in directory @dir by probing
 * each hash round in turn.  Returns the page (reference held by caller),
 * NULL if no matching dentry exists, or an ERR_PTR on read error /
 * over-long name.
 */
static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
{
	struct qstr *name = &dentry->d_name;
	struct page *page;
	struct logfs_disk_dentry *dd;
	u32 hash = hash_32(name->name, name->len, 0);
	pgoff_t index;
	int round;

	if (name->len > LOGFS_MAX_NAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	for (round = 0; round < 20; round++) {
		index = hash_index(hash, round);

		/* Rounds probe indices in increasing order, so hitting EOF
		 * means no later round can contain the dentry either. */
		if (beyond_eof(dir, index))
			return NULL;
		if (!logfs_exist_block(dir, index))
			continue;
		page = read_cache_page(dir->i_mapping, index,
				(filler_t *)logfs_readpage, NULL);
		if (IS_ERR(page))
			return page;
		dd = kmap_atomic(page);
		BUG_ON(dd->namelen == 0);

		if (name->len != be16_to_cpu(dd->namelen) ||
				memcmp(name->name, dd->name, name->len)) {
			/* Hash collision with a different name; try the
			 * next round. */
			kunmap_atomic(dd);
			page_cache_release(page);
			continue;
		}

		kunmap_atomic(dd);
		return page;
	}
	return NULL;
}

/*
 * Drop a link on @inode and write it back.  Errors are treated as
 * filesystem bugs (LOGFS_BUG_ON); the write_inode() result is returned.
 */
static int logfs_remove_inode(struct inode *inode)
{
	int ret;

	drop_nlink(inode);
	ret = write_inode(inode);
	LOGFS_BUG_ON(ret, inode->i_sb);
	return ret;
}

/*
 * Undo a pending journal transaction on @inode: detach the transaction
 * from the inode's block (if attached) and free it.
 */
static void abort_transaction(struct inode *inode, struct logfs_transaction *ta)
{
	if (logfs_inode(inode)->li_block)
		logfs_inode(inode)->li_block->ta = NULL;
	kfree(ta);
}

/*
 * Unlink @dentry from @dir in two journaled atomic steps (see the
 * header comment of this file): 1. remove the dentry, remembering the
 * inode in the journal; 2. drop the inode and clear the journal.
 */
static int logfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct logfs_super *super = logfs_super(dir->i_sb);
	struct inode *inode = dentry->d_inode;
	struct logfs_transaction *ta;
	struct page *page;
	pgoff_t index;
	int ret;

	ta = kzalloc(sizeof(*ta), GFP_KERNEL);
	if (!ta)
		return -ENOMEM;

	ta->state = UNLINK_1;
	ta->ino = inode->i_ino;

	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	page = logfs_get_dd_page(dir, dentry);
	if (!page) {
		kfree(ta);
		return -ENOENT;
	}
	if (IS_ERR(page)) {
		kfree(ta);
		return PTR_ERR(page);
	}
	index = page->index;
	page_cache_release(page);

	mutex_lock(&super->s_dirop_mutex);
	logfs_add_transaction(dir, ta);

	ret = logfs_delete(dir, index, NULL);
	if (!ret)
		ret = write_inode(dir);

	if (ret) {
		abort_transaction(dir, ta);
printk(KERN_ERR"LOGFS: unable to delete inode\n"); goto out; } ta->state = UNLINK_2; logfs_add_transaction(inode, ta); ret = logfs_remove_inode(inode); out: mutex_unlock(&super->s_dirop_mutex); return ret; } static inline int logfs_empty_dir(struct inode *dir) { u64 data; data = logfs_seek_data(dir, 0) << dir->i_sb->s_blocksize_bits; return data >= i_size_read(dir); } static int logfs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; if (!logfs_empty_dir(inode)) return -ENOTEMPTY; return logfs_unlink(dir, dentry); } /* FIXME: readdir currently has it's own dir_walk code. I don't see a good * way to combine the two copies */ #define IMPLICIT_NODES 2 static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir) { struct inode *dir = file->f_dentry->d_inode; loff_t pos = file->f_pos - IMPLICIT_NODES; struct page *page; struct logfs_disk_dentry *dd; int full; BUG_ON(pos < 0); for (;; pos++) { if (beyond_eof(dir, pos)) break; if (!logfs_exist_block(dir, pos)) { /* deleted dentry */ pos = dir_seek_data(dir, pos); continue; } page = read_cache_page(dir->i_mapping, pos, (filler_t *)logfs_readpage, NULL); if (IS_ERR(page)) return PTR_ERR(page); dd = kmap(page); BUG_ON(dd->namelen == 0); full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen), pos, be64_to_cpu(dd->ino), dd->type); kunmap(page); page_cache_release(page); if (full) break; } file->f_pos = pos + IMPLICIT_NODES; return 0; } static int logfs_readdir(struct file *file, void *buf, filldir_t filldir) { struct inode *inode = file->f_dentry->d_inode; ino_t pino = parent_ino(file->f_dentry); int err; if (file->f_pos < 0) return -EINVAL; if (file->f_pos == 0) { if (filldir(buf, ".", 1, 1, inode->i_ino, DT_DIR) < 0) return 0; file->f_pos++; } if (file->f_pos == 1) { if (filldir(buf, "..", 2, 2, pino, DT_DIR) < 0) return 0; file->f_pos++; } err = __logfs_readdir(file, buf, filldir); return err; } static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr 
*name) { dd->namelen = cpu_to_be16(name->len); memcpy(dd->name, name->name, name->len); } static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct page *page; struct logfs_disk_dentry *dd; pgoff_t index; u64 ino = 0; struct inode *inode; page = logfs_get_dd_page(dir, dentry); if (IS_ERR(page)) return ERR_CAST(page); if (!page) { d_add(dentry, NULL); return NULL; } index = page->index; dd = kmap_atomic(page); ino = be64_to_cpu(dd->ino); kunmap_atomic(dd); page_cache_release(page); inode = logfs_iget(dir->i_sb, ino); if (IS_ERR(inode)) printk(KERN_ERR"LogFS: Cannot read inode #%llx for dentry (%lx, %lx)n", ino, dir->i_ino, index); return d_splice_alias(inode, dentry); } static void grow_dir(struct inode *dir, loff_t index) { index = (index + 1) << dir->i_sb->s_blocksize_bits; if (i_size_read(dir) < index) i_size_write(dir, index); } static int logfs_write_dir(struct inode *dir, struct dentry *dentry, struct inode *inode) { struct page *page; struct logfs_disk_dentry *dd; u32 hash = hash_32(dentry->d_name.name, dentry->d_name.len, 0); pgoff_t index; int round, err; for (round = 0; round < 20; round++) { index = hash_index(hash, round); if (logfs_exist_block(dir, index)) continue; page = find_or_create_page(dir->i_mapping, index, GFP_KERNEL); if (!page) return -ENOMEM; dd = kmap_atomic(page); memset(dd, 0, sizeof(*dd)); dd->ino = cpu_to_be64(inode->i_ino); dd->type = logfs_type(inode); logfs_set_name(dd, &dentry->d_name); kunmap_atomic(dd); err = logfs_write_buf(dir, page, WF_LOCK); unlock_page(page); page_cache_release(page); if (!err) grow_dir(dir, index); return err; } /* FIXME: Is there a better return value? In most cases neither * the filesystem nor the directory are full. But we have had * too many collisions for this particular hash and no fallback. 
*/ return -ENOSPC; } static int __logfs_create(struct inode *dir, struct dentry *dentry, struct inode *inode, const char *dest, long destlen) { struct logfs_super *super = logfs_super(dir->i_sb); struct logfs_inode *li = logfs_inode(inode); struct logfs_transaction *ta; int ret; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) { drop_nlink(inode); iput(inode); return -ENOMEM; } ta->state = CREATE_1; ta->ino = inode->i_ino; mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(inode, ta); if (dest) { /* symlink */ ret = logfs_inode_write(inode, dest, destlen, 0, WF_LOCK, NULL); if (!ret) ret = write_inode(inode); } else { /* creat/mkdir/mknod */ ret = write_inode(inode); } if (ret) { abort_transaction(inode, ta); li->li_flags |= LOGFS_IF_STILLBORN; /* FIXME: truncate symlink */ drop_nlink(inode); iput(inode); goto out; } ta->state = CREATE_2; logfs_add_transaction(dir, ta); ret = logfs_write_dir(dir, dentry, inode); /* sync directory */ if (!ret) ret = write_inode(dir); if (ret) { logfs_del_transaction(dir, ta); ta->state = CREATE_2; logfs_add_transaction(inode, ta); logfs_remove_inode(inode); iput(inode); goto out; } d_instantiate(dentry, inode); out: mutex_unlock(&super->s_dirop_mutex); return ret; } static int logfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; /* * FIXME: why do we have to fill in S_IFDIR, while the mode is * correct for mknod, creat, etc.? Smells like the vfs *should* * do it for us but for some reason fails to do so. 
*/ inode = logfs_new_inode(dir, S_IFDIR | mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &logfs_dir_iops; inode->i_fop = &logfs_dir_fops; return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct inode *inode; inode = logfs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &logfs_reg_iops; inode->i_fop = &logfs_reg_fops; inode->i_mapping->a_ops = &logfs_reg_aops; return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct inode *inode; if (dentry->d_name.len > LOGFS_MAX_NAMELEN) return -ENAMETOOLONG; inode = logfs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); init_special_inode(inode, mode, rdev); return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_symlink(struct inode *dir, struct dentry *dentry, const char *target) { struct inode *inode; size_t destlen = strlen(target) + 1; if (destlen > dir->i_sb->s_blocksize) return -ENAMETOOLONG; inode = logfs_new_inode(dir, S_IFLNK | 0777); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &logfs_symlink_iops; inode->i_mapping->a_ops = &logfs_reg_aops; return __logfs_create(dir, dentry, inode, target, destlen); } static int logfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; ihold(inode); inc_nlink(inode); mark_inode_dirty_sync(inode); return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_get_dd(struct inode *dir, struct dentry *dentry, struct logfs_disk_dentry *dd, loff_t *pos) { struct page *page; void *map; page = logfs_get_dd_page(dir, dentry); if (IS_ERR(page)) return PTR_ERR(page); *pos = page->index; map = kmap_atomic(page); memcpy(dd, map, sizeof(*dd)); kunmap_atomic(map); page_cache_release(page); 
return 0; } static int logfs_delete_dd(struct inode *dir, loff_t pos) { /* * Getting called with pos somewhere beyond eof is either a goofup * within this file or means someone maliciously edited the * (crc-protected) journal. */ BUG_ON(beyond_eof(dir, pos)); dir->i_ctime = dir->i_mtime = CURRENT_TIME; log_dir(" Delete dentry (%lx, %llx)\n", dir->i_ino, pos); return logfs_delete(dir, pos, NULL); } /* * Cross-directory rename, target does not exist. Just a little nasty. * Create a new dentry in the target dir, then remove the old dentry, * all the while taking care to remember our operation in the journal. */ static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct logfs_super *super = logfs_super(old_dir->i_sb); struct logfs_disk_dentry dd; struct logfs_transaction *ta; loff_t pos; int err; /* 1. locate source dd */ err = logfs_get_dd(old_dir, old_dentry, &dd, &pos); if (err) return err; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) return -ENOMEM; ta->state = CROSS_RENAME_1; ta->dir = old_dir->i_ino; ta->pos = pos; /* 2. write target dd */ mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(new_dir, ta); err = logfs_write_dir(new_dir, new_dentry, old_dentry->d_inode); if (!err) err = write_inode(new_dir); if (err) { super->s_rename_dir = 0; super->s_rename_pos = 0; abort_transaction(new_dir, ta); goto out; } /* 3. 
remove source dd */ ta->state = CROSS_RENAME_2; logfs_add_transaction(old_dir, ta); err = logfs_delete_dd(old_dir, pos); if (!err) err = write_inode(old_dir); LOGFS_BUG_ON(err, old_dir->i_sb); out: mutex_unlock(&super->s_dirop_mutex); return err; } static int logfs_replace_inode(struct inode *dir, struct dentry *dentry, struct logfs_disk_dentry *dd, struct inode *inode) { loff_t pos; int err; err = logfs_get_dd(dir, dentry, dd, &pos); if (err) return err; dd->ino = cpu_to_be64(inode->i_ino); dd->type = logfs_type(inode); err = write_dir(dir, dd, pos); if (err) return err; log_dir("Replace dentry (%lx, %llx) %s -> %llx\n", dir->i_ino, pos, dd->name, be64_to_cpu(dd->ino)); return write_inode(dir); } /* Target dentry exists - the worst case. We need to attach the source * inode to the target dentry, then remove the orphaned target inode and * source dentry. */ static int logfs_rename_target(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct logfs_super *super = logfs_super(old_dir->i_sb); struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; int isdir = S_ISDIR(old_inode->i_mode); struct logfs_disk_dentry dd; struct logfs_transaction *ta; loff_t pos; int err; BUG_ON(isdir != S_ISDIR(new_inode->i_mode)); if (isdir) { if (!logfs_empty_dir(new_inode)) return -ENOTEMPTY; } /* 1. locate source dd */ err = logfs_get_dd(old_dir, old_dentry, &dd, &pos); if (err) return err; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) return -ENOMEM; ta->state = TARGET_RENAME_1; ta->dir = old_dir->i_ino; ta->pos = pos; ta->ino = new_inode->i_ino; /* 2. attach source inode to target dd */ mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(new_dir, ta); err = logfs_replace_inode(new_dir, new_dentry, &dd, old_inode); if (err) { super->s_rename_dir = 0; super->s_rename_pos = 0; super->s_victim_ino = 0; abort_transaction(new_dir, ta); goto out; } /* 3. 
remove source dd */ ta->state = TARGET_RENAME_2; logfs_add_transaction(old_dir, ta); err = logfs_delete_dd(old_dir, pos); if (!err) err = write_inode(old_dir); LOGFS_BUG_ON(err, old_dir->i_sb); /* 4. remove target inode */ ta->state = TARGET_RENAME_3; logfs_add_transaction(new_inode, ta); err = logfs_remove_inode(new_inode); out: mutex_unlock(&super->s_dirop_mutex); return err; } static int logfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { if (new_dentry->d_inode) return logfs_rename_target(old_dir, old_dentry, new_dir, new_dentry); return logfs_rename_cross(old_dir, old_dentry, new_dir, new_dentry); } /* No locking done here, as this is called before .get_sb() returns. */ int logfs_replay_journal(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct inode *inode; u64 ino, pos; int err; if (super->s_victim_ino) { /* delete victim inode */ ino = super->s_victim_ino; printk(KERN_INFO"LogFS: delete unmapped inode #%llx\n", ino); inode = logfs_iget(sb, ino); if (IS_ERR(inode)) goto fail; LOGFS_BUG_ON(i_size_read(inode) > 0, sb); super->s_victim_ino = 0; err = logfs_remove_inode(inode); iput(inode); if (err) { super->s_victim_ino = ino; goto fail; } } if (super->s_rename_dir) { /* delete old dd from rename */ ino = super->s_rename_dir; pos = super->s_rename_pos; printk(KERN_INFO"LogFS: delete unbacked dentry (%llx, %llx)\n", ino, pos); inode = logfs_iget(sb, ino); if (IS_ERR(inode)) goto fail; super->s_rename_dir = 0; super->s_rename_pos = 0; err = logfs_delete_dd(inode, pos); iput(inode); if (err) { super->s_rename_dir = ino; super->s_rename_pos = pos; goto fail; } } return 0; fail: LOGFS_BUG(sb); return -EIO; } const struct inode_operations logfs_symlink_iops = { .readlink = generic_readlink, .follow_link = page_follow_link_light, }; const struct inode_operations logfs_dir_iops = { .create = logfs_create, .link = logfs_link, .lookup = logfs_lookup, .mkdir = logfs_mkdir, .mknod = 
logfs_mknod, .rename = logfs_rename, .rmdir = logfs_rmdir, .symlink = logfs_symlink, .unlink = logfs_unlink, }; const struct file_operations logfs_dir_fops = { .fsync = logfs_fsync, .unlocked_ioctl = logfs_ioctl, .readdir = logfs_readdir, .read = generic_read_dir, .llseek = default_llseek, };
gpl-2.0
cile381/H815_kernel
drivers/power/qpnp-vm-bms.c
242
111846
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "BMS: %s: " fmt, __func__ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/cdev.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/rtc.h> #include <linux/power_supply.h> #include <linux/fcntl.h> #include <linux/uaccess.h> #include <linux/spmi.h> #include <linux/wakelock.h> #include <linux/debugfs.h> #include <linux/qpnp/power-on.h> #include <linux/qpnp/qpnp-adc.h> #include <linux/of_batterydata.h> #include <linux/batterydata-interface.h> #include <linux/qpnp-revid.h> #include <uapi/linux/vm_bms.h> #define _BMS_MASK(BITS, POS) \ ((unsigned char)(((1 << (BITS)) - 1) << (POS))) #define BMS_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \ _BMS_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \ (RIGHT_BIT_POS)) /* Config / Data registers */ #define REVISION1_REG 0x0 #define STATUS1_REG 0x8 #define FSM_STATE_MASK BMS_MASK(5, 3) #define FSM_STATE_SHIFT 3 #define STATUS2_REG 0x9 #define FIFO_CNT_SD_MASK BMS_MASK(7, 4) #define FIFO_CNT_SD_SHIFT 4 #define MODE_CTL_REG 0x40 #define FORCE_S3_MODE BIT(0) #define ENABLE_S3_MODE BIT(1) #define FORCE_S2_MODE BIT(2) #define ENABLE_S2_MODE BIT(3) #define S2_MODE_MASK BMS_MASK(3, 2) #define S3_MODE_MASK BMS_MASK(1, 0) #define DATA_CTL1_REG 0x42 #define MASTER_HOLD_BIT BIT(0) #define DATA_CTL2_REG 0x43 #define 
FIFO_CNT_SD_CLR_BIT BIT(2) #define ACC_DATA_SD_CLR_BIT BIT(1) #define ACC_CNT_SD_CLR_BIT BIT(0) #define S3_OCV_TOL_CTL_REG 0x44 #define EN_CTL_REG 0x46 #define BMS_EN_BIT BIT(7) #define FIFO_LENGTH_REG 0x47 #define S1_FIFO_LENGTH_MASK BMS_MASK(3, 0) #define S2_FIFO_LENGTH_MASK BMS_MASK(7, 4) #define S2_FIFO_LENGTH_SHIFT 4 #define S1_SAMPLE_INTVL_REG 0x55 #define S2_SAMPLE_INTVL_REG 0x56 #define S3_SAMPLE_INTVL_REG 0x57 #define S1_ACC_CNT_REG 0x5E #define S2_ACC_CNT_REG 0x5F #define ACC_CNT_MASK BMS_MASK(2, 0) #define ACC_DATA0_SD_REG 0x63 #define ACC_CNT_SD_REG 0x67 #define OCV_DATA0_REG 0x6A #define FIFO_0_LSB_REG 0xC0 #define BMS_SOC_REG 0xB0 #define BMS_OCV_REG 0xB1 /* B1 & B2 */ #define SOC_STORAGE_MASK 0xFE #define CHARGE_INCREASE_STORAGE 0xB3 #define CHARGE_CYCLE_STORAGE_LSB 0xB4 /* B4 & B5 */ #define SEC_ACCESS 0xD0 #define QPNP_CHARGER_PRESENT BIT(7) /* Constants */ #define OCV_TOL_LSB_UV 300 #define MAX_OCV_TOL_THRESHOLD (OCV_TOL_LSB_UV * 0xFF) #define MAX_SAMPLE_COUNT 256 #define MAX_SAMPLE_INTERVAL 2550 #define BMS_READ_TIMEOUT 500 #define BMS_DEFAULT_TEMP 250 #define OCV_INVALID 0xFFFF #define SOC_INVALID 0xFF #define OCV_UNINITIALIZED 0xFFFF #define VBATT_ERROR_MARGIN 20000 #define CV_DROP_MARGIN 10000 #define MIN_OCV_UV 2000000 #define TIME_PER_PERCENT_UUC 60 #define IAVG_SAMPLES 16 #define MIN_SOC_UUC 3 #define QPNP_VM_BMS_DEV_NAME "qcom,qpnp-vm-bms" /* indicates the state of BMS */ enum { IDLE_STATE, S1_STATE, S2_STATE, S3_STATE, S7_STATE, }; enum { WRKARND_PON_OCV_COMP = BIT(0), }; struct bms_irq { int irq; unsigned long disabled; }; struct bms_wakeup_source { struct wakeup_source source; unsigned long disabled; }; struct temp_curr_comp_map { int temp_decideg; int current_ma; }; struct bms_dt_cfg { bool cfg_report_charger_eoc; bool cfg_force_bms_active_on_charger; bool cfg_force_s3_on_suspend; bool cfg_ignore_shutdown_soc; bool cfg_use_voltage_soc; int cfg_v_cutoff_uv; int cfg_max_voltage_uv; int cfg_r_conn_mohm; int cfg_shutdown_soc_valid_limit; 
int cfg_low_soc_calc_threshold; int cfg_low_soc_calculate_soc_ms; int cfg_low_voltage_threshold; int cfg_low_voltage_calculate_soc_ms; int cfg_low_soc_fifo_length; int cfg_calculate_soc_ms; int cfg_voltage_soc_timeout_ms; int cfg_s1_sample_interval_ms; int cfg_s2_sample_interval_ms; int cfg_s1_sample_count; int cfg_s2_sample_count; int cfg_s1_fifo_length; int cfg_s2_fifo_length; int cfg_disable_bms; int cfg_s3_ocv_tol_uv; int cfg_soc_resume_limit; int cfg_low_temp_threshold; int cfg_ibat_avg_samples; int cfg_battery_aging_comp; bool cfg_use_reported_soc; }; struct qpnp_bms_chip { struct device *dev; struct spmi_device *spmi; dev_t dev_no; u16 base; u8 revision[2]; u32 batt_pres_addr; u32 chg_pres_addr; /* status variables */ u8 current_fsm_state; bool last_soc_invalid; bool warm_reset; bool bms_psy_registered; bool battery_full; bool bms_dev_open; bool data_ready; bool apply_suspend_config; bool in_cv_state; bool low_soc_fifo_set; int battery_status; int calculated_soc; int current_now; int prev_current_now; int prev_voltage_based_soc; int calculate_soc_ms; int voltage_soc_uv; int battery_present; int last_soc; int last_soc_unbound; int last_soc_change_sec; int charge_start_tm_sec; int catch_up_time_sec; int delta_time_s; int uuc_delta_time_s; int ocv_at_100; int last_ocv_uv; int s2_fifo_length; int last_acc; int hi_power_state; unsigned int vadc_v0625; unsigned int vadc_v1250; unsigned long tm_sec; unsigned long workaround_flag; unsigned long uuc_tm_sec; u32 seq_num; u8 shutdown_soc; bool shutdown_soc_invalid; u16 last_ocv_raw; u32 shutdown_ocv; bool suspend_data_valid; int iavg_num_samples; unsigned int iavg_index; int iavg_samples_ma[IAVG_SAMPLES]; int iavg_ma; int prev_soc_uuc; int eoc_reported; u8 charge_increase; u16 charge_cycles; unsigned int start_soc; unsigned int end_soc; struct bms_battery_data *batt_data; struct bms_dt_cfg dt; struct dentry *debug_root; struct bms_wakeup_source vbms_lv_wake_source; struct bms_wakeup_source vbms_cv_wake_source; struct 
bms_wakeup_source vbms_soc_wake_source; wait_queue_head_t bms_wait_q; struct delayed_work monitor_soc_work; struct delayed_work voltage_soc_timeout_work; struct mutex bms_data_mutex; struct mutex bms_device_mutex; struct mutex last_soc_mutex; struct mutex state_change_mutex; struct class *bms_class; struct device *bms_device; struct cdev bms_cdev; struct qpnp_vm_bms_data bms_data; struct qpnp_vadc_chip *vadc_dev; struct qpnp_adc_tm_chip *adc_tm_dev; struct pmic_revid_data *revid_data; struct qpnp_adc_tm_btm_param vbat_monitor_params; struct bms_irq fifo_update_done_irq; struct bms_irq fsm_state_change_irq; struct power_supply bms_psy; struct power_supply *batt_psy; struct power_supply *usb_psy; bool reported_soc_in_use; bool charger_removed_since_full; bool charger_reinserted; bool reported_soc_high_current; int reported_soc; int reported_soc_change_sec; int reported_soc_delta; }; static struct qpnp_bms_chip *the_chip; static struct temp_curr_comp_map temp_curr_comp_lut[] = { {-300, 15}, {250, 17}, {850, 28}, }; static void disable_bms_irq(struct bms_irq *irq) { if (!__test_and_set_bit(0, &irq->disabled)) { disable_irq(irq->irq); pr_debug("disabled irq %d\n", irq->irq); } } static void bms_stay_awake(struct bms_wakeup_source *source) { if (__test_and_clear_bit(0, &source->disabled)) { __pm_stay_awake(&source->source); pr_debug("enabled source %s\n", source->source.name); } } static void bms_relax(struct bms_wakeup_source *source) { if (!__test_and_set_bit(0, &source->disabled)) { __pm_relax(&source->source); pr_debug("disabled source %s\n", source->source.name); } } static bool bms_wake_active(struct bms_wakeup_source *source) { return !source->disabled; } static int bound_soc(int soc) { soc = max(0, soc); soc = min(100, soc); return soc; } static char *qpnp_vm_bms_supplicants[] = { "battery", }; static int qpnp_read_wrapper(struct qpnp_bms_chip *chip, u8 *val, u16 base, int count) { int rc; struct spmi_device *spmi = chip->spmi; rc = 
spmi_ext_register_readl(spmi->ctrl, spmi->sid, base, val, count); if (rc) pr_err("SPMI read failed rc=%d\n", rc); return rc; } static int qpnp_write_wrapper(struct qpnp_bms_chip *chip, u8 *val, u16 base, int count) { int rc; struct spmi_device *spmi = chip->spmi; rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, base, val, count); if (rc) pr_err("SPMI write failed rc=%d\n", rc); return rc; } static int qpnp_masked_write_base(struct qpnp_bms_chip *chip, u16 addr, u8 mask, u8 val) { int rc; u8 reg; rc = qpnp_read_wrapper(chip, &reg, addr, 1); if (rc) { pr_err("read failed addr = %03X, rc = %d\n", addr, rc); return rc; } reg &= ~mask; reg |= val & mask; rc = qpnp_write_wrapper(chip, &reg, addr, 1); if (rc) pr_err("write failed addr = %03X, val = %02x, mask = %02x, reg = %02x, rc = %d\n", addr, val, mask, reg, rc); return rc; } static int qpnp_secure_write_wrapper(struct qpnp_bms_chip *chip, u8 *val, u16 base) { int rc; u8 reg; reg = 0xA5; rc = qpnp_write_wrapper(chip, &reg, chip->base + SEC_ACCESS, 1); if (rc) { pr_err("Error %d writing 0xA5 to 0x%x reg\n", rc, SEC_ACCESS); return rc; } rc = qpnp_write_wrapper(chip, val, base, 1); if (rc) pr_err("Error %d writing %d to 0x%x reg\n", rc, *val, base); return rc; } static int backup_ocv_soc(struct qpnp_bms_chip *chip, int ocv_uv, int soc) { int rc; u16 ocv_mv = ocv_uv / 1000; rc = qpnp_write_wrapper(chip, (u8 *)&ocv_mv, chip->base + BMS_OCV_REG, 2); if (rc) pr_err("Unable to backup OCV rc=%d\n", rc); rc = qpnp_masked_write_base(chip, chip->base + BMS_SOC_REG, SOC_STORAGE_MASK, (soc + 1) << 1); if (rc) pr_err("Unable to backup SOC rc=%d\n", rc); pr_debug("ocv_mv=%d soc=%d\n", ocv_mv, soc); return rc; } static int get_current_time(unsigned long *now_tm_sec) { struct rtc_time tm; struct rtc_device *rtc; int rc; rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); if (rtc == NULL) { pr_err("%s: unable to open rtc device (%s)\n", __FILE__, CONFIG_RTC_HCTOSYS_DEVICE); return -EINVAL; } rc = rtc_read_time(rtc, &tm); if (rc) { 
pr_err("Error reading rtc device (%s) : %d\n", CONFIG_RTC_HCTOSYS_DEVICE, rc); goto close_time; } rc = rtc_valid_tm(&tm); if (rc) { pr_err("Invalid RTC time (%s): %d\n", CONFIG_RTC_HCTOSYS_DEVICE, rc); goto close_time; } rtc_tm_to_time(&tm, now_tm_sec); close_time: rtc_class_close(rtc); return rc; } static int calculate_delta_time(unsigned long *time_stamp, int *delta_time_s) { unsigned long now_tm_sec = 0; /* default to delta time = 0 if anything fails */ *delta_time_s = 0; if (get_current_time(&now_tm_sec)) { pr_err("RTC read failed\n"); return 0; } *delta_time_s = (now_tm_sec - *time_stamp); /* remember this time */ *time_stamp = now_tm_sec; return 0; } static bool is_charger_present(struct qpnp_bms_chip *chip) { union power_supply_propval ret = {0,}; if (chip->usb_psy == NULL) chip->usb_psy = power_supply_get_by_name("usb"); if (chip->usb_psy) { chip->usb_psy->get_property(chip->usb_psy, POWER_SUPPLY_PROP_PRESENT, &ret); return ret.intval; } return false; } static bool is_battery_charging(struct qpnp_bms_chip *chip) { union power_supply_propval ret = {0,}; if (chip->batt_psy == NULL) chip->batt_psy = power_supply_get_by_name("battery"); if (chip->batt_psy) { /* if battery has been registered, use the type property */ chip->batt_psy->get_property(chip->batt_psy, POWER_SUPPLY_PROP_CHARGE_TYPE, &ret); return ret.intval != POWER_SUPPLY_CHARGE_TYPE_NONE; } /* Default to false if the battery power supply is not registered. 
*/ pr_debug("battery power supply is not registered\n"); return false; } #define BAT_PRES_BIT BIT(7) static bool is_battery_present(struct qpnp_bms_chip *chip) { union power_supply_propval ret = {0,}; int rc; u8 batt_pres; /* first try to use the batt_pres register if given */ if (chip->batt_pres_addr) { rc = qpnp_read_wrapper(chip, &batt_pres, chip->batt_pres_addr, 1); if (!rc && (batt_pres & BAT_PRES_BIT)) return true; else return false; } if (chip->batt_psy == NULL) chip->batt_psy = power_supply_get_by_name("battery"); if (chip->batt_psy) { /* if battery has been registered, use the present property */ chip->batt_psy->get_property(chip->batt_psy, POWER_SUPPLY_PROP_PRESENT, &ret); return ret.intval; } /* Default to false if the battery power supply is not registered. */ pr_debug("battery power supply is not registered\n"); return false; } #define BAT_REMOVED_OFFMODE_BIT BIT(6) static bool is_battery_replaced_in_offmode(struct qpnp_bms_chip *chip) { u8 batt_pres; int rc; if (chip->batt_pres_addr) { rc = qpnp_read_wrapper(chip, &batt_pres, chip->batt_pres_addr, 1); pr_debug("offmode removed: %02x\n", batt_pres); if (!rc && (batt_pres & BAT_REMOVED_OFFMODE_BIT)) return true; } return false; } static bool is_battery_taper_charging(struct qpnp_bms_chip *chip) { union power_supply_propval ret = {0,}; if (chip->batt_psy == NULL) chip->batt_psy = power_supply_get_by_name("battery"); if (chip->batt_psy) { chip->batt_psy->get_property(chip->batt_psy, POWER_SUPPLY_PROP_CHARGE_TYPE, &ret); return ret.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER; } return false; } static int master_hold_control(struct qpnp_bms_chip *chip, bool enable) { u8 reg = 0; int rc; reg = enable ? 
MASTER_HOLD_BIT : 0; rc = qpnp_secure_write_wrapper(chip, &reg, chip->base + DATA_CTL1_REG); if (rc) pr_err("Unable to write reg=%x rc=%d\n", DATA_CTL1_REG, rc); return rc; } static int force_fsm_state(struct qpnp_bms_chip *chip, u8 state) { int rc; u8 mode_ctl = 0; switch (state) { case S2_STATE: mode_ctl = (FORCE_S2_MODE | ENABLE_S2_MODE); break; case S3_STATE: mode_ctl = (FORCE_S3_MODE | ENABLE_S3_MODE); break; default: pr_debug("Invalid state %d\n", state); return -EINVAL; } rc = qpnp_secure_write_wrapper(chip, &mode_ctl, chip->base + MODE_CTL_REG); if (rc) { pr_err("Unable to write reg=%x rc=%d\n", MODE_CTL_REG, rc); return rc; } /* delay for the FSM state to take affect in hardware */ usleep_range(500, 600); pr_debug("force_mode=%d mode_cntl_reg=%x\n", state, mode_ctl); return 0; } static int get_sample_interval(struct qpnp_bms_chip *chip, u8 fsm_state, u32 *interval) { int rc; u8 val = 0, reg; *interval = 0; switch (fsm_state) { case S1_STATE: reg = S1_SAMPLE_INTVL_REG; break; case S2_STATE: reg = S2_SAMPLE_INTVL_REG; break; case S3_STATE: reg = S3_SAMPLE_INTVL_REG; break; default: pr_err("Invalid state %d\n", fsm_state); return -EINVAL; } rc = qpnp_read_wrapper(chip, &val, chip->base + reg, 1); if (rc) { pr_err("Failed to get state(%d) sample_interval, rc=%d\n", fsm_state, rc); return rc; } *interval = val * 10; return 0; } static int get_sample_count(struct qpnp_bms_chip *chip, u8 fsm_state, u32 *count) { int rc; u8 val = 0, reg; *count = 0; switch (fsm_state) { case S1_STATE: reg = S1_ACC_CNT_REG; break; case S2_STATE: reg = S2_ACC_CNT_REG; break; default: pr_err("Invalid state %d\n", fsm_state); return -EINVAL; } rc = qpnp_read_wrapper(chip, &val, chip->base + reg, 1); if (rc) { pr_err("Failed to get state(%d) sample_count, rc=%d\n", fsm_state, rc); return rc; } val &= ACC_CNT_MASK; *count = val ? 
(1 << (val + 1)) : 1; return 0; } static int get_fifo_length(struct qpnp_bms_chip *chip, u8 fsm_state, u32 *fifo_length) { int rc; u8 val = 0, reg, mask = 0, shift = 0; *fifo_length = 0; switch (fsm_state) { case S1_STATE: reg = FIFO_LENGTH_REG; mask = S1_FIFO_LENGTH_MASK; shift = 0; break; case S2_STATE: reg = FIFO_LENGTH_REG; mask = S2_FIFO_LENGTH_MASK; shift = S2_FIFO_LENGTH_SHIFT; break; default: pr_err("Invalid state %d\n", fsm_state); return -EINVAL; } rc = qpnp_read_wrapper(chip, &val, chip->base + reg, 1); if (rc) { pr_err("Failed to get state(%d) fifo_length, rc=%d\n", fsm_state, rc); return rc; } val &= mask; val >>= shift; *fifo_length = val; return 0; } static int set_fifo_length(struct qpnp_bms_chip *chip, u8 fsm_state, u32 fifo_length) { int rc; u8 reg, mask = 0, shift = 0; /* fifo_length of 1 is not supported due to a hardware issue */ if ((fifo_length <= 1) || (fifo_length > MAX_FIFO_REGS)) { pr_err("Invalid FIFO length = %d\n", fifo_length); return -EINVAL; } switch (fsm_state) { case S1_STATE: reg = FIFO_LENGTH_REG; mask = S1_FIFO_LENGTH_MASK; shift = 0; break; case S2_STATE: reg = FIFO_LENGTH_REG; mask = S2_FIFO_LENGTH_MASK; shift = S2_FIFO_LENGTH_SHIFT; break; default: pr_err("Invalid state %d\n", fsm_state); return -EINVAL; } rc = master_hold_control(chip, true); if (rc) pr_err("Unable to apply master_hold rc=%d\n", rc); rc = qpnp_masked_write_base(chip, chip->base + reg, mask, fifo_length << shift); if (rc) pr_err("Unable to set fifo length rc=%d\n", rc); rc = master_hold_control(chip, false); if (rc) pr_err("Unable to apply master_hold rc=%d\n", rc); return rc; } static int get_fsm_state(struct qpnp_bms_chip *chip, u8 *state) { int rc; /* * To read the STATUS1 register, write a value(any) to this register, * wait for 10ms and then read the register. 
*/ *state = 0; rc = qpnp_write_wrapper(chip, state, chip->base + STATUS1_REG, 1); if (rc) { pr_err("Unable to write STATUS1_REG rc=%d\n", rc); return rc; } usleep_range(10000, 11000); /* read the current FSM state */ rc = qpnp_read_wrapper(chip, state, chip->base + STATUS1_REG, 1); if (rc) { pr_err("Unable to read STATUS1_REG rc=%d\n", rc); return rc; } *state = (*state & FSM_STATE_MASK) >> FSM_STATE_SHIFT; return rc; } static int update_fsm_state(struct qpnp_bms_chip *chip) { u8 state = 0; int rc; mutex_lock(&chip->state_change_mutex); rc = get_fsm_state(chip, &state); if (rc) { pr_err("Unable to get fsm_state rc=%d\n", rc); goto fail_fsm; } chip->current_fsm_state = state; fail_fsm: mutex_unlock(&chip->state_change_mutex); return rc; } static int backup_charge_cycle(struct qpnp_bms_chip *chip) { int rc = 0; if (chip->charge_increase >= 0) { rc = qpnp_write_wrapper(chip, &chip->charge_increase, chip->base + CHARGE_INCREASE_STORAGE, 1); if (rc) pr_err("Unable to backup charge_increase rc=%d\n", rc); } if (chip->charge_cycles >= 0) { rc = qpnp_write_wrapper(chip, (u8 *)&chip->charge_cycles, chip->base + CHARGE_CYCLE_STORAGE_LSB, 2); if (rc) pr_err("Unable to backup charge_cycles rc=%d\n", rc); } pr_debug("%s storing charge_increase=%u charge_cycle=%u\n", rc ? 
"Unable to" : "Sucessfully", chip->charge_increase, chip->charge_cycles); return rc; } static int read_chgcycle_data_from_backup(struct qpnp_bms_chip *chip) { int rc; uint16_t temp_u16 = 0; u8 temp_u8 = 0; rc = qpnp_read_wrapper(chip, &temp_u8, chip->base + CHARGE_INCREASE_STORAGE, 1); if (rc) { pr_err("Unable to read charge_increase rc=%d\n", rc); return rc; } rc = qpnp_read_wrapper(chip, (u8 *)&temp_u16, chip->base + CHARGE_CYCLE_STORAGE_LSB, 2); if (rc) { pr_err("Unable to read charge_cycle rc=%d\n", rc); return rc; } if ((temp_u8 == 0xFF) || (temp_u16 == 0xFFFF)) { chip->charge_cycles = 0; chip->charge_increase = 0; pr_info("rejecting aging data charge_increase=%u charge_cycle=%u\n", temp_u8, temp_u16); rc = backup_charge_cycle(chip); if (rc) pr_err("Unable to reset charge cycles rc=%d\n", rc); } else { chip->charge_increase = temp_u8; chip->charge_cycles = temp_u16; } pr_debug("charge_increase=%u charge_cycle=%u\n", chip->charge_increase, chip->charge_cycles); return rc; } static int calculate_uuc_iavg(struct qpnp_bms_chip *chip) { int i; int iavg_ma = chip->current_now / 1000; /* only continue if ibat has changed */ if (chip->current_now == chip->prev_current_now) goto ibat_unchanged; else chip->prev_current_now = chip->current_now; chip->iavg_samples_ma[chip->iavg_index] = iavg_ma; chip->iavg_index = (chip->iavg_index + 1) % chip->dt.cfg_ibat_avg_samples; chip->iavg_num_samples++; if (chip->iavg_num_samples >= chip->dt.cfg_ibat_avg_samples) chip->iavg_num_samples = chip->dt.cfg_ibat_avg_samples; if (chip->iavg_num_samples) { iavg_ma = 0; /* maintain a 16 sample average of ibat */ for (i = 0; i < chip->iavg_num_samples; i++) { pr_debug("iavg_samples_ma[%d] = %d\n", i, chip->iavg_samples_ma[i]); iavg_ma += chip->iavg_samples_ma[i]; } chip->iavg_ma = DIV_ROUND_CLOSEST(iavg_ma, chip->iavg_num_samples); } ibat_unchanged: pr_debug("current_now_ma=%d averaged_iavg_ma=%d\n", chip->current_now / 1000, chip->iavg_ma); return chip->iavg_ma; } static int 
adjust_uuc(struct qpnp_bms_chip *chip, int soc_uuc) { int max_percent_change; calculate_delta_time(&chip->uuc_tm_sec, &chip->uuc_delta_time_s); /* make sure that the UUC changes 1% at a time */ max_percent_change = max(chip->uuc_delta_time_s / TIME_PER_PERCENT_UUC, 1); if (chip->prev_soc_uuc == -EINVAL) { /* start with a minimum UUC if the initial UUC is high */ if (soc_uuc > MIN_SOC_UUC) chip->prev_soc_uuc = MIN_SOC_UUC; else chip->prev_soc_uuc = soc_uuc; } else { if (abs(chip->prev_soc_uuc - soc_uuc) <= max_percent_change) chip->prev_soc_uuc = soc_uuc; else if (soc_uuc > chip->prev_soc_uuc) chip->prev_soc_uuc += max_percent_change; else chip->prev_soc_uuc -= max_percent_change; } pr_debug("soc_uuc=%d new_soc_uuc=%d\n", soc_uuc, chip->prev_soc_uuc); return chip->prev_soc_uuc; } static int lookup_soc_ocv(struct qpnp_bms_chip *chip, int ocv_uv, int batt_temp) { int soc_ocv = 0, soc_cutoff = 0, soc_final = 0; int fcc, acc, soc_uuc = 0, soc_acc = 0, iavg_ma = 0; soc_ocv = interpolate_pc(chip->batt_data->pc_temp_ocv_lut, batt_temp, ocv_uv / 1000); soc_cutoff = interpolate_pc(chip->batt_data->pc_temp_ocv_lut, batt_temp, chip->dt.cfg_v_cutoff_uv / 1000); soc_final = DIV_ROUND_CLOSEST(100 * (soc_ocv - soc_cutoff), (100 - soc_cutoff)); if (chip->batt_data->ibat_acc_lut) { /* Apply ACC logic only if we discharging */ if (!is_battery_charging(chip) && chip->current_now > 0) { /* * IBAT averaging is disabled at low temp. * allowing the SOC to catcup quickly. 
*/ if (batt_temp > chip->dt.cfg_low_temp_threshold) iavg_ma = calculate_uuc_iavg(chip); else iavg_ma = chip->current_now / 1000; fcc = interpolate_fcc(chip->batt_data->fcc_temp_lut, batt_temp); acc = interpolate_acc(chip->batt_data->ibat_acc_lut, batt_temp, iavg_ma); if (acc <= 0) { if (chip->last_acc) acc = chip->last_acc; else acc = fcc; } soc_uuc = ((fcc - acc) * 100) / fcc; if (batt_temp > chip->dt.cfg_low_temp_threshold) soc_uuc = adjust_uuc(chip, soc_uuc); soc_acc = DIV_ROUND_CLOSEST(100 * (soc_ocv - soc_uuc), (100 - soc_uuc)); pr_debug("fcc=%d acc=%d soc_final=%d soc_uuc=%d soc_acc=%d current_now=%d iavg_ma=%d\n", fcc, acc, soc_final, soc_uuc, soc_acc, chip->current_now / 1000, iavg_ma); soc_final = soc_acc; chip->last_acc = acc; } else { /* charging - reset all the counters */ chip->last_acc = 0; chip->iavg_num_samples = 0; chip->iavg_index = 0; chip->iavg_ma = 0; chip->prev_current_now = 0; chip->prev_soc_uuc = -EINVAL; } } soc_final = bound_soc(soc_final); pr_debug("soc_final=%d soc_ocv=%d soc_cutoff=%d ocv_uv=%u batt_temp=%d\n", soc_final, soc_ocv, soc_cutoff, ocv_uv, batt_temp); return soc_final; } #define V_PER_BIT_MUL_FACTOR 97656 #define V_PER_BIT_DIV_FACTOR 1000 #define VADC_INTRINSIC_OFFSET 0x6000 static int vadc_reading_to_uv(int reading, bool vadc_bms) { int64_t value; if (!vadc_bms) { /* * All the BMS H/W VADC values are pre-compensated * for VADC_INTRINSIC_OFFSET, subtract this offset * only if this reading is not obtained from BMS */ if (reading <= VADC_INTRINSIC_OFFSET) return 0; reading -= VADC_INTRINSIC_OFFSET; } value = (reading * V_PER_BIT_MUL_FACTOR); return div_u64(value, (u32)V_PER_BIT_DIV_FACTOR); } static int get_calculation_delay_ms(struct qpnp_bms_chip *chip) { if (bms_wake_active(&chip->vbms_lv_wake_source)) return chip->dt.cfg_low_voltage_calculate_soc_ms; if (chip->calculated_soc < chip->dt.cfg_low_soc_calc_threshold) return chip->dt.cfg_low_soc_calculate_soc_ms; else return chip->dt.cfg_calculate_soc_ms; } #define VADC_CALIB_UV 
625000 #define VBATT_MUL_FACTOR 3 static int adjust_vbatt_reading(struct qpnp_bms_chip *chip, int reading_uv) { s64 numerator, denominator; if (reading_uv == 0) return 0; /* don't adjust if not calibrated */ if (chip->vadc_v0625 == 0 || chip->vadc_v1250 == 0) { pr_debug("No cal yet return %d\n", VBATT_MUL_FACTOR * reading_uv); return VBATT_MUL_FACTOR * reading_uv; } numerator = ((s64)reading_uv - chip->vadc_v0625) * VADC_CALIB_UV; denominator = (s64)chip->vadc_v1250 - chip->vadc_v0625; if (denominator == 0) return reading_uv * VBATT_MUL_FACTOR; return (VADC_CALIB_UV + div_s64(numerator, denominator)) * VBATT_MUL_FACTOR; } static int calib_vadc(struct qpnp_bms_chip *chip) { int rc, raw_0625, raw_1250; struct qpnp_vadc_result result; rc = qpnp_vadc_read(chip->vadc_dev, REF_625MV, &result); if (rc) { pr_debug("vadc read failed with rc = %d\n", rc); return rc; } raw_0625 = result.adc_code; rc = qpnp_vadc_read(chip->vadc_dev, REF_125V, &result); if (rc) { pr_debug("vadc read failed with rc = %d\n", rc); return rc; } raw_1250 = result.adc_code; chip->vadc_v0625 = vadc_reading_to_uv(raw_0625, false); chip->vadc_v1250 = vadc_reading_to_uv(raw_1250, false); pr_debug("vadc calib: 0625=%d raw (%d uv), 1250=%d raw (%d uv)\n", raw_0625, chip->vadc_v0625, raw_1250, chip->vadc_v1250); return 0; } static int convert_vbatt_raw_to_uv(struct qpnp_bms_chip *chip, u16 reading, bool is_pon_ocv) { int64_t uv, vbatt; int rc; uv = vadc_reading_to_uv(reading, true); pr_debug("%u raw converted into %lld uv\n", reading, uv); uv = adjust_vbatt_reading(chip, uv); pr_debug("adjusted into %lld uv\n", uv); vbatt = uv; rc = qpnp_vbat_sns_comp_result(chip->vadc_dev, &uv, is_pon_ocv); if (rc) { pr_debug("Vbatt compensation failed rc = %d\n", rc); uv = vbatt; } else { pr_debug("temp-compensated %lld into %lld uv\n", vbatt, uv); } return uv; } static void convert_and_store_ocv(struct qpnp_bms_chip *chip, int batt_temp, bool is_pon_ocv) { int rc; rc = calib_vadc(chip); if (rc) pr_err("Vadc reference 
voltage read failed, rc = %d\n", rc); chip->last_ocv_uv = convert_vbatt_raw_to_uv(chip, chip->last_ocv_raw, is_pon_ocv); pr_debug("last_ocv_uv = %d\n", chip->last_ocv_uv); } static int read_and_update_ocv(struct qpnp_bms_chip *chip, int batt_temp, bool is_pon_ocv) { int rc, ocv_uv; u16 ocv_data = 0; /* read the BMS h/w OCV */ rc = qpnp_read_wrapper(chip, (u8 *)&ocv_data, chip->base + OCV_DATA0_REG, 2); if (rc) { pr_err("Error reading ocv: rc = %d\n", rc); return -ENXIO; } /* check if OCV is within limits */ ocv_uv = convert_vbatt_raw_to_uv(chip, ocv_data, is_pon_ocv); if (ocv_uv < MIN_OCV_UV) { pr_err("OCV too low or invalid (%d)- rejecting it\n", ocv_uv); return 0; } if ((chip->last_ocv_raw == OCV_UNINITIALIZED) || (chip->last_ocv_raw != ocv_data)) { pr_debug("new OCV!\n"); chip->last_ocv_raw = ocv_data; convert_and_store_ocv(chip, batt_temp, is_pon_ocv); } pr_debug("ocv_raw=0x%x last_ocv_raw=0x%x last_ocv_uv=%d\n", ocv_data, chip->last_ocv_raw, chip->last_ocv_uv); return 0; } static int get_battery_voltage(struct qpnp_bms_chip *chip, int *result_uv) { int rc; struct qpnp_vadc_result adc_result; rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &adc_result); if (rc) { pr_err("error reading adc channel = %d, rc = %d\n", VBAT_SNS, rc); return rc; } pr_debug("mvolts phy=%lld meas=0x%llx\n", adc_result.physical, adc_result.measurement); *result_uv = (int)adc_result.physical; return 0; } static int get_battery_status(struct qpnp_bms_chip *chip) { union power_supply_propval ret = {0,}; if (chip->batt_psy == NULL) chip->batt_psy = power_supply_get_by_name("battery"); if (chip->batt_psy) { /* if battery has been registered, use the status property */ chip->batt_psy->get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS, &ret); return ret.intval; } /* Default to false if the battery power supply is not registered. 
*/ pr_debug("battery power supply is not registered\n"); return POWER_SUPPLY_STATUS_UNKNOWN; } static int get_batt_therm(struct qpnp_bms_chip *chip, int *batt_temp) { int rc; struct qpnp_vadc_result result; rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &result); if (rc) { pr_err("error reading adc channel = %d, rc = %d\n", LR_MUX1_BATT_THERM, rc); return rc; } pr_debug("batt_temp phy = %lld meas = 0x%llx\n", result.physical, result.measurement); *batt_temp = (int)result.physical; return 0; } static int get_prop_bms_rbatt(struct qpnp_bms_chip *chip) { return chip->batt_data->default_rbatt_mohm; } static int get_rbatt(struct qpnp_bms_chip *chip, int soc, int batt_temp) { int rbatt_mohm, scalefactor; rbatt_mohm = chip->batt_data->default_rbatt_mohm; if (chip->batt_data->rbatt_sf_lut == NULL) { pr_debug("RBATT = %d\n", rbatt_mohm); return rbatt_mohm; } scalefactor = interpolate_scalingfactor(chip->batt_data->rbatt_sf_lut, batt_temp, soc); rbatt_mohm = (rbatt_mohm * scalefactor) / 100; if (chip->dt.cfg_r_conn_mohm > 0) rbatt_mohm += chip->dt.cfg_r_conn_mohm; return rbatt_mohm; } static void charging_began(struct qpnp_bms_chip *chip) { int rc; u8 state; mutex_lock(&chip->last_soc_mutex); chip->charge_start_tm_sec = 0; chip->catch_up_time_sec = 0; chip->start_soc = chip->last_soc; /* * reset ocv_at_100 to -EINVAL to indicate * start of charging. */ chip->ocv_at_100 = -EINVAL; mutex_unlock(&chip->last_soc_mutex); /* * If the BMS state is not in S2, force it in S2. Such * a condition can only occur if we are coming out of * suspend. 
*/ mutex_lock(&chip->state_change_mutex); rc = get_fsm_state(chip, &state); if (rc) pr_err("Unable to get FSM state rc=%d\n", rc); if (rc || (state != S2_STATE)) { pr_debug("Forcing S2 state\n"); rc = force_fsm_state(chip, S2_STATE); if (rc) pr_err("Unable to set FSM state rc=%d\n", rc); } mutex_unlock(&chip->state_change_mutex); } static void charging_ended(struct qpnp_bms_chip *chip) { u8 state; int rc, status = get_battery_status(chip); mutex_lock(&chip->last_soc_mutex); chip->charge_start_tm_sec = 0; chip->catch_up_time_sec = 0; chip->end_soc = chip->last_soc; if (status == POWER_SUPPLY_STATUS_FULL) chip->last_soc_invalid = true; mutex_unlock(&chip->last_soc_mutex); /* * If the BMS state is not in S2, force it in S2. Such * a condition can only occur if we are coming out of * suspend. */ mutex_lock(&chip->state_change_mutex); rc = get_fsm_state(chip, &state); if (rc) pr_err("Unable to get FSM state rc=%d\n", rc); if (rc || (state != S2_STATE)) { pr_debug("Forcing S2 state\n"); rc = force_fsm_state(chip, S2_STATE); if (rc) pr_err("Unable to set FSM state rc=%d\n", rc); } mutex_unlock(&chip->state_change_mutex); /* Calculate charge accumulated and update charge cycle */ if (chip->dt.cfg_battery_aging_comp && (chip->end_soc > chip->start_soc)) { chip->charge_increase += (chip->end_soc - chip->start_soc); if (chip->charge_increase > 100) { chip->charge_cycles++; chip->charge_increase %= 100; } pr_debug("start_soc=%u end_soc=%u charge_cycles=%u charge_increase=%u\n", chip->start_soc, chip->end_soc, chip->charge_cycles, chip->charge_increase); rc = backup_charge_cycle(chip); if (rc) pr_err("Unable to store charge cycles rc=%d\n", rc); } } static int estimate_ocv(struct qpnp_bms_chip *chip) { int i, rc, vbatt = 0, vbatt_final = 0; for (i = 0; i < 5; i++) { rc = get_battery_voltage(chip, &vbatt); if (rc) { pr_err("Unable to read battery-voltage rc=%d\n", rc); return rc; } /* * Conservatively select the lowest vbatt to avoid reporting * a higher ocv due to variations in 
bootup current. */ if (i == 0) vbatt_final = vbatt; else if (vbatt < vbatt_final) vbatt_final = vbatt; msleep(20); } /* * TODO: Revisit the OCV calcuations to use approximate ibatt * and rbatt. */ return vbatt_final; } static int scale_soc_while_chg(struct qpnp_bms_chip *chip, int chg_time_sec, int catch_up_sec, int new_soc, int prev_soc) { int scaled_soc; int numerator; /* * Don't report a high value immediately slowly scale the * value from prev_soc to the new soc based on a charge time * weighted average */ pr_debug("cts=%d catch_up_sec=%d\n", chg_time_sec, catch_up_sec); if (catch_up_sec == 0) return new_soc; if (chg_time_sec > catch_up_sec) return new_soc; numerator = (catch_up_sec - chg_time_sec) * prev_soc + chg_time_sec * new_soc; scaled_soc = numerator / catch_up_sec; pr_debug("cts=%d new_soc=%d prev_soc=%d scaled_soc=%d\n", chg_time_sec, new_soc, prev_soc, scaled_soc); return scaled_soc; } static int report_eoc(struct qpnp_bms_chip *chip) { int rc = -EINVAL; union power_supply_propval ret = {0,}; if (chip->batt_psy == NULL) chip->batt_psy = power_supply_get_by_name("battery"); if (chip->batt_psy) { rc = chip->batt_psy->get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS, &ret); if (rc) { pr_err("Unable to get battery 'STATUS' rc=%d\n", rc); } else if (ret.intval != POWER_SUPPLY_STATUS_FULL) { pr_debug("Report EOC to charger\n"); ret.intval = POWER_SUPPLY_STATUS_FULL; rc = chip->batt_psy->set_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS, &ret); if (rc) { pr_err("Unable to set 'STATUS' rc=%d\n", rc); return rc; } chip->eoc_reported = true; } } else { pr_err("battery psy not registered\n"); } return rc; } static void check_recharge_condition(struct qpnp_bms_chip *chip) { int rc; union power_supply_propval ret = {0,}; int status = get_battery_status(chip); if (chip->last_soc > chip->dt.cfg_soc_resume_limit) return; if (status == POWER_SUPPLY_STATUS_UNKNOWN) { pr_debug("Unable to read battery status\n"); return; } /* Report recharge to charger for SOC 
based resume of charging */
	if ((status != POWER_SUPPLY_STATUS_CHARGING) && chip->eoc_reported) {
		/* SOC fell below the resume limit - ask the charger to restart */
		ret.intval = POWER_SUPPLY_STATUS_CHARGING;
		rc = chip->batt_psy->set_property(chip->batt_psy,
				POWER_SUPPLY_PROP_STATUS, &ret);
		if (rc < 0) {
			pr_err("Unable to set battery property rc=%d\n", rc);
		} else {
			pr_info("soc dropped below resume_soc soc=%d resume_soc=%d, restart charging\n",
					chip->last_soc,
					chip->dt.cfg_soc_resume_limit);
			chip->eoc_reported = false;
		}
	}
}

/*
 * Evaluate end-of-charge (EOC) after last_soc has been updated.
 * Reports EOC to the charger when last_soc hits 100, latches the OCV
 * seen at 100% in chip->ocv_at_100, and sends a DISCHARGING status
 * once when SOC later drops below 100.
 */
static void check_eoc_condition(struct qpnp_bms_chip *chip)
{
	int rc;
	int status = get_battery_status(chip);
	union power_supply_propval ret = {0,};

	if (status == POWER_SUPPLY_STATUS_UNKNOWN) {
		pr_err("Unable to read battery status\n");
		return;
	}

	/*
	 * Check battery status:
	 * if last_soc is 100 and battery status is still charging
	 * reset ocv_at_100 and force reporting of eoc to charger.
	 */
	if ((chip->last_soc == 100) && (status == POWER_SUPPLY_STATUS_CHARGING))
		chip->ocv_at_100 = -EINVAL;

	/*
	 * Store the OCV value at 100. If the new ocv is greater than
	 * ocv_at_100 (battery settles), update ocv_at_100. Else
	 * if the SOC drops, reset ocv_at_100.
	 */
	if (chip->ocv_at_100 == -EINVAL) {
		if (chip->last_soc == 100) {
			if (chip->dt.cfg_report_charger_eoc) {
				rc = report_eoc(chip);
				if (!rc) {
					/*
					 * update ocv_at_100 only if EOC is
					 * reported successfully.
					 */
					chip->ocv_at_100 = chip->last_ocv_uv;
					pr_debug("Battery FULL\n");
				} else {
					pr_err("Unable to report eoc rc=%d\n",
							rc);
					chip->ocv_at_100 = -EINVAL;
				}
			}
			if (chip->dt.cfg_use_reported_soc) {
				/* begin reported_soc process */
				chip->reported_soc_in_use = true;
				chip->charger_removed_since_full = false;
				chip->charger_reinserted = false;
				chip->reported_soc = 100;
				pr_debug("Begin reported_soc process\n");
			}
		}
	} else {
		if (chip->last_ocv_uv >= chip->ocv_at_100) {
			/* battery settled at/above the latched OCV - hold 100 */
			pr_debug("new_ocv(%d) > ocv_at_100(%d) maintaining SOC to 100\n",
					chip->last_ocv_uv, chip->ocv_at_100);
			chip->ocv_at_100 = chip->last_ocv_uv;
			chip->last_soc = 100;
		} else if (chip->last_soc != 100) {
			/*
			 * Report that the battery is discharging.
			 * This gets called once when the SOC falls
			 * below 100.
			 */
			if (chip->reported_soc_in_use
					&& chip->reported_soc == 100) {
				pr_debug("reported_soc=100, last_soc=%d, do not send DISCHARING status\n",
						chip->last_soc);
			} else {
				ret.intval = POWER_SUPPLY_STATUS_DISCHARGING;
				chip->batt_psy->set_property(chip->batt_psy,
					POWER_SUPPLY_PROP_STATUS, &ret);
			}
			pr_debug("SOC dropped (%d) discarding ocv_at_100\n",
					chip->last_soc);
			chip->ocv_at_100 = -EINVAL;
		}
	}
}

/* Return the SOC computed from voltage alone (voltage-based-soc mode). */
static int report_voltage_based_soc(struct qpnp_bms_chip *chip)
{
	pr_debug("Reported voltage based soc = %d\n",
			chip->prev_voltage_based_soc);
	return chip->prev_voltage_based_soc;
}

/*
 * Compute the SOC to expose to userspace while the "reported_soc"
 * hold-at-100 process is active (after a full charge, until the UI
 * SOC converges back onto last_soc).
 */
static int prepare_reported_soc(struct qpnp_bms_chip *chip)
{
	if (chip->charger_removed_since_full == false) {
		/*
		 * charger is not removed since full,
		 * keep reported_soc as 100 and calculate the delta soc
		 * between reported_soc and last_soc
		 */
		chip->reported_soc = 100;
		chip->reported_soc_delta = 100 - chip->last_soc;
		pr_debug("Keep at reported_soc 100, reported_soc_delta=%d, last_soc=%d\n",
				chip->reported_soc_delta,
				chip->last_soc);
	} else {
		/* charger is removed since full */
		if (chip->charger_reinserted) {
			/*
			 * charger reinserted, keep the reported_soc
			 * until it equals to last_soc.
			 */
			if (chip->reported_soc == chip->last_soc) {
				chip->reported_soc_in_use = false;
				chip->reported_soc_high_current = false;
				pr_debug("reported_soc equals to last_soc, stop reported_soc process\n");
			}
			chip->reported_soc_change_sec = 0;
		}
	}
	pr_debug("Reporting reported_soc=%d, last_soc=%d\n",
			chip->reported_soc, chip->last_soc);
	return chip->reported_soc;
}

#define SOC_CATCHUP_SEC_MAX		600
#define SOC_CATCHUP_SEC_PER_PERCENT	60
#define MAX_CATCHUP_SOC	(SOC_CATCHUP_SEC_MAX / SOC_CATCHUP_SEC_PER_PERCENT)
#define SOC_CHANGE_PER_SEC		5
/*
 * Produce the SOC reported to userspace: rate-limits changes of
 * last_soc towards calculated_soc (1%/call normally, faster near
 * cutoff or low temperature), scales SOC while charging, triggers
 * EOC/recharge checks, and backs up OCV+SOC to the shadow registers.
 * Caller must hold chip->last_soc_mutex (see report_state_of_charge).
 */
static int report_vm_bms_soc(struct qpnp_bms_chip *chip)
{
	int soc, soc_change, batt_temp, rc;
	int time_since_last_change_sec = 0, charge_time_sec = 0;
	unsigned long last_change_sec;
	bool charging;

	soc = chip->calculated_soc;

	last_change_sec = chip->last_soc_change_sec;
	calculate_delta_time(&last_change_sec, &time_since_last_change_sec);

	charging = is_battery_charging(chip);

	pr_debug("charging=%d last_soc=%d last_soc_unbound=%d\n",
		charging, chip->last_soc, chip->last_soc_unbound);
	/*
	 * account for charge time - limit it to SOC_CATCHUP_SEC to
	 * avoid overflows when charging continues for extended periods
	 */
	if (charging && chip->last_soc != -EINVAL) {
		if (chip->charge_start_tm_sec == 0) {
			/*
			 * calculating soc for the first time
			 * after start of chg. Initialize catchup time
			 */
			if (abs(soc - chip->last_soc) < MAX_CATCHUP_SOC)
				chip->catch_up_time_sec =
				(soc - chip->last_soc)
					* SOC_CATCHUP_SEC_PER_PERCENT;
			else
				chip->catch_up_time_sec = SOC_CATCHUP_SEC_MAX;

			if (chip->catch_up_time_sec < 0)
				chip->catch_up_time_sec = 0;
			chip->charge_start_tm_sec = last_change_sec;
		}

		charge_time_sec = min(SOC_CATCHUP_SEC_MAX,
				(int)last_change_sec
					- chip->charge_start_tm_sec);

		/* end catchup if calculated soc and last soc are same */
		if (chip->last_soc == soc)
			chip->catch_up_time_sec = 0;
	}

	if (chip->last_soc != -EINVAL) {
		/*
		 * last_soc < soc  ... if we have not been charging at all
		 * since the last time this was called, report previous SoC.
		 * Otherwise, scale and catch up.
		 */
		rc = get_batt_therm(chip, &batt_temp);
		if (rc)
			batt_temp = BMS_DEFAULT_TEMP;

		if (chip->last_soc < soc && !charging)
			soc = chip->last_soc;
		else if (chip->last_soc < soc && soc != 100)
			soc = scale_soc_while_chg(chip, charge_time_sec,
					chip->catch_up_time_sec,
					soc, chip->last_soc);

		/*
		 * if the battery is close to cutoff or if the batt_temp
		 * is under the low-temp threshold allow bigger change
		 */
		if (bms_wake_active(&chip->vbms_lv_wake_source) ||
			(batt_temp <= chip->dt.cfg_low_temp_threshold))
			soc_change = min((int)abs(chip->last_soc - soc),
				time_since_last_change_sec);
		else
			soc_change = min((int)abs(chip->last_soc - soc),
				time_since_last_change_sec
					/ SOC_CHANGE_PER_SEC);

		if (chip->last_soc_unbound) {
			chip->last_soc_unbound = false;
		} else {
			/*
			 * if soc have not been unbound by resume,
			 * only change reported SoC by 1.
			 */
			soc_change = min(1, soc_change);
		}

		if (soc < chip->last_soc && soc != 0)
			soc = chip->last_soc - soc_change;
		if (soc > chip->last_soc && soc != 100)
			soc = chip->last_soc + soc_change;
	}

	if (chip->last_soc != soc && !chip->last_soc_unbound)
		chip->last_soc_change_sec = last_change_sec;

	/*
	 * Check/update eoc under following condition:
	 * if there is change in soc:
	 *	soc != chip->last_soc
	 * during bootup if soc is 100:
	 */
	soc = bound_soc(soc);
	if ((soc != chip->last_soc) || (soc == 100)) {
		chip->last_soc = soc;
		check_eoc_condition(chip);
		if ((chip->dt.cfg_soc_resume_limit > 0) && !charging)
			check_recharge_condition(chip);
	}

	pr_debug("last_soc=%d calculated_soc=%d soc=%d time_since_last_change=%d\n",
			chip->last_soc, chip->calculated_soc,
			soc, time_since_last_change_sec);

	/*
	 * Backup the actual ocv (last_ocv_uv) and not the
	 * last_soc-interpolated ocv. This makes sure that
	 * the BMS algorithm always uses the correct ocv and
	 * can catch up on the last_soc (across reboots).
	 * We do not want the algorithm to be based of a wrong
	 * initial OCV.
	 */
	backup_ocv_soc(chip, chip->last_ocv_uv, chip->last_soc);

	if (chip->reported_soc_in_use)
		return prepare_reported_soc(chip);

	pr_debug("Reported SOC=%d\n", chip->last_soc);

	return chip->last_soc;
}

/*
 * Entry point for CAPACITY reads: serializes on last_soc_mutex and
 * dispatches to voltage-based or VM-BMS SOC reporting.
 */
static int report_state_of_charge(struct qpnp_bms_chip *chip)
{
	int soc;

	mutex_lock(&chip->last_soc_mutex);

	if (chip->dt.cfg_use_voltage_soc)
		soc = report_voltage_based_soc(chip);
	else
		soc = report_vm_bms_soc(chip);

	mutex_unlock(&chip->last_soc_mutex);

	return soc;
}

/*
 * ADC threshold-monitor (BTM) callback for VBAT_SNS: holds the
 * low-voltage wakeup source while vbat is low, releases it when vbat
 * recovers, and re-arms the opposite threshold. Always re-arms the
 * channel measurement on exit.
 */
static void btm_notify_vbat(enum qpnp_tm_state state, void *ctx)
{
	struct qpnp_bms_chip *chip = ctx;
	int vbat_uv;
	int rc;

	rc = get_battery_voltage(chip, &vbat_uv);
	if (rc) {
		pr_err("error reading vbat_sns adc channel=%d, rc=%d\n",
							VBAT_SNS, rc);
		goto out;
	}

	pr_debug("vbat is at %d, state is at %d\n", vbat_uv, state);

	if (state == ADC_TM_LOW_STATE) {
		pr_debug("low voltage btm notification triggered\n");
		if (vbat_uv <= (chip->vbat_monitor_params.low_thr
					+ VBATT_ERROR_MARGIN)) {
			if (!bms_wake_active(&chip->vbms_lv_wake_source))
				bms_stay_awake(&chip->vbms_lv_wake_source);

			chip->vbat_monitor_params.state_request =
						ADC_TM_HIGH_THR_ENABLE;
		} else {
			/* spurious notification - vbat not actually low */
			pr_debug("faulty btm trigger, discarding\n");
			goto out;
		}
	} else if (state == ADC_TM_HIGH_STATE) {
		pr_debug("high voltage btm notification triggered\n");
		if (vbat_uv > chip->vbat_monitor_params.high_thr) {
			chip->vbat_monitor_params.state_request =
						ADC_TM_LOW_THR_ENABLE;
			if (bms_wake_active(&chip->vbms_lv_wake_source))
				bms_relax(&chip->vbms_lv_wake_source);
		} else {
			pr_debug("faulty btm trigger, discarding\n");
			goto out;
		}
	} else {
		pr_debug("unknown voltage notification state: %d\n", state);
		goto out;
	}

	if (chip->bms_psy_registered)
		power_supply_changed(&chip->bms_psy);

out:
	qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
					&chip->vbat_monitor_params);
}

/*
 * Disable vbat threshold monitoring (used on battery removal) and
 * drop the low-voltage wakeup source if held.
 */
static int reset_vbat_monitoring(struct qpnp_bms_chip *chip)
{
	int rc;

	chip->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_DISABLE;

	rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
					&chip->vbat_monitor_params);
	if
(rc) {
		pr_err("tm disable failed: %d\n", rc);
		return rc;
	}

	if (bms_wake_active(&chip->vbms_lv_wake_source))
		bms_relax(&chip->vbms_lv_wake_source);

	return 0;
}

/*
 * Arm the ADC threshold monitor on VBAT_SNS with the low-voltage
 * threshold from DT; btm_notify_vbat() is invoked on crossings.
 */
static int setup_vbat_monitoring(struct qpnp_bms_chip *chip)
{
	int rc;

	chip->vbat_monitor_params.low_thr =
					chip->dt.cfg_low_voltage_threshold;
	chip->vbat_monitor_params.high_thr =
			chip->dt.cfg_low_voltage_threshold
					+ VBATT_ERROR_MARGIN;
	chip->vbat_monitor_params.state_request = ADC_TM_LOW_THR_ENABLE;
	chip->vbat_monitor_params.channel = VBAT_SNS;
	chip->vbat_monitor_params.btm_ctx = chip;
	chip->vbat_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
	chip->vbat_monitor_params.threshold_notification = &btm_notify_vbat;

	pr_debug("set low thr to %d and high to %d\n",
			chip->vbat_monitor_params.low_thr,
			chip->vbat_monitor_params.high_thr);

	rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
					&chip->vbat_monitor_params);
	if (rc) {
		pr_err("adc-tm setup failed: %d\n", rc);
		return rc;
	}

	pr_debug("vbat monitoring setup complete\n");

	return 0;
}

/*
 * Hold/release the low-voltage wakeup source depending on whether
 * vbat is at or below the configured low-voltage threshold.
 */
static void very_low_voltage_check(struct qpnp_bms_chip *chip, int vbat_uv)
{
	if (!bms_wake_active(&chip->vbms_lv_wake_source)
		&& (vbat_uv <= chip->dt.cfg_low_voltage_threshold)) {
		pr_debug("voltage=%d holding low voltage ws\n", vbat_uv);
		bms_stay_awake(&chip->vbms_lv_wake_source);
	} else if (bms_wake_active(&chip->vbms_lv_wake_source)
		&& (vbat_uv > chip->dt.cfg_low_voltage_threshold)) {
		pr_debug("voltage=%d releasing low voltage ws\n", vbat_uv);
		bms_relax(&chip->vbms_lv_wake_source);
	}
}

/*
 * Track the constant-voltage (CV) phase of charging: hold the CV
 * wakeup source while the battery is charging near max voltage (or
 * taper-charging), release it when vbat falls back or charging stops.
 */
static void cv_voltage_check(struct qpnp_bms_chip *chip, int vbat_uv)
{
	if (bms_wake_active(&chip->vbms_cv_wake_source)) {
		if ((vbat_uv < (chip->dt.cfg_max_voltage_uv -
				VBATT_ERROR_MARGIN + CV_DROP_MARGIN))
			&& !is_battery_taper_charging(chip)) {
			pr_debug("Fell below CV, releasing cv ws\n");
			chip->in_cv_state = false;
			bms_relax(&chip->vbms_cv_wake_source);
		} else if (!is_battery_charging(chip)) {
			pr_debug("charging stopped, releasing cv ws\n");
			chip->in_cv_state = false;
			bms_relax(&chip->vbms_cv_wake_source);
		}
	} else if (!bms_wake_active(&chip->vbms_cv_wake_source)
			&& is_battery_charging(chip)
			&& ((vbat_uv > (chip->dt.cfg_max_voltage_uv -
					VBATT_ERROR_MARGIN))
				|| is_battery_taper_charging(chip))) {
		pr_debug("CC_TO_CV voltage=%d holding cv ws\n", vbat_uv);
		chip->in_cv_state = true;
		bms_stay_awake(&chip->vbms_cv_wake_source);
	}
}

/*
 * At low SOC, shorten the S2-state FIFO length (from DT) so samples
 * arrive more often; restore the saved length once SOC recovers.
 * Serialized against FSM state changes via state_change_mutex.
 */
static void low_soc_check(struct qpnp_bms_chip *chip)
{
	int rc;

	if (chip->dt.cfg_low_soc_fifo_length < 1)
		return;

	mutex_lock(&chip->state_change_mutex);

	if (chip->calculated_soc <= chip->dt.cfg_low_soc_calc_threshold) {
		if (!chip->low_soc_fifo_set) {
			pr_debug("soc=%d (low-soc) setting fifo_length to %d\n",
						chip->calculated_soc,
					chip->dt.cfg_low_soc_fifo_length);
			rc = get_fifo_length(chip, S2_STATE,
						&chip->s2_fifo_length);
			if (rc) {
				pr_err("Unable to get_fifo_length rc=%d", rc);
				goto low_soc_exit;
			}
			rc = set_fifo_length(chip, S2_STATE,
					chip->dt.cfg_low_soc_fifo_length);
			if (rc) {
				pr_err("Unable to set_fifo_length rc=%d", rc);
				goto low_soc_exit;
			}
			chip->low_soc_fifo_set = true;
		}
	} else {
		if (chip->low_soc_fifo_set) {
			pr_debug("soc=%d setting back fifo_length to %d\n",
						chip->calculated_soc,
						chip->s2_fifo_length);
			rc = set_fifo_length(chip, S2_STATE,
						chip->s2_fifo_length);
			if (rc) {
				pr_err("Unable to set_fifo_length rc=%d", rc);
				goto low_soc_exit;
			}
			chip->low_soc_fifo_set = false;
		}
	}

low_soc_exit:
	mutex_unlock(&chip->state_change_mutex);
}

/*
 * Linear voltage-based SOC: maps vbat (averaged FIFO value if
 * available, else an instant ADC read) onto 0..100 between
 * v_cutoff and max_voltage; reports EOC at 100 if configured.
 */
static int calculate_soc_from_voltage(struct qpnp_bms_chip *chip)
{
	int voltage_range_uv, voltage_remaining_uv, voltage_based_soc;
	int rc, vbat_uv;

	/* check if we have the averaged fifo data */
	if (chip->voltage_soc_uv) {
		vbat_uv = chip->voltage_soc_uv;
	} else {
		rc = get_battery_voltage(chip, &vbat_uv);
		if (rc < 0) {
			pr_err("adc vbat failed err = %d\n", rc);
			return rc;
		}
		pr_debug("instant-voltage based voltage-soc\n");
	}

	voltage_range_uv = chip->dt.cfg_max_voltage_uv -
					chip->dt.cfg_v_cutoff_uv;
	voltage_remaining_uv = vbat_uv - chip->dt.cfg_v_cutoff_uv;
	voltage_based_soc = voltage_remaining_uv * 100 / voltage_range_uv;

	voltage_based_soc = clamp(voltage_based_soc, 0, 100);

	if (chip->prev_voltage_based_soc != voltage_based_soc
				&& chip->bms_psy_registered) {
		pr_debug("update bms_psy\n");
		power_supply_changed(&chip->bms_psy);
	}
	chip->prev_voltage_based_soc = voltage_based_soc;

	pr_debug("vbat used = %duv\n", vbat_uv);
	pr_debug("Calculated voltage based soc=%d\n", voltage_based_soc);

	if (voltage_based_soc == 100)
		if (chip->dt.cfg_report_charger_eoc)
			report_eoc(chip);

	return 0;
}

/*
 * Step reported_soc down towards last_soc once per catch-up period
 * (called from monitor_soc_work when the charger was removed after
 * a full charge and not reinserted).
 */
static void calculate_reported_soc(struct qpnp_bms_chip *chip)
{
	union power_supply_propval ret = {0,};

	if (chip->reported_soc > chip->last_soc) {
		/*send DISCHARGING status if the reported_soc drops from 100 */
		if (chip->reported_soc == 100) {
			ret.intval = POWER_SUPPLY_STATUS_DISCHARGING;
			chip->batt_psy->set_property(chip->batt_psy,
				POWER_SUPPLY_PROP_STATUS, &ret);
			pr_debug("Report discharging status, reported_soc=%d, last_soc=%d\n",
					chip->reported_soc, chip->last_soc);
		}
		/*
		 * reported_soc_delta is used to prevent
		 * the big change in last_soc,
		 * this is not used in high current mode
		 */
		if (chip->reported_soc_delta > 0)
			chip->reported_soc_delta--;

		if (chip->reported_soc_high_current)
			chip->reported_soc--;
		else
			chip->reported_soc = chip->last_soc
					+ chip->reported_soc_delta;

		pr_debug("New reported_soc=%d, last_soc is=%d\n",
					chip->reported_soc, chip->last_soc);
	} else {
		chip->reported_soc_in_use = false;
		chip->reported_soc_high_current = false;
		pr_debug("reported_soc equals last_soc,stop reported_soc process\n");
	}
	pr_debug("bms power_supply_changed\n");
	power_supply_changed(&chip->bms_psy);
}

/*
 * Keep SOC at a minimum of 1 while discharging as long as vbat is
 * still above the cutoff voltage (avoids premature 0% shutdown).
 */
static int clamp_soc_based_on_voltage(struct qpnp_bms_chip *chip, int soc)
{
	int rc, vbat_uv;

	rc = get_battery_voltage(chip, &vbat_uv);
	if (rc < 0) {
		pr_err("adc vbat failed err = %d\n", rc);
		return soc;
	}

	/* only clamp when discharging */
	if (is_battery_charging(chip))
		return soc;

	if (soc <= 0 && vbat_uv > chip->dt.cfg_v_cutoff_uv) {
		pr_debug("clamping soc to 1, vbat (%d) > cutoff (%d)\n",
						vbat_uv,
						chip->dt.cfg_v_cutoff_uv);
		return 1;
	} else {
		pr_debug("not clamping, using soc = %d, vbat = %d and cutoff = %d\n",
				soc, vbat_uv, chip->dt.cfg_v_cutoff_uv);
		return soc;
	}
}

#define UI_SOC_CATCHUP_TIME	(60)
/*
 * Periodic worker: recomputes calculated_soc from the last OCV (or
 * voltage-based SOC), runs the low-voltage/CV wakeup-source checks,
 * notifies the power-supply framework on changes, and reschedules
 * itself until last_soc catches up with calculated_soc. Also drives
 * the reported_soc catch-up once per UI_SOC_CATCHUP_TIME seconds.
 */
static void monitor_soc_work(struct work_struct *work)
{
	struct qpnp_bms_chip *chip = container_of(work,
				struct qpnp_bms_chip,
				monitor_soc_work.work);
	int rc, vbat_uv = 0, new_soc = 0, batt_temp;

	bms_stay_awake(&chip->vbms_soc_wake_source);

	calculate_delta_time(&chip->tm_sec, &chip->delta_time_s);
	pr_debug("elapsed_time=%d\n", chip->delta_time_s);

	mutex_lock(&chip->last_soc_mutex);

	if (!is_battery_present(chip)) {
		/* if battery is not present report 100% SOC */
		pr_debug("battery gone, reporting 100\n");
		chip->last_soc_invalid = true;
		chip->last_soc = -EINVAL;
		new_soc = 100;
	} else {
		rc = get_battery_voltage(chip, &vbat_uv);
		if (rc < 0) {
			pr_err("Failed to read battery-voltage rc=%d\n", rc);
		} else {
			very_low_voltage_check(chip, vbat_uv);
			cv_voltage_check(chip, vbat_uv);
		}

		if (chip->dt.cfg_use_voltage_soc) {
			calculate_soc_from_voltage(chip);
		} else {
			rc = get_batt_therm(chip, &batt_temp);
			if (rc < 0) {
				pr_err("Unable to read batt temp rc=%d, using default=%d\n",
						rc, BMS_DEFAULT_TEMP);
				batt_temp = BMS_DEFAULT_TEMP;
			}

			if (chip->last_soc_invalid) {
				chip->last_soc_invalid = false;
				chip->last_soc = -EINVAL;
			}
			new_soc = lookup_soc_ocv(chip, chip->last_ocv_uv,
								batt_temp);
			/* clamp soc due to BMS hw/sw immaturities */
			new_soc = clamp_soc_based_on_voltage(chip, new_soc);

			if (chip->calculated_soc != new_soc) {
				pr_debug("SOC changed! new_soc=%d prev_soc=%d\n",
						new_soc, chip->calculated_soc);
				chip->calculated_soc = new_soc;

				if (chip->calculated_soc == 100)
					/* update last_soc immediately */
					report_vm_bms_soc(chip);

				pr_debug("update bms_psy\n");
				power_supply_changed(&chip->bms_psy);
			} else if (chip->last_soc != chip->calculated_soc) {
				pr_debug("update bms_psy\n");
				power_supply_changed(&chip->bms_psy);
			} else {
				report_vm_bms_soc(chip);
			}
		}
		/* low SOC configuration */
		low_soc_check(chip);
	}
	/*
	 * schedule the work only if last_soc has not caught up with
	 * the calculated soc or if we are using voltage based soc
	 */
	if ((chip->last_soc != chip->calculated_soc) ||
					chip->dt.cfg_use_voltage_soc)
		schedule_delayed_work(&chip->monitor_soc_work,
			msecs_to_jiffies(get_calculation_delay_ms(chip)));

	if (chip->reported_soc_in_use && chip->charger_removed_since_full
				&& !chip->charger_reinserted) {
		/* record the elapsed time after last reported_soc change */
		chip->reported_soc_change_sec += chip->delta_time_s;
		pr_debug("reported_soc_change_sec=%d\n",
					chip->reported_soc_change_sec);

		/* above the catch up time, calculate new reported_soc */
		if (chip->reported_soc_change_sec > UI_SOC_CATCHUP_TIME) {
			calculate_reported_soc(chip);
			chip->reported_soc_change_sec = 0;
		}
	}

	mutex_unlock(&chip->last_soc_mutex);

	bms_relax(&chip->vbms_soc_wake_source);
}

/*
 * Fallback fired some time after probe: if no userspace client has
 * opened the BMS device by then, switch to voltage-based SOC.
 */
static void voltage_soc_timeout_work(struct work_struct *work)
{
	struct qpnp_bms_chip *chip = container_of(work,
				struct qpnp_bms_chip,
				voltage_soc_timeout_work.work);

	mutex_lock(&chip->bms_device_mutex);
	if (!chip->bms_dev_open) {
		pr_warn("BMS device not opened, using voltage based SOC\n");
		chip->dt.cfg_use_voltage_soc = true;
	}
	mutex_unlock(&chip->bms_device_mutex);
}

/* CAPACITY property getter - thin wrapper over report_state_of_charge. */
static int get_prop_bms_capacity(struct qpnp_bms_chip *chip)
{
	return report_state_of_charge(chip);
}

/*
 * True when any client has requested the hi-power use-case, unless
 * the ignore-all bit is set in the state bitmask.
 */
static bool is_hi_power_state_requested(struct qpnp_bms_chip *chip)
{
	pr_debug("hi_power_state=0x%x\n", chip->hi_power_state);

	if (chip->hi_power_state & VMBMS_IGNORE_ALL_BIT)
		return false;
	else
		return
			!!chip->hi_power_state;
}

/*
 * Set or clear one use-case bit in the hi-power state bitmask.
 * Returns 0 on success, -EINVAL for a negative use-case value.
 */
static int qpnp_vm_bms_config_power_state(struct qpnp_bms_chip *chip,
				int usecase, bool hi_power_enable)
{
	if (usecase < 0) {
		pr_err("Invalid power-usecase %x\n", usecase);
		return -EINVAL;
	}

	if (hi_power_enable)
		chip->hi_power_state |= usecase;
	else
		chip->hi_power_state &= ~usecase;

	pr_debug("hi_power_state=%x usecase=%x hi_power_enable=%d\n",
			chip->hi_power_state, usecase, hi_power_enable);

	return 0;
}

/* CURRENT_NOW getter - value cached by the set_property path. */
static int get_prop_bms_current_now(struct qpnp_bms_chip *chip)
{
	return chip->current_now;
}

/* Properties exposed by the bms power supply. */
static enum power_supply_property bms_power_props[] = {
	POWER_SUPPLY_PROP_CAPACITY,
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_RESISTANCE,
	POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
	POWER_SUPPLY_PROP_RESISTANCE_NOW,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_VOLTAGE_OCV,
	POWER_SUPPLY_PROP_HI_POWER,
	POWER_SUPPLY_PROP_LOW_POWER,
	POWER_SUPPLY_PROP_BATTERY_TYPE,
	POWER_SUPPLY_PROP_TEMP,
	POWER_SUPPLY_PROP_CYCLE_COUNT,
};

/* Only current, OCV and the power-state hints are writeable. */
static int qpnp_vm_bms_property_is_writeable(struct power_supply *psy,
					enum power_supply_property psp)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_CURRENT_NOW:
	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
	case POWER_SUPPLY_PROP_HI_POWER:
	case POWER_SUPPLY_PROP_LOW_POWER:
		return 1;
	default:
		break;
	}

	return 0;
}

/* power_supply get_property callback for the bms supply. */
static int qpnp_vm_bms_power_get_property(struct power_supply *psy,
					enum power_supply_property psp,
					union power_supply_propval *val)
{
	struct qpnp_bms_chip *chip = container_of(psy,
				struct qpnp_bms_chip, bms_psy);
	int value = 0, rc;

	val->intval = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = get_prop_bms_capacity(chip);
		break;
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = chip->battery_status;
		break;
	case POWER_SUPPLY_PROP_RESISTANCE:
		val->intval = get_prop_bms_rbatt(chip);
		break;
	case POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE:
		if (chip->batt_data->rbatt_capacitive_mohm > 0)
			val->intval = chip->batt_data->rbatt_capacitive_mohm;
		if (chip->dt.cfg_r_conn_mohm > 0)
			val->intval += chip->dt.cfg_r_conn_mohm;
		break;
	case POWER_SUPPLY_PROP_RESISTANCE_NOW:
		rc = get_batt_therm(chip, &value);
		if (rc < 0)
			value = BMS_DEFAULT_TEMP;
		val->intval = get_rbatt(chip, chip->calculated_soc, value);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		val->intval = get_prop_bms_current_now(chip);
		break;
	case POWER_SUPPLY_PROP_BATTERY_TYPE:
		val->strval = chip->batt_data->battery_type;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
		val->intval = chip->last_ocv_uv;
		break;
	case POWER_SUPPLY_PROP_TEMP:
		rc = get_batt_therm(chip, &value);
		if (rc < 0)
			value = BMS_DEFAULT_TEMP;
		val->intval = value;
		break;
	case POWER_SUPPLY_PROP_HI_POWER:
		val->intval = is_hi_power_state_requested(chip);
		break;
	case POWER_SUPPLY_PROP_LOW_POWER:
		val->intval = !is_hi_power_state_requested(chip);
		break;
	case POWER_SUPPLY_PROP_CYCLE_COUNT:
		if (chip->dt.cfg_battery_aging_comp)
			val->intval = chip->charge_cycles;
		else
			val->intval = -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * power_supply set_property callback. Writing VOLTAGE_OCV restarts
 * the SOC monitor immediately with the supplied OCV.
 */
static int qpnp_vm_bms_power_set_property(struct power_supply *psy,
					enum power_supply_property psp,
					const union power_supply_propval *val)
{
	int rc = 0;
	struct qpnp_bms_chip *chip = container_of(psy,
				struct qpnp_bms_chip, bms_psy);

	switch (psp) {
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		chip->current_now = val->intval;
		pr_debug("IBATT = %d\n", val->intval);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
		cancel_delayed_work_sync(&chip->monitor_soc_work);
		chip->last_ocv_uv = val->intval;
		pr_debug("OCV = %d\n", val->intval);
		schedule_delayed_work(&chip->monitor_soc_work, 0);
		break;
	case POWER_SUPPLY_PROP_HI_POWER:
		rc = qpnp_vm_bms_config_power_state(chip, val->intval, true);
		if (rc)
			pr_err("Unable to set power-state rc=%d\n", rc);
		break;
	case POWER_SUPPLY_PROP_LOW_POWER:
		rc = qpnp_vm_bms_config_power_state(chip, val->intval, false);
		if (rc)
			pr_err("Unable to set power-state rc=%d\n", rc);
		break;
	default:
		return -EINVAL;
	}
	return rc;
}

/*
 * Called when a new battery is inserted: toggles the BMS enable bit
 * to flush the FIFO/accumulator, estimates a fresh OCV, signals the
 * reader thread with an empty data set, and clears aging data.
 */
static void bms_new_battery_setup(struct qpnp_bms_chip *chip)
{
	int rc;

	mutex_lock(&chip->bms_data_mutex);

	chip->last_soc_invalid = true;
	/*
	 * disable and re-enable the BMS hardware to reset
	 * the realtime-FIFO data and restart accumulation
	 */
	rc = qpnp_masked_write_base(chip, chip->base + EN_CTL_REG,
							BMS_EN_BIT, 0);
	/* delay for the BMS hardware to reset its state */
	msleep(200);
	rc |= qpnp_masked_write_base(chip, chip->base + EN_CTL_REG,
						BMS_EN_BIT, BMS_EN_BIT);
	/* delay for the BMS hardware to re-start */
	msleep(200);
	if (rc)
		pr_err("Unable to reset BMS rc=%d\n", rc);

	chip->last_ocv_uv = estimate_ocv(chip);

	memset(&chip->bms_data, 0, sizeof(chip->bms_data));

	/* update the sequence number */
	chip->bms_data.seq_num = chip->seq_num++;

	/* signal the read thread */
	chip->data_ready = 1;
	wake_up_interruptible(&chip->bms_wait_q);

	/* hold a wake lock until the read thread is scheduled */
	if (chip->bms_dev_open)
		pm_stay_awake(chip->dev);

	mutex_unlock(&chip->bms_data_mutex);

	/* reset aging variables */
	if (chip->dt.cfg_battery_aging_comp) {
		chip->charge_cycles = 0;
		chip->charge_increase = 0;
		rc = backup_charge_cycle(chip);
		if (rc)
			pr_err("Unable to reset aging data rc=%d\n", rc);
	}
}

/*
 * Detect battery insertion/removal (vs. the shadowed state) and run
 * the corresponding setup or teardown. battery_present starts as
 * -EINVAL so the very first observation only records the state.
 */
static void battery_insertion_check(struct qpnp_bms_chip *chip)
{
	int present = (int)is_battery_present(chip);

	if (chip->battery_present != present) {
		pr_debug("shadow_sts=%d status=%d\n",
			chip->battery_present, present);
		if (chip->battery_present != -EINVAL) {
			if (present) {
				/* new battery inserted */
				bms_new_battery_setup(chip);
				setup_vbat_monitoring(chip);
				pr_debug("New battery inserted!\n");
			} else {
				/* battery removed */
				reset_vbat_monitoring(chip);
				pr_debug("Battery removed\n");
			}
		}
		chip->battery_present = present;
	}
}

/*
 * Track charger status transitions (CHARGING/FULL edges) and invoke
 * the charging_began/charging_ended hooks on change.
 */
static void battery_status_check(struct qpnp_bms_chip *chip)
{
	int status = get_battery_status(chip);

	if (chip->battery_status != status) {
		if (status == POWER_SUPPLY_STATUS_CHARGING) {
			pr_debug("charging started\n");
			charging_began(chip);
		} else if (chip->battery_status ==
				POWER_SUPPLY_STATUS_CHARGING) {
			pr_debug("charging stopped\n");
			charging_ended(chip);
		}

		if (status == POWER_SUPPLY_STATUS_FULL) {
			pr_debug("battery full\n");
			chip->battery_full = true;
		} else if (chip->battery_status == POWER_SUPPLY_STATUS_FULL) {
			pr_debug("battery not-full anymore\n");
			chip->battery_full = false;
		}
		chip->battery_status = status;
	}
}

#define HIGH_CURRENT_TH 2
/*
 * Maintain the reported_soc bookkeeping on charger plug/unplug
 * events; switches to "high current" mode when reported_soc runs
 * too far ahead of last_soc.
 */
static void reported_soc_check_status(struct qpnp_bms_chip *chip)
{
	u8 present;

	present = is_charger_present(chip);
	pr_debug("usb_present=%d\n", present);

	if (!present && !chip->charger_removed_since_full) {
		chip->charger_removed_since_full = true;
		pr_debug("reported_soc: charger removed since full\n");
		return;
	}
	if (chip->reported_soc_high_current) {
		pr_debug("reported_soc in high current mode, return\n");
		return;
	}
	if ((chip->reported_soc - chip->last_soc) >
			(100 - chip->dt.cfg_soc_resume_limit
						+ HIGH_CURRENT_TH)) {
		chip->reported_soc_high_current = true;
		chip->charger_removed_since_full = true;
		chip->charger_reinserted = false;
		pr_debug("reported_soc enters high current mode\n");
		return;
	}
	if (present && chip->charger_removed_since_full) {
		chip->charger_reinserted = true;
		pr_debug("reported_soc: charger reinserted\n");
	}
	if (!present && chip->charger_removed_since_full) {
		chip->charger_reinserted = false;
		pr_debug("reported_soc: charger removed again\n");
	}
}

/*
 * external_power_changed callback: re-evaluates charger status,
 * battery presence and the reported_soc state machine.
 */
static void qpnp_vm_bms_ext_power_changed(struct power_supply *psy)
{
	struct qpnp_bms_chip *chip = container_of(psy,
				struct qpnp_bms_chip, bms_psy);

	pr_debug("Triggered!\n");
	battery_status_check(chip);
	battery_insertion_check(chip);
	if (chip->reported_soc_in_use)
		reported_soc_check_status(chip);
}

/* Dump the current bms_data snapshot (FIFO and accumulator) to the log. */
static void dump_bms_data(const char *func, struct qpnp_bms_chip *chip)
{
	int i;

	pr_debug("%s: fifo_count=%d acc_count=%d seq_num=%d\n",
				func, chip->bms_data.num_fifo,
				chip->bms_data.acc_count,
				chip->bms_data.seq_num);

	for (i = 0; i < chip->bms_data.num_fifo; i++)
		pr_debug("fifo=%d fifo_uv=%d sample_interval=%d sample_count=%d\n",
				i, chip->bms_data.fifo_uv[i],
				chip->bms_data.sample_interval_ms,
				chip->bms_data.sample_count);

	pr_debug("avg_acc_data=%d\n",
			chip->bms_data.acc_uv);
}

/*
 * Read the completed FIFO sample count and raw samples from hardware,
 * convert each 16-bit little-endian sample to uV, and store them in
 * chip->bms_data along with the current state's sample interval/count.
 * Also caches the FIFO average in voltage_soc_uv for voltage-based SOC.
 */
static int read_and_populate_fifo_data(struct qpnp_bms_chip *chip)
{
	u8 fifo_count = 0, val = 0;
	u8 fifo_data_raw[MAX_FIFO_REGS * 2];
	u16 fifo_data;
	int rc, i, j;
	int64_t voltage_soc_avg = 0;

	/* read the completed FIFO count */
	rc = qpnp_read_wrapper(chip, &val, chip->base + STATUS2_REG, 1);
	if (rc) {
		pr_err("Unable to read STATUS2 register rc=%d\n", rc);
		return rc;
	}
	fifo_count = (val & FIFO_CNT_SD_MASK) >> FIFO_CNT_SD_SHIFT;
	pr_debug("fifo_count=%d\n", fifo_count);
	if (!fifo_count) {
		pr_debug("No data in FIFO\n");
		return 0;
	} else if (fifo_count > MAX_FIFO_REGS) {
		pr_err("Invalid fifo-length %d rejecting data\n", fifo_count);
		chip->bms_data.num_fifo = 0;
		return 0;
	}

	/* read the FIFO data */
	for (i = 0; i < fifo_count * 2; i++) {
		rc = qpnp_read_wrapper(chip, &fifo_data_raw[i],
				chip->base + FIFO_0_LSB_REG + i, 1);
		if (rc) {
			pr_err("Unable to read FIFO register(%d) rc=%d\n",
								i, rc);
			return rc;
		}
	}

	/* populate the structure */
	chip->bms_data.num_fifo = fifo_count;

	rc = get_sample_interval(chip, chip->current_fsm_state,
				&chip->bms_data.sample_interval_ms);
	if (rc) {
		pr_err("Unable to read state=%d sample_interval rc=%d\n",
					chip->current_fsm_state, rc);
		return rc;
	}

	rc = get_sample_count(chip, chip->current_fsm_state,
					&chip->bms_data.sample_count);
	if (rc) {
		pr_err("Unable to read state=%d sample_count rc=%d\n",
					chip->current_fsm_state, rc);
		return rc;
	}

	for (i = 0, j = 0; i < fifo_count * 2; i = i + 2, j++) {
		/* raw samples are 16-bit, LSB first */
		fifo_data = fifo_data_raw[i] | (fifo_data_raw[i + 1] << 8);
		chip->bms_data.fifo_uv[j] = convert_vbatt_raw_to_uv(chip,
							fifo_data, 0);
		voltage_soc_avg += chip->bms_data.fifo_uv[j];
	}

	/* store the fifo average for voltage-based-soc */
	chip->voltage_soc_uv = div_u64(voltage_soc_avg, fifo_count);

	return 0;
}

/*
 * Read the hardware accumulator (sum + count), average it, convert to
 * uV and store in chip->bms_data. A zero count means no data pending.
 * NOTE(review): the count/data reads fill 1 and 3 bytes of u32s that
 * are pre-zeroed - this presumes a little-endian CPU; confirm for any
 * new target.
 */
static int read_and_populate_acc_data(struct qpnp_bms_chip *chip)
{
	int rc;
	u32 acc_data_sd = 0, acc_count_sd = 0, avg_acc_data = 0;

	/* read ACC SD count */
	rc = qpnp_read_wrapper(chip, (u8 *)&acc_count_sd,
				chip->base + ACC_CNT_SD_REG, 1);
	if (rc) {
		pr_err("Unable to read ACC_CNT_SD_REG rc=%d\n", rc);
		return rc;
	}
	if (!acc_count_sd) {
		pr_debug("No data in accumulator\n");
		return 0;
	}

	/* read ACC SD data */
	rc = qpnp_read_wrapper(chip, (u8 *)&acc_data_sd,
				chip->base + ACC_DATA0_SD_REG, 3);
	if (rc) {
		pr_err("Unable to read ACC_DATA0_SD_REG rc=%d\n", rc);
		return rc;
	}
	avg_acc_data = div_u64(acc_data_sd, acc_count_sd);

	chip->bms_data.acc_uv = convert_vbatt_raw_to_uv(chip,
						avg_acc_data, 0);
	chip->bms_data.acc_count = acc_count_sd;

	rc = get_sample_interval(chip, chip->current_fsm_state,
				&chip->bms_data.sample_interval_ms);
	if (rc) {
		pr_err("Unable to read state=%d sample_interval rc=%d\n",
					chip->current_fsm_state, rc);
		return rc;
	}

	rc = get_sample_count(chip, chip->current_fsm_state,
					&chip->bms_data.sample_count);
	if (rc) {
		pr_err("Unable to read state=%d sample_count rc=%d\n",
					chip->current_fsm_state, rc);
		return rc;
	}

	return 0;
}

/* Clear the hardware FIFO count and accumulator data/count registers. */
static int clear_fifo_acc_data(struct qpnp_bms_chip *chip)
{
	int rc;
	u8 reg = 0;

	reg = FIFO_CNT_SD_CLR_BIT | ACC_DATA_SD_CLR_BIT | ACC_CNT_SD_CLR_BIT;
	rc = qpnp_masked_write_base(chip, chip->base + DATA_CTL2_REG,
								reg, reg);
	if (rc)
		pr_err("Unable to write DATA_CTL2_REG rc=%d\n", rc);

	return rc;
}

/*
 * IRQ: FIFO filled in the current FSM state. Calibrates the VADC,
 * snapshots the FIFO into bms_data, clears the hardware buffers and
 * wakes the userspace reader. Skipped if suspend data is still
 * pending consumption.
 */
static irqreturn_t bms_fifo_update_done_irq_handler(int irq, void *_chip)
{
	int rc;
	struct qpnp_bms_chip *chip = _chip;

	pr_debug("fifo_update_done triggered\n");

	mutex_lock(&chip->bms_data_mutex);

	if (chip->suspend_data_valid) {
		pr_debug("Suspend data not processed yet\n");
		goto fail_fifo;
	}

	rc = calib_vadc(chip);
	if (rc)
		pr_err("Unable to calibrate vadc rc=%d\n", rc);

	/* clear old data */
	memset(&chip->bms_data, 0, sizeof(chip->bms_data));
	/*
	 * 1. Read FIFO and populate the bms_data
	 * 2. Clear FIFO data
	 * 3. Notify userspace
	 */
	rc = update_fsm_state(chip);
	if (rc) {
		pr_err("Unable to read FSM state rc=%d\n", rc);
		goto fail_fifo;
	}
	pr_debug("fsm_state=%d\n", chip->current_fsm_state);

	rc = read_and_populate_fifo_data(chip);
	if (rc) {
		pr_err("Unable to read FIFO data rc=%d\n", rc);
		goto fail_fifo;
	}

	rc = clear_fifo_acc_data(chip);
	if (rc)
		pr_err("Unable to clear FIFO/ACC data rc=%d\n", rc);

	/* update the sequence number */
	chip->bms_data.seq_num = chip->seq_num++;

	dump_bms_data(__func__, chip);

	/* signal the read thread */
	chip->data_ready = 1;
	wake_up_interruptible(&chip->bms_wait_q);

	/* hold a wake lock until the read thread is scheduled */
	if (chip->bms_dev_open)
		pm_stay_awake(chip->dev);

fail_fifo:
	mutex_unlock(&chip->bms_data_mutex);
	return IRQ_HANDLED;
}

/*
 * IRQ: hardware FSM changed state. Same flow as the FIFO IRQ but
 * also drains the accumulator before refreshing the FSM state and
 * clearing the hardware buffers.
 */
static irqreturn_t bms_fsm_state_change_irq_handler(int irq, void *_chip)
{
	int rc;
	struct qpnp_bms_chip *chip = _chip;

	pr_debug("fsm_state_changed triggered\n");

	mutex_lock(&chip->bms_data_mutex);

	if (chip->suspend_data_valid) {
		pr_debug("Suspend data not processed yet\n");
		goto fail_state;
	}

	rc = calib_vadc(chip);
	if (rc)
		pr_err("Unable to calibrate vadc rc=%d\n", rc);

	/* clear old data */
	memset(&chip->bms_data, 0, sizeof(chip->bms_data));
	/*
	 * 1. Read FIFO and ACC_DATA and populate the bms_data
	 * 2. Clear FIFO & ACC data
	 * 3. Notify userspace
	 */
	pr_debug("prev_fsm_state=%d\n", chip->current_fsm_state);

	rc = read_and_populate_fifo_data(chip);
	if (rc) {
		pr_err("Unable to read FIFO data rc=%d\n", rc);
		goto fail_state;
	}

	/* read accumulator data */
	rc = read_and_populate_acc_data(chip);
	if (rc) {
		pr_err("Unable to read ACC_SD data rc=%d\n", rc);
		goto fail_state;
	}

	rc = update_fsm_state(chip);
	if (rc) {
		pr_err("Unable to read FSM state rc=%d\n", rc);
		goto fail_state;
	}

	rc = clear_fifo_acc_data(chip);
	if (rc)
		pr_err("Unable to clear FIFO/ACC data rc=%d\n", rc);

	/* update the sequence number */
	chip->bms_data.seq_num = chip->seq_num++;

	dump_bms_data(__func__, chip);

	/* signal the read thread */
	chip->data_ready = 1;
	wake_up_interruptible(&chip->bms_wait_q);

	/* hold a wake lock until the read thread is scheduled */
	if (chip->bms_dev_open)
		pm_stay_awake(chip->dev);

fail_state:
	mutex_unlock(&chip->bms_data_mutex);
	return IRQ_HANDLED;
}

/*
 * Recover the OCV (mV, scaled to uV) and SOC saved in the BMS shadow
 * registers across the last shutdown; marks both invalid when either
 * register holds a reset/invalid value.
 */
static int read_shutdown_ocv_soc(struct qpnp_bms_chip *chip)
{
	u8 stored_soc = 0;
	u16 stored_ocv = 0;
	int rc;

	rc = qpnp_read_wrapper(chip, (u8 *)&stored_ocv,
				chip->base + BMS_OCV_REG, 2);
	if (rc) {
		pr_err("failed to read addr = %d %d\n",
				chip->base + BMS_OCV_REG, rc);
		return -EINVAL;
	}

	/* if shutdown ocv is invalid, reject shutdown soc too */
	if (!stored_ocv || (stored_ocv == OCV_INVALID)) {
		pr_debug("shutdown OCV %d - invalid\n", stored_ocv);
		chip->shutdown_ocv = OCV_INVALID;
		chip->shutdown_soc = SOC_INVALID;
		return -EINVAL;
	}
	chip->shutdown_ocv = stored_ocv * 1000;

	/*
	 * The previous SOC is stored in the first 7 bits of the register as
	 * (Shutdown SOC + 1). This allows for register reset values of both
	 * 0x00 and 0xFF.
*/ rc = qpnp_read_wrapper(chip, &stored_soc, chip->base + BMS_SOC_REG, 1); if (rc) { pr_err("failed to read addr = %d %d\n", chip->base + BMS_SOC_REG, rc); return -EINVAL; } if (!stored_soc || stored_soc == SOC_INVALID) { chip->shutdown_soc = SOC_INVALID; chip->shutdown_ocv = OCV_INVALID; return -EINVAL; } else { chip->shutdown_soc = (stored_soc >> 1) - 1; } pr_debug("shutdown_ocv=%d shutdown_soc=%d\n", chip->shutdown_ocv, chip->shutdown_soc); return 0; } static int interpolate_current_comp(int die_temp) { int i; int num_rows = ARRAY_SIZE(temp_curr_comp_lut); if (die_temp <= (temp_curr_comp_lut[0].temp_decideg)) return temp_curr_comp_lut[0].current_ma; if (die_temp >= (temp_curr_comp_lut[num_rows - 1].temp_decideg)) return temp_curr_comp_lut[num_rows - 1].current_ma; for (i = 0; i < num_rows - 1; i++) if (die_temp <= (temp_curr_comp_lut[i].temp_decideg)) break; if (die_temp == (temp_curr_comp_lut[i].temp_decideg)) return temp_curr_comp_lut[i].current_ma; return linear_interpolate( temp_curr_comp_lut[i - 1].current_ma, temp_curr_comp_lut[i - 1].temp_decideg, temp_curr_comp_lut[i].current_ma, temp_curr_comp_lut[i].temp_decideg, die_temp); } static void adjust_pon_ocv(struct qpnp_bms_chip *chip, int batt_temp) { int rc, current_ma, rbatt_mohm, die_temp, delta_uv, pc; struct qpnp_vadc_result result; rc = qpnp_vadc_read(chip->vadc_dev, DIE_TEMP, &result); if (rc) { pr_err("error reading adc channel=%d, rc=%d\n", DIE_TEMP, rc); } else { pc = interpolate_pc(chip->batt_data->pc_temp_ocv_lut, batt_temp, chip->last_ocv_uv / 1000); /* * For pc < 2, use the rbatt of pc = 2. This is to avoid * the huge rbatt values at pc < 2 which can disrupt the pon_ocv * calculations. 
*/ if (pc < 2) pc = 2; rbatt_mohm = get_rbatt(chip, pc, batt_temp); /* convert die_temp to DECIDEGC */ die_temp = (int)result.physical / 100; current_ma = interpolate_current_comp(die_temp); delta_uv = rbatt_mohm * current_ma; pr_debug("PON OCV changed from %d to %d pc=%d rbatt=%d current_ma=%d die_temp=%d batt_temp=%d delta_uv=%d\n", chip->last_ocv_uv, chip->last_ocv_uv + delta_uv, pc, rbatt_mohm, current_ma, die_temp, batt_temp, delta_uv); chip->last_ocv_uv += delta_uv; } } static int calculate_initial_soc(struct qpnp_bms_chip *chip) { int rc, batt_temp = 0, est_ocv = 0; rc = get_batt_therm(chip, &batt_temp); if (rc < 0) { pr_err("Unable to read batt temp, using default=%d\n", BMS_DEFAULT_TEMP); batt_temp = BMS_DEFAULT_TEMP; } rc = read_and_update_ocv(chip, batt_temp, true); if (rc) { pr_err("Unable to read PON OCV rc=%d\n", rc); return rc; } rc = read_shutdown_ocv_soc(chip); if (rc < 0 || chip->dt.cfg_ignore_shutdown_soc) chip->shutdown_soc_invalid = true; if (chip->warm_reset) { /* * if we have powered on from warm reset - * Always use shutdown SOC. If shudown SOC is invalid then * estimate OCV */ if (chip->shutdown_soc_invalid) { pr_debug("Estimate OCV\n"); est_ocv = estimate_ocv(chip); if (est_ocv <= 0) { pr_err("Unable to estimate OCV rc=%d\n", est_ocv); return -EINVAL; } chip->last_ocv_uv = est_ocv; chip->calculated_soc = lookup_soc_ocv(chip, est_ocv, batt_temp); } else { chip->last_ocv_uv = chip->shutdown_ocv; chip->last_soc = chip->shutdown_soc; chip->calculated_soc = lookup_soc_ocv(chip, chip->shutdown_ocv, batt_temp); pr_debug("Using shutdown SOC\n"); } } else { /* * In PM8916 2.0 PON OCV calculation is delayed due to * change in the ordering of power-on sequence of LDO6. * Adjust PON OCV to include current during PON. 
*/
		if (chip->workaround_flag & WRKARND_PON_OCV_COMP)
			adjust_pon_ocv(chip, batt_temp);

		/* !warm_reset use PON OCV only if shutdown SOC is invalid */
		chip->calculated_soc = lookup_soc_ocv(chip,
					chip->last_ocv_uv, batt_temp);
		/*
		 * Trust the shutdown SOC only if it is close enough to the
		 * PON-derived SOC (within cfg_shutdown_soc_valid_limit).
		 */
		if (!chip->shutdown_soc_invalid &&
			(abs(chip->shutdown_soc - chip->calculated_soc) <
				chip->dt.cfg_shutdown_soc_valid_limit)) {
			chip->last_ocv_uv = chip->shutdown_ocv;
			chip->last_soc = chip->shutdown_soc;
			chip->calculated_soc = lookup_soc_ocv(chip,
						chip->shutdown_ocv, batt_temp);
			pr_debug("Using shutdown SOC\n");
		} else {
			chip->shutdown_soc_invalid = true;
			pr_debug("Using PON SOC\n");
		}
	}
	/* store the start-up OCV for voltage-based-soc */
	chip->voltage_soc_uv = chip->last_ocv_uv;

	pr_info("warm_reset=%d est_ocv=%d shutdown_soc_invalid=%d shutdown_ocv=%d shutdown_soc=%d last_soc=%d calculated_soc=%d last_ocv_uv=%d\n",
		chip->warm_reset, est_ocv, chip->shutdown_soc_invalid,
		chip->shutdown_ocv, chip->shutdown_soc, chip->last_soc,
		chip->calculated_soc, chip->last_ocv_uv);

	return 0;
}

/*
 * calculate_initial_aging_comp() - initialize charge-cycle aging data.
 * If the battery was swapped while powered off, or the shutdown SOC is
 * invalid, the cycle counters are reset and backed up; otherwise they
 * are restored from the backup registers.  Returns the rc of the last
 * backup/restore operation (errors are also logged).
 */
static int calculate_initial_aging_comp(struct qpnp_bms_chip *chip)
{
	int rc;
	bool battery_removed = is_battery_replaced_in_offmode(chip);

	if (battery_removed || chip->shutdown_soc_invalid) {
		pr_info("Clearing aging data battery_removed=%d shutdown_soc_invalid=%d\n",
				battery_removed, chip->shutdown_soc_invalid);
		chip->charge_cycles = 0;
		chip->charge_increase = 0;
		rc = backup_charge_cycle(chip);
		if (rc)
			pr_err("Unable to reset aging data rc=%d\n", rc);
	} else {
		rc = read_chgcycle_data_from_backup(chip);
		if (rc)
			pr_err("Unable to read aging data rc=%d\n", rc);
	}

	pr_debug("Initial aging data charge_cycles=%u charge_increase=%u\n",
			chip->charge_cycles, chip->charge_increase);
	return rc;
}

/*
 * bms_load_hw_defaults() - program the DT-configured sampling parameters
 * (S3 OCV tolerance, S1/S2 accumulator counts, sample intervals and FIFO
 * lengths) into the BMS peripheral, then force the FSM to S2 and log the
 * resulting configuration.  Each register write is optional: it is done
 * only when the corresponding DT value is within its valid range.
 */
static int bms_load_hw_defaults(struct qpnp_bms_chip *chip)
{
	u8 val, state, bms_en = 0;
	u32 interval[2], count[2], fifo[2];
	int rc;

	/* S3 OCV tolerence threshold */
	if (chip->dt.cfg_s3_ocv_tol_uv >= 0 &&
		chip->dt.cfg_s3_ocv_tol_uv <= MAX_OCV_TOL_THRESHOLD) {
		val = chip->dt.cfg_s3_ocv_tol_uv /
OCV_TOL_LSB_UV; rc = qpnp_masked_write_base(chip, chip->base + S3_OCV_TOL_CTL_REG, 0xFF, val); if (rc) { pr_err("Unable to write s3_ocv_tol_threshold rc=%d\n", rc); return rc; } } /* S1 accumulator threshold */ if (chip->dt.cfg_s1_sample_count >= 1 && chip->dt.cfg_s1_sample_count <= MAX_SAMPLE_COUNT) { val = (chip->dt.cfg_s1_sample_count > 1) ? (ilog2(chip->dt.cfg_s1_sample_count) - 1) : 0; rc = qpnp_masked_write_base(chip, chip->base + S1_ACC_CNT_REG, ACC_CNT_MASK, val); if (rc) { pr_err("Unable to write s1 sample count rc=%d\n", rc); return rc; } } /* S2 accumulator threshold */ if (chip->dt.cfg_s2_sample_count >= 1 && chip->dt.cfg_s2_sample_count <= MAX_SAMPLE_COUNT) { val = (chip->dt.cfg_s2_sample_count > 1) ? (ilog2(chip->dt.cfg_s2_sample_count) - 1) : 0; rc = qpnp_masked_write_base(chip, chip->base + S2_ACC_CNT_REG, ACC_CNT_MASK, val); if (rc) { pr_err("Unable to write s2 sample count rc=%d\n", rc); return rc; } } if (chip->dt.cfg_s1_sample_interval_ms >= 0 && chip->dt.cfg_s1_sample_interval_ms <= MAX_SAMPLE_INTERVAL) { val = chip->dt.cfg_s1_sample_interval_ms / 10; rc = qpnp_write_wrapper(chip, &val, chip->base + S1_SAMPLE_INTVL_REG, 1); if (rc) { pr_err("Unable to write s1 sample inteval rc=%d\n", rc); return rc; } } if (chip->dt.cfg_s2_sample_interval_ms >= 0 && chip->dt.cfg_s2_sample_interval_ms <= MAX_SAMPLE_INTERVAL) { val = chip->dt.cfg_s2_sample_interval_ms / 10; rc = qpnp_write_wrapper(chip, &val, chip->base + S2_SAMPLE_INTVL_REG, 1); if (rc) { pr_err("Unable to write s2 sample inteval rc=%d\n", rc); return rc; } } if (chip->dt.cfg_s1_fifo_length >= 0 && chip->dt.cfg_s1_fifo_length <= MAX_FIFO_REGS) { rc = qpnp_masked_write_base(chip, chip->base + FIFO_LENGTH_REG, S1_FIFO_LENGTH_MASK, chip->dt.cfg_s1_fifo_length); if (rc) { pr_err("Unable to write s1 fifo length rc=%d\n", rc); return rc; } } if (chip->dt.cfg_s2_fifo_length >= 0 && chip->dt.cfg_s2_fifo_length <= MAX_FIFO_REGS) { rc = qpnp_masked_write_base(chip, chip->base + FIFO_LENGTH_REG, 
S2_FIFO_LENGTH_MASK, chip->dt.cfg_s2_fifo_length << S2_FIFO_LENGTH_SHIFT); if (rc) { pr_err("Unable to write s2 fifo length rc=%d\n", rc); return rc; } } get_sample_interval(chip, S1_STATE, &interval[0]); get_sample_interval(chip, S2_STATE, &interval[1]); get_sample_count(chip, S1_STATE, &count[0]); get_sample_count(chip, S2_STATE, &count[1]); get_fifo_length(chip, S1_STATE, &fifo[0]); get_fifo_length(chip, S2_STATE, &fifo[1]); /* Force the BMS state to S2 at boot-up */ rc = get_fsm_state(chip, &state); if (rc) pr_err("Unable to get FSM state rc=%d\n", rc); if (rc || (state != S2_STATE)) { pr_debug("Forcing S2 state\n"); rc = force_fsm_state(chip, S2_STATE); if (rc) pr_err("Unable to set FSM state rc=%d\n", rc); } rc = qpnp_read_wrapper(chip, &bms_en, chip->base + EN_CTL_REG, 1); if (rc) { pr_err("Unable to read BMS_EN state rc=%d\n", rc); return rc; } rc = update_fsm_state(chip); if (rc) { pr_err("Unable to read FSM state rc=%d\n", rc); return rc; } pr_info("BMS_EN=%d Sample_Interval-S1=[%d]S2=[%d] Sample_Count-S1=[%d]S2=[%d] Fifo_Length-S1=[%d]S2=[%d] FSM_state=%d\n", !!bms_en, interval[0], interval[1], count[0], count[1], fifo[0], fifo[1], chip->current_fsm_state); return 0; } static ssize_t vm_bms_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int rc; struct qpnp_bms_chip *chip = file->private_data; if (!chip->data_ready && (file->f_flags & O_NONBLOCK)) { rc = -EAGAIN; goto fail_read; } rc = wait_event_interruptible(chip->bms_wait_q, chip->data_ready); if (rc) { pr_debug("wait failed! 
rc=%d\n", rc); goto fail_read; } if (!chip->data_ready) { pr_debug("No Data, false wakeup\n"); rc = -EFAULT; goto fail_read; } mutex_lock(&chip->bms_data_mutex); if (copy_to_user(buf, &chip->bms_data, sizeof(chip->bms_data))) { pr_err("Failed in copy_to_user\n"); mutex_unlock(&chip->bms_data_mutex); rc = -EFAULT; goto fail_read; } pr_debug("Data copied!!\n"); chip->data_ready = 0; mutex_unlock(&chip->bms_data_mutex); /* wakelock-timeout for userspace to pick up */ pm_wakeup_event(chip->dev, BMS_READ_TIMEOUT); return sizeof(chip->bms_data); fail_read: pm_relax(chip->dev); return rc; } static int vm_bms_open(struct inode *inode, struct file *file) { struct qpnp_bms_chip *chip = container_of(inode->i_cdev, struct qpnp_bms_chip, bms_cdev); mutex_lock(&chip->bms_device_mutex); if (chip->bms_dev_open) { pr_debug("BMS device already open\n"); mutex_unlock(&chip->bms_device_mutex); return -EBUSY; } chip->bms_dev_open = true; file->private_data = chip; pr_debug("BMS device opened\n"); mutex_unlock(&chip->bms_device_mutex); return 0; } static int vm_bms_release(struct inode *inode, struct file *file) { struct qpnp_bms_chip *chip = container_of(inode->i_cdev, struct qpnp_bms_chip, bms_cdev); mutex_lock(&chip->bms_device_mutex); chip->bms_dev_open = false; pm_relax(chip->dev); pr_debug("BMS device closed\n"); mutex_unlock(&chip->bms_device_mutex); return 0; } static const struct file_operations bms_fops = { .owner = THIS_MODULE, .open = vm_bms_open, .read = vm_bms_read, .release = vm_bms_release, }; static void bms_init_defaults(struct qpnp_bms_chip *chip) { chip->data_ready = 0; chip->last_ocv_raw = OCV_UNINITIALIZED; chip->battery_status = POWER_SUPPLY_STATUS_UNKNOWN; chip->battery_present = -EINVAL; chip->calculated_soc = -EINVAL; chip->last_soc = -EINVAL; chip->vbms_lv_wake_source.disabled = 1; chip->vbms_cv_wake_source.disabled = 1; chip->vbms_soc_wake_source.disabled = 1; chip->ocv_at_100 = -EINVAL; chip->prev_soc_uuc = -EINVAL; chip->charge_cycles = 0; chip->start_soc = 
0; chip->end_soc = 0; chip->charge_increase = 0; } #define SPMI_REQUEST_IRQ(chip, rc, irq_name) \ do { \ rc = devm_request_threaded_irq(chip->dev, \ chip->irq_name##_irq.irq, NULL, \ bms_##irq_name##_irq_handler, \ IRQF_TRIGGER_RISING | IRQF_ONESHOT, \ #irq_name, chip); \ if (rc < 0) \ pr_err("Unable to request " #irq_name " irq: %d\n", rc);\ } while (0) #define SPMI_FIND_IRQ(chip, irq_name, rc) \ do { \ chip->irq_name##_irq.irq = spmi_get_irq_byname(chip->spmi, \ resource, #irq_name); \ if (chip->irq_name##_irq.irq < 0) { \ rc = chip->irq_name##_irq.irq; \ pr_err("Unable to get " #irq_name " irq rc=%d\n", rc); \ } \ } while (0) static int bms_request_irqs(struct qpnp_bms_chip *chip) { int rc; SPMI_REQUEST_IRQ(chip, rc, fifo_update_done); if (rc < 0) return rc; SPMI_REQUEST_IRQ(chip, rc, fsm_state_change); if (rc < 0) return rc; /* Disable the state change IRQ */ disable_bms_irq(&chip->fsm_state_change_irq); enable_irq_wake(chip->fifo_update_done_irq.irq); return 0; } static int bms_find_irqs(struct qpnp_bms_chip *chip, struct spmi_resource *resource) { int rc = 0; SPMI_FIND_IRQ(chip, fifo_update_done, rc); if (rc < 0) return rc; SPMI_FIND_IRQ(chip, fsm_state_change, rc); if (rc < 0) return rc; return 0; } static int64_t read_battery_id(struct qpnp_bms_chip *chip) { int rc; struct qpnp_vadc_result result; rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX2_BAT_ID, &result); if (rc) { pr_err("error reading batt id channel = %d, rc = %d\n", LR_MUX2_BAT_ID, rc); return rc; } return result.physical; } static int show_bms_config(struct seq_file *m, void *data) { struct qpnp_bms_chip *chip = m->private; int s1_sample_interval, s2_sample_interval; int s1_sample_count, s2_sample_count; int s1_fifo_length, s2_fifo_length; get_sample_interval(chip, S1_STATE, &s1_sample_interval); get_sample_interval(chip, S2_STATE, &s2_sample_interval); get_sample_count(chip, S1_STATE, &s1_sample_count); get_sample_count(chip, S2_STATE, &s2_sample_count); get_fifo_length(chip, S1_STATE, 
&s1_fifo_length); get_fifo_length(chip, S2_STATE, &s2_fifo_length); seq_printf(m, "r_conn_mohm\t=\t%d\n" "v_cutoff_uv\t=\t%d\n" "max_voltage_uv\t=\t%d\n" "use_voltage_soc\t=\t%d\n" "low_soc_calc_threshold\t=\t%d\n" "low_soc_calculate_soc_ms\t=\t%d\n" "low_voltage_threshold\t=\t%d\n" "low_voltage_calculate_soc_ms\t=\t%d\n" "calculate_soc_ms\t=\t%d\n" "voltage_soc_timeout_ms\t=\t%d\n" "ignore_shutdown_soc\t=\t%d\n" "shutdown_soc_valid_limit\t=\t%d\n" "force_s3_on_suspend\t=\t%d\n" "report_charger_eoc\t=\t%d\n" "aging_compensation\t=\t%d\n" "use_reported_soc\t=\t%d\n" "s1_sample_interval_ms\t=\t%d\n" "s2_sample_interval_ms\t=\t%d\n" "s1_sample_count\t=\t%d\n" "s2_sample_count\t=\t%d\n" "s1_fifo_length\t=\t%d\n" "s2_fifo_length\t=\t%d\n", chip->dt.cfg_r_conn_mohm, chip->dt.cfg_v_cutoff_uv, chip->dt.cfg_max_voltage_uv, chip->dt.cfg_use_voltage_soc, chip->dt.cfg_low_soc_calc_threshold, chip->dt.cfg_low_soc_calculate_soc_ms, chip->dt.cfg_low_voltage_threshold, chip->dt.cfg_low_voltage_calculate_soc_ms, chip->dt.cfg_calculate_soc_ms, chip->dt.cfg_voltage_soc_timeout_ms, chip->dt.cfg_ignore_shutdown_soc, chip->dt.cfg_shutdown_soc_valid_limit, chip->dt.cfg_force_s3_on_suspend, chip->dt.cfg_report_charger_eoc, chip->dt.cfg_battery_aging_comp, chip->dt.cfg_use_reported_soc, s1_sample_interval, s2_sample_interval, s1_sample_count, s2_sample_count, s1_fifo_length, s2_fifo_length); return 0; } static int bms_config_open(struct inode *inode, struct file *file) { struct qpnp_bms_chip *chip = inode->i_private; return single_open(file, show_bms_config, chip); } static const struct file_operations bms_config_debugfs_ops = { .owner = THIS_MODULE, .open = bms_config_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int show_bms_status(struct seq_file *m, void *data) { struct qpnp_bms_chip *chip = m->private; seq_printf(m, "bms_psy_registered\t=\t%d\n" "bms_dev_open\t=\t%d\n" "warm_reset\t=\t%d\n" "battery_status\t=\t%d\n" "battery_present\t=\t%d\n" 
"in_cv_state\t=\t%d\n" "calculated_soc\t=\t%d\n" "last_soc\t=\t%d\n" "last_ocv_uv\t=\t%d\n" "last_ocv_raw\t=\t%d\n" "last_soc_unbound\t=\t%d\n" "current_fsm_state\t=\t%d\n" "current_now\t=\t%d\n" "ocv_at_100\t=\t%d\n" "low_voltage_ws_active\t=\t%d\n" "cv_ws_active\t=\t%d\n", chip->bms_psy_registered, chip->bms_dev_open, chip->warm_reset, chip->battery_status, chip->battery_present, chip->in_cv_state, chip->calculated_soc, chip->last_soc, chip->last_ocv_uv, chip->last_ocv_raw, chip->last_soc_unbound, chip->current_fsm_state, chip->current_now, chip->ocv_at_100, bms_wake_active(&chip->vbms_lv_wake_source), bms_wake_active(&chip->vbms_cv_wake_source)); return 0; } static int bms_status_open(struct inode *inode, struct file *file) { struct qpnp_bms_chip *chip = inode->i_private; return single_open(file, show_bms_status, chip); } static const struct file_operations bms_status_debugfs_ops = { .owner = THIS_MODULE, .open = bms_status_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int show_bms_data(struct seq_file *m, void *data) { struct qpnp_bms_chip *chip = m->private; int i; mutex_lock(&chip->bms_data_mutex); seq_printf(m, "seq_num=%d\n", chip->bms_data.seq_num); for (i = 0; i < chip->bms_data.num_fifo; i++) seq_printf(m, "fifo_uv[%d]=%d sample_count=%d interval_ms=%d\n", i, chip->bms_data.fifo_uv[i], chip->bms_data.sample_count, chip->bms_data.sample_interval_ms); seq_printf(m, "acc_uv=%d sample_count=%d sample_interval=%d\n", chip->bms_data.acc_uv, chip->bms_data.acc_count, chip->bms_data.sample_interval_ms); mutex_unlock(&chip->bms_data_mutex); return 0; } static int bms_data_open(struct inode *inode, struct file *file) { struct qpnp_bms_chip *chip = inode->i_private; return single_open(file, show_bms_data, chip); } static const struct file_operations bms_data_debugfs_ops = { .owner = THIS_MODULE, .open = bms_data_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int set_battery_data(struct 
qpnp_bms_chip *chip) { int64_t battery_id; int rc = 0; struct bms_battery_data *batt_data; struct device_node *node; battery_id = read_battery_id(chip); if (battery_id < 0) { pr_err("cannot read battery id err = %lld\n", battery_id); return battery_id; } node = of_find_node_by_name(chip->spmi->dev.of_node, "qcom,battery-data"); if (!node) { pr_err("No available batterydata\n"); return -EINVAL; } batt_data = devm_kzalloc(chip->dev, sizeof(struct bms_battery_data), GFP_KERNEL); if (!batt_data) { pr_err("Could not alloc battery data\n"); return -EINVAL; } batt_data->fcc_temp_lut = devm_kzalloc(chip->dev, sizeof(struct single_row_lut), GFP_KERNEL); batt_data->pc_temp_ocv_lut = devm_kzalloc(chip->dev, sizeof(struct pc_temp_ocv_lut), GFP_KERNEL); batt_data->rbatt_sf_lut = devm_kzalloc(chip->dev, sizeof(struct sf_lut), GFP_KERNEL); batt_data->ibat_acc_lut = devm_kzalloc(chip->dev, sizeof(struct ibat_temp_acc_lut), GFP_KERNEL); batt_data->max_voltage_uv = -1; batt_data->cutoff_uv = -1; batt_data->iterm_ua = -1; /* * if the alloced luts are 0s, of_batterydata_read_data ignores * them. 
*/ rc = of_batterydata_read_data(node, batt_data, battery_id); if (rc || !batt_data->pc_temp_ocv_lut || !batt_data->fcc_temp_lut || !batt_data->rbatt_sf_lut) { pr_err("battery data load failed\n"); devm_kfree(chip->dev, batt_data->fcc_temp_lut); devm_kfree(chip->dev, batt_data->pc_temp_ocv_lut); devm_kfree(chip->dev, batt_data->rbatt_sf_lut); devm_kfree(chip->dev, batt_data->ibat_acc_lut); devm_kfree(chip->dev, batt_data); return rc; } if (batt_data->pc_temp_ocv_lut == NULL) { pr_err("temp ocv lut table has not been loaded\n"); devm_kfree(chip->dev, batt_data->fcc_temp_lut); devm_kfree(chip->dev, batt_data->pc_temp_ocv_lut); devm_kfree(chip->dev, batt_data->rbatt_sf_lut); devm_kfree(chip->dev, batt_data->ibat_acc_lut); devm_kfree(chip->dev, batt_data); return -EINVAL; } /* check if ibat_acc_lut is valid */ if (!batt_data->ibat_acc_lut->rows) { pr_info("ibat_acc_lut not present\n"); devm_kfree(chip->dev, batt_data->ibat_acc_lut); batt_data->ibat_acc_lut = NULL; } /* Override battery properties if specified in the battery profile */ if (batt_data->max_voltage_uv >= 0) chip->dt.cfg_max_voltage_uv = batt_data->max_voltage_uv; if (batt_data->cutoff_uv >= 0) chip->dt.cfg_v_cutoff_uv = batt_data->cutoff_uv; chip->batt_data = batt_data; return 0; } static int parse_spmi_dt_properties(struct qpnp_bms_chip *chip, struct spmi_device *spmi) { struct spmi_resource *spmi_resource; struct resource *resource; int rc; chip->dev = &(spmi->dev); chip->spmi = spmi; spmi_for_each_container_dev(spmi_resource, spmi) { if (!spmi_resource) { pr_err("qpnp_vm_bms: spmi resource absent\n"); return -ENXIO; } resource = spmi_get_resource(spmi, spmi_resource, IORESOURCE_MEM, 0); if (!(resource && resource->start)) { pr_err("node %s IO resource absent!\n", spmi->dev.of_node->full_name); return -ENXIO; } pr_debug("Node name = %s\n", spmi_resource->of_node->name); if (strcmp("qcom,batt-pres-status", spmi_resource->of_node->name) == 0) { chip->batt_pres_addr = resource->start; continue; } if 
(strcmp("qcom,qpnp-chg-pres", spmi_resource->of_node->name) == 0) { chip->chg_pres_addr = resource->start; continue; } chip->base = resource->start; rc = bms_find_irqs(chip, spmi_resource); if (rc) { pr_err("Could not find irqs rc=%d\n", rc); return rc; } } if (chip->base == 0) { dev_err(&spmi->dev, "BMS peripheral was not registered\n"); return -EINVAL; } pr_debug("bms-base=0x%04x bat-pres-reg=0x%04x qpnp-chg-pres=0x%04x\n", chip->base, chip->batt_pres_addr, chip->chg_pres_addr); return 0; } #define SPMI_PROP_READ(chip_prop, qpnp_spmi_property, retval) \ do { \ if (retval) \ break; \ retval = of_property_read_u32(chip->spmi->dev.of_node, \ "qcom," qpnp_spmi_property, \ &chip->dt.chip_prop); \ if (retval) { \ pr_err("Error reading " #qpnp_spmi_property \ " property %d\n", retval); \ } \ } while (0) #define SPMI_PROP_READ_OPTIONAL(chip_prop, qpnp_spmi_property, retval) \ do { \ retval = of_property_read_u32(chip->spmi->dev.of_node, \ "qcom," qpnp_spmi_property, \ &chip->dt.chip_prop); \ if (retval) \ chip->dt.chip_prop = -EINVAL; \ } while (0) static int parse_bms_dt_properties(struct qpnp_bms_chip *chip) { int rc = 0; SPMI_PROP_READ(cfg_v_cutoff_uv, "v-cutoff-uv", rc); SPMI_PROP_READ(cfg_max_voltage_uv, "max-voltage-uv", rc); SPMI_PROP_READ(cfg_r_conn_mohm, "r-conn-mohm", rc); SPMI_PROP_READ(cfg_shutdown_soc_valid_limit, "shutdown-soc-valid-limit", rc); SPMI_PROP_READ(cfg_low_soc_calc_threshold, "low-soc-calculate-soc-threshold", rc); SPMI_PROP_READ(cfg_low_soc_calculate_soc_ms, "low-soc-calculate-soc-ms", rc); SPMI_PROP_READ(cfg_low_voltage_calculate_soc_ms, "low-voltage-calculate-soc-ms", rc); SPMI_PROP_READ(cfg_calculate_soc_ms, "calculate-soc-ms", rc); SPMI_PROP_READ(cfg_low_voltage_threshold, "low-voltage-threshold", rc); SPMI_PROP_READ(cfg_voltage_soc_timeout_ms, "volatge-soc-timeout-ms", rc); if (rc) { pr_err("Missing required properties rc=%d\n", rc); return rc; } SPMI_PROP_READ_OPTIONAL(cfg_s1_sample_interval_ms, "s1-sample-interval-ms", rc); 
SPMI_PROP_READ_OPTIONAL(cfg_s2_sample_interval_ms, "s2-sample-interval-ms", rc); SPMI_PROP_READ_OPTIONAL(cfg_s1_sample_count, "s1-sample-count", rc); SPMI_PROP_READ_OPTIONAL(cfg_s2_sample_count, "s2-sample-count", rc); SPMI_PROP_READ_OPTIONAL(cfg_s1_fifo_length, "s1-fifo-length", rc); SPMI_PROP_READ_OPTIONAL(cfg_s2_fifo_length, "s2-fifo-length", rc); SPMI_PROP_READ_OPTIONAL(cfg_s3_ocv_tol_uv, "s3-ocv-tolerence-uv", rc); SPMI_PROP_READ_OPTIONAL(cfg_low_soc_fifo_length, "low-soc-fifo-length", rc); SPMI_PROP_READ_OPTIONAL(cfg_soc_resume_limit, "resume-soc", rc); SPMI_PROP_READ_OPTIONAL(cfg_low_temp_threshold, "low-temp-threshold", rc); if (rc) chip->dt.cfg_low_temp_threshold = 0; SPMI_PROP_READ_OPTIONAL(cfg_ibat_avg_samples, "ibat-avg-samples", rc); if (rc || (chip->dt.cfg_ibat_avg_samples <= 0) || (chip->dt.cfg_ibat_avg_samples > IAVG_SAMPLES)) chip->dt.cfg_ibat_avg_samples = IAVG_SAMPLES; chip->dt.cfg_ignore_shutdown_soc = of_property_read_bool( chip->spmi->dev.of_node, "qcom,ignore-shutdown-soc"); chip->dt.cfg_use_voltage_soc = of_property_read_bool( chip->spmi->dev.of_node, "qcom,use-voltage-soc"); chip->dt.cfg_force_s3_on_suspend = of_property_read_bool( chip->spmi->dev.of_node, "qcom,force-s3-on-suspend"); chip->dt.cfg_report_charger_eoc = of_property_read_bool( chip->spmi->dev.of_node, "qcom,report-charger-eoc"); chip->dt.cfg_disable_bms = of_property_read_bool( chip->spmi->dev.of_node, "qcom,disable-bms"); chip->dt.cfg_force_bms_active_on_charger = of_property_read_bool( chip->spmi->dev.of_node, "qcom,force-bms-active-on-charger"); chip->dt.cfg_battery_aging_comp = of_property_read_bool( chip->spmi->dev.of_node, "qcom,batt-aging-comp"); chip->dt.cfg_use_reported_soc = of_property_read_bool( chip->spmi->dev.of_node, "qcom,use-reported-soc"); pr_debug("v_cutoff_uv=%d, max_v=%d\n", chip->dt.cfg_v_cutoff_uv, chip->dt.cfg_max_voltage_uv); pr_debug("r_conn=%d shutdown_soc_valid_limit=%d low_temp_threshold=%d ibat_avg_samples=%d\n", chip->dt.cfg_r_conn_mohm, 
chip->dt.cfg_shutdown_soc_valid_limit,
			chip->dt.cfg_low_temp_threshold,
			chip->dt.cfg_ibat_avg_samples);
	pr_debug("ignore_shutdown_soc=%d, use_voltage_soc=%d low_soc_fifo_length=%d\n",
			chip->dt.cfg_ignore_shutdown_soc,
			chip->dt.cfg_use_voltage_soc,
			chip->dt.cfg_low_soc_fifo_length);
	pr_debug("force-s3-on-suspend=%d report-charger-eoc=%d disable-bms=%d disable-suspend-on-usb=%d aging_compensation=%d\n",
			chip->dt.cfg_force_s3_on_suspend,
			chip->dt.cfg_report_charger_eoc,
			chip->dt.cfg_disable_bms,
			chip->dt.cfg_force_bms_active_on_charger,
			chip->dt.cfg_battery_aging_comp);
	pr_debug("use-reported-soc is %d\n", chip->dt.cfg_use_reported_soc);

	return 0;
}

/*
 * bms_get_adc() - obtain the "bms" VADC and ADC-TM handles.  Either may
 * return -EPROBE_DEFER, which the caller propagates; a missing ADC-TM is
 * only logged (its rc is still returned).
 */
static int bms_get_adc(struct qpnp_bms_chip *chip,
				struct spmi_device *spmi)
{
	int rc = 0;

	chip->vadc_dev = qpnp_get_vadc(&spmi->dev, "bms");
	if (IS_ERR(chip->vadc_dev)) {
		rc = PTR_ERR(chip->vadc_dev);
		if (rc == -EPROBE_DEFER)
			pr_err("vadc not found - defer probe rc=%d\n", rc);
		else
			pr_err("vadc property missing, rc=%d\n", rc);

		return rc;
	}

	chip->adc_tm_dev = qpnp_get_adc_tm(&spmi->dev, "bms");
	if (IS_ERR(chip->adc_tm_dev)) {
		rc = PTR_ERR(chip->adc_tm_dev);
		if (rc == -EPROBE_DEFER)
			pr_err("adc-tm not found - defer probe rc=%d\n", rc);
		else
			pr_err("adc-tm property missing, rc=%d\n", rc);
	}

	return rc;
}

/*
 * register_bms_char_device() - create the /dev/vm_bms character device
 * that userspace reads BMS FIFO/accumulator data from (see bms_fops).
 */
static int register_bms_char_device(struct qpnp_bms_chip *chip)
{
	int rc;

	rc = alloc_chrdev_region(&chip->dev_no, 0, 1, "vm_bms");
	if (rc) {
		pr_err("Unable to allocate chrdev rc=%d\n", rc);
		return rc;
	}
	cdev_init(&chip->bms_cdev, &bms_fops);
	rc = cdev_add(&chip->bms_cdev, chip->dev_no, 1);
	if (rc) {
		pr_err("Unable to add bms_cdev rc=%d\n", rc);
		goto unregister_chrdev;
	}

	chip->bms_class = class_create(THIS_MODULE, "vm_bms");
	if (IS_ERR_OR_NULL(chip->bms_class)) {
		pr_err("Fail to create bms class\n");
		rc = -EINVAL;
		goto delete_cdev;
	}
	/*
	 * NOTE(review): if device_create() below fails, the class created
	 * above is never class_destroy()'d (neither here nor in the remove
	 * path's error unwinding), so chip->bms_class leaks.  Worth a
	 * follow-up fix adding a destroy_class unwind label.
	 */
	chip->bms_device = device_create(chip->bms_class,
					NULL, chip->dev_no, NULL, "vm_bms");
	if (IS_ERR(chip->bms_device)) {
		pr_err("Fail to create bms_device device\n");
		rc = -EINVAL;
		goto delete_cdev;
	}

	return 0;

delete_cdev:
	cdev_del(&chip->bms_cdev);
unregister_chrdev:
	unregister_chrdev_region(chip->dev_no, 1);
	return rc;
}

/*
 * qpnp_vm_bms_probe() - driver probe.  Order matters: ADC handles first
 * (may defer), then PMIC revision (enables the PM8916 2.0 PON-OCV
 * workaround), warm-reset status, DT parsing, optional hardware disable,
 * and finally lock/waitqueue initialization (continued in the next
 * chunk).
 */
static int qpnp_vm_bms_probe(struct spmi_device *spmi)
{
	struct qpnp_bms_chip *chip;
	struct device_node *revid_dev_node;
	int rc, vbatt = 0;

	chip = devm_kzalloc(&spmi->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip) {
		pr_err("kzalloc() failed.\n");
		return -ENOMEM;
	}

	rc = bms_get_adc(chip, spmi);
	if (rc < 0) {
		pr_err("Failed to get adc rc=%d\n", rc);
		return rc;
	}

	revid_dev_node = of_parse_phandle(spmi->dev.of_node,
						"qcom,pmic-revid", 0);
	if (!revid_dev_node) {
		pr_err("Missing qcom,pmic-revid property\n");
		return -EINVAL;
	}
	chip->revid_data = get_revid_data(revid_dev_node);
	if (IS_ERR(chip->revid_data)) {
		pr_err("revid error rc = %ld\n", PTR_ERR(chip->revid_data));
		return -EINVAL;
	}

	/* PM8916 2.0 needs its PON OCV compensated (LDO6 sequencing). */
	if ((chip->revid_data->pmic_subtype == PM8916_V2P0_SUBTYPE) &&
				chip->revid_data->rev4 == PM8916_V2P0_REV4)
		chip->workaround_flag |= WRKARND_PON_OCV_COMP;

	rc = qpnp_pon_is_warm_reset();
	if (rc < 0) {
		pr_err("Error reading warm reset status rc=%d\n", rc);
		return rc;
	}
	chip->warm_reset = !!rc;

	rc = parse_spmi_dt_properties(chip, spmi);
	if (rc) {
		pr_err("Error registering spmi resource rc=%d\n", rc);
		return rc;
	}

	rc = parse_bms_dt_properties(chip);
	if (rc) {
		pr_err("Unable to read all bms properties, rc = %d\n", rc);
		return rc;
	}

	if (chip->dt.cfg_disable_bms) {
		pr_info("VMBMS disabled (disable-bms = 1)\n");
		rc = qpnp_masked_write_base(chip, chip->base + EN_CTL_REG,
							BMS_EN_BIT, 0);
		if (rc)
			pr_err("Unable to disable VMBMS rc=%d\n", rc);
		return -ENODEV;
	}

	rc = qpnp_read_wrapper(chip, chip->revision,
				chip->base + REVISION1_REG, 2);
	if (rc) {
		pr_err("Error reading version register rc=%d\n", rc);
		return rc;
	}

	pr_debug("BMS version: %hhu.%hhu\n",
			chip->revision[1], chip->revision[0]);

	dev_set_drvdata(&spmi->dev, chip);
	device_init_wakeup(&spmi->dev, 1);

	mutex_init(&chip->bms_data_mutex);
	mutex_init(&chip->bms_device_mutex);
	mutex_init(&chip->last_soc_mutex);
mutex_init(&chip->state_change_mutex); init_waitqueue_head(&chip->bms_wait_q); /* read battery-id and select the battery profile */ rc = set_battery_data(chip); if (rc) { pr_err("Unable to read battery data %d\n", rc); goto fail_init; } /* set the battery profile */ rc = config_battery_data(chip->batt_data); if (rc) { pr_err("Unable to config battery data %d\n", rc); goto fail_init; } wakeup_source_init(&chip->vbms_lv_wake_source.source, "vbms_lv_wake"); wakeup_source_init(&chip->vbms_cv_wake_source.source, "vbms_cv_wake"); wakeup_source_init(&chip->vbms_soc_wake_source.source, "vbms_soc_wake"); INIT_DELAYED_WORK(&chip->monitor_soc_work, monitor_soc_work); INIT_DELAYED_WORK(&chip->voltage_soc_timeout_work, voltage_soc_timeout_work); bms_init_defaults(chip); bms_load_hw_defaults(chip); if (is_battery_present(chip)) { rc = setup_vbat_monitoring(chip); if (rc) { pr_err("fail to configure vbat monitoring rc=%d\n", rc); goto fail_setup; } } rc = bms_request_irqs(chip); if (rc) { pr_err("error requesting bms irqs, rc = %d\n", rc); goto fail_irq; } battery_insertion_check(chip); battery_status_check(chip); /* character device to pass data to the userspace */ rc = register_bms_char_device(chip); if (rc) { pr_err("Unable to regiter '/dev/vm_bms' rc=%d\n", rc); goto fail_bms_device; } the_chip = chip; calculate_initial_soc(chip); if (chip->dt.cfg_battery_aging_comp) { rc = calculate_initial_aging_comp(chip); if (rc) pr_err("Unable to calculate initial aging data rc=%d\n", rc); } /* setup & register the battery power supply */ chip->bms_psy.name = "bms"; chip->bms_psy.type = POWER_SUPPLY_TYPE_BMS; chip->bms_psy.properties = bms_power_props; chip->bms_psy.num_properties = ARRAY_SIZE(bms_power_props); chip->bms_psy.get_property = qpnp_vm_bms_power_get_property; chip->bms_psy.set_property = qpnp_vm_bms_power_set_property; chip->bms_psy.external_power_changed = qpnp_vm_bms_ext_power_changed; chip->bms_psy.property_is_writeable = qpnp_vm_bms_property_is_writeable; 
chip->bms_psy.supplied_to = qpnp_vm_bms_supplicants; chip->bms_psy.num_supplicants = ARRAY_SIZE(qpnp_vm_bms_supplicants); rc = power_supply_register(chip->dev, &chip->bms_psy); if (rc < 0) { pr_err("power_supply_register bms failed rc = %d\n", rc); goto fail_psy; } chip->bms_psy_registered = true; rc = get_battery_voltage(chip, &vbatt); if (rc) { pr_err("error reading vbat_sns adc channel=%d, rc=%d\n", VBAT_SNS, rc); goto fail_get_vtg; } chip->debug_root = debugfs_create_dir("qpnp_vmbms", NULL); if (!chip->debug_root) pr_err("Couldn't create debug dir\n"); if (chip->debug_root) { struct dentry *ent; ent = debugfs_create_file("bms_data", S_IFREG | S_IRUGO, chip->debug_root, chip, &bms_data_debugfs_ops); if (!ent) pr_err("Couldn't create bms_data debug file\n"); ent = debugfs_create_file("bms_config", S_IFREG | S_IRUGO, chip->debug_root, chip, &bms_config_debugfs_ops); if (!ent) pr_err("Couldn't create bms_config debug file\n"); ent = debugfs_create_file("bms_status", S_IFREG | S_IRUGO, chip->debug_root, chip, &bms_status_debugfs_ops); if (!ent) pr_err("Couldn't create bms_status debug file\n"); } schedule_delayed_work(&chip->monitor_soc_work, 0); /* * schedule a work to check if the userspace vmbms module * has registered. Fall-back to voltage-based-soc reporting * if it has not. 
*/ schedule_delayed_work(&chip->voltage_soc_timeout_work, msecs_to_jiffies(chip->dt.cfg_voltage_soc_timeout_ms)); pr_info("probe success: soc=%d vbatt=%d ocv=%d warm_reset=%d\n", get_prop_bms_capacity(chip), vbatt, chip->last_ocv_uv, chip->warm_reset); return rc; fail_get_vtg: power_supply_unregister(&chip->bms_psy); fail_psy: device_destroy(chip->bms_class, chip->dev_no); cdev_del(&chip->bms_cdev); unregister_chrdev_region(chip->dev_no, 1); fail_bms_device: chip->bms_psy_registered = false; fail_irq: reset_vbat_monitoring(chip); fail_setup: wakeup_source_trash(&chip->vbms_lv_wake_source.source); wakeup_source_trash(&chip->vbms_cv_wake_source.source); wakeup_source_trash(&chip->vbms_soc_wake_source.source); fail_init: mutex_destroy(&chip->bms_data_mutex); mutex_destroy(&chip->last_soc_mutex); mutex_destroy(&chip->state_change_mutex); mutex_destroy(&chip->bms_device_mutex); the_chip = NULL; return rc; } static int qpnp_vm_bms_remove(struct spmi_device *spmi) { struct qpnp_bms_chip *chip = dev_get_drvdata(&spmi->dev); cancel_delayed_work_sync(&chip->monitor_soc_work); debugfs_remove_recursive(chip->debug_root); device_destroy(chip->bms_class, chip->dev_no); cdev_del(&chip->bms_cdev); unregister_chrdev_region(chip->dev_no, 1); reset_vbat_monitoring(chip); wakeup_source_trash(&chip->vbms_lv_wake_source.source); wakeup_source_trash(&chip->vbms_cv_wake_source.source); wakeup_source_trash(&chip->vbms_soc_wake_source.source); mutex_destroy(&chip->bms_data_mutex); mutex_destroy(&chip->last_soc_mutex); mutex_destroy(&chip->state_change_mutex); mutex_destroy(&chip->bms_device_mutex); power_supply_unregister(&chip->bms_psy); dev_set_drvdata(&spmi->dev, NULL); the_chip = NULL; return 0; } static void process_suspend_data(struct qpnp_bms_chip *chip) { int rc; mutex_lock(&chip->bms_data_mutex); chip->suspend_data_valid = false; memset(&chip->bms_data, 0, sizeof(chip->bms_data)); rc = read_and_populate_fifo_data(chip); if (rc) pr_err("Unable to read FIFO data rc=%d\n", rc); rc = 
read_and_populate_acc_data(chip); if (rc) pr_err("Unable to read ACC_SD data rc=%d\n", rc); rc = clear_fifo_acc_data(chip); if (rc) pr_err("Unable to clear FIFO/ACC data rc=%d\n", rc); if (chip->bms_data.num_fifo || chip->bms_data.acc_count) { pr_debug("suspend data valid\n"); chip->suspend_data_valid = true; } mutex_unlock(&chip->bms_data_mutex); } static void process_resume_data(struct qpnp_bms_chip *chip) { int rc, batt_temp = 0; int old_ocv = 0; bool ocv_updated = false; rc = get_batt_therm(chip, &batt_temp); if (rc < 0) { pr_err("Unable to read batt temp, using default=%d\n", BMS_DEFAULT_TEMP); batt_temp = BMS_DEFAULT_TEMP; } mutex_lock(&chip->bms_data_mutex); /* * We can get a h/w OCV update when the sleep_b * is low, which is possible when APPS is suspended. * So check for an OCV update only in bms_resume */ old_ocv = chip->last_ocv_uv; rc = read_and_update_ocv(chip, batt_temp, false); if (rc) pr_err("Unable to read/upadate OCV rc=%d\n", rc); if (old_ocv != chip->last_ocv_uv) { ocv_updated = true; /* new OCV, clear suspended data */ chip->suspend_data_valid = false; memset(&chip->bms_data, 0, sizeof(chip->bms_data)); chip->calculated_soc = lookup_soc_ocv(chip, chip->last_ocv_uv, batt_temp); pr_debug("OCV in sleep SOC=%d\n", chip->calculated_soc); chip->last_soc_unbound = true; chip->voltage_soc_uv = chip->last_ocv_uv; pr_debug("update bms_psy\n"); power_supply_changed(&chip->bms_psy); } if (ocv_updated || chip->suspend_data_valid) { /* there is data to be sent */ pr_debug("ocv_updated=%d suspend_data_valid=%d\n", ocv_updated, chip->suspend_data_valid); chip->bms_data.seq_num = chip->seq_num++; dump_bms_data(__func__, chip); chip->data_ready = 1; wake_up_interruptible(&chip->bms_wait_q); if (chip->bms_dev_open) pm_stay_awake(chip->dev); } chip->suspend_data_valid = false; mutex_unlock(&chip->bms_data_mutex); } static int bms_suspend(struct device *dev) { struct qpnp_bms_chip *chip = dev_get_drvdata(dev); bool battery_charging = is_battery_charging(chip); bool 
hi_power_state = is_hi_power_state_requested(chip); bool charger_present = is_charger_present(chip); bool bms_suspend_config; /* * Keep BMS FSM active if 'cfg_force_bms_active_on_charger' property * is present and charger inserted. This ensures that recharge * starts once battery SOC falls below resume_soc. */ bms_suspend_config = chip->dt.cfg_force_bms_active_on_charger && charger_present; chip->apply_suspend_config = false; if (!battery_charging && !hi_power_state && !bms_suspend_config) chip->apply_suspend_config = true; pr_debug("battery_charging=%d power_state=%s hi_power_state=0x%x apply_suspend_config=%d bms_suspend_config=%d usb_present=%d\n", battery_charging, hi_power_state ? "hi" : "low", chip->hi_power_state, chip->apply_suspend_config, bms_suspend_config, charger_present); if (chip->apply_suspend_config) { if (chip->dt.cfg_force_s3_on_suspend) { pr_debug("Forcing S3 state\n"); mutex_lock(&chip->state_change_mutex); force_fsm_state(chip, S3_STATE); mutex_unlock(&chip->state_change_mutex); /* Store accumulated data if any */ process_suspend_data(chip); } } cancel_delayed_work_sync(&chip->monitor_soc_work); return 0; } static int bms_resume(struct device *dev) { u8 state = 0; int rc, monitor_soc_delay = 0; unsigned long tm_now_sec; struct qpnp_bms_chip *chip = dev_get_drvdata(dev); if (chip->apply_suspend_config) { if (chip->dt.cfg_force_s3_on_suspend) { /* * Update the state to S2 only if we are in S3. There is * a possibility of being in S2 if we resumed on * a charger insertion */ mutex_lock(&chip->state_change_mutex); rc = get_fsm_state(chip, &state); if (rc) pr_err("Unable to get FSM state rc=%d\n", rc); if (rc || (state == S3_STATE)) { pr_debug("Unforcing S3 state, setting S2 state\n"); force_fsm_state(chip, S2_STATE); } mutex_unlock(&chip->state_change_mutex); /* * if we were charging while suspended, we will * be woken up by the fifo done interrupt and no * additional processing is needed. 
*/ process_resume_data(chip); } } /* Start monitor_soc_work based on when it last executed */ rc = get_current_time(&tm_now_sec); if (rc) { pr_err("Could not read current time: %d\n", rc); } else { monitor_soc_delay = get_calculation_delay_ms(chip) - ((tm_now_sec - chip->tm_sec) * 1000); monitor_soc_delay = max(0, monitor_soc_delay); } pr_debug("monitor_soc_delay_sec=%d tm_now_sec=%ld chip->tm_sec=%ld\n", monitor_soc_delay / 1000, tm_now_sec, chip->tm_sec); schedule_delayed_work(&chip->monitor_soc_work, msecs_to_jiffies(monitor_soc_delay)); return 0; } static const struct dev_pm_ops qpnp_vm_bms_pm_ops = { .suspend = bms_suspend, .resume = bms_resume, }; static struct of_device_id qpnp_vm_bms_match_table[] = { { .compatible = QPNP_VM_BMS_DEV_NAME }, {} }; static struct spmi_driver qpnp_vm_bms_driver = { .probe = qpnp_vm_bms_probe, .remove = qpnp_vm_bms_remove, .driver = { .name = QPNP_VM_BMS_DEV_NAME, .owner = THIS_MODULE, .of_match_table = qpnp_vm_bms_match_table, .pm = &qpnp_vm_bms_pm_ops, }, }; static int __init qpnp_vm_bms_init(void) { return spmi_driver_register(&qpnp_vm_bms_driver); } module_init(qpnp_vm_bms_init); static void __exit qpnp_vm_bms_exit(void) { return spmi_driver_unregister(&qpnp_vm_bms_driver); } module_exit(qpnp_vm_bms_exit); MODULE_DESCRIPTION("QPNP VM-BMS Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" QPNP_VM_BMS_DEV_NAME);
gpl-2.0
aosp-lb-nozomi/android_kernel_sony_msm8660
drivers/media/video/msm/io/msm_io_vfe31.c
242
20592
/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/pm_qos.h> #include <linux/regulator/consumer.h> #include <mach/gpio.h> #include <mach/board.h> #include <mach/camera.h> #include <mach/vreg.h> #include <mach/clk.h> #define CAMIF_CFG_RMSK 0x1fffff #define CAM_SEL_BMSK 0x2 #define CAM_PCLK_SRC_SEL_BMSK 0x60000 #define CAM_PCLK_INVERT_BMSK 0x80000 #define CAM_PAD_REG_SW_RESET_BMSK 0x100000 #define EXT_CAM_HSYNC_POL_SEL_BMSK 0x10000 #define EXT_CAM_VSYNC_POL_SEL_BMSK 0x8000 #define MDDI_CLK_CHICKEN_BIT_BMSK 0x80 #define CAM_SEL_SHFT 0x1 #define CAM_PCLK_SRC_SEL_SHFT 0x11 #define CAM_PCLK_INVERT_SHFT 0x13 #define CAM_PAD_REG_SW_RESET_SHFT 0x14 #define EXT_CAM_HSYNC_POL_SEL_SHFT 0x10 #define EXT_CAM_VSYNC_POL_SEL_SHFT 0xF #define MDDI_CLK_CHICKEN_BIT_SHFT 0x7 /* MIPI CSI controller registers */ #define MIPI_PHY_CONTROL 0x00000000 #define MIPI_PROTOCOL_CONTROL 0x00000004 #define MIPI_INTERRUPT_STATUS 0x00000008 #define MIPI_INTERRUPT_MASK 0x0000000C #define MIPI_CAMERA_CNTL 0x00000024 #define MIPI_CALIBRATION_CONTROL 0x00000018 #define MIPI_PHY_D0_CONTROL2 0x00000038 #define MIPI_PHY_D1_CONTROL2 0x0000003C #define MIPI_PHY_D2_CONTROL2 0x00000040 #define MIPI_PHY_D3_CONTROL2 0x00000044 #define MIPI_PHY_CL_CONTROL 0x00000048 #define MIPI_PHY_D0_CONTROL 0x00000034 #define MIPI_PHY_D1_CONTROL 0x00000020 #define MIPI_PHY_D2_CONTROL 0x0000002C #define MIPI_PHY_D3_CONTROL 0x00000030 #define MIPI_PROTOCOL_CONTROL_SW_RST_BMSK 
0x8000000 #define MIPI_PROTOCOL_CONTROL_LONG_PACKET_HEADER_CAPTURE_BMSK 0x200000 #define MIPI_PROTOCOL_CONTROL_DATA_FORMAT_BMSK 0x180000 #define MIPI_PROTOCOL_CONTROL_DECODE_ID_BMSK 0x40000 #define MIPI_PROTOCOL_CONTROL_ECC_EN_BMSK 0x20000 #define MIPI_CALIBRATION_CONTROL_SWCAL_CAL_EN_SHFT 0x16 #define MIPI_CALIBRATION_CONTROL_SWCAL_STRENGTH_OVERRIDE_EN_SHFT 0x15 #define MIPI_CALIBRATION_CONTROL_CAL_SW_HW_MODE_SHFT 0x14 #define MIPI_CALIBRATION_CONTROL_MANUAL_OVERRIDE_EN_SHFT 0x7 #define MIPI_PROTOCOL_CONTROL_DATA_FORMAT_SHFT 0x13 #define MIPI_PROTOCOL_CONTROL_DPCM_SCHEME_SHFT 0x1e #define MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT 0x18 #define MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT 0x10 #define MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT 0x4 #define MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT 0x3 #define MIPI_PHY_D1_CONTROL2_SETTLE_COUNT_SHFT 0x18 #define MIPI_PHY_D1_CONTROL2_HS_TERM_IMP_SHFT 0x10 #define MIPI_PHY_D1_CONTROL2_LP_REC_EN_SHFT 0x4 #define MIPI_PHY_D1_CONTROL2_ERR_SOT_HS_EN_SHFT 0x3 #define MIPI_PHY_D2_CONTROL2_SETTLE_COUNT_SHFT 0x18 #define MIPI_PHY_D2_CONTROL2_HS_TERM_IMP_SHFT 0x10 #define MIPI_PHY_D2_CONTROL2_LP_REC_EN_SHFT 0x4 #define MIPI_PHY_D2_CONTROL2_ERR_SOT_HS_EN_SHFT 0x3 #define MIPI_PHY_D3_CONTROL2_SETTLE_COUNT_SHFT 0x18 #define MIPI_PHY_D3_CONTROL2_HS_TERM_IMP_SHFT 0x10 #define MIPI_PHY_D3_CONTROL2_LP_REC_EN_SHFT 0x4 #define MIPI_PHY_D3_CONTROL2_ERR_SOT_HS_EN_SHFT 0x3 #define MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT 0x18 #define MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT 0x2 #define MIPI_PHY_D0_CONTROL_HS_REC_EQ_SHFT 0x1c #define MIPI_PHY_D1_CONTROL_MIPI_CLK_PHY_SHUTDOWNB_SHFT 0x9 #define MIPI_PHY_D1_CONTROL_MIPI_DATA_PHY_SHUTDOWNB_SHFT 0x8 #define CAMIO_VFE_CLK_SNAP 122880000 #define CAMIO_VFE_CLK_PREV 122880000 /* AXI rates in KHz */ #define MSM_AXI_QOS_PREVIEW 192000 #define MSM_AXI_QOS_SNAPSHOT 192000 #define MSM_AXI_QOS_RECORDING 192000 static struct clk *camio_vfe_mdc_clk; static struct clk *camio_mdc_clk; static struct clk *camio_vfe_clk; static struct clk 
*camio_vfe_camif_clk; static struct clk *camio_vfe_pbdg_clk; static struct clk *camio_cam_m_clk; static struct clk *camio_camif_pad_pbdg_clk; static struct clk *camio_csi_clk; static struct clk *camio_csi_pclk; static struct clk *camio_csi_vfe_clk; static struct clk *camio_vpe_clk; static struct regulator *fs_vpe; static struct msm_camera_io_ext camio_ext; static struct msm_camera_io_clk camio_clk; static struct resource *camifpadio, *csiio; void __iomem *camifpadbase, *csibase; static uint32_t vpe_clk_rate; static struct regulator_bulk_data regs[] = { { .supply = "gp2", .min_uV = 2600000, .max_uV = 2600000 }, { .supply = "lvsw1" }, { .supply = "fs_vfe" }, /* sn12m0pz regulators */ { .supply = "gp6", .min_uV = 3050000, .max_uV = 3100000 }, { .supply = "gp16", .min_uV = 1200000, .max_uV = 1200000 }, }; static int reg_count; static void msm_camera_vreg_enable(struct platform_device *pdev) { int count, rc; struct device *dev = &pdev->dev; /* Use gp6 and gp16 if and only if dev name matches. 
*/ if (!strncmp(pdev->name, "msm_camera_sn12m0pz", 20)) count = ARRAY_SIZE(regs); else count = ARRAY_SIZE(regs) - 2; rc = regulator_bulk_get(dev, count, regs); if (rc) { dev_err(dev, "%s: could not get regulators: %d\n", __func__, rc); return; } rc = regulator_bulk_set_voltage(count, regs); if (rc) { dev_err(dev, "%s: could not set voltages: %d\n", __func__, rc); goto reg_free; } rc = regulator_bulk_enable(count, regs); if (rc) { dev_err(dev, "%s: could not enable regulators: %d\n", __func__, rc); goto reg_free; } reg_count = count; return; reg_free: regulator_bulk_free(count, regs); return; } static void msm_camera_vreg_disable(void) { regulator_bulk_disable(reg_count, regs); regulator_bulk_free(reg_count, regs); reg_count = 0; } int msm_camio_clk_enable(enum msm_camio_clk_type clktype) { int rc = 0; struct clk *clk = NULL; switch (clktype) { case CAMIO_VFE_MDC_CLK: camio_vfe_mdc_clk = clk = clk_get(NULL, "vfe_mdc_clk"); break; case CAMIO_MDC_CLK: camio_mdc_clk = clk = clk_get(NULL, "mdc_clk"); break; case CAMIO_VFE_CLK: camio_vfe_clk = clk = clk_get(NULL, "vfe_clk"); msm_camio_clk_rate_set_2(clk, camio_clk.vfe_clk_rate); break; case CAMIO_VFE_CAMIF_CLK: camio_vfe_camif_clk = clk = clk_get(NULL, "vfe_camif_clk"); break; case CAMIO_VFE_PBDG_CLK: camio_vfe_pbdg_clk = clk = clk_get(NULL, "vfe_pclk"); break; case CAMIO_CAM_MCLK_CLK: camio_cam_m_clk = clk = clk_get(NULL, "cam_m_clk"); msm_camio_clk_rate_set_2(clk, camio_clk.mclk_clk_rate); break; case CAMIO_CAMIF_PAD_PBDG_CLK: camio_camif_pad_pbdg_clk = clk = clk_get(NULL, "camif_pad_pclk"); break; case CAMIO_CSI0_CLK: camio_csi_clk = clk = clk_get(NULL, "csi_clk"); msm_camio_clk_rate_set_2(clk, 153600000); break; case CAMIO_CSI0_VFE_CLK: camio_csi_vfe_clk = clk = clk_get(NULL, "csi_vfe_clk"); break; case CAMIO_CSI0_PCLK: camio_csi_pclk = clk = clk_get(NULL, "csi_pclk"); break; case CAMIO_VPE_CLK: camio_vpe_clk = clk = clk_get(NULL, "vpe_clk"); vpe_clk_rate = clk_round_rate(clk, vpe_clk_rate); clk_set_rate(clk, 
vpe_clk_rate); break; default: break; } if (!IS_ERR(clk)) clk_enable(clk); else rc = -1; return rc; } int msm_camio_clk_disable(enum msm_camio_clk_type clktype) { int rc = 0; struct clk *clk = NULL; switch (clktype) { case CAMIO_VFE_MDC_CLK: clk = camio_vfe_mdc_clk; break; case CAMIO_MDC_CLK: clk = camio_mdc_clk; break; case CAMIO_VFE_CLK: clk = camio_vfe_clk; break; case CAMIO_VFE_CAMIF_CLK: clk = camio_vfe_camif_clk; break; case CAMIO_VFE_PBDG_CLK: clk = camio_vfe_pbdg_clk; break; case CAMIO_CAM_MCLK_CLK: clk = camio_cam_m_clk; break; case CAMIO_CAMIF_PAD_PBDG_CLK: clk = camio_camif_pad_pbdg_clk; break; case CAMIO_CSI0_CLK: clk = camio_csi_clk; break; case CAMIO_CSI0_VFE_CLK: clk = camio_csi_vfe_clk; break; case CAMIO_CSI0_PCLK: clk = camio_csi_pclk; break; case CAMIO_VPE_CLK: clk = camio_vpe_clk; break; default: break; } if (!IS_ERR(clk)) { clk_disable(clk); clk_put(clk); } else rc = -1; return rc; } void msm_camio_clk_rate_set(int rate) { struct clk *clk = camio_cam_m_clk; clk_set_rate(clk, rate); } int msm_camio_vfe_clk_rate_set(int rate) { struct clk *clk = camio_vfe_clk; return clk_set_rate(clk, rate); } void msm_camio_clk_rate_set_2(struct clk *clk, int rate) { clk_set_rate(clk, rate); } static irqreturn_t msm_io_csi_irq(int irq_num, void *data) { uint32_t irq; irq = msm_camera_io_r(csibase + MIPI_INTERRUPT_STATUS); CDBG("%s MIPI_INTERRUPT_STATUS = 0x%x\n", __func__, irq); msm_camera_io_w(irq, csibase + MIPI_INTERRUPT_STATUS); return IRQ_HANDLED; } int msm_camio_vpe_clk_disable(void) { msm_camio_clk_disable(CAMIO_VPE_CLK); if (fs_vpe) { regulator_disable(fs_vpe); regulator_put(fs_vpe); } return 0; } int msm_camio_vpe_clk_enable(uint32_t clk_rate) { fs_vpe = regulator_get(NULL, "fs_vpe"); if (IS_ERR(fs_vpe)) { pr_err("%s: Regulator FS_VPE get failed %ld\n", __func__, PTR_ERR(fs_vpe)); fs_vpe = NULL; } else if (regulator_enable(fs_vpe)) { pr_err("%s: Regulator FS_VPE enable failed\n", __func__); regulator_put(fs_vpe); } vpe_clk_rate = clk_rate; 
msm_camio_clk_enable(CAMIO_VPE_CLK); return 0; } int msm_camio_enable(struct platform_device *pdev) { int rc = 0; struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; msm_camio_clk_enable(CAMIO_VFE_PBDG_CLK); if (!sinfo->csi_if) msm_camio_clk_enable(CAMIO_VFE_CAMIF_CLK); else { msm_camio_clk_enable(CAMIO_VFE_CLK); csiio = request_mem_region(camio_ext.csiphy, camio_ext.csisz, pdev->name); if (!csiio) { rc = -EBUSY; goto common_fail; } csibase = ioremap(camio_ext.csiphy, camio_ext.csisz); if (!csibase) { rc = -ENOMEM; goto csi_busy; } rc = request_irq(camio_ext.csiirq, msm_io_csi_irq, IRQF_TRIGGER_RISING, "csi", 0); if (rc < 0) goto csi_irq_fail; /* enable required clocks for CSI */ msm_camio_clk_enable(CAMIO_CSI0_PCLK); msm_camio_clk_enable(CAMIO_CSI0_VFE_CLK); msm_camio_clk_enable(CAMIO_CSI0_CLK); } return 0; csi_irq_fail: iounmap(csibase); csi_busy: release_mem_region(camio_ext.csiphy, camio_ext.csisz); common_fail: msm_camio_clk_disable(CAMIO_VFE_PBDG_CLK); msm_camio_clk_disable(CAMIO_VFE_CLK); return rc; } static void msm_camio_csi_disable(void) { uint32_t val; val = 0x0; CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val); msm_camera_io_w(val, csibase + MIPI_PHY_D0_CONTROL2); msm_camera_io_w(val, csibase + MIPI_PHY_D1_CONTROL2); msm_camera_io_w(val, csibase + MIPI_PHY_D2_CONTROL2); msm_camera_io_w(val, csibase + MIPI_PHY_D3_CONTROL2); CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val); msm_camera_io_w(val, csibase + MIPI_PHY_CL_CONTROL); usleep_range(9000, 10000); free_irq(camio_ext.csiirq, 0); iounmap(csibase); release_mem_region(camio_ext.csiphy, camio_ext.csisz); } void msm_camio_disable(struct platform_device *pdev) { struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; if (!sinfo->csi_if) { msm_camio_clk_disable(CAMIO_VFE_CAMIF_CLK); } else { CDBG("disable mipi\n"); msm_camio_csi_disable(); CDBG("disable clocks\n"); msm_camio_clk_disable(CAMIO_CSI0_PCLK); msm_camio_clk_disable(CAMIO_CSI0_VFE_CLK); 
msm_camio_clk_disable(CAMIO_CSI0_CLK); msm_camio_clk_disable(CAMIO_VFE_CLK); } msm_camio_clk_disable(CAMIO_VFE_PBDG_CLK); } void msm_camio_camif_pad_reg_reset(void) { uint32_t reg; msm_camio_clk_sel(MSM_CAMIO_CLK_SRC_INTERNAL); usleep_range(10000, 11000); reg = (msm_camera_io_r(camifpadbase)) & CAMIF_CFG_RMSK; reg |= 0x3; msm_camera_io_w(reg, camifpadbase); usleep_range(10000, 11000); reg = (msm_camera_io_r(camifpadbase)) & CAMIF_CFG_RMSK; reg |= 0x10; msm_camera_io_w(reg, camifpadbase); usleep_range(10000, 11000); reg = (msm_camera_io_r(camifpadbase)) & CAMIF_CFG_RMSK; /* Need to be uninverted*/ reg &= 0x03; msm_camera_io_w(reg, camifpadbase); usleep_range(10000, 11000); } void msm_camio_vfe_blk_reset(void) { return; } void msm_camio_camif_pad_reg_reset_2(void) { uint32_t reg; uint32_t mask, value; reg = (msm_camera_io_r(camifpadbase)) & CAMIF_CFG_RMSK; mask = CAM_PAD_REG_SW_RESET_BMSK; value = 1 << CAM_PAD_REG_SW_RESET_SHFT; msm_camera_io_w((reg & (~mask)) | (value & mask), camifpadbase); usleep_range(10000, 11000); reg = (msm_camera_io_r(camifpadbase)) & CAMIF_CFG_RMSK; mask = CAM_PAD_REG_SW_RESET_BMSK; value = 0 << CAM_PAD_REG_SW_RESET_SHFT; msm_camera_io_w((reg & (~mask)) | (value & mask), camifpadbase); usleep_range(10000, 11000); } void msm_camio_clk_sel(enum msm_camio_clk_src_type srctype) { struct clk *clk = NULL; clk = camio_vfe_clk; if (clk != NULL) { switch (srctype) { case MSM_CAMIO_CLK_SRC_INTERNAL: clk_set_flags(clk, 0x00000100 << 1); break; case MSM_CAMIO_CLK_SRC_EXTERNAL: clk_set_flags(clk, 0x00000100); break; default: break; } } } int msm_camio_probe_on(struct platform_device *pdev) { struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; struct msm_camera_device_platform_data *camdev = sinfo->pdata; camio_clk = camdev->ioclk; camio_ext = camdev->ioext; camdev->camera_gpio_on(); msm_camera_vreg_enable(pdev); return msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK); } int msm_camio_probe_off(struct platform_device *pdev) { struct 
msm_camera_sensor_info *sinfo = pdev->dev.platform_data; struct msm_camera_device_platform_data *camdev = sinfo->pdata; msm_camera_vreg_disable(); camdev->camera_gpio_off(); return msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK); } int msm_camio_sensor_clk_on(struct platform_device *pdev) { int rc = 0; struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; struct msm_camera_device_platform_data *camdev = sinfo->pdata; camio_clk = camdev->ioclk; camio_ext = camdev->ioext; camdev->camera_gpio_on(); msm_camera_vreg_enable(pdev); msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK); msm_camio_clk_enable(CAMIO_CAMIF_PAD_PBDG_CLK); if (!sinfo->csi_if) { camifpadio = request_mem_region(camio_ext.camifpadphy, camio_ext.camifpadsz, pdev->name); msm_camio_clk_enable(CAMIO_VFE_CLK); if (!camifpadio) { rc = -EBUSY; goto common_fail; } camifpadbase = ioremap(camio_ext.camifpadphy, camio_ext.camifpadsz); if (!camifpadbase) { CDBG("msm_camio_sensor_clk_on fail\n"); rc = -ENOMEM; goto parallel_busy; } } return rc; parallel_busy: release_mem_region(camio_ext.camifpadphy, camio_ext.camifpadsz); goto common_fail; common_fail: msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK); msm_camio_clk_disable(CAMIO_VFE_CLK); msm_camio_clk_disable(CAMIO_CAMIF_PAD_PBDG_CLK); msm_camera_vreg_disable(); camdev->camera_gpio_off(); return rc; } int msm_camio_sensor_clk_off(struct platform_device *pdev) { uint32_t rc = 0; struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; struct msm_camera_device_platform_data *camdev = sinfo->pdata; camdev->camera_gpio_off(); msm_camera_vreg_disable(); rc = msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK); rc = msm_camio_clk_disable(CAMIO_CAMIF_PAD_PBDG_CLK); if (!sinfo->csi_if) { iounmap(camifpadbase); release_mem_region(camio_ext.camifpadphy, camio_ext.camifpadsz); rc = msm_camio_clk_disable(CAMIO_VFE_CLK); } return rc; } int msm_camio_csi_config(struct msm_camera_csi_params *csi_params) { int rc = 0; uint32_t val = 0; int i; CDBG("msm_camio_csi_config\n"); /* SOT_ECC_EN 
enable error correction for SYNC (data-lane) */ msm_camera_io_w(0x4, csibase + MIPI_PHY_CONTROL); /* SW_RST to the CSI core */ msm_camera_io_w(MIPI_PROTOCOL_CONTROL_SW_RST_BMSK, csibase + MIPI_PROTOCOL_CONTROL); /* PROTOCOL CONTROL */ val = MIPI_PROTOCOL_CONTROL_LONG_PACKET_HEADER_CAPTURE_BMSK | MIPI_PROTOCOL_CONTROL_DECODE_ID_BMSK | MIPI_PROTOCOL_CONTROL_ECC_EN_BMSK; val |= (uint32_t)(csi_params->data_format) << MIPI_PROTOCOL_CONTROL_DATA_FORMAT_SHFT; val |= csi_params->dpcm_scheme << MIPI_PROTOCOL_CONTROL_DPCM_SCHEME_SHFT; CDBG("%s MIPI_PROTOCOL_CONTROL val=0x%x\n", __func__, val); msm_camera_io_w(val, csibase + MIPI_PROTOCOL_CONTROL); /* SW CAL EN */ val = (0x1 << MIPI_CALIBRATION_CONTROL_SWCAL_CAL_EN_SHFT) | (0x1 << MIPI_CALIBRATION_CONTROL_SWCAL_STRENGTH_OVERRIDE_EN_SHFT) | (0x1 << MIPI_CALIBRATION_CONTROL_CAL_SW_HW_MODE_SHFT) | (0x1 << MIPI_CALIBRATION_CONTROL_MANUAL_OVERRIDE_EN_SHFT); CDBG("%s MIPI_CALIBRATION_CONTROL val=0x%x\n", __func__, val); msm_camera_io_w(val, csibase + MIPI_CALIBRATION_CONTROL); /* settle_cnt is very sensitive to speed! 
increase this value to run at higher speeds */ val = (csi_params->settle_cnt << MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT) | (0x0F << MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT) | (0x1 << MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT) | (0x1 << MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT); CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val); for (i = 0; i < csi_params->lane_cnt; i++) msm_camera_io_w(val, csibase + MIPI_PHY_D0_CONTROL2 + i * 4); val = (0x0F << MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT) | (0x1 << MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT); CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val); msm_camera_io_w(val, csibase + MIPI_PHY_CL_CONTROL); val = 0 << MIPI_PHY_D0_CONTROL_HS_REC_EQ_SHFT; msm_camera_io_w(val, csibase + MIPI_PHY_D0_CONTROL); val = (0x1 << MIPI_PHY_D1_CONTROL_MIPI_CLK_PHY_SHUTDOWNB_SHFT) | (0x1 << MIPI_PHY_D1_CONTROL_MIPI_DATA_PHY_SHUTDOWNB_SHFT); CDBG("%s MIPI_PHY_D1_CONTROL val=0x%x\n", __func__, val); msm_camera_io_w(val, csibase + MIPI_PHY_D1_CONTROL); msm_camera_io_w(0x00000000, csibase + MIPI_PHY_D2_CONTROL); msm_camera_io_w(0x00000000, csibase + MIPI_PHY_D3_CONTROL); /* halcyon only supports 1 or 2 lane */ switch (csi_params->lane_cnt) { case 1: msm_camera_io_w(csi_params->lane_assign << 8 | 0x4, csibase + MIPI_CAMERA_CNTL); break; case 2: msm_camera_io_w(csi_params->lane_assign << 8 | 0x5, csibase + MIPI_CAMERA_CNTL); break; case 3: msm_camera_io_w(csi_params->lane_assign << 8 | 0x6, csibase + MIPI_CAMERA_CNTL); break; case 4: msm_camera_io_w(csi_params->lane_assign << 8 | 0x7, csibase + MIPI_CAMERA_CNTL); break; } /* mask out ID_ERROR[19], DATA_CMM_ERR[11] and CLK_CMM_ERR[10] - de-featured */ msm_camera_io_w(0xFFF7F3FF, csibase + MIPI_INTERRUPT_MASK); /*clear IRQ bits*/ msm_camera_io_w(0xFFF7F3FF, csibase + MIPI_INTERRUPT_STATUS); return rc; } void msm_camio_set_perf_lvl(enum msm_bus_perf_setting perf_setting) { switch (perf_setting) { case S_INIT: add_axi_qos(); break; case S_PREVIEW: update_axi_qos(MSM_AXI_QOS_PREVIEW); break; case S_VIDEO: 
update_axi_qos(MSM_AXI_QOS_RECORDING); break; case S_CAPTURE: update_axi_qos(MSM_AXI_QOS_SNAPSHOT); break; case S_DEFAULT: update_axi_qos(PM_QOS_DEFAULT_VALUE); break; case S_EXIT: release_axi_qos(); break; default: CDBG("%s: INVALID CASE\n", __func__); } } int msm_cam_core_reset(void) { struct clk *clk1; int rc = 0; clk1 = clk_get(NULL, "csi_vfe_clk"); if (IS_ERR(clk1)) { pr_err("%s: did not get csi_vfe_clk\n", __func__); return PTR_ERR(clk1); } rc = clk_reset(clk1, CLK_RESET_ASSERT); if (rc) { pr_err("%s:csi_vfe_clk assert failed\n", __func__); clk_put(clk1); return rc; } usleep_range(1000, 1200); rc = clk_reset(clk1, CLK_RESET_DEASSERT); if (rc) { pr_err("%s:csi_vfe_clk deassert failed\n", __func__); clk_put(clk1); return rc; } clk_put(clk1); clk1 = clk_get(NULL, "csi_clk"); if (IS_ERR(clk1)) { pr_err("%s: did not get csi_clk\n", __func__); return PTR_ERR(clk1); } rc = clk_reset(clk1, CLK_RESET_ASSERT); if (rc) { pr_err("%s:csi_clk assert failed\n", __func__); clk_put(clk1); return rc; } usleep_range(1000, 1200); rc = clk_reset(clk1, CLK_RESET_DEASSERT); if (rc) { pr_err("%s:csi_clk deassert failed\n", __func__); clk_put(clk1); return rc; } clk_put(clk1); clk1 = clk_get(NULL, "csi_pclk"); if (IS_ERR(clk1)) { pr_err("%s: did not get csi_pclk\n", __func__); return PTR_ERR(clk1); } rc = clk_reset(clk1, CLK_RESET_ASSERT); if (rc) { pr_err("%s:csi_pclk assert failed\n", __func__); clk_put(clk1); return rc; } usleep_range(1000, 1200); rc = clk_reset(clk1, CLK_RESET_DEASSERT); if (rc) { pr_err("%s:csi_pclk deassert failed\n", __func__); clk_put(clk1); return rc; } clk_put(clk1); return rc; }
gpl-2.0
deadman96385/android_kernel_leeco_msm8996
kernel/locking/lockdep.c
498
106481
/* * kernel/lockdep.c * * Runtime locking correctness validator * * Started by Ingo Molnar: * * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * * this code maps all the lock dependencies as they occur in a live kernel * and will warn about the following classes of locking bugs: * * - lock inversion scenarios * - circular lock dependencies * - hardirq/softirq safe/unsafe locking bugs * * Bugs are reported even if the current locking scenario does not cause * any deadlock at this point. * * I.e. if anytime in the past two locks were taken in a different order, * even if it happened for another task, even if those were different * locks (but of the same class as this lock), this code will detect it. * * Thanks to Arjan van de Ven for coming up with the initial idea of * mapping lock dependencies runtime. */ #define DISABLE_BRANCH_PROFILING #include <linux/mutex.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> #include <linux/interrupt.h> #include <linux/stacktrace.h> #include <linux/debug_locks.h> #include <linux/irqflags.h> #include <linux/utsname.h> #include <linux/hash.h> #include <linux/ftrace.h> #include <linux/stringify.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <linux/kmemcheck.h> #include <asm/sections.h> #include "lockdep_internals.h" #define CREATE_TRACE_POINTS #include <trace/events/lock.h> #ifdef CONFIG_PROVE_LOCKING int prove_locking = 1; module_param(prove_locking, int, 0644); #else #define prove_locking 0 #endif #ifdef CONFIG_LOCK_STAT int lock_stat = 1; module_param(lock_stat, int, 0644); #else #define lock_stat 0 #endif /* * lockdep_lock: protects the lockdep graph, the hashes and the * class/list/hash allocators. 
* * This is one of the rare exceptions where it's justified * to use a raw spinlock - we really dont want the spinlock * code to recurse back into the lockdep code... */ static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static int graph_lock(void) { arch_spin_lock(&lockdep_lock); /* * Make sure that if another CPU detected a bug while * walking the graph we dont change it (while the other * CPU is busy printing out stuff with the graph lock * dropped already) */ if (!debug_locks) { arch_spin_unlock(&lockdep_lock); return 0; } /* prevent any recursions within lockdep from causing deadlocks */ current->lockdep_recursion++; return 1; } static inline int graph_unlock(void) { if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) { /* * The lockdep graph lock isn't locked while we expect it to * be, we're confused now, bye! */ return DEBUG_LOCKS_WARN_ON(1); } current->lockdep_recursion--; arch_spin_unlock(&lockdep_lock); return 0; } /* * Turn lock debugging off and return with 0 if it was off already, * and also release the graph lock: */ static inline int debug_locks_off_graph_unlock(void) { int ret = debug_locks_off(); arch_spin_unlock(&lockdep_lock); return ret; } static int lockdep_initialized; unsigned long nr_list_entries; static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; /* * All data structures here are protected by the global debug_lock. * * Mutex key structs only get allocated, once during bootup, and never * get freed - this significantly simplifies the debugging code. */ unsigned long nr_lock_classes; static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; static inline struct lock_class *hlock_class(struct held_lock *hlock) { if (!hlock->class_idx) { /* * Someone passed in garbage, we give up. 
*/ DEBUG_LOCKS_WARN_ON(1); return NULL; } return lock_classes + hlock->class_idx - 1; } #ifdef CONFIG_LOCK_STAT static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats); static inline u64 lockstat_clock(void) { return local_clock(); } static int lock_point(unsigned long points[], unsigned long ip) { int i; for (i = 0; i < LOCKSTAT_POINTS; i++) { if (points[i] == 0) { points[i] = ip; break; } if (points[i] == ip) break; } return i; } static void lock_time_inc(struct lock_time *lt, u64 time) { if (time > lt->max) lt->max = time; if (time < lt->min || !lt->nr) lt->min = time; lt->total += time; lt->nr++; } static inline void lock_time_add(struct lock_time *src, struct lock_time *dst) { if (!src->nr) return; if (src->max > dst->max) dst->max = src->max; if (src->min < dst->min || !dst->nr) dst->min = src->min; dst->total += src->total; dst->nr += src->nr; } struct lock_class_stats lock_stats(struct lock_class *class) { struct lock_class_stats stats; int cpu, i; memset(&stats, 0, sizeof(struct lock_class_stats)); for_each_possible_cpu(cpu) { struct lock_class_stats *pcs = &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++) stats.contention_point[i] += pcs->contention_point[i]; for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++) stats.contending_point[i] += pcs->contending_point[i]; lock_time_add(&pcs->read_waittime, &stats.read_waittime); lock_time_add(&pcs->write_waittime, &stats.write_waittime); lock_time_add(&pcs->read_holdtime, &stats.read_holdtime); lock_time_add(&pcs->write_holdtime, &stats.write_holdtime); for (i = 0; i < ARRAY_SIZE(stats.bounces); i++) stats.bounces[i] += pcs->bounces[i]; } return stats; } void clear_lock_stats(struct lock_class *class) { int cpu; for_each_possible_cpu(cpu) { struct lock_class_stats *cpu_stats = &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; memset(cpu_stats, 0, sizeof(struct lock_class_stats)); } memset(class->contention_point, 0, 
sizeof(class->contention_point)); memset(class->contending_point, 0, sizeof(class->contending_point)); } static struct lock_class_stats *get_lock_stats(struct lock_class *class) { return &get_cpu_var(cpu_lock_stats)[class - lock_classes]; } static void put_lock_stats(struct lock_class_stats *stats) { put_cpu_var(cpu_lock_stats); } static void lock_release_holdtime(struct held_lock *hlock) { struct lock_class_stats *stats; u64 holdtime; if (!lock_stat) return; holdtime = lockstat_clock() - hlock->holdtime_stamp; stats = get_lock_stats(hlock_class(hlock)); if (hlock->read) lock_time_inc(&stats->read_holdtime, holdtime); else lock_time_inc(&stats->write_holdtime, holdtime); put_lock_stats(stats); } #else static inline void lock_release_holdtime(struct held_lock *hlock) { } #endif /* * We keep a global list of all lock classes. The list only grows, * never shrinks. The list is only accessed with the lockdep * spinlock lock held. */ LIST_HEAD(all_lock_classes); /* * The lockdep classes are in a hash-table as well, for fast lookup: */ #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS) #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS) #define classhashentry(key) (classhash_table + __classhashfn((key))) static struct list_head classhash_table[CLASSHASH_SIZE]; /* * We put the lock dependency chains into a hash-table as well, to cache * their existence: */ #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1) #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS) #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS) #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain))) static struct list_head chainhash_table[CHAINHASH_SIZE]; /* * The hash key of the lock dependency chains is a hash itself too: * it's a hash of all locks taken up to that lock, including that lock. * It's a 64-bit hash, because it's important for the keys to be * unique. 
*/ #define iterate_chain_key(key1, key2) \ (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \ ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \ (key2)) void lockdep_off(void) { current->lockdep_recursion++; } EXPORT_SYMBOL(lockdep_off); void lockdep_on(void) { current->lockdep_recursion--; } EXPORT_SYMBOL(lockdep_on); /* * Debugging switches: */ #define VERBOSE 0 #define VERY_VERBOSE 0 #if VERBOSE # define HARDIRQ_VERBOSE 1 # define SOFTIRQ_VERBOSE 1 # define RECLAIM_VERBOSE 1 #else # define HARDIRQ_VERBOSE 0 # define SOFTIRQ_VERBOSE 0 # define RECLAIM_VERBOSE 0 #endif #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE /* * Quick filtering for interesting events: */ static int class_filter(struct lock_class *class) { #if 0 /* Example */ if (class->name_version == 1 && !strcmp(class->name, "lockname")) return 1; if (class->name_version == 1 && !strcmp(class->name, "&struct->lockfield")) return 1; #endif /* Filter everything else. 1 would be to allow everything else */ return 0; } #endif static int verbose(struct lock_class *class) { #if VERBOSE return class_filter(class); #endif return 0; } /* * Stack-trace: tightly packed array of stack backtrace * addresses. Protected by the graph_lock. */ unsigned long nr_stack_trace_entries; static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; static void print_lockdep_off(const char *bug_msg) { printk(KERN_DEBUG "%s\n", bug_msg); printk(KERN_DEBUG "turning off the locking correctness validator.\n"); #ifdef CONFIG_LOCK_STAT printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n"); #endif } static int save_trace(struct stack_trace *trace) { trace->nr_entries = 0; trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; trace->entries = stack_trace + nr_stack_trace_entries; trace->skip = 3; save_stack_trace(trace); /* * Some daft arches put -1 at the end to indicate its a full trace. 
*
 * <rant> this is buggy anyway, since it takes a whole extra entry so a
 * complete trace that maxes out the entries provided will be reported
 * as incomplete, friggin useless </rant>
 */
	/*
	 * Strip the trailing ULONG_MAX end-marker that the stack tracer
	 * leaves behind, so it doesn't burn an entry in the shared pool:
	 */
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return 0;
	}

	return 1;
}

/* Global lockdep bookkeeping counters (exposed via /proc/lockdep_stats): */
unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Not even early_printk()
 * might work. So we mark any initialization errors and printk
 * about it later on, in lockdep_info().
 */
static int lockdep_init_error;
static const char *lock_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
	.entries = lockdep_init_trace_data,
};

/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};

/* Resolve a class-key address to a human-readable symbol name via kallsyms. */
const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

/*
 * One character summarizing a single usage state of a class:
 *   '.'  neither @bit nor @bit+2 set
 *   '+'  only @bit+2 set
 *   '-'  only @bit set
 *   '?'  both set
 */
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}

/*
 * Fill @usage with one character pair (write/read) per lockdep state,
 * NUL-terminated. Used by the "{....}" part of lock printouts.
 */
void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE)						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

/* Print just the class name (plus #version and /subclass qualifiers). */
static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		/* anonymous class: fall back to the key's symbol name */
		name = __get_key_name(class->key, str);
		printk("%s", name);
	} else {
		printk("%s", name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		if (class->subclass)
			printk("/%d", class->subclass);
	}
}

/* Print " (name){usage-chars}" for a class. */
static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(" (");
	__print_lock_name(class);
	printk("){%s}", usage);
}

/* Print a lockdep_map's name (or its key's symbol name when anonymous). */
static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk("%s", name);
}

/* Print one held lock: its class name plus the acquisition IP. */
static void print_lock(struct held_lock *hlock)
{
	print_lock_name(hlock_class(hlock));
	printk(", at: ");
	print_ip_sym(hlock->acquire_ip);
}

/* Dump all locks currently held by @curr (the held_locks stack). */
static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
		return;
	}
	printk("%d lock%s held by %s/%d:\n", depth, depth > 1 ?
"s" : "", curr->comm, task_pid_nr(curr));

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

/* Print kernel release/version and taint state for bug-report headers. */
static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
#ifdef __KERNEL__
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
#endif

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		/* same key at a different subclass: reuse its version */
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * If the architecture calls into lockdep before initializing
	 * the hashes then we'll warn about it later. (we cannot printk
	 * right now)
	 */
	if (unlikely(!lockdep_initialized)) {
		lockdep_init();
		lockdep_init_error = 1;
		lock_init_error = lock->name;
		save_stack_trace(&lockdep_init_trace);
	}
#endif

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself:
	 */
	if (unlikely(!lock->key))
		lock->key = (void *)lock;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We can walk the hash lockfree, because the hash only
	 * grows, and we are careful when adding entries to the end:
	 */
	list_for_each_entry(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return NULL;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;
	unsigned long flags;

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		goto out_set_class_cache;

	/*
	 * Debug-check: all keys must be persistent!
	 */
	if (!static_obj(lock->key)) {
		debug_locks_off();
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	raw_local_irq_save(flags);
	if (!graph_lock()) {
		raw_local_irq_restore(flags);
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	list_for_each_entry(class, hash_head, hash_entry)
		if (class->key == key)
			goto out_unlock_set;
	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
		raw_local_irq_restore(flags);

		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
		dump_stack();
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	list_add_tail_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		/* drop the graph lock around the printk+dump_stack */
		graph_unlock();
		raw_local_irq_restore(flags);

		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();

		raw_local_irq_save(flags);
		if (!graph_lock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();
	raw_local_irq_restore(flags);

out_set_class_cache:
	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	/*
	 * Hash collision, did we smoke some? We found a class with a matching
	 * hash but the subclass -- which is hashed in -- didn't match.
	 */
	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
		dump_stack();
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
			    struct list_head *head, unsigned long ip,
			    int distance, struct stack_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Since we never remove from the dependency list, the list can
	 * be walked lockless by other CPUs, it's only allocation
	 * that must be protected by the spinlock. But this also means
	 * we must make new entries visible only once writes to the
	 * entry become visible - hence the RCU op:
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * For good efficiency of modular, we use power of 2
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers is used to implement the
 * breadth-first search(BFS)algorithem, by which we can build
 * the shortest path from the next lock to be acquired to the
 * previous held lock if there is a circular between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int front, rear;
};

/* Single shared BFS queue - callers serialize via the graph lock. */
static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

/* Bumped on every BFS; lets lock_accessed() work without clearing marks. */
static unsigned int lockdep_dependency_gen_id;

/* Reset the queue and invalidate all previous "accessed" marks. */
static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

/* Returns -1 when the queue is full, 0 on success. */
static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

/* Returns -1 when the queue is empty, 0 on success. */
static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}

/*
 * Mark @lock visited in the current BFS generation and record its BFS
 * parent so the shortest path can be reconstructed afterwards.
 */
static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

/* Was @lock already visited in the current BFS generation? */
static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

/* Number of BFS-parent hops from @child back to the search root. */
static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

/*
 * Breadth-first search of the dependency graph from @source_entry,
 * following locks_after (@forward) or locks_before (!@forward) edges.
 *
 * Returns:
 *   0  - a node matching @match was found; *@target_entry points to it
 *   1  - no match found
 *  -1  - the BFS queue overflowed
 *  -2  - an entry without a class was encountered (corrupted graph)
 */
static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		list_for_each_entry(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					ret = -1;
					goto exit;
				}
				/* track the high-water mark for statistics */
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}

static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);

}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);

}

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}

/* Print the "Possible unsafe locking scenario" two-CPU ASCII diagram. */
static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (parent != source) {
		printk("Chain exists of:\n ");
		__print_lock_name(source);
		printk(" --> ");
		__print_lock_name(parent);
		printk(" --> ");
		__print_lock_name(target);
		printk("\n\n");
	}

	printk(" Possible unsafe locking scenario:\n\n");
	printk(" CPU0 CPU1\n");
	printk(" ---- ----\n");
	printk(" lock(");
	__print_lock_name(target);
	printk(");\n");
	printk(" lock(");
	__print_lock_name(parent);
	printk(");\n");
	printk(" lock(");
	__print_lock_name(target);
	printk(");\n");
	printk(" lock(");
	__print_lock_name(source);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			struct held_lock *check_src,
			struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return 0;

	printk("\n");
	printk("======================================================\n");
	printk("[ INFO: possible circular locking dependency detected ]\n");
	print_kernel_ident();
	printk("-------------------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);
	printk("\nbut task is already holding lock:\n");
	print_lock(check_tgt);
	printk("\nwhich lock already depends on the new lock.\n\n");
	printk("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

/* BFS match callback: does this graph node belong to class @data? */
static inline int class_equal(struct lock_list *entry, void *data)
{
	return entry->class == data;
}

/*
 * Full circular-dependency report: header, the chain found by the BFS
 * (walked via the recorded ->parent pointers), scenario diagram, held
 * locks and a stack backtrace. Always returns 0 (debug failure path).
 */
static noinline int print_circular_bug(struct lock_list *this,
				struct lock_list *target,
				struct held_lock *check_src,
				struct held_lock *check_tgt)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	struct lock_list *first_parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);
	first_parent = parent;

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	print_circular_lock_scenario(check_src, check_tgt,
				     first_parent);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static noinline int print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return 0;

	/*
	 * Breadth-first-search failed, graph got corrupted?
	 */
	WARN(1, "lockdep bfs error:%d\n", ret);

	return 0;
}

/* BFS callback: count every visited node, never "match" (always 0). */
static int noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

/* Count forward dependencies of @class under the graph lock. */
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_forward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

/* Count backward dependencies of @class under the graph lock. */
unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_backward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

/*
 * Prove that the dependency graph starting at <entry> can not
 * lead to <target>. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_cyclic_checks);

	/* 0 == cycle found (*target_entry set), 1 == none, <0 == BFS error */
	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */

/* BFS match callback: @bit is the usage bit smuggled through the void *. */
static inline int usage_match(struct lock_list *entry, void *bit)
{
	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
}

/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_forwards_checks);

	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_backwards_checks);

	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

/*
 * Print one class of a dependency chain, including every usage state it
 * was observed in (with the stack trace of where that usage happened).
 */
static void print_lock_class_header(struct lock_class *class, int depth)
{
	int bit;

	printk("%*s->", depth, "");
	print_lock_name(class);
	printk(" ops: %lu", class->ops);
	printk(" {\n");

	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
		if (class->usage_mask & (1 << bit)) {
			int len = depth;

			len += printk("%*s %s", depth, "", usage_str[bit]);
			len += printk(" at:\n");
			print_stack_trace(class->usage_traces + bit, len);
		}
	}
	printk("%*s }\n", depth, "");

	printk("%*s ... key at: ",depth,"");
	print_ip_sym((unsigned long)class->key);
}

/*
 * printk the shortest lock dependencies from @start to @end in reverse order:
 */
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
				struct lock_list *root)
{
	struct lock_list *entry = leaf;
	int depth;

	/*compute depth from generated tree by BFS*/
	depth = get_lock_depth(leaf);

	do {
		print_lock_class_header(entry->class, depth);
		printk("%*s ... acquired at:\n", depth, "");
		print_stack_trace(&entry->trace, 2);
		printk("\n");

		/* ran out of parents before reaching @root: corrupt chain */
		if (depth == 0 && (entry != root)) {
			printk("lockdep:%s bad path found in chain graph\n", __func__);
			break;
		}

		entry = get_lock_parent(entry);
		depth--;
	} while (entry && (depth >= 0));

	return;
}

/* Print the "Possible interrupt unsafe locking scenario" diagram. */
static void
print_irq_lock_scenario(struct lock_list *safe_entry,
			struct lock_list *unsafe_entry,
			struct lock_class *prev_class,
			struct lock_class *next_class)
{
	struct lock_class *safe_class = safe_entry->class;
	struct lock_class *unsafe_class = unsafe_entry->class;
	struct lock_class *middle_class = prev_class;

	if (middle_class == safe_class)
		middle_class = next_class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
*/ if (middle_class != unsafe_class) { printk("Chain exists of:\n "); __print_lock_name(safe_class); printk(" --> "); __print_lock_name(middle_class); printk(" --> "); __print_lock_name(unsafe_class); printk("\n\n"); } printk(" Possible interrupt unsafe locking scenario:\n\n"); printk(" CPU0 CPU1\n"); printk(" ---- ----\n"); printk(" lock("); __print_lock_name(unsafe_class); printk(");\n"); printk(" local_irq_disable();\n"); printk(" lock("); __print_lock_name(safe_class); printk(");\n"); printk(" lock("); __print_lock_name(middle_class); printk(");\n"); printk(" <Interrupt>\n"); printk(" lock("); __print_lock_name(safe_class); printk(");\n"); printk("\n *** DEADLOCK ***\n\n"); } static int print_bad_irq_dependency(struct task_struct *curr, struct lock_list *prev_root, struct lock_list *next_root, struct lock_list *backwards_entry, struct lock_list *forwards_entry, struct held_lock *prev, struct held_lock *next, enum lock_usage_bit bit1, enum lock_usage_bit bit2, const char *irqclass) { if (!debug_locks_off_graph_unlock() || debug_locks_silent) return 0; printk("\n"); printk("======================================================\n"); printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n", irqclass, irqclass); print_kernel_ident(); printk("------------------------------------------------------\n"); printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", curr->comm, task_pid_nr(curr), curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, curr->hardirqs_enabled, curr->softirqs_enabled); print_lock(next); printk("\nand this task is already holding:\n"); print_lock(prev); printk("which would create a new lock dependency:\n"); print_lock_name(hlock_class(prev)); printk(" ->"); print_lock_name(hlock_class(next)); printk("\n"); printk("\nbut this new dependency connects a %s-irq-safe lock:\n", irqclass); print_lock_name(backwards_entry->class); printk("\n... 
which became %s-irq-safe at:\n", irqclass); print_stack_trace(backwards_entry->class->usage_traces + bit1, 1); printk("\nto a %s-irq-unsafe lock:\n", irqclass); print_lock_name(forwards_entry->class); printk("\n... which became %s-irq-unsafe at:\n", irqclass); printk("..."); print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); printk("\nother info that might help us debug this:\n\n"); print_irq_lock_scenario(backwards_entry, forwards_entry, hlock_class(prev), hlock_class(next)); lockdep_print_held_locks(curr); printk("\nthe dependencies between %s-irq-safe lock", irqclass); printk(" and the holding lock:\n"); if (!save_trace(&prev_root->trace)) return 0; print_shortest_lock_dependencies(backwards_entry, prev_root); printk("\nthe dependencies between the lock to be acquired"); printk(" and %s-irq-unsafe lock:\n", irqclass); if (!save_trace(&next_root->trace)) return 0; print_shortest_lock_dependencies(forwards_entry, next_root); printk("\nstack backtrace:\n"); dump_stack(); return 0; } static int check_usage(struct task_struct *curr, struct held_lock *prev, struct held_lock *next, enum lock_usage_bit bit_backwards, enum lock_usage_bit bit_forwards, const char *irqclass) { int ret; struct lock_list this, that; struct lock_list *uninitialized_var(target_entry); struct lock_list *uninitialized_var(target_entry1); this.parent = NULL; this.class = hlock_class(prev); ret = find_usage_backwards(&this, bit_backwards, &target_entry); if (ret < 0) return print_bfs_bug(ret); if (ret == 1) return ret; that.parent = NULL; that.class = hlock_class(next); ret = find_usage_forwards(&that, bit_forwards, &target_entry1); if (ret < 0) return print_bfs_bug(ret); if (ret == 1) return ret; return print_bad_irq_dependency(curr, &this, &that, target_entry, target_entry1, prev, next, bit_backwards, bit_forwards, irqclass); } static const char *state_names[] = { #define LOCKDEP_STATE(__STATE) \ __stringify(__STATE), #include "lockdep_states.h" #undef LOCKDEP_STATE }; static 
const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

/*
 * Human-readable name for a usage bit; odd bits are the read
 * variants and get the "-READ" suffixed table.
 */
static inline const char *state_name(enum lock_usage_bit bit)
{
	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
}

/*
 * Return the usage bit that conflicts with @new_bit: same state,
 * opposite direction (USED_IN <-> ENABLED), read bit stripped.
 */
static int exclusive_bit(int new_bit)
{
	/*
	 * USED_IN
	 * USED_IN_READ
	 * ENABLED
	 * ENABLED_READ
	 *
	 * bit 0 - write/read
	 * bit 1 - used_in/enabled
	 * bit 2+ state
	 */

	int state = new_bit & ~3;
	int dir = new_bit & 2;

	/*
	 * keep state, bit flip the direction and strip read.
	 */
	return state | (dir ^ 2);
}

/*
 * Verify that adding the prev -> next dependency does not connect an
 * irq-safe lock (for the state in @bit) to an irq-unsafe one, for both
 * the write and the read variant of @bit. Returns 0 on violation.
 */
static int
check_irq_usage(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next, enum lock_usage_bit bit)
{
	/*
	 * Prove that the new dependency does not connect a hardirq-safe
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	bit++; /* _READ */

	/*
	 * Prove that the new dependency does not connect a hardirq-safe-read
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	return 1;
}

/*
 * Run check_irq_usage() for every lockdep state (hardirq, softirq, ...);
 * the state list is expanded from lockdep_states.h.
 */
static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
#define LOCKDEP_STATE(__STATE)						\
	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
		return 0;
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	return 1;
}

/* Account a new dependency chain to the current irq context's counter. */
static void inc_chains(void)
{
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
}

#else

/* !TRACE_IRQFLAGS || !PROVE_LOCKING: no irq-state checking to do. */
static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
	return 1;
}

static inline void inc_chains(void)
{
	nr_process_chains++;
}

#endif

/*
 * Print the single-CPU sequence that would self-deadlock: taking
 * <prv> and then <nxt>, both of the same lock class.
 */
static void
print_deadlock_scenario(struct held_lock *nxt,
			struct held_lock *prv)
{
	struct lock_class *next = hlock_class(nxt);
	struct lock_class *prev = hlock_class(prv);

	printk(" Possible unsafe locking scenario:\n\n");
	printk(" CPU0\n");
	printk(" ----\n");
	printk(" lock(");
	__print_lock_name(prev);
	printk(");\n");
	printk(" lock(");
	__print_lock_name(next);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
	printk(" May be due to missing lock nesting notation\n\n");
}

/*
 * Report a possible recursive-locking deadlock: @curr already holds
 * @prev and is trying to acquire @next of the same class. Always
 * returns 0 so the caller can propagate the failure.
 */
static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
		   struct held_lock *next)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n");
	printk("=============================================\n");
	printk("[ INFO: possible recursive locking detected ]\n");
	print_kernel_ident();
	printk("---------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(next);
	printk("\nbut task is already holding lock:\n");
	print_lock(prev);
	printk("\nother info that might help us debug this:\n");
	print_deadlock_scenario(next, prev);
	lockdep_print_held_locks(curr);
	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	struct held_lock *nest = NULL;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		/* remember whether the declared nest_lock is actually held */
		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e.
read_lock(lock)+read_lock(lock)): */ if ((read == 2) && prev->read) return 2; /* * We're holding the nest_lock, which serializes this lock's * nesting behaviour. */ if (nest) return 2; return print_deadlock_bug(curr, prev, next); } return 1; } /* * There was a chain-cache miss, and we are about to add a new dependency * to a previous lock. We recursively validate the following rules: * * - would the adding of the <prev> -> <next> dependency create a * circular dependency in the graph? [== circular deadlock] * * - does the new prev->next dependency connect any hardirq-safe lock * (in the full backwards-subgraph starting at <prev>) with any * hardirq-unsafe lock (in the full forwards-subgraph starting at * <next>)? [== illegal lock inversion with hardirq contexts] * * - does the new prev->next dependency connect any softirq-safe lock * (in the full backwards-subgraph starting at <prev>) with any * softirq-unsafe lock (in the full forwards-subgraph starting at * <next>)? [== illegal lock inversion with softirq contexts] * * any of these scenarios could lead to a deadlock. * * Then if all the validations pass, we add the forwards and backwards * dependency. */ static int check_prev_add(struct task_struct *curr, struct held_lock *prev, struct held_lock *next, int distance, int trylock_loop) { struct lock_list *entry; int ret; struct lock_list this; struct lock_list *uninitialized_var(target_entry); /* * Static variable, serialized by the graph_lock(). * * We use this static variable to save the stack trace in case * we call into this function multiple times due to encountering * trylocks in the held lock stack. */ static struct stack_trace trace; /* * Prove that the new <prev> -> <next> dependency would not * create a circular dependency in the graph. (We do this by * forward-recursing into the graph starting at <next>, and * checking whether we can reach <prev>.) 
* * We are using global variables to control the recursion, to * keep the stackframe size of the recursive functions low: */ this.class = hlock_class(next); this.parent = NULL; ret = check_noncircular(&this, hlock_class(prev), &target_entry); if (unlikely(!ret)) return print_circular_bug(&this, target_entry, next, prev); else if (unlikely(ret < 0)) return print_bfs_bug(ret); if (!check_prev_add_irq(curr, prev, next)) return 0; /* * For recursive read-locks we do all the dependency checks, * but we dont store read-triggered dependencies (only * write-triggered dependencies). This ensures that only the * write-side dependencies matter, and that if for example a * write-lock never takes any other locks, then the reads are * equivalent to a NOP. */ if (next->read == 2 || prev->read == 2) return 1; /* * Is the <prev> -> <next> dependency already present? * * (this may occur even though this is a new chain: consider * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 * chains - the second one will be new, but L1 already has * L2 added to its dependency list, due to the first chain.) 
*/ list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { if (entry->class == hlock_class(next)) { if (distance == 1) entry->distance = 1; return 2; } } if (!trylock_loop && !save_trace(&trace)) return 0; /* * Ok, all validations passed, add the new lock * to the previous lock's dependency list: */ ret = add_lock_to_list(hlock_class(prev), hlock_class(next), &hlock_class(prev)->locks_after, next->acquire_ip, distance, &trace); if (!ret) return 0; ret = add_lock_to_list(hlock_class(next), hlock_class(prev), &hlock_class(next)->locks_before, next->acquire_ip, distance, &trace); if (!ret) return 0; /* * Debugging printouts: */ if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { graph_unlock(); printk("\n new dependency: "); print_lock_name(hlock_class(prev)); printk(" => "); print_lock_name(hlock_class(next)); printk("\n"); dump_stack(); return graph_lock(); } return 1; } /* * Add the dependency to all directly-previous locks that are 'relevant'. * The ones that are relevant are (in increasing distance from curr): * all consecutive trylock entries and the final non-trylock entry - or * the end of this context's lock-chain - whichever comes first. */ static int check_prevs_add(struct task_struct *curr, struct held_lock *next) { int depth = curr->lockdep_depth; int trylock_loop = 0; struct held_lock *hlock; /* * Debugging checks. 
* * Depth must not be zero for a non-head lock: */ if (!depth) goto out_bug; /* * At least two relevant locks must exist for this * to be a head: */ if (curr->held_locks[depth].irq_context != curr->held_locks[depth-1].irq_context) goto out_bug; for (;;) { int distance = curr->lockdep_depth - depth + 1; hlock = curr->held_locks + depth - 1; /* * Only non-recursive-read entries get new dependencies * added: */ if (hlock->read != 2 && hlock->check) { if (!check_prev_add(curr, hlock, next, distance, trylock_loop)) return 0; /* * Stop after the first non-trylock entry, * as non-trylock entries have added their * own direct dependencies already, so this * lock is connected to them indirectly: */ if (!hlock->trylock) break; } depth--; /* * End of lock-stack? */ if (!depth) break; /* * Stop the search if we cross into another context: */ if (curr->held_locks[depth].irq_context != curr->held_locks[depth-1].irq_context) break; trylock_loop = 1; } return 1; out_bug: if (!debug_locks_off_graph_unlock()) return 0; /* * Clearly we all shouldn't be here, but since we made it we * can reliable say we messed up our state. See the above two * gotos for reasons why we could possibly end up here. */ WARN_ON(1); return 0; } unsigned long nr_lock_chains; struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; int nr_chain_hlocks; static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) { return lock_classes + chain_hlocks[chain->base + i]; } /* * Look up a dependency chain. If the key is not present yet then * add it and return 1 - in this case the new dependency chain is * validated. If the key is already hashed, return 0. * (On return with 1 graph_lock is held.) 
*/ static inline int lookup_chain_cache(struct task_struct *curr, struct held_lock *hlock, u64 chain_key) { struct lock_class *class = hlock_class(hlock); struct list_head *hash_head = chainhashentry(chain_key); struct lock_chain *chain; struct held_lock *hlock_curr; int i, j; /* * We might need to take the graph lock, ensure we've got IRQs * disabled to make this an IRQ-safe lock.. for recursion reasons * lockdep won't complain about its own locking errors. */ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return 0; /* * We can walk it lock-free, because entries only get added * to the hash: */ list_for_each_entry(chain, hash_head, entry) { if (chain->chain_key == chain_key) { cache_hit: debug_atomic_inc(chain_lookup_hits); if (very_verbose(class)) printk("\nhash chain already cached, key: " "%016Lx tail class: [%p] %s\n", (unsigned long long)chain_key, class->key, class->name); return 0; } } if (very_verbose(class)) printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n", (unsigned long long)chain_key, class->key, class->name); /* * Allocate a new chain entry from the static array, and add * it to the hash: */ if (!graph_lock()) return 0; /* * We have to walk the chain again locked - to avoid duplicates: */ list_for_each_entry(chain, hash_head, entry) { if (chain->chain_key == chain_key) { graph_unlock(); goto cache_hit; } } if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { if (!debug_locks_off_graph_unlock()) return 0; print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); dump_stack(); return 0; } chain = lock_chains + nr_lock_chains++; chain->chain_key = chain_key; chain->irq_context = hlock->irq_context; /* Find the first held_lock of current chain */ for (i = curr->lockdep_depth - 1; i >= 0; i--) { hlock_curr = curr->held_locks + i; if (hlock_curr->irq_context != hlock->irq_context) break; } i++; chain->depth = curr->lockdep_depth + 1 - i; if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { chain->base = nr_chain_hlocks; 
nr_chain_hlocks += chain->depth; for (j = 0; j < chain->depth - 1; j++, i++) { int lock_id = curr->held_locks[i].class_idx - 1; chain_hlocks[chain->base + j] = lock_id; } chain_hlocks[chain->base + j] = class - lock_classes; } list_add_tail_rcu(&chain->entry, hash_head); debug_atomic_inc(chain_lookup_misses); inc_chains(); return 1; } static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, struct held_lock *hlock, int chain_head, u64 chain_key) { /* * Trylock needs to maintain the stack of held locks, but it * does not add new dependencies, because trylock can be done * in any order. * * We look up the chain_key and do the O(N^2) check and update of * the dependencies only if this is a new dependency chain. * (If lookup_chain_cache() returns with 1 it acquires * graph_lock for us) */ if (!hlock->trylock && hlock->check && lookup_chain_cache(curr, hlock, chain_key)) { /* * Check whether last held lock: * * - is irq-safe, if this lock is irq-unsafe * - is softirq-safe, if this lock is hardirq-unsafe * * And check whether the new lock's dependency graph * could lead back to the previous lock. * * any of these scenarios could lead to a deadlock. 
If * All validations */ int ret = check_deadlock(curr, hlock, lock, hlock->read); if (!ret) return 0; /* * Mark recursive read, as we jump over it when * building dependencies (just like we jump over * trylock entries): */ if (ret == 2) hlock->read = 2; /* * Add dependency only if this lock is not the head * of the chain, and if it's not a secondary read-lock: */ if (!chain_head && ret != 2) if (!check_prevs_add(curr, hlock)) return 0; graph_unlock(); } else /* after lookup_chain_cache(): */ if (unlikely(!debug_locks)) return 0; return 1; } #else static inline int validate_chain(struct task_struct *curr, struct lockdep_map *lock, struct held_lock *hlock, int chain_head, u64 chain_key) { return 1; } #endif /* * We are building curr_chain_key incrementally, so double-check * it from scratch, to make sure that it's done correctly: */ static void check_chain_key(struct task_struct *curr) { #ifdef CONFIG_DEBUG_LOCKDEP struct held_lock *hlock, *prev_hlock = NULL; unsigned int i, id; u64 chain_key = 0; for (i = 0; i < curr->lockdep_depth; i++) { hlock = curr->held_locks + i; if (chain_key != hlock->prev_chain_key) { debug_locks_off(); /* * We got mighty confused, our chain keys don't match * with what we expect, someone trample on our task state? */ WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", curr->lockdep_depth, i, (unsigned long long)chain_key, (unsigned long long)hlock->prev_chain_key); return; } id = hlock->class_idx - 1; /* * Whoops ran out of static storage again? */ if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) return; if (prev_hlock && (prev_hlock->irq_context != hlock->irq_context)) chain_key = 0; chain_key = iterate_chain_key(chain_key, id); prev_hlock = hlock; } if (chain_key != curr->curr_chain_key) { debug_locks_off(); /* * More smoking hash instead of calculating it, damn see these * numbers float.. I bet that a pink elephant stepped on my memory. 
*/
		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
			curr->lockdep_depth, i,
			(unsigned long long)chain_key,
			(unsigned long long)curr->curr_chain_key);
	}
#endif
}

/*
 * Print the single-CPU interrupt scenario that makes @lock's usage
 * inconsistent: taking it once, then again from an interrupt.
 */
static void
print_usage_bug_scenario(struct held_lock *lock)
{
	struct lock_class *class = hlock_class(lock);

	printk(" Possible unsafe locking scenario:\n\n");
	printk(" CPU0\n");
	printk(" ----\n");
	printk(" lock(");
	__print_lock_name(class);
	printk(");\n");
	printk(" <Interrupt>\n");
	printk(" lock(");
	__print_lock_name(class);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

/*
 * Report an inconsistent {prev_bit} -> {new_bit} usage-state
 * transition on @this. Always returns 0 for caller propagation.
 */
static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n");
	printk("=================================\n");
	printk("[ INFO: inconsistent lock state ]\n");
	print_kernel_ident();
	printk("---------------------------------\n");

	printk("inconsistent {%s} -> {%s} usage.\n",
		usage_str[prev_bit], usage_str[new_bit]);

	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
		curr->comm, task_pid_nr(curr),
		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
		trace_hardirqs_enabled(curr),
		trace_softirqs_enabled(curr));
	print_lock(this);

	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);

	print_irqtrace_events(curr);
	printk("\nother info that might help us debug this:\n");
	print_usage_bug_scenario(this);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Print out an error if an invalid bit is set:
 */
static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
		return print_usage_bug(curr, this, bad_bit, new_bit);
	return 1;
}

static int mark_lock(struct
task_struct *curr, struct held_lock *this, enum lock_usage_bit new_bit); #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) /* * print irq inversion bug: */ static int print_irq_inversion_bug(struct task_struct *curr, struct lock_list *root, struct lock_list *other, struct held_lock *this, int forwards, const char *irqclass) { struct lock_list *entry = other; struct lock_list *middle = NULL; int depth; if (!debug_locks_off_graph_unlock() || debug_locks_silent) return 0; printk("\n"); printk("=========================================================\n"); printk("[ INFO: possible irq lock inversion dependency detected ]\n"); print_kernel_ident(); printk("---------------------------------------------------------\n"); printk("%s/%d just changed the state of lock:\n", curr->comm, task_pid_nr(curr)); print_lock(this); if (forwards) printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); else printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); print_lock_name(other->class); printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); printk("\nother info that might help us debug this:\n"); /* Find a middle lock (if one exists) */ depth = get_lock_depth(other); do { if (depth == 0 && (entry != root)) { printk("lockdep:%s bad path found in chain graph\n", __func__); break; } middle = entry; entry = get_lock_parent(entry); depth--; } while (entry && entry != root && (depth >= 0)); if (forwards) print_irq_lock_scenario(root, other, middle ? middle->class : root->class, other->class); else print_irq_lock_scenario(other, root, middle ? 
middle->class : other->class, root->class); lockdep_print_held_locks(curr); printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); if (!save_trace(&root->trace)) return 0; print_shortest_lock_dependencies(other, root); printk("\nstack backtrace:\n"); dump_stack(); return 0; } /* * Prove that in the forwards-direction subgraph starting at <this> * there is no lock matching <mask>: */ static int check_usage_forwards(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit bit, const char *irqclass) { int ret; struct lock_list root; struct lock_list *uninitialized_var(target_entry); root.parent = NULL; root.class = hlock_class(this); ret = find_usage_forwards(&root, bit, &target_entry); if (ret < 0) return print_bfs_bug(ret); if (ret == 1) return ret; return print_irq_inversion_bug(curr, &root, target_entry, this, 1, irqclass); } /* * Prove that in the backwards-direction subgraph starting at <this> * there is no lock matching <mask>: */ static int check_usage_backwards(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit bit, const char *irqclass) { int ret; struct lock_list root; struct lock_list *uninitialized_var(target_entry); root.parent = NULL; root.class = hlock_class(this); ret = find_usage_backwards(&root, bit, &target_entry); if (ret < 0) return print_bfs_bug(ret); if (ret == 1) return ret; return print_irq_inversion_bug(curr, &root, target_entry, this, 0, irqclass); } void print_irqtrace_events(struct task_struct *curr) { printk("irq event stamp: %u\n", curr->irq_events); printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event); print_ip_sym(curr->hardirq_enable_ip); printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event); print_ip_sym(curr->hardirq_disable_ip); printk("softirqs last enabled at (%u): ", curr->softirq_enable_event); print_ip_sym(curr->softirq_enable_ip); printk("softirqs last disabled at (%u): ", curr->softirq_disable_event); print_ip_sym(curr->softirq_disable_ip); 
}

/* Per-state verbosity gates, compile-time selected via *_VERBOSE. */
static int HARDIRQ_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int SOFTIRQ_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int RECLAIM_FS_verbose(struct lock_class *class)
{
#if RECLAIM_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#define STRICT_READ_CHECKS	1

/* Table of the *_verbose() gates above, indexed by state. */
static int (*state_verbose_f[])(struct lock_class *class) = {
#define LOCKDEP_STATE(__STATE) \
	__STATE##_verbose,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline int state_verbose(enum lock_usage_bit bit,
				struct lock_class *class)
{
	return state_verbose_f[bit >> 2](class);
}

typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
			     enum lock_usage_bit bit, const char *name);

/*
 * Mark @this with irq-usage bit @new_bit and validate that neither the
 * lock itself nor its dependency graph now has a conflicting state.
 * Returns 0 on violation, 2 if verbose output is wanted, 1 otherwise.
 */
static int
mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	int excl_bit = exclusive_bit(new_bit);
	int read = new_bit & 1;
	int dir = new_bit & 2;

	/*
	 * mark USED_IN has to look forwards -- to ensure no dependency
	 * has ENABLED state, which would allow recursion deadlocks.
	 *
	 * mark ENABLED has to look backwards -- to ensure no dependee
	 * has USED_IN state, which, again, would allow recursion deadlocks.
	 */
	check_usage_f usage = dir ?
		check_usage_backwards : check_usage_forwards;

	/*
	 * Validate that this particular lock does not have conflicting
	 * usage states.
	 */
	if (!valid_state(curr, this, new_bit, excl_bit))
		return 0;

	/*
	 * Validate that the lock dependencies don't have conflicting usage
	 * states.
*/ if ((!read || !dir || STRICT_READ_CHECKS) && !usage(curr, this, excl_bit, state_name(new_bit & ~1))) return 0; /* * Check for read in write conflicts */ if (!read) { if (!valid_state(curr, this, new_bit, excl_bit + 1)) return 0; if (STRICT_READ_CHECKS && !usage(curr, this, excl_bit + 1, state_name(new_bit + 1))) return 0; } if (state_verbose(new_bit, hlock_class(this))) return 2; return 1; } enum mark_type { #define LOCKDEP_STATE(__STATE) __STATE, #include "lockdep_states.h" #undef LOCKDEP_STATE }; /* * Mark all held locks with a usage bit: */ static int mark_held_locks(struct task_struct *curr, enum mark_type mark) { enum lock_usage_bit usage_bit; struct held_lock *hlock; int i; for (i = 0; i < curr->lockdep_depth; i++) { hlock = curr->held_locks + i; usage_bit = 2 + (mark << 2); /* ENABLED */ if (hlock->read) usage_bit += 1; /* READ */ BUG_ON(usage_bit >= LOCK_USAGE_STATES); if (!hlock->check) continue; if (!mark_lock(curr, hlock, usage_bit)) return 0; } return 1; } /* * Hardirqs will be enabled: */ static void __trace_hardirqs_on_caller(unsigned long ip) { struct task_struct *curr = current; /* we'll do an OFF -> ON transition: */ curr->hardirqs_enabled = 1; /* * We are going to turn hardirqs on, so set the * usage bit for all held locks: */ if (!mark_held_locks(curr, HARDIRQ)) return; /* * If we have softirqs enabled, then set the usage * bit for all held locks. (disabled hardirqs prevented * this bit from being set before) */ if (curr->softirqs_enabled) if (!mark_held_locks(curr, SOFTIRQ)) return; curr->hardirq_enable_ip = ip; curr->hardirq_enable_event = ++curr->irq_events; debug_atomic_inc(hardirqs_on_events); } __visible void trace_hardirqs_on_caller(unsigned long ip) { time_hardirqs_on(CALLER_ADDR0, ip); if (unlikely(!debug_locks || current->lockdep_recursion)) return; if (unlikely(current->hardirqs_enabled)) { /* * Neither irq nor preemption are disabled here * so this is racy by nature but losing one hit * in a stat is not a big deal. 
*/
		__debug_atomic_inc(redundant_hardirqs_on);
		return;
	}

	/*
	 * We're enabling irqs and according to our state above irqs weren't
	 * already enabled, yet we find the hardware thinks they are in fact
	 * enabled.. someone messed up their IRQ state tracing.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	/*
	 * See the fine text that goes along with this variable definition.
	 */
	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
		return;

	/*
	 * Can't allow enabling interrupts while in an interrupt handler,
	 * that's general bad form and such. Recursion, limited stack etc..
	 */
	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
		return;

	/* lockdep_recursion keeps lockdep out of its own machinery here. */
	current->lockdep_recursion = 1;
	__trace_hardirqs_on_caller(ip);
	current->lockdep_recursion = 0;
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

/* Convenience wrapper: record hardirqs-on at our direct caller. */
void trace_hardirqs_on(void)
{
	trace_hardirqs_on_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);

/*
 * Hardirqs were disabled:
 */
__visible void trace_hardirqs_off_caller(unsigned long ip)
{
	struct task_struct *curr = current;

	time_hardirqs_off(CALLER_ADDR0, ip);

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	/*
	 * So we're supposed to get called after you mask local IRQs, but for
	 * some reason the hardware doesn't quite think you did a proper job.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->hardirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->hardirqs_enabled = 0;
		curr->hardirq_disable_ip = ip;
		curr->hardirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(hardirqs_off_events);
	} else
		debug_atomic_inc(redundant_hardirqs_off);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

/* Convenience wrapper: record hardirqs-off at our direct caller. */
void trace_hardirqs_off(void)
{
	trace_hardirqs_off_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off);

/*
 * Softirqs will be enabled:
 */
void trace_softirqs_on(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	/*
	 * We fancy IRQs being disabled here, see softirq.c, avoids
	 * funny state and nesting things.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		debug_atomic_inc(redundant_softirqs_on);
		return;
	}

	current->lockdep_recursion = 1;
	/*
	 * We'll do an OFF -> ON transition:
	 */
	curr->softirqs_enabled = 1;
	curr->softirq_enable_ip = ip;
	curr->softirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(softirqs_on_events);
	/*
	 * We are going to turn softirqs on, so set the
	 * usage bit for all held locks, if hardirqs are
	 * enabled too:
	 */
	if (curr->hardirqs_enabled)
		mark_held_locks(curr, SOFTIRQ);
	current->lockdep_recursion = 0;
}

/*
 * Softirqs were disabled:
 */
void trace_softirqs_off(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	/*
	 * We fancy IRQs being disabled here, see softirq.c
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->softirqs_enabled = 0;
		curr->softirq_disable_ip = ip;
		curr->softirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(softirqs_off_events);
		/*
		 * Whoops, we wanted softirqs off, so why aren't they?
*/ DEBUG_LOCKS_WARN_ON(!softirq_count()); } else debug_atomic_inc(redundant_softirqs_off); } static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags) { struct task_struct *curr = current; if (unlikely(!debug_locks)) return; /* no reclaim without waiting on it */ if (!(gfp_mask & __GFP_WAIT)) return; /* this guy won't enter reclaim */ if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC)) return; /* We're only interested __GFP_FS allocations for now */ if (!(gfp_mask & __GFP_FS)) return; /* * Oi! Can't be having __GFP_FS allocations with IRQs disabled. */ if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags))) return; mark_held_locks(curr, RECLAIM_FS); } static void check_flags(unsigned long flags); void lockdep_trace_alloc(gfp_t gfp_mask) { unsigned long flags; if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; __lockdep_trace_alloc(gfp_mask, flags); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) { /* * If non-trylock use in a hardirq or softirq context, then * mark the lock as used in these contexts: */ if (!hlock->trylock) { if (hlock->read) { if (curr->hardirq_context) if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ_READ)) return 0; if (curr->softirq_context) if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ_READ)) return 0; } else { if (curr->hardirq_context) if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) return 0; if (curr->softirq_context) if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) return 0; } } if (!hlock->hardirqs_off) { if (hlock->read) { if (!mark_lock(curr, hlock, LOCK_ENABLED_HARDIRQ_READ)) return 0; if (curr->softirqs_enabled) if (!mark_lock(curr, hlock, LOCK_ENABLED_SOFTIRQ_READ)) return 0; } else { if (!mark_lock(curr, hlock, LOCK_ENABLED_HARDIRQ)) return 0; if (curr->softirqs_enabled) if (!mark_lock(curr, hlock, LOCK_ENABLED_SOFTIRQ)) return 0; } 
} /* * We reuse the irq context infrastructure more broadly as a general * context checking code. This tests GFP_FS recursion (a lock taken * during reclaim for a GFP_FS allocation is held over a GFP_FS * allocation). */ if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) { if (hlock->read) { if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ)) return 0; } else { if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS)) return 0; } } return 1; } static int separate_irq_context(struct task_struct *curr, struct held_lock *hlock) { unsigned int depth = curr->lockdep_depth; /* * Keep track of points where we cross into an interrupt context: */ hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + curr->softirq_context; if (depth) { struct held_lock *prev_hlock; prev_hlock = curr->held_locks + depth-1; /* * If we cross into another context, reset the * hash key (this also prevents the checking and the * adding of the dependency to 'prev'): */ if (prev_hlock->irq_context != hlock->irq_context) return 1; } return 0; } #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ static inline int mark_lock_irq(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit new_bit) { WARN_ON(1); /* Impossible innit? 
when we don't have TRACE_IRQFLAG */ return 1; } static inline int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) { return 1; } static inline int separate_irq_context(struct task_struct *curr, struct held_lock *hlock) { return 0; } void lockdep_trace_alloc(gfp_t gfp_mask) { } #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ /* * Mark a lock with a usage bit, and validate the state transition: */ static int mark_lock(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit new_bit) { unsigned int new_mask = 1 << new_bit, ret = 1; /* * If already set then do not dirty the cacheline, * nor do any checks: */ if (likely(hlock_class(this)->usage_mask & new_mask)) return 1; if (!graph_lock()) return 0; /* * Make sure we didn't race: */ if (unlikely(hlock_class(this)->usage_mask & new_mask)) { graph_unlock(); return 1; } hlock_class(this)->usage_mask |= new_mask; if (!save_trace(hlock_class(this)->usage_traces + new_bit)) return 0; switch (new_bit) { #define LOCKDEP_STATE(__STATE) \ case LOCK_USED_IN_##__STATE: \ case LOCK_USED_IN_##__STATE##_READ: \ case LOCK_ENABLED_##__STATE: \ case LOCK_ENABLED_##__STATE##_READ: #include "lockdep_states.h" #undef LOCKDEP_STATE ret = mark_lock_irq(curr, this, new_bit); if (!ret) return 0; break; case LOCK_USED: debug_atomic_dec(nr_unused_locks); break; default: if (!debug_locks_off_graph_unlock()) return 0; WARN_ON(1); return 0; } graph_unlock(); /* * We must printk outside of the graph_lock: */ if (ret == 2) { printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); print_lock(this); print_irqtrace_events(curr); dump_stack(); } return ret; } /* * Initialize a lock instance's lock-class mapping info: */ void lockdep_init_map(struct lockdep_map *lock, const char *name, struct lock_class_key *key, int subclass) { int i; kmemcheck_mark_initialized(lock, sizeof(*lock)); for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) lock->class_cache[i] = NULL; #ifdef CONFIG_LOCK_STAT lock->cpu = 
raw_smp_processor_id(); #endif /* * Can't be having no nameless bastards around this place! */ if (DEBUG_LOCKS_WARN_ON(!name)) { lock->name = "NULL"; return; } lock->name = name; /* * No key, no joy, we need to hash something. */ if (DEBUG_LOCKS_WARN_ON(!key)) return; /* * Sanity check, the lock-class key must be persistent: */ if (!static_obj(key)) { printk("BUG: key %p not in .data!\n", key); /* * What it says above ^^^^^, I suggest you read it. */ DEBUG_LOCKS_WARN_ON(1); return; } lock->key = key; if (unlikely(!debug_locks)) return; if (subclass) register_lock_class(lock, subclass, 1); } EXPORT_SYMBOL_GPL(lockdep_init_map); struct lock_class_key __lockdep_no_validate__; EXPORT_SYMBOL_GPL(__lockdep_no_validate__); static int print_lock_nested_lock_not_held(struct task_struct *curr, struct held_lock *hlock, unsigned long ip) { if (!debug_locks_off()) return 0; if (debug_locks_silent) return 0; printk("\n"); printk("==================================\n"); printk("[ BUG: Nested lock was not taken ]\n"); print_kernel_ident(); printk("----------------------------------\n"); printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); print_lock(hlock); printk("\nbut this task is not holding:\n"); printk("%s\n", hlock->nest_lock->name); printk("\nstack backtrace:\n"); dump_stack(); printk("\nother info that might help us debug this:\n"); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); return 0; } static int __lock_is_held(struct lockdep_map *lock); /* * This gets called for every mutex_lock*()/spin_lock*() operation. 
* We maintain the dependency maps and validate the locking attempt: */ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, int hardirqs_off, struct lockdep_map *nest_lock, unsigned long ip, int references) { struct task_struct *curr = current; struct lock_class *class = NULL; struct held_lock *hlock; unsigned int depth, id; int chain_head = 0; int class_idx; u64 chain_key; if (unlikely(!debug_locks)) return 0; /* * Lockdep should run with IRQs disabled, otherwise we could * get an interrupt which would want to take locks, which would * end up in lockdep and have you got a head-ache already? */ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return 0; if (!prove_locking || lock->key == &__lockdep_no_validate__) check = 0; if (subclass < NR_LOCKDEP_CACHING_CLASSES) class = lock->class_cache[subclass]; /* * Not cached? */ if (unlikely(!class)) { class = register_lock_class(lock, subclass, 0); if (!class) return 0; } atomic_inc((atomic_t *)&class->ops); if (very_verbose(class)) { printk("\nacquire class [%p] %s", class->key, class->name); if (class->name_version > 1) printk("#%d", class->name_version); printk("\n"); dump_stack(); } /* * Add the lock to the list of currently held locks. * (we dont increase the depth just yet, up until the * dependency checks are done) */ depth = curr->lockdep_depth; /* * Ran out of static storage for our per-task lock stack again have we? */ if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) return 0; class_idx = class - lock_classes + 1; if (depth) { hlock = curr->held_locks + depth - 1; if (hlock->class_idx == class_idx && nest_lock) { if (hlock->references) hlock->references++; else hlock->references = 2; return 1; } } hlock = curr->held_locks + depth; /* * Plain impossible, we just registered it and checked it weren't no * NULL like.. I bet this mushroom I ate was good! 
*/ if (DEBUG_LOCKS_WARN_ON(!class)) return 0; hlock->class_idx = class_idx; hlock->acquire_ip = ip; hlock->instance = lock; hlock->nest_lock = nest_lock; hlock->trylock = trylock; hlock->read = read; hlock->check = check; hlock->hardirqs_off = !!hardirqs_off; hlock->references = references; #ifdef CONFIG_LOCK_STAT hlock->waittime_stamp = 0; hlock->holdtime_stamp = lockstat_clock(); #endif if (check && !mark_irqflags(curr, hlock)) return 0; /* mark it as used: */ if (!mark_lock(curr, hlock, LOCK_USED)) return 0; /* * Calculate the chain hash: it's the combined hash of all the * lock keys along the dependency chain. We save the hash value * at every step so that we can get the current hash easily * after unlock. The chain hash is then used to cache dependency * results. * * The 'key ID' is what is the most compact key value to drive * the hash, not class->key. */ id = class - lock_classes; /* * Whoops, we did it again.. ran straight out of our static allocation. */ if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) return 0; chain_key = curr->curr_chain_key; if (!depth) { /* * How can we have a chain hash when we ain't got no keys?! 
*/ if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) return 0; chain_head = 1; } hlock->prev_chain_key = chain_key; if (separate_irq_context(curr, hlock)) { chain_key = 0; chain_head = 1; } chain_key = iterate_chain_key(chain_key, id); if (nest_lock && !__lock_is_held(nest_lock)) return print_lock_nested_lock_not_held(curr, hlock, ip); if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) return 0; curr->curr_chain_key = chain_key; curr->lockdep_depth++; check_chain_key(curr); #ifdef CONFIG_DEBUG_LOCKDEP if (unlikely(!debug_locks)) return 0; #endif if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { debug_locks_off(); print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!"); printk(KERN_DEBUG "depth: %i max: %lu!\n", curr->lockdep_depth, MAX_LOCK_DEPTH); lockdep_print_held_locks(current); debug_show_all_locks(); dump_stack(); return 0; } if (unlikely(curr->lockdep_depth > max_lockdep_depth)) max_lockdep_depth = curr->lockdep_depth; return 1; } static int print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock, unsigned long ip) { if (!debug_locks_off()) return 0; if (debug_locks_silent) return 0; printk("\n"); printk("=====================================\n"); printk("[ BUG: bad unlock balance detected! ]\n"); print_kernel_ident(); printk("-------------------------------------\n"); printk("%s/%d is trying to release lock (", curr->comm, task_pid_nr(curr)); print_lockdep_cache(lock); printk(") at:\n"); print_ip_sym(ip); printk("but there are no more locks to release!\n"); printk("\nother info that might help us debug this:\n"); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); return 0; } /* * Common debugging checks for both nested and non-nested unlock: */ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, unsigned long ip) { if (unlikely(!debug_locks)) return 0; /* * Lockdep should run with IRQs disabled, recursion, head-ache, etc.. 
*/ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return 0; if (curr->lockdep_depth <= 0) return print_unlock_imbalance_bug(curr, lock, ip); return 1; } static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) { if (hlock->instance == lock) return 1; if (hlock->references) { struct lock_class *class = lock->class_cache[0]; if (!class) class = look_up_lock_class(lock, 0); /* * If look_up_lock_class() failed to find a class, we're trying * to test if we hold a lock that has never yet been acquired. * Clearly if the lock hasn't been acquired _ever_, we're not * holding it either, so report failure. */ if (!class) return 0; /* * References, but not a lock we're actually ref-counting? * State got messed up, follow the sites that change ->references * and try to make sense of it. */ if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) return 0; if (hlock->class_idx == class - lock_classes + 1) return 1; } return 0; } static int __lock_set_class(struct lockdep_map *lock, const char *name, struct lock_class_key *key, unsigned int subclass, unsigned long ip) { struct task_struct *curr = current; struct held_lock *hlock, *prev_hlock; struct lock_class *class; unsigned int depth; int i; depth = curr->lockdep_depth; /* * This function is about (re)setting the class of a held lock, * yet we're not actually holding any locks. Naughty user! 
*/ if (DEBUG_LOCKS_WARN_ON(!depth)) return 0; prev_hlock = NULL; for (i = depth-1; i >= 0; i--) { hlock = curr->held_locks + i; /* * We must not cross into another context: */ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) break; if (match_held_lock(hlock, lock)) goto found_it; prev_hlock = hlock; } return print_unlock_imbalance_bug(curr, lock, ip); found_it: lockdep_init_map(lock, name, key, 0); class = register_lock_class(lock, subclass, 0); hlock->class_idx = class - lock_classes + 1; curr->lockdep_depth = i; curr->curr_chain_key = hlock->prev_chain_key; for (; i < depth; i++) { hlock = curr->held_locks + i; if (!__lock_acquire(hlock->instance, hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, hlock->references)) return 0; } /* * I took it apart and put it back together again, except now I have * these 'spare' parts.. where shall I put them. */ if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) return 0; return 1; } /* * Remove the lock to the list of currently held locks in a * potentially non-nested (out of order) manner. This is a * relatively rare operation, as all the unlock APIs default * to nested mode (which uses lock_release()): */ static int lock_release_non_nested(struct task_struct *curr, struct lockdep_map *lock, unsigned long ip) { struct held_lock *hlock, *prev_hlock; unsigned int depth; int i; /* * Check whether the lock exists in the current stack * of held locks: */ depth = curr->lockdep_depth; /* * So we're all set to release this lock.. wait what lock? We don't * own any locks, you've been drinking again? 
*/ if (DEBUG_LOCKS_WARN_ON(!depth)) return 0; prev_hlock = NULL; for (i = depth-1; i >= 0; i--) { hlock = curr->held_locks + i; /* * We must not cross into another context: */ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) break; if (match_held_lock(hlock, lock)) goto found_it; prev_hlock = hlock; } return print_unlock_imbalance_bug(curr, lock, ip); found_it: if (hlock->instance == lock) lock_release_holdtime(hlock); if (hlock->references) { hlock->references--; if (hlock->references) { /* * We had, and after removing one, still have * references, the current lock stack is still * valid. We're done! */ return 1; } } /* * We have the right lock to unlock, 'hlock' points to it. * Now we remove it from the stack, and add back the other * entries (if any), recalculating the hash along the way: */ curr->lockdep_depth = i; curr->curr_chain_key = hlock->prev_chain_key; for (i++; i < depth; i++) { hlock = curr->held_locks + i; if (!__lock_acquire(hlock->instance, hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, hlock->references)) return 0; } /* * We had N bottles of beer on the wall, we drank one, but now * there's not N-1 bottles of beer left on the wall... */ if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) return 0; return 1; } /* * Remove the lock to the list of currently held locks - this gets * called on mutex_unlock()/spin_unlock*() (or on a failed * mutex_lock_interruptible()). This is done for unlocks that nest * perfectly. (i.e. 
the current top of the lock-stack is unlocked) */ static int lock_release_nested(struct task_struct *curr, struct lockdep_map *lock, unsigned long ip) { struct held_lock *hlock; unsigned int depth; /* * Pop off the top of the lock stack: */ depth = curr->lockdep_depth - 1; hlock = curr->held_locks + depth; /* * Is the unlock non-nested: */ if (hlock->instance != lock || hlock->references) return lock_release_non_nested(curr, lock, ip); curr->lockdep_depth--; /* * No more locks, but somehow we've got hash left over, who left it? */ if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0))) return 0; curr->curr_chain_key = hlock->prev_chain_key; lock_release_holdtime(hlock); #ifdef CONFIG_DEBUG_LOCKDEP hlock->prev_chain_key = 0; hlock->class_idx = 0; hlock->acquire_ip = 0; hlock->irq_context = 0; #endif return 1; } /* * Remove the lock to the list of currently held locks - this gets * called on mutex_unlock()/spin_unlock*() (or on a failed * mutex_lock_interruptible()). This is done for unlocks that nest * perfectly. (i.e. 
the current top of the lock-stack is unlocked) */ static void __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) { struct task_struct *curr = current; if (!check_unlock(curr, lock, ip)) return; if (nested) { if (!lock_release_nested(curr, lock, ip)) return; } else { if (!lock_release_non_nested(curr, lock, ip)) return; } check_chain_key(curr); } static int __lock_is_held(struct lockdep_map *lock) { struct task_struct *curr = current; int i; for (i = 0; i < curr->lockdep_depth; i++) { struct held_lock *hlock = curr->held_locks + i; if (match_held_lock(hlock, lock)) return 1; } return 0; } /* * Check whether we follow the irq-flags state precisely: */ static void check_flags(unsigned long flags) { #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \ defined(CONFIG_TRACE_IRQFLAGS) if (!debug_locks) return; if (irqs_disabled_flags(flags)) { if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { printk("possible reason: unannotated irqs-off.\n"); } } else { if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { printk("possible reason: unannotated irqs-on.\n"); } } /* * We dont accurately track softirq state in e.g. * hardirq contexts (such as on 4KSTACKS), so only * check if not in hardirq contexts: */ if (!hardirq_count()) { if (softirq_count()) { /* like the above, but with softirqs */ DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); } else { /* lick the above, does it taste good? 
*/ DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } } if (!debug_locks) print_irqtrace_events(current); #endif } void lock_set_class(struct lockdep_map *lock, const char *name, struct lock_class_key *key, unsigned int subclass, unsigned long ip) { unsigned long flags; if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); current->lockdep_recursion = 1; check_flags(flags); if (__lock_set_class(lock, name, key, subclass, ip)) check_chain_key(current); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lock_set_class); /* * We are not always called with irqs disabled - do that here, * and also avoid lockdep recursion: */ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, struct lockdep_map *nest_lock, unsigned long ip) { unsigned long flags; if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); __lock_acquire(lock, subclass, trylock, read, check, irqs_disabled_flags(flags), nest_lock, ip, 0); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lock_acquire); void lock_release(struct lockdep_map *lock, int nested, unsigned long ip) { unsigned long flags; if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; trace_lock_release(lock, ip); __lock_release(lock, nested, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lock_release); int lock_is_held(struct lockdep_map *lock) { unsigned long flags; int ret = 0; if (unlikely(current->lockdep_recursion)) return 1; /* avoid false negative lockdep_assert_held() */ raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; ret = __lock_is_held(lock); current->lockdep_recursion = 0; raw_local_irq_restore(flags); return 
ret; } EXPORT_SYMBOL_GPL(lock_is_held); void lockdep_set_current_reclaim_state(gfp_t gfp_mask) { current->lockdep_reclaim_gfp = gfp_mask; } void lockdep_clear_current_reclaim_state(void) { current->lockdep_reclaim_gfp = 0; } #ifdef CONFIG_LOCK_STAT static int print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, unsigned long ip) { if (!debug_locks_off()) return 0; if (debug_locks_silent) return 0; printk("\n"); printk("=================================\n"); printk("[ BUG: bad contention detected! ]\n"); print_kernel_ident(); printk("---------------------------------\n"); printk("%s/%d is trying to contend lock (", curr->comm, task_pid_nr(curr)); print_lockdep_cache(lock); printk(") at:\n"); print_ip_sym(ip); printk("but there are no locks held!\n"); printk("\nother info that might help us debug this:\n"); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); return 0; } static void __lock_contended(struct lockdep_map *lock, unsigned long ip) { struct task_struct *curr = current; struct held_lock *hlock, *prev_hlock; struct lock_class_stats *stats; unsigned int depth; int i, contention_point, contending_point; depth = curr->lockdep_depth; /* * Whee, we contended on this lock, except it seems we're not * actually trying to acquire anything much at all.. 
*/ if (DEBUG_LOCKS_WARN_ON(!depth)) return; prev_hlock = NULL; for (i = depth-1; i >= 0; i--) { hlock = curr->held_locks + i; /* * We must not cross into another context: */ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) break; if (match_held_lock(hlock, lock)) goto found_it; prev_hlock = hlock; } print_lock_contention_bug(curr, lock, ip); return; found_it: if (hlock->instance != lock) return; hlock->waittime_stamp = lockstat_clock(); contention_point = lock_point(hlock_class(hlock)->contention_point, ip); contending_point = lock_point(hlock_class(hlock)->contending_point, lock->ip); stats = get_lock_stats(hlock_class(hlock)); if (contention_point < LOCKSTAT_POINTS) stats->contention_point[contention_point]++; if (contending_point < LOCKSTAT_POINTS) stats->contending_point[contending_point]++; if (lock->cpu != smp_processor_id()) stats->bounces[bounce_contended + !!hlock->read]++; put_lock_stats(stats); } static void __lock_acquired(struct lockdep_map *lock, unsigned long ip) { struct task_struct *curr = current; struct held_lock *hlock, *prev_hlock; struct lock_class_stats *stats; unsigned int depth; u64 now, waittime = 0; int i, cpu; depth = curr->lockdep_depth; /* * Yay, we acquired ownership of this lock we didn't try to * acquire, how the heck did that happen? 
*/ if (DEBUG_LOCKS_WARN_ON(!depth)) return; prev_hlock = NULL; for (i = depth-1; i >= 0; i--) { hlock = curr->held_locks + i; /* * We must not cross into another context: */ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) break; if (match_held_lock(hlock, lock)) goto found_it; prev_hlock = hlock; } print_lock_contention_bug(curr, lock, _RET_IP_); return; found_it: if (hlock->instance != lock) return; cpu = smp_processor_id(); if (hlock->waittime_stamp) { now = lockstat_clock(); waittime = now - hlock->waittime_stamp; hlock->holdtime_stamp = now; } trace_lock_acquired(lock, ip); stats = get_lock_stats(hlock_class(hlock)); if (waittime) { if (hlock->read) lock_time_inc(&stats->read_waittime, waittime); else lock_time_inc(&stats->write_waittime, waittime); } if (lock->cpu != cpu) stats->bounces[bounce_acquired + !!hlock->read]++; put_lock_stats(stats); lock->cpu = cpu; lock->ip = ip; } void lock_contended(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; if (unlikely(!lock_stat)) return; if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; trace_lock_contended(lock, ip); __lock_contended(lock, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lock_contended); void lock_acquired(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; if (unlikely(!lock_stat)) return; if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; __lock_acquired(lock, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lock_acquired); #endif /* * Used by the testsuite, sanitize the validator state * after a simulated failure: */ void lockdep_reset(void) { unsigned long flags; int i; raw_local_irq_save(flags); current->curr_chain_key = 0; current->lockdep_depth = 0; current->lockdep_recursion = 0; memset(current->held_locks, 0, 
MAX_LOCK_DEPTH*sizeof(struct held_lock)); nr_hardirq_chains = 0; nr_softirq_chains = 0; nr_process_chains = 0; debug_locks = 1; for (i = 0; i < CHAINHASH_SIZE; i++) INIT_LIST_HEAD(chainhash_table + i); raw_local_irq_restore(flags); } static void zap_class(struct lock_class *class) { int i; /* * Remove all dependencies this lock is * involved in: */ for (i = 0; i < nr_list_entries; i++) { if (list_entries[i].class == class) list_del_rcu(&list_entries[i].entry); } /* * Unhash the class and remove it from the all_lock_classes list: */ list_del_rcu(&class->hash_entry); list_del_rcu(&class->lock_entry); class->key = NULL; } static inline int within(const void *addr, void *start, unsigned long size) { return addr >= start && addr < start + size; } void lockdep_free_key_range(void *start, unsigned long size) { struct lock_class *class, *next; struct list_head *head; unsigned long flags; int i; int locked; raw_local_irq_save(flags); locked = graph_lock(); /* * Unhash all classes that were created by this module: */ for (i = 0; i < CLASSHASH_SIZE; i++) { head = classhash_table + i; if (list_empty(head)) continue; list_for_each_entry_safe(class, next, head, hash_entry) { if (within(class->key, start, size)) zap_class(class); else if (within(class->name, start, size)) zap_class(class); } } if (locked) graph_unlock(); raw_local_irq_restore(flags); } void lockdep_reset_lock(struct lockdep_map *lock) { struct lock_class *class, *next; struct list_head *head; unsigned long flags; int i, j; int locked; raw_local_irq_save(flags); /* * Remove all classes this lock might have: */ for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { /* * If the class exists we look it up and zap it: */ class = look_up_lock_class(lock, j); if (class) zap_class(class); } /* * Debug check: in the end all mapped classes should * be gone. 
*/ locked = graph_lock(); for (i = 0; i < CLASSHASH_SIZE; i++) { head = classhash_table + i; if (list_empty(head)) continue; list_for_each_entry_safe(class, next, head, hash_entry) { int match = 0; for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) match |= class == lock->class_cache[j]; if (unlikely(match)) { if (debug_locks_off_graph_unlock()) { /* * We all just reset everything, how did it match? */ WARN_ON(1); } goto out_restore; } } } if (locked) graph_unlock(); out_restore: raw_local_irq_restore(flags); } void lockdep_init(void) { int i; /* * Some architectures have their own start_kernel() * code which calls lockdep_init(), while we also * call lockdep_init() from the start_kernel() itself, * and we want to initialize the hashes only once: */ if (lockdep_initialized) return; for (i = 0; i < CLASSHASH_SIZE; i++) INIT_LIST_HEAD(classhash_table + i); for (i = 0; i < CHAINHASH_SIZE; i++) INIT_LIST_HEAD(chainhash_table + i); lockdep_initialized = 1; } void __init lockdep_info(void) { printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES); printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH); printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS); printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE); printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES); printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); printk("... 
CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); printk(" memory used by lock dependency info: %lu kB\n", (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + sizeof(struct list_head) * CLASSHASH_SIZE + sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + sizeof(struct list_head) * CHAINHASH_SIZE #ifdef CONFIG_PROVE_LOCKING + sizeof(struct circular_queue) #endif ) / 1024 ); printk(" per task-struct memory footprint: %lu bytes\n", sizeof(struct held_lock) * MAX_LOCK_DEPTH); #ifdef CONFIG_DEBUG_LOCKDEP if (lockdep_init_error) { printk("WARNING: lockdep init error! lock-%s was acquired" "before lockdep_init\n", lock_init_error); printk("Call stack leading to lockdep invocation was:\n"); print_stack_trace(&lockdep_init_trace, 0); } #endif } static void print_freed_lock_bug(struct task_struct *curr, const void *mem_from, const void *mem_to, struct held_lock *hlock) { if (!debug_locks_off()) return; if (debug_locks_silent) return; printk("\n"); printk("=========================\n"); printk("[ BUG: held lock freed! 
]\n"); print_kernel_ident(); printk("-------------------------\n"); printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n", curr->comm, task_pid_nr(curr), mem_from, mem_to-1); print_lock(hlock); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); } static inline int not_in_range(const void* mem_from, unsigned long mem_len, const void* lock_from, unsigned long lock_len) { return lock_from + lock_len <= mem_from || mem_from + mem_len <= lock_from; } /* * Called when kernel memory is freed (or unmapped), or if a lock * is destroyed or reinitialized - this code checks whether there is * any held lock in the memory range of <from> to <to>: */ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) { struct task_struct *curr = current; struct held_lock *hlock; unsigned long flags; int i; if (unlikely(!debug_locks)) return; local_irq_save(flags); for (i = 0; i < curr->lockdep_depth; i++) { hlock = curr->held_locks + i; if (not_in_range(mem_from, mem_len, hlock->instance, sizeof(*hlock->instance))) continue; print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); break; } local_irq_restore(flags); } EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); static void print_held_locks_bug(void) { if (!debug_locks_off()) return; if (debug_locks_silent) return; printk("\n"); printk("=====================================\n"); printk("[ BUG: %s/%d still has locks held! 
]\n", current->comm, task_pid_nr(current)); print_kernel_ident(); printk("-------------------------------------\n"); lockdep_print_held_locks(current); printk("\nstack backtrace:\n"); dump_stack(); } void debug_check_no_locks_held(void) { if (unlikely(current->lockdep_depth > 0)) print_held_locks_bug(); } EXPORT_SYMBOL_GPL(debug_check_no_locks_held); #ifdef __KERNEL__ void debug_show_all_locks(void) { struct task_struct *g, *p; int count = 10; int unlock = 1; if (unlikely(!debug_locks)) { printk("INFO: lockdep is turned off.\n"); return; } printk("\nShowing all locks held in the system:\n"); /* * Here we try to get the tasklist_lock as hard as possible, * if not successful after 2 seconds we ignore it (but keep * trying). This is to enable a debug printout even if a * tasklist_lock-holding task deadlocks or crashes. */ retry: if (!read_trylock(&tasklist_lock)) { if (count == 10) printk("hm, tasklist_lock locked, retrying... "); if (count) { count--; printk(" #%d", 10-count); mdelay(200); goto retry; } printk(" ignoring it.\n"); unlock = 0; } else { if (count != 10) printk(KERN_CONT " locked it.\n"); } do_each_thread(g, p) { /* * It's not reliable to print a task's held locks * if it's not sleeping (or if it's not the current * task): */ if (p->state == TASK_RUNNING && p != current) continue; if (p->lockdep_depth) lockdep_print_held_locks(p); if (!unlock) if (read_trylock(&tasklist_lock)) unlock = 1; } while_each_thread(g, p); printk("\n"); printk("=============================================\n\n"); if (unlock) read_unlock(&tasklist_lock); } EXPORT_SYMBOL_GPL(debug_show_all_locks); #endif /* * Careful: only use this function if you are sure that * the task cannot run in parallel! 
*/ void debug_show_held_locks(struct task_struct *task) { if (unlikely(!debug_locks)) { printk("INFO: lockdep is turned off.\n"); return; } lockdep_print_held_locks(task); } EXPORT_SYMBOL_GPL(debug_show_held_locks); asmlinkage __visible void lockdep_sys_exit(void) { struct task_struct *curr = current; if (unlikely(curr->lockdep_depth)) { if (!debug_locks_off()) return; printk("\n"); printk("================================================\n"); printk("[ BUG: lock held when returning to user space! ]\n"); print_kernel_ident(); printk("------------------------------------------------\n"); printk("%s/%d is leaving the kernel with locks still held!\n", curr->comm, curr->pid); lockdep_print_held_locks(curr); } } void lockdep_rcu_suspicious(const char *file, const int line, const char *s) { struct task_struct *curr = current; #ifndef CONFIG_PROVE_RCU_REPEATEDLY if (!debug_locks_off()) return; #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */ /* Note: the following can be executed concurrently, so be careful. */ printk("\n"); printk("===============================\n"); printk("[ INFO: suspicious RCU usage. ]\n"); print_kernel_ident(); printk("-------------------------------\n"); printk("%s:%d %s!\n", file, line, s); printk("\nother info that might help us debug this:\n\n"); printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n", !rcu_lockdep_current_cpu_online() ? "RCU used illegally from offline CPU!\n" : !rcu_is_watching() ? "RCU used illegally from idle CPU!\n" : "", rcu_scheduler_active, debug_locks); /* * If a CPU is in the RCU-free window in idle (ie: in the section * between rcu_idle_enter() and rcu_idle_exit(), then RCU * considers that CPU to be in an "extended quiescent state", * which means that RCU will be completely ignoring that CPU. * Therefore, rcu_read_lock() and friends have absolutely no * effect on a CPU running in that state. 
In other words, even if * such an RCU-idle CPU has called rcu_read_lock(), RCU might well * delete data structures out from under it. RCU really has no * choice here: we need to keep an RCU-free window in idle where * the CPU may possibly enter into low power mode. This way we can * notice an extended quiescent state to other CPUs that started a grace * period. Otherwise we would delay any grace period as long as we run * in the idle task. * * So complain bitterly if someone does call rcu_read_lock(), * rcu_read_lock_bh() and so on from extended quiescent states. */ if (!rcu_is_watching()) printk("RCU used illegally from extended quiescent state!\n"); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); } EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
gpl-2.0
aospan/linux-netup-1.4
drivers/net/arcnet/arc-rawmode.c
498
4962
/*
 * Linux ARCnet driver - "raw mode" packet encapsulation (no soft headers)
 *
 * Written 1994-1999 by Avery Pennarun.
 * Derived from skeleton.c by Donald Becker.
 *
 * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
 *  for sponsoring the further development of this driver.
 *
 * **********************
 *
 * The original copyright of skeleton.c was as follows:
 *
 * skeleton.c Written 1993 by Donald Becker.
 * Copyright 1993 United States Government as represented by the
 * Director, National Security Agency.  This software may only be used
 * and distributed according to the terms of the GNU General Public License as
 * modified by SRC, incorporated herein by reference.
 *
 * **********************
 *
 * For more details, see drivers/net/arcnet.c
 *
 * **********************
 */

#define pr_fmt(fmt) "arcnet:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include "arcdevice.h"

/* packet receiver
 *
 * Called by the hardware layer when a raw-mode packet has arrived in
 * on-card buffer @bufnum.  @pkthdr points at the ARCnet hardware header
 * (plus up to sizeof(pkt->soft) bytes of payload) that the hardware
 * layer has already copied out of the card; @length is the payload
 * length.  Builds an skb containing hard header + payload and hands it
 * to the network stack via netif_rx().
 */
static void rx(struct net_device *dev, int bufnum,
	       struct archdr *pkthdr, int length)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	struct archdr *pkt = pkthdr;
	int ofs;

	arc_printk(D_DURING, dev, "it's a raw packet (length=%d)\n", length);

	/* The payload sits at the tail of the on-card buffer; long packets
	 * use the full 512-byte buffer, short ones only the first 256 bytes.
	 * NOTE(review): mirrors the offset scheme written by prepare_tx()
	 * below - confirm against the card's buffer layout documentation.
	 */
	if (length > MTU)	/* oversized, 512 bytes used */
		ofs = 512 - length;
	else
		ofs = 256 - length;

	skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
	if (!skb) {
		/* out of memory: drop the packet, but account for it */
		dev->stats.rx_dropped++;
		return;
	}
	skb_put(skb, length + ARC_HDR_SIZE);
	skb->dev = dev;

	pkt = (struct archdr *)skb->data;

	/* mac header = the raw ARC_HDR_SIZE hardware header at the front */
	skb_reset_mac_header(skb);
	skb_pull(skb, ARC_HDR_SIZE);

	/* up to sizeof(pkt->soft) has already been copied from the card */
	memcpy(pkt, pkthdr, sizeof(struct archdr));
	/* fetch the remainder of the payload straight from the card buffer */
	if (length > sizeof(pkt->soft))
		lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
				      pkt->soft.raw + sizeof(pkt->soft),
				      length - sizeof(pkt->soft));

	if (BUGLVL(D_SKB))
		arcnet_dump_skb(dev, skb, "rx");

	skb->protocol = cpu_to_be16(ETH_P_ARCNET);
	netif_rx(skb);
}

/* Create the ARCnet hard/soft headers for raw mode.
 * There aren't any soft headers in raw mode - not even the protocol id.
 *
 * @type is unused here: with no soft header there is nowhere to record a
 * protocol id.  Returns the number of bytes pushed (always ARC_HDR_SIZE).
 */
static int build_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type, uint8_t daddr)
{
	int hdr_size = ARC_HDR_SIZE;
	struct archdr *pkt = (struct archdr *)skb_push(skb, hdr_size);

	/* Set the source hardware address.
	 *
	 * This is pretty pointless for most purposes, but it can help in
	 * debugging.  ARCnet does not allow us to change the source address
	 * in the actual packet sent.
	 */
	pkt->hard.source = *dev->dev_addr;

	/* see linux/net/ethernet/eth.c to see where I got the following */

	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
		/* FIXME: fill in the last byte of the dest ipaddr here
		 * to better comply with RFC1051 in "noarp" mode.
		 */
		pkt->hard.dest = 0;
		return hdr_size;
	}
	/* otherwise, just fill it in and go! */
	pkt->hard.dest = daddr;

	return hdr_size;	/* success */
}

/* Load one outgoing packet into on-card transmit buffer @bufnum.
 *
 * @length includes the hardware header; the payload is placed at the
 * tail of the buffer and the resulting offset(s) are recorded in the
 * hardware header's offset[] fields so the card knows where it starts.
 * Returns 1: the whole packet was loaded (no continuation needed).
 */
static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
		      int bufnum)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct arc_hardware *hard = &pkt->hard;
	int ofs;

	arc_printk(D_DURING, dev, "prepare_tx: txbufs=%d/%d/%d\n",
		   lp->next_tx, lp->cur_tx, bufnum);

	/* hard header is not included in packet length */
	length -= ARC_HDR_SIZE;

	if (length > XMTU) {
		/* should never happen! other people already check for this. */
		arc_printk(D_NORMAL, dev, "Bug! prepare_tx with size %d (> %d)\n",
			   length, XMTU);
		length = XMTU;
	}
	if (length >= MinTU) {
		/* normal long packet: payload at the end of the 512-byte
		 * buffer, single offset in offset[1]
		 */
		hard->offset[0] = 0;
		hard->offset[1] = ofs = 512 - length;
	} else if (length > MTU) {
		/* MTU < length < MinTU: presumably an "exception" packet
		 * where 3 extra bytes are reserved ahead of the payload -
		 * TODO confirm against the other arcnet protocol modules.
		 */
		hard->offset[0] = 0;
		hard->offset[1] = ofs = 512 - length - 3;
	} else {
		/* short packet: fits in the first 256 bytes */
		hard->offset[0] = ofs = 256 - length;
	}

	arc_printk(D_DURING, dev, "prepare_tx: length=%d ofs=%d\n",
		   length, ofs);

	/* copy the hardware header to the front of the buffer and the
	 * payload to the offset computed above
	 */
	lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
	lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);

	lp->lastload_dest = hard->dest;

	return 1;	/* done */
}

/* protocol operations for raw ('r') encapsulation; no continuation or
 * tx-ack handling is needed since prepare_tx always loads everything
 */
static struct ArcProto rawmode_proto = {
	.suffix		= 'r',
	.mtu		= XMTU,
	.rx		= rx,
	.build_header	= build_header,
	.prepare_tx	= prepare_tx,
	.continue_tx	= NULL,
	.ack_tx		= NULL
};

/* Register raw mode as the fallback protocol for every ARCnet protocol
 * id that does not already have a more specific handler.
 */
static int __init arcnet_raw_init(void)
{
	int count;

	pr_info("raw mode (`r') encapsulation support loaded\n");

	/* take over every protocol-id slot still pointing at the default */
	for (count = 0; count < 256; count++)
		if (arc_proto_map[count] == arc_proto_default)
			arc_proto_map[count] = &rawmode_proto;

	/* for raw mode, we only set the bcast proto if there's no better one */
	if (arc_bcast_proto == arc_proto_default)
		arc_bcast_proto = &rawmode_proto;

	arc_proto_default = &rawmode_proto;
	return 0;
}

static void __exit arcnet_raw_exit(void)
{
	arcnet_unregister_proto(&rawmode_proto);
}

module_init(arcnet_raw_init);
module_exit(arcnet_raw_exit);

MODULE_LICENSE("GPL");
gpl-2.0
babybearsg/huawei_s7_kernel
net/irda/irlan/irlan_client.c
754
15225
/********************************************************************* * * Filename: irlan_client.c * Version: 0.9 * Description: IrDA LAN Access Protocol (IrLAN) Client * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sun Aug 31 20:14:37 1997 * Modified at: Tue Dec 14 15:47:02 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov> * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk> * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> * * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. 
* ********************************************************************/ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/bitops.h> #include <net/arp.h> #include <asm/system.h> #include <asm/byteorder.h> #include <net/irda/irda.h> #include <net/irda/irttp.h> #include <net/irda/irlmp.h> #include <net/irda/irias_object.h> #include <net/irda/iriap.h> #include <net/irda/timer.h> #include <net/irda/irlan_common.h> #include <net/irda/irlan_event.h> #include <net/irda/irlan_eth.h> #include <net/irda/irlan_provider.h> #include <net/irda/irlan_client.h> #undef CONFIG_IRLAN_GRATUITOUS_ARP static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap, LM_REASON reason, struct sk_buff *); static int irlan_client_ctrl_data_indication(void *instance, void *sap, struct sk_buff *skb); static void irlan_client_ctrl_connect_confirm(void *instance, void *sap, struct qos_info *qos, __u32 max_sdu_size, __u8 max_header_size, struct sk_buff *); static void irlan_check_response_param(struct irlan_cb *self, char *param, char *value, int val_len); static void irlan_client_open_ctrl_tsap(struct irlan_cb *self); static void irlan_client_kick_timer_expired(void *data) { struct irlan_cb *self = (struct irlan_cb *) data; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); /* * If we are in peer mode, the client may not have got the discovery * indication it needs to make progress. 
If the client is still in * IDLE state, we must kick it to, but only if the provider is not IDLE */ if ((self->provider.access_type == ACCESS_PEER) && (self->client.state == IRLAN_IDLE) && (self->provider.state != IRLAN_IDLE)) { irlan_client_wakeup(self, self->saddr, self->daddr); } } static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) { IRDA_DEBUG(4, "%s()\n", __func__ ); irda_start_timer(&self->client.kick_timer, timeout, (void *) self, irlan_client_kick_timer_expired); } /* * Function irlan_client_wakeup (self, saddr, daddr) * * Wake up client * */ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) { IRDA_DEBUG(1, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); /* * Check if we are already awake, or if we are a provider in direct * mode (in that case we must leave the client idle */ if ((self->client.state != IRLAN_IDLE) || (self->provider.access_type == ACCESS_DIRECT)) { IRDA_DEBUG(0, "%s(), already awake!\n", __func__ ); return; } /* Addresses may have changed! */ self->saddr = saddr; self->daddr = daddr; if (self->disconnect_reason == LM_USER_REQUEST) { IRDA_DEBUG(0, "%s(), still stopped by user\n", __func__ ); return; } /* Open TSAPs */ irlan_client_open_ctrl_tsap(self); irlan_open_data_tsap(self); irlan_do_client_event(self, IRLAN_DISCOVERY_INDICATION, NULL); /* Start kick timer */ irlan_client_start_kick_timer(self, 2*HZ); } /* * Function irlan_discovery_indication (daddr) * * Remote device with IrLAN server support discovered * */ void irlan_client_discovery_indication(discinfo_t *discovery, DISCOVERY_MODE mode, void *priv) { struct irlan_cb *self; __u32 saddr, daddr; IRDA_DEBUG(1, "%s()\n", __func__ ); IRDA_ASSERT(discovery != NULL, return;); /* * I didn't check it, but I bet that IrLAN suffer from the same * deficiency as IrComm and doesn't handle two instances * simultaneously connecting to each other. 
* Same workaround, drop passive discoveries. * Jean II */ if(mode == DISCOVERY_PASSIVE) return; saddr = discovery->saddr; daddr = discovery->daddr; /* Find instance */ rcu_read_lock(); self = irlan_get_any(); if (self) { IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;); IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __func__ , daddr); irlan_client_wakeup(self, saddr, daddr); } IRDA_ASSERT_LABEL(out:) rcu_read_unlock(); } /* * Function irlan_client_data_indication (handle, skb) * * This function gets the data that is received on the control channel * */ static int irlan_client_ctrl_data_indication(void *instance, void *sap, struct sk_buff *skb) { struct irlan_cb *self; IRDA_DEBUG(2, "%s()\n", __func__ ); self = (struct irlan_cb *) instance; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb); /* Ready for a new command */ IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __func__ ); self->client.tx_busy = FALSE; /* Check if we have some queued commands waiting to be sent */ irlan_run_ctrl_tx_queue(self); return 0; } static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap, LM_REASON reason, struct sk_buff *userdata) { struct irlan_cb *self; struct tsap_cb *tsap; struct sk_buff *skb; IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason); self = (struct irlan_cb *) instance; tsap = (struct tsap_cb *) sap; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); IRDA_ASSERT(tsap != NULL, return;); IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;); IRDA_ASSERT(tsap == self->client.tsap_ctrl, return;); /* Remove frames queued on the control channel */ while ((skb = skb_dequeue(&self->client.txq)) != NULL) { dev_kfree_skb(skb); } self->client.tx_busy = FALSE; irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL); } /* * Function irlan_client_open_tsaps (self) * * Initialize callbacks and open 
IrTTP TSAPs * */ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self) { struct tsap_cb *tsap; notify_t notify; IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); /* Check if already open */ if (self->client.tsap_ctrl) return; irda_notify_init(&notify); /* Set up callbacks */ notify.data_indication = irlan_client_ctrl_data_indication; notify.connect_confirm = irlan_client_ctrl_connect_confirm; notify.disconnect_indication = irlan_client_ctrl_disconnect_indication; notify.instance = self; strlcpy(notify.name, "IrLAN ctrl (c)", sizeof(notify.name)); tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); if (!tsap) { IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ ); return; } self->client.tsap_ctrl = tsap; } /* * Function irlan_client_connect_confirm (handle, skb) * * Connection to peer IrLAN laye confirmed * */ static void irlan_client_ctrl_connect_confirm(void *instance, void *sap, struct qos_info *qos, __u32 max_sdu_size, __u8 max_header_size, struct sk_buff *skb) { struct irlan_cb *self; IRDA_DEBUG(4, "%s()\n", __func__ ); self = (struct irlan_cb *) instance; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); self->client.max_sdu_size = max_sdu_size; self->client.max_header_size = max_header_size; /* TODO: we could set the MTU depending on the max_sdu_size */ irlan_do_client_event(self, IRLAN_CONNECT_COMPLETE, NULL); } /* * Function print_ret_code (code) * * Print return code of request to peer IrLAN layer. 
* */ static void print_ret_code(__u8 code) { switch(code) { case 0: printk(KERN_INFO "Success\n"); break; case 1: IRDA_WARNING("IrLAN: Insufficient resources\n"); break; case 2: IRDA_WARNING("IrLAN: Invalid command format\n"); break; case 3: IRDA_WARNING("IrLAN: Command not supported\n"); break; case 4: IRDA_WARNING("IrLAN: Parameter not supported\n"); break; case 5: IRDA_WARNING("IrLAN: Value not supported\n"); break; case 6: IRDA_WARNING("IrLAN: Not open\n"); break; case 7: IRDA_WARNING("IrLAN: Authentication required\n"); break; case 8: IRDA_WARNING("IrLAN: Invalid password\n"); break; case 9: IRDA_WARNING("IrLAN: Protocol error\n"); break; case 255: IRDA_WARNING("IrLAN: Asynchronous status\n"); break; } } /* * Function irlan_client_parse_response (self, skb) * * Extract all parameters from received buffer, then feed them to * check_params for parsing */ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) { __u8 *frame; __u8 *ptr; int count; int ret; __u16 val_len; int i; char *name; char *value; IRDA_ASSERT(skb != NULL, return;); IRDA_DEBUG(4, "%s() skb->len=%d\n", __func__ , (int) skb->len); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); if (!skb) { IRDA_ERROR("%s(), Got NULL skb!\n", __func__); return; } frame = skb->data; /* * Check return code and print it if not success */ if (frame[0]) { print_ret_code(frame[0]); return; } name = kmalloc(255, GFP_ATOMIC); if (!name) return; value = kmalloc(1016, GFP_ATOMIC); if (!value) { kfree(name); return; } /* How many parameters? 
*/ count = frame[1]; IRDA_DEBUG(4, "%s(), got %d parameters\n", __func__ , count); ptr = frame+2; /* For all parameters */ for (i=0; i<count;i++) { ret = irlan_extract_param(ptr, name, value, &val_len); if (ret < 0) { IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ ); break; } ptr += ret; irlan_check_response_param(self, name, value, val_len); } /* Cleanup */ kfree(name); kfree(value); } /* * Function irlan_check_response_param (self, param, value, val_len) * * Check which parameter is received and update local variables * */ static void irlan_check_response_param(struct irlan_cb *self, char *param, char *value, int val_len) { __u16 tmp_cpu; /* Temporary value in host order */ __u8 *bytes; int i; IRDA_DEBUG(4, "%s(), parm=%s\n", __func__ , param); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); /* Media type */ if (strcmp(param, "MEDIA") == 0) { if (strcmp(value, "802.3") == 0) self->media = MEDIA_802_3; else self->media = MEDIA_802_5; return; } if (strcmp(param, "FILTER_TYPE") == 0) { if (strcmp(value, "DIRECTED") == 0) self->client.filter_type |= IRLAN_DIRECTED; else if (strcmp(value, "FUNCTIONAL") == 0) self->client.filter_type |= IRLAN_FUNCTIONAL; else if (strcmp(value, "GROUP") == 0) self->client.filter_type |= IRLAN_GROUP; else if (strcmp(value, "MAC_FRAME") == 0) self->client.filter_type |= IRLAN_MAC_FRAME; else if (strcmp(value, "MULTICAST") == 0) self->client.filter_type |= IRLAN_MULTICAST; else if (strcmp(value, "BROADCAST") == 0) self->client.filter_type |= IRLAN_BROADCAST; else if (strcmp(value, "IPX_SOCKET") == 0) self->client.filter_type |= IRLAN_IPX_SOCKET; } if (strcmp(param, "ACCESS_TYPE") == 0) { if (strcmp(value, "DIRECT") == 0) self->client.access_type = ACCESS_DIRECT; else if (strcmp(value, "PEER") == 0) self->client.access_type = ACCESS_PEER; else if (strcmp(value, "HOSTED") == 0) self->client.access_type = ACCESS_HOSTED; else { IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ ); } } /* IRLAN version 
*/ if (strcmp(param, "IRLAN_VER") == 0) { IRDA_DEBUG(4, "IrLAN version %d.%d\n", (__u8) value[0], (__u8) value[1]); self->version[0] = value[0]; self->version[1] = value[1]; return; } /* Which remote TSAP to use for data channel */ if (strcmp(param, "DATA_CHAN") == 0) { self->dtsap_sel_data = value[0]; IRDA_DEBUG(4, "Data TSAP = %02x\n", self->dtsap_sel_data); return; } if (strcmp(param, "CON_ARB") == 0) { memcpy(&tmp_cpu, value, 2); /* Align value */ le16_to_cpus(&tmp_cpu); /* Convert to host order */ self->client.recv_arb_val = tmp_cpu; IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __func__ , self->client.recv_arb_val); } if (strcmp(param, "MAX_FRAME") == 0) { memcpy(&tmp_cpu, value, 2); /* Align value */ le16_to_cpus(&tmp_cpu); /* Convert to host order */ self->client.max_frame = tmp_cpu; IRDA_DEBUG(4, "%s(), max frame=%d\n", __func__ , self->client.max_frame); } /* RECONNECT_KEY, in case the link goes down! */ if (strcmp(param, "RECONNECT_KEY") == 0) { IRDA_DEBUG(4, "Got reconnect key: "); /* for (i = 0; i < val_len; i++) */ /* printk("%02x", value[i]); */ memcpy(self->client.reconnect_key, value, val_len); self->client.key_len = val_len; IRDA_DEBUG(4, "\n"); } /* FILTER_ENTRY, have we got an ethernet address? 
*/ if (strcmp(param, "FILTER_ENTRY") == 0) { bytes = value; IRDA_DEBUG(4, "Ethernet address = %pM\n", bytes); for (i = 0; i < 6; i++) self->dev->dev_addr[i] = bytes[i]; } } /* * Function irlan_client_get_value_confirm (obj_id, value) * * Got results from remote LM-IAS * */ void irlan_client_get_value_confirm(int result, __u16 obj_id, struct ias_value *value, void *priv) { struct irlan_cb *self; IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(priv != NULL, return;); self = (struct irlan_cb *) priv; IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); /* We probably don't need to make any more queries */ iriap_close(self->client.iriap); self->client.iriap = NULL; /* Check if request succeeded */ if (result != IAS_SUCCESS) { IRDA_DEBUG(2, "%s(), got NULL value!\n", __func__ ); irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); return; } switch (value->type) { case IAS_INTEGER: self->dtsap_sel_ctrl = value->t.integer; if (value->t.integer != -1) { irlan_do_client_event(self, IRLAN_IAS_PROVIDER_AVAIL, NULL); return; } irias_delete_value(value); break; default: IRDA_DEBUG(2, "%s(), unknown type!\n", __func__ ); break; } irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); }
gpl-2.0
jituijiaqiezi/linux
arch/sh/boards/mach-se/7724/setup.c
754
23443
/* * linux/arch/sh/boards/se/7724/setup.c * * Copyright (C) 2009 Renesas Solutions Corp. * * Kuninori Morimoto <morimoto.kuninori@renesas.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/mmc/host.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mfd/tmio.h> #include <linux/mtd/physmap.h> #include <linux/delay.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/smc91x.h> #include <linux/gpio.h> #include <linux/input.h> #include <linux/input/sh_keysc.h> #include <linux/usb/r8a66597.h> #include <linux/sh_eth.h> #include <linux/sh_intc.h> #include <linux/videodev2.h> #include <video/sh_mobile_lcdc.h> #include <media/sh_mobile_ceu.h> #include <sound/sh_fsi.h> #include <sound/simple_card.h> #include <asm/io.h> #include <asm/heartbeat.h> #include <asm/clock.h> #include <asm/suspend.h> #include <cpu/sh7724.h> #include <mach-se/mach/se7724.h> /* * SWx 1234 5678 * ------------------------------------ * SW31 : 1001 1100 : default * SW32 : 0111 1111 : use on board flash * * SW41 : abxx xxxx -> a = 0 : Analog monitor * 1 : Digital monitor * b = 0 : VGA * 1 : 720p */ /* * about 720p * * When you use 1280 x 720 lcdc output, * you should change OSC6 lcdc clock from 25.175MHz to 74.25MHz, * and change SW41 to use 720p */ /* * about sound * * This setup.c supports FSI slave mode. * Please change J20, J21, J22 pin to 1-2 connection. 
*/ /* Heartbeat */ static struct resource heartbeat_resource = { .start = PA_LED, .end = PA_LED, .flags = IORESOURCE_MEM | IORESOURCE_MEM_16BIT, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .num_resources = 1, .resource = &heartbeat_resource, }; /* LAN91C111 */ static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, }; static struct resource smc91x_eth_resources[] = { [0] = { .name = "SMC91C111" , .start = 0x1a300300, .end = 0x1a30030f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ0_SMC, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_eth_device = { .name = "smc91x", .num_resources = ARRAY_SIZE(smc91x_eth_resources), .resource = smc91x_eth_resources, .dev = { .platform_data = &smc91x_info, }, }; /* MTD */ static struct mtd_partition nor_flash_partitions[] = { { .name = "uboot", .offset = 0, .size = (1 * 1024 * 1024), .mask_flags = MTD_WRITEABLE, /* Read-only */ }, { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = (2 * 1024 * 1024), }, { .name = "free-area", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct physmap_flash_data nor_flash_data = { .width = 2, .parts = nor_flash_partitions, .nr_parts = ARRAY_SIZE(nor_flash_partitions), }; static struct resource nor_flash_resources[] = { [0] = { .name = "NOR Flash", .start = 0x00000000, .end = 0x01ffffff, .flags = IORESOURCE_MEM, } }; static struct platform_device nor_flash_device = { .name = "physmap-flash", .resource = nor_flash_resources, .num_resources = ARRAY_SIZE(nor_flash_resources), .dev = { .platform_data = &nor_flash_data, }, }; /* LCDC */ static const struct fb_videomode lcdc_720p_modes[] = { { .name = "LB070WV1", .sync = 0, /* hsync and vsync are active low */ .xres = 1280, .yres = 720, .left_margin = 220, .right_margin = 110, .hsync_len = 40, .upper_margin = 20, .lower_margin = 5, .vsync_len = 5, }, }; static const struct fb_videomode lcdc_vga_modes[] = 
{ { .name = "LB070WV1", .sync = 0, /* hsync and vsync are active low */ .xres = 640, .yres = 480, .left_margin = 105, .right_margin = 50, .hsync_len = 96, .upper_margin = 33, .lower_margin = 10, .vsync_len = 2, }, }; static struct sh_mobile_lcdc_info lcdc_info = { .clock_source = LCDC_CLK_EXTERNAL, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, .fourcc = V4L2_PIX_FMT_RGB565, .clock_divider = 1, .panel_cfg = { /* 7.0 inch */ .width = 152, .height = 91, }, } }; static struct resource lcdc_resources[] = { [0] = { .name = "LCDC", .start = 0xfe940000, .end = 0xfe942fff, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xf40), .flags = IORESOURCE_IRQ, }, }; static struct platform_device lcdc_device = { .name = "sh_mobile_lcdc_fb", .num_resources = ARRAY_SIZE(lcdc_resources), .resource = lcdc_resources, .dev = { .platform_data = &lcdc_info, }, }; /* CEU0 */ static struct sh_mobile_ceu_info sh_mobile_ceu0_info = { .flags = SH_CEU_FLAG_USE_8BIT_BUS, }; static struct resource ceu0_resources[] = { [0] = { .name = "CEU0", .start = 0xfe910000, .end = 0xfe91009f, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x880), .flags = IORESOURCE_IRQ, }, [2] = { /* place holder for contiguous memory */ }, }; static struct platform_device ceu0_device = { .name = "sh_mobile_ceu", .id = 0, /* "ceu0" clock */ .num_resources = ARRAY_SIZE(ceu0_resources), .resource = ceu0_resources, .dev = { .platform_data = &sh_mobile_ceu0_info, }, }; /* CEU1 */ static struct sh_mobile_ceu_info sh_mobile_ceu1_info = { .flags = SH_CEU_FLAG_USE_8BIT_BUS, }; static struct resource ceu1_resources[] = { [0] = { .name = "CEU1", .start = 0xfe914000, .end = 0xfe91409f, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x9e0), .flags = IORESOURCE_IRQ, }, [2] = { /* place holder for contiguous memory */ }, }; static struct platform_device ceu1_device = { .name = "sh_mobile_ceu", .id = 1, /* "ceu1" clock */ .num_resources = ARRAY_SIZE(ceu1_resources), .resource = ceu1_resources, .dev = { .platform_data = 
&sh_mobile_ceu1_info, }, }; /* FSI */ /* change J20, J21, J22 pin to 1-2 connection to use slave mode */ static struct resource fsi_resources[] = { [0] = { .name = "FSI", .start = 0xFE3C0000, .end = 0xFE3C021d, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xf80), .flags = IORESOURCE_IRQ, }, }; static struct platform_device fsi_device = { .name = "sh_fsi", .id = 0, .num_resources = ARRAY_SIZE(fsi_resources), .resource = fsi_resources, }; static struct asoc_simple_card_info fsi_ak4642_info = { .name = "AK4642", .card = "FSIA-AK4642", .codec = "ak4642-codec.0-0012", .platform = "sh_fsi.0", .daifmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM, .cpu_dai = { .name = "fsia-dai", }, .codec_dai = { .name = "ak4642-hifi", .sysclk = 11289600, }, }; static struct platform_device fsi_ak4642_device = { .name = "asoc-simple-card", .dev = { .platform_data = &fsi_ak4642_info, }, }; /* KEYSC in SoC (Needs SW33-2 set to ON) */ static struct sh_keysc_info keysc_info = { .mode = SH_KEYSC_MODE_1, .scan_timing = 3, .delay = 50, .keycodes = { KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G, KEY_H, KEY_I, KEY_K, KEY_L, KEY_M, KEY_N, KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U, KEY_V, }, }; static struct resource keysc_resources[] = { [0] = { .name = "KEYSC", .start = 0x044b0000, .end = 0x044b000f, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xbe0), .flags = IORESOURCE_IRQ, }, }; static struct platform_device keysc_device = { .name = "sh_keysc", .id = 0, /* "keysc0" clock */ .num_resources = ARRAY_SIZE(keysc_resources), .resource = keysc_resources, .dev = { .platform_data = &keysc_info, }, }; /* SH Eth */ static struct resource sh_eth_resources[] = { [0] = { .start = SH_ETH_ADDR, .end = SH_ETH_ADDR + 0x1FC - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xd60), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct sh_eth_plat_data sh_eth_plat = { .phy = 0x1f, /* SMSC LAN8187 
*/ .edmac_endian = EDMAC_LITTLE_ENDIAN, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device sh_eth_device = { .name = "sh7724-ether", .id = 0, .dev = { .platform_data = &sh_eth_plat, }, .num_resources = ARRAY_SIZE(sh_eth_resources), .resource = sh_eth_resources, }; static struct r8a66597_platdata sh7724_usb0_host_data = { .on_chip = 1, }; static struct resource sh7724_usb0_host_resources[] = { [0] = { .start = 0xa4d80000, .end = 0xa4d80124 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xa20), .end = evt2irq(0xa20), .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; static struct platform_device sh7724_usb0_host_device = { .name = "r8a66597_hcd", .id = 0, .dev = { .dma_mask = NULL, /* not use dma */ .coherent_dma_mask = 0xffffffff, .platform_data = &sh7724_usb0_host_data, }, .num_resources = ARRAY_SIZE(sh7724_usb0_host_resources), .resource = sh7724_usb0_host_resources, }; static struct r8a66597_platdata sh7724_usb1_gadget_data = { .on_chip = 1, }; static struct resource sh7724_usb1_gadget_resources[] = { [0] = { .start = 0xa4d90000, .end = 0xa4d90123, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xa40), .end = evt2irq(0xa40), .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; static struct platform_device sh7724_usb1_gadget_device = { .name = "r8a66597_udc", .id = 1, /* USB1 */ .dev = { .dma_mask = NULL, /* not use dma */ .coherent_dma_mask = 0xffffffff, .platform_data = &sh7724_usb1_gadget_data, }, .num_resources = ARRAY_SIZE(sh7724_usb1_gadget_resources), .resource = sh7724_usb1_gadget_resources, }; /* Fixed 3.3V regulator to be used by SDHI0, SDHI1 */ static struct regulator_consumer_supply fixed3v3_power_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), }; static struct resource sdhi0_cn7_resources[] = { [0] = { .name = "SDHI0", .start = 0x04ce0000, .end = 0x04ce00ff, 
.flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xe80), .flags = IORESOURCE_IRQ, }, }; static struct tmio_mmc_data sh7724_sdhi0_data = { .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI0_TX, .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI0_RX, .capabilities = MMC_CAP_SDIO_IRQ, }; static struct platform_device sdhi0_cn7_device = { .name = "sh_mobile_sdhi", .id = 0, .num_resources = ARRAY_SIZE(sdhi0_cn7_resources), .resource = sdhi0_cn7_resources, .dev = { .platform_data = &sh7724_sdhi0_data, }, }; static struct resource sdhi1_cn8_resources[] = { [0] = { .name = "SDHI1", .start = 0x04cf0000, .end = 0x04cf00ff, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x4e0), .flags = IORESOURCE_IRQ, }, }; static struct tmio_mmc_data sh7724_sdhi1_data = { .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI1_TX, .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI1_RX, .capabilities = MMC_CAP_SDIO_IRQ, }; static struct platform_device sdhi1_cn8_device = { .name = "sh_mobile_sdhi", .id = 1, .num_resources = ARRAY_SIZE(sdhi1_cn8_resources), .resource = sdhi1_cn8_resources, .dev = { .platform_data = &sh7724_sdhi1_data, }, }; /* IrDA */ static struct resource irda_resources[] = { [0] = { .name = "IrDA", .start = 0xA45D0000, .end = 0xA45D0049, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x480), .flags = IORESOURCE_IRQ, }, }; static struct platform_device irda_device = { .name = "sh_sir", .num_resources = ARRAY_SIZE(irda_resources), .resource = irda_resources, }; #include <media/ak881x.h> #include <media/sh_vou.h> static struct ak881x_pdata ak881x_pdata = { .flags = AK881X_IF_MODE_SLAVE, }; static struct i2c_board_info ak8813 = { /* With open J18 jumper address is 0x21 */ I2C_BOARD_INFO("ak8813", 0x20), .platform_data = &ak881x_pdata, }; static struct sh_vou_pdata sh_vou_pdata = { .bus_fmt = SH_VOU_BUS_8BIT, .flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW, .board_info = &ak8813, .i2c_adap = 0, }; static struct resource sh_vou_resources[] = { [0] = { .start = 0xfe960000, .end = 0xfe962043, .flags = 
IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x8e0), .flags = IORESOURCE_IRQ, }, }; static struct platform_device vou_device = { .name = "sh-vou", .id = -1, .num_resources = ARRAY_SIZE(sh_vou_resources), .resource = sh_vou_resources, .dev = { .platform_data = &sh_vou_pdata, }, }; static struct platform_device *ms7724se_devices[] __initdata = { &heartbeat_device, &smc91x_eth_device, &lcdc_device, &nor_flash_device, &ceu0_device, &ceu1_device, &keysc_device, &sh_eth_device, &sh7724_usb0_host_device, &sh7724_usb1_gadget_device, &fsi_device, &fsi_ak4642_device, &sdhi0_cn7_device, &sdhi1_cn8_device, &irda_device, &vou_device, }; /* I2C device */ static struct i2c_board_info i2c0_devices[] = { { I2C_BOARD_INFO("ak4642", 0x12), }, }; #define EEPROM_OP 0xBA206000 #define EEPROM_ADR 0xBA206004 #define EEPROM_DATA 0xBA20600C #define EEPROM_STAT 0xBA206010 #define EEPROM_STRT 0xBA206014 static int __init sh_eth_is_eeprom_ready(void) { int t = 10000; while (t--) { if (!__raw_readw(EEPROM_STAT)) return 1; udelay(1); } printk(KERN_ERR "ms7724se can not access to eeprom\n"); return 0; } static void __init sh_eth_init(void) { int i; u16 mac; /* check EEPROM status */ if (!sh_eth_is_eeprom_ready()) return; /* read MAC addr from EEPROM */ for (i = 0 ; i < 3 ; i++) { __raw_writew(0x0, EEPROM_OP); /* read */ __raw_writew(i*2, EEPROM_ADR); __raw_writew(0x1, EEPROM_STRT); if (!sh_eth_is_eeprom_ready()) return; mac = __raw_readw(EEPROM_DATA); sh_eth_plat.mac_addr[i << 1] = mac & 0xff; sh_eth_plat.mac_addr[(i << 1) + 1] = mac >> 8; } } #define SW4140 0xBA201000 #define FPGA_OUT 0xBA200400 #define PORT_HIZA 0xA4050158 #define PORT_MSELCRB 0xA4050182 #define SW41_A 0x0100 #define SW41_B 0x0200 #define SW41_C 0x0400 #define SW41_D 0x0800 #define SW41_E 0x1000 #define SW41_F 0x2000 #define SW41_G 0x4000 #define SW41_H 0x8000 extern char ms7724se_sdram_enter_start; extern char ms7724se_sdram_enter_end; extern char ms7724se_sdram_leave_start; extern char ms7724se_sdram_leave_end; static int 
__init arch_setup(void) { /* enable I2C device */ i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); return 0; } arch_initcall(arch_setup); static int __init devices_setup(void) { u16 sw = __raw_readw(SW4140); /* select camera, monitor */ struct clk *clk; u16 fpga_out; /* register board specific self-refresh code */ sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF | SUSP_SH_RSTANDBY, &ms7724se_sdram_enter_start, &ms7724se_sdram_enter_end, &ms7724se_sdram_leave_start, &ms7724se_sdram_leave_end); regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, ARRAY_SIZE(fixed3v3_power_consumers), 3300000); /* Reset Release */ fpga_out = __raw_readw(FPGA_OUT); /* bit4: NTSC_PDN, bit5: NTSC_RESET */ fpga_out &= ~((1 << 1) | /* LAN */ (1 << 4) | /* AK8813 PDN */ (1 << 5) | /* AK8813 RESET */ (1 << 6) | /* VIDEO DAC */ (1 << 7) | /* AK4643 */ (1 << 8) | /* IrDA */ (1 << 12) | /* USB0 */ (1 << 14)); /* RMII */ __raw_writew(fpga_out | (1 << 4), FPGA_OUT); udelay(10); /* AK8813 RESET */ __raw_writew(fpga_out | (1 << 5), FPGA_OUT); udelay(10); __raw_writew(fpga_out, FPGA_OUT); /* turn on USB clocks, use external clock */ __raw_writew((__raw_readw(PORT_MSELCRB) & ~0xc000) | 0x8000, PORT_MSELCRB); /* Let LED9 show STATUS2 */ gpio_request(GPIO_FN_STATUS2, NULL); /* Lit LED10 show STATUS0 */ gpio_request(GPIO_FN_STATUS0, NULL); /* Lit LED11 show PDSTATUS */ gpio_request(GPIO_FN_PDSTATUS, NULL); /* enable USB0 port */ __raw_writew(0x0600, 0xa40501d4); /* enable USB1 port */ __raw_writew(0x0600, 0xa4050192); /* enable IRQ 0,1,2 */ gpio_request(GPIO_FN_INTC_IRQ0, NULL); gpio_request(GPIO_FN_INTC_IRQ1, NULL); gpio_request(GPIO_FN_INTC_IRQ2, NULL); /* enable SCIFA3 */ gpio_request(GPIO_FN_SCIF3_I_SCK, NULL); gpio_request(GPIO_FN_SCIF3_I_RXD, NULL); gpio_request(GPIO_FN_SCIF3_I_TXD, NULL); gpio_request(GPIO_FN_SCIF3_I_CTS, NULL); gpio_request(GPIO_FN_SCIF3_I_RTS, NULL); /* enable LCDC */ gpio_request(GPIO_FN_LCDD23, NULL); 
gpio_request(GPIO_FN_LCDD22, NULL); gpio_request(GPIO_FN_LCDD21, NULL); gpio_request(GPIO_FN_LCDD20, NULL); gpio_request(GPIO_FN_LCDD19, NULL); gpio_request(GPIO_FN_LCDD18, NULL); gpio_request(GPIO_FN_LCDD17, NULL); gpio_request(GPIO_FN_LCDD16, NULL); gpio_request(GPIO_FN_LCDD15, NULL); gpio_request(GPIO_FN_LCDD14, NULL); gpio_request(GPIO_FN_LCDD13, NULL); gpio_request(GPIO_FN_LCDD12, NULL); gpio_request(GPIO_FN_LCDD11, NULL); gpio_request(GPIO_FN_LCDD10, NULL); gpio_request(GPIO_FN_LCDD9, NULL); gpio_request(GPIO_FN_LCDD8, NULL); gpio_request(GPIO_FN_LCDD7, NULL); gpio_request(GPIO_FN_LCDD6, NULL); gpio_request(GPIO_FN_LCDD5, NULL); gpio_request(GPIO_FN_LCDD4, NULL); gpio_request(GPIO_FN_LCDD3, NULL); gpio_request(GPIO_FN_LCDD2, NULL); gpio_request(GPIO_FN_LCDD1, NULL); gpio_request(GPIO_FN_LCDD0, NULL); gpio_request(GPIO_FN_LCDDISP, NULL); gpio_request(GPIO_FN_LCDHSYN, NULL); gpio_request(GPIO_FN_LCDDCK, NULL); gpio_request(GPIO_FN_LCDVSYN, NULL); gpio_request(GPIO_FN_LCDDON, NULL); gpio_request(GPIO_FN_LCDVEPWC, NULL); gpio_request(GPIO_FN_LCDVCPWC, NULL); gpio_request(GPIO_FN_LCDRD, NULL); gpio_request(GPIO_FN_LCDLCLK, NULL); __raw_writew((__raw_readw(PORT_HIZA) & ~0x0001), PORT_HIZA); /* enable CEU0 */ gpio_request(GPIO_FN_VIO0_D15, NULL); gpio_request(GPIO_FN_VIO0_D14, NULL); gpio_request(GPIO_FN_VIO0_D13, NULL); gpio_request(GPIO_FN_VIO0_D12, NULL); gpio_request(GPIO_FN_VIO0_D11, NULL); gpio_request(GPIO_FN_VIO0_D10, NULL); gpio_request(GPIO_FN_VIO0_D9, NULL); gpio_request(GPIO_FN_VIO0_D8, NULL); gpio_request(GPIO_FN_VIO0_D7, NULL); gpio_request(GPIO_FN_VIO0_D6, NULL); gpio_request(GPIO_FN_VIO0_D5, NULL); gpio_request(GPIO_FN_VIO0_D4, NULL); gpio_request(GPIO_FN_VIO0_D3, NULL); gpio_request(GPIO_FN_VIO0_D2, NULL); gpio_request(GPIO_FN_VIO0_D1, NULL); gpio_request(GPIO_FN_VIO0_D0, NULL); gpio_request(GPIO_FN_VIO0_VD, NULL); gpio_request(GPIO_FN_VIO0_CLK, NULL); gpio_request(GPIO_FN_VIO0_FLD, NULL); gpio_request(GPIO_FN_VIO0_HD, NULL); 
platform_resource_setup_memory(&ceu0_device, "ceu0", 4 << 20); /* enable CEU1 */ gpio_request(GPIO_FN_VIO1_D7, NULL); gpio_request(GPIO_FN_VIO1_D6, NULL); gpio_request(GPIO_FN_VIO1_D5, NULL); gpio_request(GPIO_FN_VIO1_D4, NULL); gpio_request(GPIO_FN_VIO1_D3, NULL); gpio_request(GPIO_FN_VIO1_D2, NULL); gpio_request(GPIO_FN_VIO1_D1, NULL); gpio_request(GPIO_FN_VIO1_D0, NULL); gpio_request(GPIO_FN_VIO1_FLD, NULL); gpio_request(GPIO_FN_VIO1_HD, NULL); gpio_request(GPIO_FN_VIO1_VD, NULL); gpio_request(GPIO_FN_VIO1_CLK, NULL); platform_resource_setup_memory(&ceu1_device, "ceu1", 4 << 20); /* KEYSC */ gpio_request(GPIO_FN_KEYOUT5_IN5, NULL); gpio_request(GPIO_FN_KEYOUT4_IN6, NULL); gpio_request(GPIO_FN_KEYIN4, NULL); gpio_request(GPIO_FN_KEYIN3, NULL); gpio_request(GPIO_FN_KEYIN2, NULL); gpio_request(GPIO_FN_KEYIN1, NULL); gpio_request(GPIO_FN_KEYIN0, NULL); gpio_request(GPIO_FN_KEYOUT3, NULL); gpio_request(GPIO_FN_KEYOUT2, NULL); gpio_request(GPIO_FN_KEYOUT1, NULL); gpio_request(GPIO_FN_KEYOUT0, NULL); /* enable FSI */ gpio_request(GPIO_FN_FSIMCKA, NULL); gpio_request(GPIO_FN_FSIIASD, NULL); gpio_request(GPIO_FN_FSIOASD, NULL); gpio_request(GPIO_FN_FSIIABCK, NULL); gpio_request(GPIO_FN_FSIIALRCK, NULL); gpio_request(GPIO_FN_FSIOABCK, NULL); gpio_request(GPIO_FN_FSIOALRCK, NULL); gpio_request(GPIO_FN_CLKAUDIOAO, NULL); /* set SPU2 clock to 83.4 MHz */ clk = clk_get(NULL, "spu_clk"); if (!IS_ERR(clk)) { clk_set_rate(clk, clk_round_rate(clk, 83333333)); clk_put(clk); } /* change parent of FSI A */ clk = clk_get(NULL, "fsia_clk"); if (!IS_ERR(clk)) { /* 48kHz dummy clock was used to make sure 1/1 divide */ clk_set_rate(&sh7724_fsimcka_clk, 48000); clk_set_parent(clk, &sh7724_fsimcka_clk); clk_set_rate(clk, 48000); clk_put(clk); } /* SDHI0 connected to cn7 */ gpio_request(GPIO_FN_SDHI0CD, NULL); gpio_request(GPIO_FN_SDHI0WP, NULL); gpio_request(GPIO_FN_SDHI0D3, NULL); gpio_request(GPIO_FN_SDHI0D2, NULL); gpio_request(GPIO_FN_SDHI0D1, NULL); gpio_request(GPIO_FN_SDHI0D0, 
NULL); gpio_request(GPIO_FN_SDHI0CMD, NULL); gpio_request(GPIO_FN_SDHI0CLK, NULL); /* SDHI1 connected to cn8 */ gpio_request(GPIO_FN_SDHI1CD, NULL); gpio_request(GPIO_FN_SDHI1WP, NULL); gpio_request(GPIO_FN_SDHI1D3, NULL); gpio_request(GPIO_FN_SDHI1D2, NULL); gpio_request(GPIO_FN_SDHI1D1, NULL); gpio_request(GPIO_FN_SDHI1D0, NULL); gpio_request(GPIO_FN_SDHI1CMD, NULL); gpio_request(GPIO_FN_SDHI1CLK, NULL); /* enable IrDA */ gpio_request(GPIO_FN_IRDA_OUT, NULL); gpio_request(GPIO_FN_IRDA_IN, NULL); /* * enable SH-Eth * * please remove J33 pin from your board !! * * ms7724 board should not use GPIO_FN_LNKSTA pin * So, This time PTX5 is set to input pin */ gpio_request(GPIO_FN_RMII_RXD0, NULL); gpio_request(GPIO_FN_RMII_RXD1, NULL); gpio_request(GPIO_FN_RMII_TXD0, NULL); gpio_request(GPIO_FN_RMII_TXD1, NULL); gpio_request(GPIO_FN_RMII_REF_CLK, NULL); gpio_request(GPIO_FN_RMII_TX_EN, NULL); gpio_request(GPIO_FN_RMII_RX_ER, NULL); gpio_request(GPIO_FN_RMII_CRS_DV, NULL); gpio_request(GPIO_FN_MDIO, NULL); gpio_request(GPIO_FN_MDC, NULL); gpio_request(GPIO_PTX5, NULL); gpio_direction_input(GPIO_PTX5); sh_eth_init(); if (sw & SW41_B) { /* 720p */ lcdc_info.ch[0].lcd_modes = lcdc_720p_modes; lcdc_info.ch[0].num_modes = ARRAY_SIZE(lcdc_720p_modes); } else { /* VGA */ lcdc_info.ch[0].lcd_modes = lcdc_vga_modes; lcdc_info.ch[0].num_modes = ARRAY_SIZE(lcdc_vga_modes); } if (sw & SW41_A) { /* Digital monitor */ lcdc_info.ch[0].interface_type = RGB18; lcdc_info.ch[0].flags = 0; } else { /* Analog monitor */ lcdc_info.ch[0].interface_type = RGB24; lcdc_info.ch[0].flags = LCDC_FLAGS_DWPOL; } /* VOU */ gpio_request(GPIO_FN_DV_D15, NULL); gpio_request(GPIO_FN_DV_D14, NULL); gpio_request(GPIO_FN_DV_D13, NULL); gpio_request(GPIO_FN_DV_D12, NULL); gpio_request(GPIO_FN_DV_D11, NULL); gpio_request(GPIO_FN_DV_D10, NULL); gpio_request(GPIO_FN_DV_D9, NULL); gpio_request(GPIO_FN_DV_D8, NULL); gpio_request(GPIO_FN_DV_CLKI, NULL); gpio_request(GPIO_FN_DV_CLK, NULL); 
gpio_request(GPIO_FN_DV_VSYNC, NULL); gpio_request(GPIO_FN_DV_HSYNC, NULL); return platform_add_devices(ms7724se_devices, ARRAY_SIZE(ms7724se_devices)); } device_initcall(devices_setup); static struct sh_machine_vector mv_ms7724se __initmv = { .mv_name = "ms7724se", .mv_init_irq = init_se7724_IRQ, };
gpl-2.0
galaxy4public/android_kernel_samsung_n7102
arch/sh/boards/mach-kfr2r09/setup.c
754
16138
/*
 * KFR2R09 board support code
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/onenand.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
#include <linux/i2c.h>
#include <linux/platform_data/lv5207lp.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/usb/r8a66597.h>
#include <linux/videodev2.h>
#include <linux/sh_intc.h>
#include <media/rj54n1cb0c.h>
#include <media/soc_camera.h>
#include <media/sh_mobile_ceu.h>
#include <video/sh_mobile_lcdc.h>
#include <asm/suspend.h>
#include <asm/clock.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <cpu/sh7724.h>
#include <mach/kfr2r09.h>

/* NOR flash layout: a write-protected 4MiB boot area, rest is "other". */
static struct mtd_partition kfr2r09_nor_flash_partitions[] =
{
	{
		.name = "boot",
		.offset = 0,
		.size = (4 * 1024 * 1024),
		.mask_flags = MTD_WRITEABLE,	/* Read-only */
	},
	{
		.name = "other",
		.offset = MTDPART_OFS_APPEND,
		.size = MTDPART_SIZ_FULL,
	},
};

static struct physmap_flash_data kfr2r09_nor_flash_data = {
	.width		= 2,	/* 16-bit bus */
	.parts		= kfr2r09_nor_flash_partitions,
	.nr_parts	= ARRAY_SIZE(kfr2r09_nor_flash_partitions),
};

static struct resource kfr2r09_nor_flash_resources[] = {
	[0] = {
		.name	= "NOR Flash",
		.start	= 0x00000000,
		.end	= 0x03ffffff,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device kfr2r09_nor_flash_device = {
	.name		= "physmap-flash",
	.resource	= kfr2r09_nor_flash_resources,
	.num_resources	= ARRAY_SIZE(kfr2r09_nor_flash_resources),
	.dev		= {
		.platform_data = &kfr2r09_nor_flash_data,
	},
};

/* OneNAND flash mapped at CS4 (bus timing configured in devices_setup()). */
static struct resource kfr2r09_nand_flash_resources[] = {
	[0] = {
		.name	= "NAND Flash",
		.start	= 0x10000000,
		.end	= 0x1001ffff,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device kfr2r09_nand_flash_device = {
	.name		= "onenand-flash",
	.resource	= kfr2r09_nand_flash_resources,
	.num_resources	= ARRAY_SIZE(kfr2r09_nand_flash_resources),
};

/* Key matrix for the on-board keypad, scanned by the SH KEYSC block. */
static struct sh_keysc_info kfr2r09_sh_keysc_info = {
	.mode = SH_KEYSC_MODE_1, /* KEYOUT0->4, KEYIN0->4 */
	.scan_timing = 3,
	.delay = 10,
	.keycodes = {
		KEY_PHONE, KEY_CLEAR, KEY_MAIL, KEY_WWW, KEY_ENTER,
		KEY_1, KEY_2, KEY_3, 0, KEY_UP,
		KEY_4, KEY_5, KEY_6, 0, KEY_LEFT,
		KEY_7, KEY_8, KEY_9, KEY_PROG1, KEY_RIGHT,
		KEY_S, KEY_0, KEY_P, KEY_PROG2, KEY_DOWN,
		0, 0, 0, 0, 0
	},
};

static struct resource kfr2r09_sh_keysc_resources[] = {
	[0] = {
		.name	= "KEYSC",
		.start  = 0x044b0000,
		.end    = 0x044b000f,
		.flags  = IORESOURCE_MEM,
	},
	[1] = {
		.start  = evt2irq(0xbe0),
		.flags  = IORESOURCE_IRQ,
	},
};

static struct platform_device kfr2r09_sh_keysc_device = {
	.name           = "sh_keysc",
	.id             = 0, /* "keysc0" clock */
	.num_resources  = ARRAY_SIZE(kfr2r09_sh_keysc_resources),
	.resource       = kfr2r09_sh_keysc_resources,
	.dev	= {
		.platform_data	= &kfr2r09_sh_keysc_info,
	},
};

/* Single fixed mode for the 240x400 Hitachi TX07D34VM0AAA panel. */
static const struct fb_videomode kfr2r09_lcdc_modes[] = {
	{
		.name = "TX07D34VM0AAA",
		.xres = 240,
		.yres = 400,
		.left_margin = 0,
		.right_margin = 16,
		.hsync_len = 8,
		.upper_margin = 0,
		.lower_margin = 1,
		.vsync_len = 1,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
};

static struct sh_mobile_lcdc_info kfr2r09_sh_lcdc_info = {
	.clock_source = LCDC_CLK_BUS,
	.ch[0] = {
		.chan = LCDC_CHAN_MAINLCD,
		.fourcc = V4L2_PIX_FMT_RGB565,
		.interface_type = SYS18,	/* 18-bit SYS bus panel */
		.clock_divider = 6,
		.flags = LCDC_FLAGS_DWPOL,
		.lcd_modes = kfr2r09_lcdc_modes,
		.num_modes = ARRAY_SIZE(kfr2r09_lcdc_modes),
		.panel_cfg = {
			.width = 35,	/* panel size in mm */
			.height = 58,
			.setup_sys = kfr2r09_lcd_setup,
			.start_transfer = kfr2r09_lcd_start,
		},
		.sys_bus_cfg = {
			.ldmt2r = 0x07010904,
			.ldmt3r = 0x14012914,
			/* set 1s delay to encourage fsync() */
			.deferred_io_msec = 1000,
		},
	}
};

static struct resource kfr2r09_sh_lcdc_resources[] = {
	[0] = {
		.name	= "LCDC",
		.start	= 0xfe940000, /* P4-only space */
		.end	= 0xfe942fff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0xf40),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device kfr2r09_sh_lcdc_device = {
	.name		= "sh_mobile_lcdc_fb",
	.num_resources	= ARRAY_SIZE(kfr2r09_sh_lcdc_resources),
	.resource	= kfr2r09_sh_lcdc_resources,
	.dev	= {
		.platform_data	= &kfr2r09_sh_lcdc_info,
	},
};

/* LV5207LP backlight driver, fixed at maximum brightness (13). */
static struct lv5207lp_platform_data kfr2r09_backlight_data = {
	.fbdev = &kfr2r09_sh_lcdc_device.dev,
	.def_value = 13,
	.max_value = 13,
};

static struct i2c_board_info kfr2r09_backlight_board_info = {
	I2C_BOARD_INFO("lv5207lp", 0x75),
	.platform_data = &kfr2r09_backlight_data,
};

/* On-chip R8A66597 USB function controller (gadget mode). */
static struct r8a66597_platdata kfr2r09_usb0_gadget_data = {
	.on_chip = 1,
};

static struct resource kfr2r09_usb0_gadget_resources[] = {
	[0] = {
		.start	= 0x04d80000,
		.end	= 0x04d80123,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0xa20),
		.end	= evt2irq(0xa20),
		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
	},
};

static struct platform_device kfr2r09_usb0_gadget_device = {
	.name		= "r8a66597_udc",
	.id		= 0,
	.dev = {
		.dma_mask		= NULL,		/*  not use dma */
		.coherent_dma_mask	= 0xffffffff,
		.platform_data	= &kfr2r09_usb0_gadget_data,
	},
	.num_resources	= ARRAY_SIZE(kfr2r09_usb0_gadget_resources),
	.resource	= kfr2r09_usb0_gadget_resources,
};

/* CEU capture unit, fed by the RJ54N1CB0C camera over an 8-bit bus. */
static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
	.flags = SH_CEU_FLAG_USE_8BIT_BUS,
};

static struct resource kfr2r09_ceu_resources[] = {
	[0] = {
		.name	= "CEU",
		.start	= 0xfe910000,
		.end	= 0xfe91009f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0x880),
		.end	= evt2irq(0x880),
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		/* place holder for contiguous memory */
	},
};

static struct platform_device kfr2r09_ceu_device = {
	.name		= "sh_mobile_ceu",
	.id             = 0, /* "ceu0" clock */
	.num_resources	= ARRAY_SIZE(kfr2r09_ceu_resources),
	.resource	= kfr2r09_ceu_resources,
	.dev	= {
		.platform_data	= &sh_mobile_ceu_info,
	},
};

static struct i2c_board_info kfr2r09_i2c_camera = {
	I2C_BOARD_INFO("rj54n1cb0c", 0x50),
};

static struct clk *camera_clk;

/* set VIO_CKO clock to 25MHz */
#define CEU_MCLK_FREQ 25000000

#define DRVCRB 0xA405018C
/*
 * camera_power - power the camera module up (mode != 0) or down (mode == 0).
 *
 * Power-up sequence: acquire and rate-set the VIO_CKO clock, program pad
 * drive voltages in DRVCRB, release the sensor reset lines (PTB4/PTB7),
 * then start the clock.  The error labels unwind in reverse acquisition
 * order; the power-down path (mode == 0) falls through the same labels
 * starting at clk_disable().  Returns 0 on success or a negative errno.
 */
static int camera_power(struct device *dev, int mode)
{
	int ret;

	if (mode) {
		long rate;

		camera_clk = clk_get(NULL, "video_clk");
		if (IS_ERR(camera_clk))
			return PTR_ERR(camera_clk);

		rate = clk_round_rate(camera_clk, CEU_MCLK_FREQ);
		ret = clk_set_rate(camera_clk, rate);
		if (ret < 0)
			goto eclkrate;

		/* set DRVCRB
		 *
		 * use 1.8 V for VccQ_VIO
		 * use 2.85V for VccQ_SR
		 */
		__raw_writew((__raw_readw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);

		/* reset clear */
		ret = gpio_request(GPIO_PTB4, NULL);
		if (ret < 0)
			goto eptb4;
		ret = gpio_request(GPIO_PTB7, NULL);
		if (ret < 0)
			goto eptb7;

		ret = gpio_direction_output(GPIO_PTB4, 1);
		if (!ret)
			ret = gpio_direction_output(GPIO_PTB7, 1);
		if (ret < 0)
			goto egpioout;
		msleep(1);

		ret = clk_enable(camera_clk);	/* start VIO_CKO */
		if (ret < 0)
			goto eclkon;

		return 0;
	}

	ret = 0;

	clk_disable(camera_clk);
eclkon:
	gpio_set_value(GPIO_PTB7, 0);
egpioout:
	gpio_set_value(GPIO_PTB4, 0);
	gpio_free(GPIO_PTB7);
eptb7:
	gpio_free(GPIO_PTB4);
eptb4:
eclkrate:
	clk_put(camera_clk);
	return ret;
}

static struct rj54n1_pdata rj54n1_priv = {
	.mclk_freq	= CEU_MCLK_FREQ,
	.ioctl_high	= false,
};

static struct soc_camera_link rj54n1_link = {
	.power		= camera_power,
	.board_info	= &kfr2r09_i2c_camera,
	.i2c_adapter_id	= 1,
	.priv		= &rj54n1_priv,
};

static struct platform_device kfr2r09_camera = {
	.name	= "soc-camera-pdrv",
	.id	= 0,
	.dev	= {
		.platform_data = &rj54n1_link,
	},
};

/* Fixed 3.3V regulator to be used by SDHI0 */
static struct regulator_consumer_supply fixed3v3_power_consumers[] =
{
	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
	REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
};

static struct resource kfr2r09_sh_sdhi0_resources[] = {
	[0] = {
		.name	= "SDHI0",
		.start  = 0x04ce0000,
		.end    = 0x04ce00ff,
		.flags  = IORESOURCE_MEM,
	},
	[1] = {
		.start  = evt2irq(0xe80),
		.flags  = IORESOURCE_IRQ,
	},
};

static struct tmio_mmc_data sh7724_sdhi0_data = {
	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI0_TX,
	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI0_RX,
	.flags		= TMIO_MMC_WRPROTECT_DISABLE,
	.capabilities	= MMC_CAP_SDIO_IRQ,
};

static struct platform_device kfr2r09_sh_sdhi0_device = {
	.name           = "sh_mobile_sdhi",
	.num_resources  = ARRAY_SIZE(kfr2r09_sh_sdhi0_resources),
	.resource       = kfr2r09_sh_sdhi0_resources,
	.dev = {
		.platform_data	= &sh7724_sdhi0_data,
	},
};

/*
 * All unconditionally-registered board devices.  The USB gadget device is
 * NOT listed here: it is registered separately by kfr2r09_devices_setup()
 * only when a cable is detected (see kfr2r09_usb0_gadget_setup()).
 */
static struct platform_device *kfr2r09_devices[] __initdata = {
	&kfr2r09_nor_flash_device,
	&kfr2r09_nand_flash_device,
	&kfr2r09_sh_keysc_device,
	&kfr2r09_sh_lcdc_device,
	&kfr2r09_ceu_device,
	&kfr2r09_camera,
	&kfr2r09_sh_sdhi0_device,
};

/* Bus State Controller registers for CS0 (NOR) and CS4 (OneNAND) timing. */
#define BSC_CS0BCR 0xfec10004
#define BSC_CS0WCR 0xfec10024
#define BSC_CS4BCR 0xfec10010
#define BSC_CS4WCR 0xfec10030
#define PORT_MSELCRB 0xa4050182

#ifdef CONFIG_I2C
/*
 * Read-modify-write one bit of register 0x13 on the board controller chip
 * at i2c address 0x09: write the register index, read the current value,
 * then write back with bit 1 set.  Returns 0 on success, -ENODEV on any
 * i2c failure.
 */
static int kfr2r09_usb0_gadget_i2c_setup(void)
{
	struct i2c_adapter *a;
	struct i2c_msg msg;
	unsigned char buf[2];
	int ret;

	a = i2c_get_adapter(0);
	if (!a)
		return -ENODEV;

	/* set bit 1 (the second bit) of chip at 0x09, register 0x13 */
	buf[0] = 0x13;
	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 1;
	msg.flags = 0;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	buf[0] = 0;
	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 1;
	msg.flags = I2C_M_RD;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	buf[1] = buf[0] | (1 << 1);
	buf[0] = 0x13;
	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 2;
	msg.flags = 0;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	return 0;
}

/* Same read-modify-write as above, but sets bit 6 (serial enable). */
static int kfr2r09_serial_i2c_setup(void)
{
	struct i2c_adapter *a;
	struct i2c_msg msg;
	unsigned char buf[2];
	int ret;

	a = i2c_get_adapter(0);
	if (!a)
		return -ENODEV;

	/* set bit 6 (the 7th bit) of chip at 0x09, register 0x13 */
	buf[0] = 0x13;
	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 1;
	msg.flags = 0;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	buf[0] = 0;
	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 1;
	msg.flags = I2C_M_RD;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	buf[1] = buf[0] | (1 << 6);
	buf[0] = 0x13;
	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 2;
	msg.flags = 0;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	return 0;
}
#else
/* Without CONFIG_I2C the board controller cannot be reached. */
static int kfr2r09_usb0_gadget_i2c_setup(void)
{
	return -ENODEV;
}

static int kfr2r09_serial_i2c_setup(void)
{
	return -ENODEV;
}
#endif

/*
 * Probe for a USB cable and, if present, route clocks/pins to the USB0
 * function controller.  Returns 0 when the gadget device should be
 * registered, -ENODEV when no cable is attached or i2c setup failed.
 */
static int kfr2r09_usb0_gadget_setup(void)
{
	int plugged_in;

	gpio_request(GPIO_PTN4, NULL); /* USB_DET */
	gpio_direction_input(GPIO_PTN4);
	plugged_in = gpio_get_value(GPIO_PTN4);
	if (!plugged_in)
		return -ENODEV; /* no cable plugged in */

	if (kfr2r09_usb0_gadget_i2c_setup() != 0)
		return -ENODEV; /* unable to configure using i2c */

	/* switch MSELCRB to route the USB clock to USB0 */
	__raw_writew((__raw_readw(PORT_MSELCRB) & ~0xc000) | 0x8000,
		     PORT_MSELCRB);
	gpio_request(GPIO_FN_PDSTATUS, NULL); /* R-standby disables USB clock */
	gpio_request(GPIO_PTV6, NULL); /* USBCLK_ON */
	gpio_direction_output(GPIO_PTV6, 1); /* USBCLK_ON = H */
	msleep(20); /* wait 20ms to let the clock settle */
	clk_enable(clk_get(NULL, "usb0"));
	__raw_writew(0x0600, 0xa40501d4);

	return 0;
}

/* Self-refresh code blobs, linked in from the board's sleep assembly. */
extern char kfr2r09_sdram_enter_start;
extern char kfr2r09_sdram_enter_end;
extern char kfr2r09_sdram_leave_start;
extern char kfr2r09_sdram_leave_end;

/*
 * kfr2r09_devices_setup - one-shot board init (device_initcall).
 *
 * Registers self-refresh code, the always-on 3.3V regulator, programs bus
 * timings for the flash chips, claims all pinmux functions, conditionally
 * registers the USB gadget, and finally registers the platform devices.
 */
static int __init kfr2r09_devices_setup(void)
{
	/* register board specific self-refresh code */
	sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
					SUSP_SH_RSTANDBY,
					&kfr2r09_sdram_enter_start,
					&kfr2r09_sdram_enter_end,
					&kfr2r09_sdram_leave_start,
					&kfr2r09_sdram_leave_end);

	regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
				     ARRAY_SIZE(fixed3v3_power_consumers),
				     3300000);

	/* enable SCIF1 serial port for YC401 console support */
	gpio_request(GPIO_FN_SCIF1_RXD, NULL);
	gpio_request(GPIO_FN_SCIF1_TXD, NULL);
	kfr2r09_serial_i2c_setup(); /* ECONTMSK(bit6=L10ONEN) set 1 */
	gpio_request(GPIO_PTG3, NULL); /* HPON_ON */
	gpio_direction_output(GPIO_PTG3, 1); /* HPON_ON = H */

	/* setup NOR flash at CS0 */
	__raw_writel(0x36db0400, BSC_CS0BCR);
	__raw_writel(0x00000500, BSC_CS0WCR);

	/* setup NAND flash at CS4 */
	__raw_writel(0x36db0400, BSC_CS4BCR);
	__raw_writel(0x00000500, BSC_CS4WCR);

	/* setup KEYSC pins */
	gpio_request(GPIO_FN_KEYOUT0, NULL);
	gpio_request(GPIO_FN_KEYOUT1, NULL);
	gpio_request(GPIO_FN_KEYOUT2, NULL);
	gpio_request(GPIO_FN_KEYOUT3, NULL);
	gpio_request(GPIO_FN_KEYOUT4_IN6, NULL);
	gpio_request(GPIO_FN_KEYIN0, NULL);
	gpio_request(GPIO_FN_KEYIN1, NULL);
	gpio_request(GPIO_FN_KEYIN2, NULL);
	gpio_request(GPIO_FN_KEYIN3, NULL);
	gpio_request(GPIO_FN_KEYIN4, NULL);
	gpio_request(GPIO_FN_KEYOUT5_IN5, NULL);

	/* setup LCDC pins for SYS panel */
	gpio_request(GPIO_FN_LCDD17, NULL);
	gpio_request(GPIO_FN_LCDD16, NULL);
	gpio_request(GPIO_FN_LCDD15, NULL);
	gpio_request(GPIO_FN_LCDD14, NULL);
	gpio_request(GPIO_FN_LCDD13, NULL);
	gpio_request(GPIO_FN_LCDD12, NULL);
	gpio_request(GPIO_FN_LCDD11, NULL);
	gpio_request(GPIO_FN_LCDD10, NULL);
	gpio_request(GPIO_FN_LCDD9, NULL);
	gpio_request(GPIO_FN_LCDD8, NULL);
	gpio_request(GPIO_FN_LCDD7, NULL);
	gpio_request(GPIO_FN_LCDD6, NULL);
	gpio_request(GPIO_FN_LCDD5, NULL);
	gpio_request(GPIO_FN_LCDD4, NULL);
	gpio_request(GPIO_FN_LCDD3, NULL);
	gpio_request(GPIO_FN_LCDD2, NULL);
	gpio_request(GPIO_FN_LCDD1, NULL);
	gpio_request(GPIO_FN_LCDD0, NULL);
	gpio_request(GPIO_FN_LCDRS, NULL); /* LCD_RS */
	gpio_request(GPIO_FN_LCDCS, NULL); /* LCD_CS/ */
	gpio_request(GPIO_FN_LCDRD, NULL); /* LCD_RD/ */
	gpio_request(GPIO_FN_LCDWR, NULL); /* LCD_WR/ */
	gpio_request(GPIO_FN_LCDVSYN, NULL); /* LCD_VSYNC */
	gpio_request(GPIO_PTE4, NULL); /* LCD_RST/ */
	gpio_direction_output(GPIO_PTE4, 1);
	gpio_request(GPIO_PTF4, NULL); /* PROTECT/ */
	gpio_direction_output(GPIO_PTF4, 1);
	gpio_request(GPIO_PTU0, NULL); /* LEDSTDBY/ */
	gpio_direction_output(GPIO_PTU0, 1);

	/* setup USB function */
	if (kfr2r09_usb0_gadget_setup() == 0)
		platform_device_register(&kfr2r09_usb0_gadget_device);

	/* CEU */
	gpio_request(GPIO_FN_VIO_CKO, NULL);
	gpio_request(GPIO_FN_VIO0_CLK, NULL);
	gpio_request(GPIO_FN_VIO0_VD, NULL);
	gpio_request(GPIO_FN_VIO0_HD, NULL);
	gpio_request(GPIO_FN_VIO0_FLD, NULL);
	gpio_request(GPIO_FN_VIO0_D7, NULL);
	gpio_request(GPIO_FN_VIO0_D6, NULL);
	gpio_request(GPIO_FN_VIO0_D5, NULL);
	gpio_request(GPIO_FN_VIO0_D4, NULL);
	gpio_request(GPIO_FN_VIO0_D3, NULL);
	gpio_request(GPIO_FN_VIO0_D2, NULL);
	gpio_request(GPIO_FN_VIO0_D1, NULL);
	gpio_request(GPIO_FN_VIO0_D0, NULL);

	/* reserve 4MiB of contiguous memory for CEU capture buffers */
	platform_resource_setup_memory(&kfr2r09_ceu_device, "ceu", 4 << 20);

	/* SDHI0 connected to yc304 */
	gpio_request(GPIO_FN_SDHI0CD, NULL);
	gpio_request(GPIO_FN_SDHI0D3, NULL);
	gpio_request(GPIO_FN_SDHI0D2, NULL);
	gpio_request(GPIO_FN_SDHI0D1, NULL);
	gpio_request(GPIO_FN_SDHI0D0, NULL);
	gpio_request(GPIO_FN_SDHI0CMD, NULL);
	gpio_request(GPIO_FN_SDHI0CLK, NULL);

	i2c_register_board_info(0, &kfr2r09_backlight_board_info, 1);

	return platform_add_devices(kfr2r09_devices,
				    ARRAY_SIZE(kfr2r09_devices));
}
device_initcall(kfr2r09_devices_setup);

/* Return the board specific boot mode pin configuration */
static int kfr2r09_mode_pins(void)
{
	/* MD0=1, MD1=1, MD2=0: Clock Mode 3
	 * MD3=0: 16-bit Area0 Bus Width
	 * MD5=1: Little Endian
	 * MD8=1: Test Mode Disabled
	 */
	return MODE_PIN0 | MODE_PIN1 | MODE_PIN5 | MODE_PIN8;
}

/*
 * The Machine Vector
 */
static struct sh_machine_vector mv_kfr2r09 __initmv = {
	.mv_name		= "kfr2r09",
	.mv_mode_pins		= kfr2r09_mode_pins,
};
gpl-2.0
sudosurootdev/linux
kernel/cgroup_freezer.c
1010
12772
/*
 * cgroup_freezer.c -  control group freezer subsystem
 *
 * Copyright IBM Corporation, 2007
 *
 * Author : Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

/*
 * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
 * set if "FROZEN" is written to freezer.state cgroupfs file, and cleared
 * for "THAWED".  FREEZING_PARENT is set if the parent freezer is FREEZING
 * for whatever reason.  IOW, a cgroup has FREEZING_PARENT set if one of
 * its ancestors has FREEZING_SELF set.
 */
enum freezer_state_flags {
	CGROUP_FREEZER_ONLINE	= (1 << 0), /* freezer is fully online */
	CGROUP_FREEZING_SELF	= (1 << 1), /* this freezer is freezing */
	CGROUP_FREEZING_PARENT	= (1 << 2), /* the parent freezer is freezing */
	CGROUP_FROZEN		= (1 << 3), /* this and its descendants frozen */

	/* mask for all FREEZING flags */
	CGROUP_FREEZING	= CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT,
};

struct freezer {
	struct cgroup_subsys_state	css;
	unsigned int			state;	/* freezer_state_flags bits */
};

/* serializes all state changes and task migrations across all freezers */
static DEFINE_MUTEX(freezer_mutex);

static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct freezer, css) : NULL;
}

static inline struct freezer *task_freezer(struct task_struct *task)
{
	return css_freezer(task_css(task, freezer_cgrp_id));
}

static struct freezer *parent_freezer(struct freezer *freezer)
{
	return css_freezer(freezer->css.parent);
}

/* returns true iff @task's freezer has any FREEZING flag set */
bool cgroup_freezing(struct task_struct *task)
{
	bool ret;

	rcu_read_lock();
	ret = task_freezer(task)->state & CGROUP_FREEZING;
	rcu_read_unlock();

	return ret;
}

/* map a state bitmask to the string shown in freezer.state */
static const char *freezer_state_strs(unsigned int state)
{
	if (state & CGROUP_FROZEN)
		return "FROZEN";
	if (state & CGROUP_FREEZING)
		return "FREEZING";
	return "THAWED";
};

/* allocate a new, all-zero (THAWED, offline) freezer css */
static struct cgroup_subsys_state *
freezer_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct freezer *freezer;

	freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
	if (!freezer)
		return ERR_PTR(-ENOMEM);

	return &freezer->css;
}

/**
 * freezer_css_online - commit creation of a freezer css
 * @css: css being created
 *
 * We're committing to creation of @css.  Mark it online and inherit
 * parent's freezing state while holding freezer_mutex, which also keeps
 * the parent's state stable for the duration.
 */
static int freezer_css_online(struct cgroup_subsys_state *css)
{
	struct freezer *freezer = css_freezer(css);
	struct freezer *parent = parent_freezer(freezer);

	mutex_lock(&freezer_mutex);

	freezer->state |= CGROUP_FREEZER_ONLINE;

	/* a child born into a freezing hierarchy starts out frozen */
	if (parent && (parent->state & CGROUP_FREEZING)) {
		freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN;
		atomic_inc(&system_freezing_cnt);
	}

	mutex_unlock(&freezer_mutex);
	return 0;
}

/**
 * freezer_css_offline - initiate destruction of a freezer css
 * @css: css being destroyed
 *
 * @css is going away.  Mark it dead and decrement system_freezing_count if
 * it was holding one.
 */
static void freezer_css_offline(struct cgroup_subsys_state *css)
{
	struct freezer *freezer = css_freezer(css);

	mutex_lock(&freezer_mutex);

	if (freezer->state & CGROUP_FREEZING)
		atomic_dec(&system_freezing_cnt);

	freezer->state = 0;

	mutex_unlock(&freezer_mutex);
}

static void freezer_css_free(struct cgroup_subsys_state *css)
{
	kfree(css_freezer(css));
}

/*
 * Tasks can be migrated into a different freezer anytime regardless of its
 * current state.  freezer_attach() is responsible for making new tasks
 * conform to the current state.
 *
 * Freezer state changes and task migration are synchronized via
 * freezer_mutex.  freezer_attach() makes the new tasks conform to the
 * current state and all following state changes can see the new tasks.
 */
static void freezer_attach(struct cgroup_subsys_state *new_css,
			   struct cgroup_taskset *tset)
{
	struct freezer *freezer = css_freezer(new_css);
	struct task_struct *task;
	bool clear_frozen = false;

	mutex_lock(&freezer_mutex);

	/*
	 * Make the new tasks conform to the current state of @new_css.
	 * For simplicity, when migrating any task to a FROZEN cgroup, we
	 * revert it to FREEZING and let update_if_frozen() determine the
	 * correct state later.
	 *
	 * Tasks in @tset are on @new_css but may not conform to its
	 * current state before executing the following - !frozen tasks may
	 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
	 */
	cgroup_taskset_for_each(task, tset) {
		if (!(freezer->state & CGROUP_FREEZING)) {
			__thaw_task(task);
		} else {
			freeze_task(task);
			freezer->state &= ~CGROUP_FROZEN;
			clear_frozen = true;
		}
	}

	/* propagate FROZEN clearing upwards */
	while (clear_frozen && (freezer = parent_freezer(freezer))) {
		freezer->state &= ~CGROUP_FROZEN;
		/* stop once an ancestor is no longer FREEZING */
		clear_frozen = freezer->state & CGROUP_FREEZING;
	}

	mutex_unlock(&freezer_mutex);
}

/**
 * freezer_fork - cgroup post fork callback
 * @task: a task which has just been forked
 *
 * @task has just been created and should conform to the current state of
 * the cgroup_freezer it belongs to.  This function may race against
 * freezer_attach().  Losing to freezer_attach() means that we don't have
 * to do anything as freezer_attach() will put @task into the appropriate
 * state.
 */
static void freezer_fork(struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * The root cgroup is non-freezable, so we can skip locking the
	 * freezer.  This is safe regardless of race with task migration.
	 * If we didn't race or won, skipping is obviously the right thing
	 * to do.  If we lost and root is the new cgroup, noop is still the
	 * right thing to do.
	 */
	if (task_css_is_root(task, freezer_cgrp_id))
		return;

	mutex_lock(&freezer_mutex);
	rcu_read_lock();

	freezer = task_freezer(task);
	if (freezer->state & CGROUP_FREEZING)
		freeze_task(task);

	rcu_read_unlock();
	mutex_unlock(&freezer_mutex);
}

/**
 * update_if_frozen - update whether a cgroup finished freezing
 * @css: css of interest
 *
 * Once FREEZING is initiated, transition to FROZEN is lazily updated by
 * calling this function.  If the current state is FREEZING but not FROZEN,
 * this function checks whether all tasks of this cgroup and the descendant
 * cgroups finished freezing and, if so, sets FROZEN.
 *
 * The caller is responsible for grabbing RCU read lock and calling
 * update_if_frozen() on all descendants prior to invoking this function.
 *
 * Task states and freezer state might disagree while tasks are being
 * migrated into or out of @css, so we can't verify task states against
 * @freezer state here.  See freezer_attach() for details.
 */
static void update_if_frozen(struct cgroup_subsys_state *css)
{
	struct freezer *freezer = css_freezer(css);
	struct cgroup_subsys_state *pos;
	struct css_task_iter it;
	struct task_struct *task;

	lockdep_assert_held(&freezer_mutex);

	if (!(freezer->state & CGROUP_FREEZING) ||
	    (freezer->state & CGROUP_FROZEN))
		return;

	/* are all (live) children frozen? */
	rcu_read_lock();
	css_for_each_child(pos, css) {
		struct freezer *child = css_freezer(pos);

		if ((child->state & CGROUP_FREEZER_ONLINE) &&
		    !(child->state & CGROUP_FROZEN)) {
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();

	/* are all tasks frozen? */
	css_task_iter_start(css, &it);

	while ((task = css_task_iter_next(&it))) {
		if (freezing(task)) {
			/*
			 * freezer_should_skip() indicates that the task
			 * should be skipped when determining freezing
			 * completion.  Consider it frozen in addition to
			 * the usual frozen condition.
			 */
			if (!frozen(task) && !freezer_should_skip(task))
				goto out_iter_end;
		}
	}

	freezer->state |= CGROUP_FROZEN;
out_iter_end:
	css_task_iter_end(&it);
}

/* seq_file show handler for freezer.state: refresh states, print our own */
static int freezer_read(struct seq_file *m, void *v)
{
	struct cgroup_subsys_state *css = seq_css(m), *pos;

	mutex_lock(&freezer_mutex);
	rcu_read_lock();

	/* update states bottom-up */
	css_for_each_descendant_post(pos, css) {
		if (!css_tryget_online(pos))
			continue;
		rcu_read_unlock();

		update_if_frozen(pos);

		rcu_read_lock();
		css_put(pos);
	}

	rcu_read_unlock();
	mutex_unlock(&freezer_mutex);

	seq_puts(m, freezer_state_strs(css_freezer(css)->state));
	seq_putc(m, '\n');
	return 0;
}

/* request freezing of every task directly in @freezer's cgroup */
static void freeze_cgroup(struct freezer *freezer)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&freezer->css, &it);
	while ((task = css_task_iter_next(&it)))
		freeze_task(task);
	css_task_iter_end(&it);
}

/* thaw every task directly in @freezer's cgroup */
static void unfreeze_cgroup(struct freezer *freezer)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&freezer->css, &it);
	while ((task = css_task_iter_next(&it)))
		__thaw_task(task);
	css_task_iter_end(&it);
}

/**
 * freezer_apply_state - apply state change to a single cgroup_freezer
 * @freezer: freezer to apply state change to
 * @freeze: whether to freeze or unfreeze
 * @state: CGROUP_FREEZING_* flag to set or clear
 *
 * Set or clear @state on @cgroup according to @freeze, and perform
 * freezing or thawing as necessary.
 */
static void freezer_apply_state(struct freezer *freezer, bool freeze,
				unsigned int state)
{
	/* also synchronizes against task migration, see freezer_attach() */
	lockdep_assert_held(&freezer_mutex);

	if (!(freezer->state & CGROUP_FREEZER_ONLINE))
		return;

	if (freeze) {
		if (!(freezer->state & CGROUP_FREEZING))
			atomic_inc(&system_freezing_cnt);
		freezer->state |= state;
		freeze_cgroup(freezer);
	} else {
		bool was_freezing = freezer->state & CGROUP_FREEZING;

		freezer->state &= ~state;

		/* only thaw once ALL freezing reasons are gone */
		if (!(freezer->state & CGROUP_FREEZING)) {
			if (was_freezing)
				atomic_dec(&system_freezing_cnt);
			freezer->state &= ~CGROUP_FROZEN;
			unfreeze_cgroup(freezer);
		}
	}
}

/**
 * freezer_change_state - change the freezing state of a cgroup_freezer
 * @freezer: freezer of interest
 * @freeze: whether to freeze or thaw
 *
 * Freeze or thaw @freezer according to @freeze.  The operations are
 * recursive - all descendants of @freezer will be affected.
 */
static void freezer_change_state(struct freezer *freezer, bool freeze)
{
	struct cgroup_subsys_state *pos;

	/*
	 * Update all its descendants in pre-order traversal.  Each
	 * descendant will try to inherit its parent's FREEZING state as
	 * CGROUP_FREEZING_PARENT.
	 */
	mutex_lock(&freezer_mutex);
	rcu_read_lock();
	css_for_each_descendant_pre(pos, &freezer->css) {
		struct freezer *pos_f = css_freezer(pos);
		struct freezer *parent = parent_freezer(pos_f);

		if (!css_tryget_online(pos))
			continue;
		rcu_read_unlock();

		if (pos_f == freezer)
			freezer_apply_state(pos_f, freeze,
					    CGROUP_FREEZING_SELF);
		else
			freezer_apply_state(pos_f,
					    parent->state & CGROUP_FREEZING,
					    CGROUP_FREEZING_PARENT);

		rcu_read_lock();
		css_put(pos);
	}
	rcu_read_unlock();
	mutex_unlock(&freezer_mutex);
}

/* write handler for freezer.state: accepts "THAWED" or "FROZEN" only */
static ssize_t freezer_write(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off)
{
	bool freeze;

	buf = strstrip(buf);

	if (strcmp(buf, freezer_state_strs(0)) == 0)
		freeze = false;
	else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0)
		freeze = true;
	else
		return -EINVAL;

	freezer_change_state(css_freezer(of_css(of)), freeze);
	return nbytes;
}

/* freezer.self_freezing: 1 iff FROZEN was written to this cgroup itself */
static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct freezer *freezer = css_freezer(css);

	return (bool)(freezer->state & CGROUP_FREEZING_SELF);
}

/* freezer.parent_freezing: 1 iff an ancestor is freezing this cgroup */
static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	struct freezer *freezer = css_freezer(css);

	return (bool)(freezer->state & CGROUP_FREEZING_PARENT);
}

static struct cftype files[] = {
	{
		.name = "state",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = freezer_read,
		.write = freezer_write,
	},
	{
		.name = "self_freezing",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = freezer_self_freezing_read,
	},
	{
		.name = "parent_freezing",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = freezer_parent_freezing_read,
	},
	{ }	/* terminate */
};

struct cgroup_subsys freezer_cgrp_subsys = {
	.css_alloc	= freezer_css_alloc,
	.css_online	= freezer_css_online,
	.css_offline	= freezer_css_offline,
	.css_free	= freezer_css_free,
	.attach		= freezer_attach,
	.fork		= freezer_fork,
	.legacy_cftypes	= files,
};
gpl-2.0
getitnowmarketing/Incredible-2.6.35-gb-mr
arch/arm/plat-spear/clock.c
1010
10629
/* * arch/arm/plat-spear/clock.c * * Clock framework for SPEAr platform * * Copyright (C) 2009 ST Microelectronics * Viresh Kumar<viresh.kumar@st.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/bug.h> #include <linux/err.h> #include <linux/io.h> #include <linux/list.h> #include <linux/module.h> #include <linux/spinlock.h> #include <mach/misc_regs.h> #include <plat/clock.h> static DEFINE_SPINLOCK(clocks_lock); static LIST_HEAD(root_clks); static void propagate_rate(struct list_head *); static int generic_clk_enable(struct clk *clk) { unsigned int val; if (!clk->en_reg) return -EFAULT; val = readl(clk->en_reg); if (unlikely(clk->flags & RESET_TO_ENABLE)) val &= ~(1 << clk->en_reg_bit); else val |= 1 << clk->en_reg_bit; writel(val, clk->en_reg); return 0; } static void generic_clk_disable(struct clk *clk) { unsigned int val; if (!clk->en_reg) return; val = readl(clk->en_reg); if (unlikely(clk->flags & RESET_TO_ENABLE)) val |= 1 << clk->en_reg_bit; else val &= ~(1 << clk->en_reg_bit); writel(val, clk->en_reg); } /* generic clk ops */ static struct clkops generic_clkops = { .enable = generic_clk_enable, .disable = generic_clk_disable, }; /* * clk_enable - inform the system when the clock source should be running. * @clk: clock source * * If the clock can not be enabled/disabled, this should return success. * * Returns success (0) or negative errno. */ int clk_enable(struct clk *clk) { unsigned long flags; int ret = 0; if (!clk || IS_ERR(clk)) return -EFAULT; spin_lock_irqsave(&clocks_lock, flags); if (clk->usage_count == 0) { if (clk->ops && clk->ops->enable) ret = clk->ops->enable(clk); } clk->usage_count++; spin_unlock_irqrestore(&clocks_lock, flags); return ret; } EXPORT_SYMBOL(clk_enable); /* * clk_disable - inform the system when the clock source is no longer required. 
* @clk: clock source * * Inform the system that a clock source is no longer required by * a driver and may be shut down. * * Implementation detail: if the clock source is shared between * multiple drivers, clk_enable() calls must be balanced by the * same number of clk_disable() calls for the clock source to be * disabled. */ void clk_disable(struct clk *clk) { unsigned long flags; if (!clk || IS_ERR(clk)) return; WARN_ON(clk->usage_count == 0); spin_lock_irqsave(&clocks_lock, flags); clk->usage_count--; if (clk->usage_count == 0) { if (clk->ops && clk->ops->disable) clk->ops->disable(clk); } spin_unlock_irqrestore(&clocks_lock, flags); } EXPORT_SYMBOL(clk_disable); /** * clk_get_rate - obtain the current clock rate (in Hz) for a clock source. * This is only valid once the clock source has been enabled. * @clk: clock source */ unsigned long clk_get_rate(struct clk *clk) { unsigned long flags, rate; spin_lock_irqsave(&clocks_lock, flags); rate = clk->rate; spin_unlock_irqrestore(&clocks_lock, flags); return rate; } EXPORT_SYMBOL(clk_get_rate); /** * clk_set_parent - set the parent clock source for this clock * @clk: clock source * @parent: parent clock source * * Returns success (0) or negative errno. 
*/ int clk_set_parent(struct clk *clk, struct clk *parent) { int i, found = 0, val = 0; unsigned long flags; if (!clk || IS_ERR(clk) || !parent || IS_ERR(parent)) return -EFAULT; if (clk->usage_count) return -EBUSY; if (!clk->pclk_sel) return -EPERM; if (clk->pclk == parent) return 0; for (i = 0; i < clk->pclk_sel->pclk_count; i++) { if (clk->pclk_sel->pclk_info[i].pclk == parent) { found = 1; break; } } if (!found) return -EINVAL; spin_lock_irqsave(&clocks_lock, flags); /* reflect parent change in hardware */ val = readl(clk->pclk_sel->pclk_sel_reg); val &= ~(clk->pclk_sel->pclk_sel_mask << clk->pclk_sel_shift); val |= clk->pclk_sel->pclk_info[i].pclk_mask << clk->pclk_sel_shift; writel(val, clk->pclk_sel->pclk_sel_reg); spin_unlock_irqrestore(&clocks_lock, flags); /* reflect parent change in software */ clk->recalc(clk); propagate_rate(&clk->children); return 0; } EXPORT_SYMBOL(clk_set_parent); /* registers clock in platform clock framework */ void clk_register(struct clk_lookup *cl) { struct clk *clk = cl->clk; unsigned long flags; if (!clk || IS_ERR(clk)) return; spin_lock_irqsave(&clocks_lock, flags); INIT_LIST_HEAD(&clk->children); if (clk->flags & ALWAYS_ENABLED) clk->ops = NULL; else if (!clk->ops) clk->ops = &generic_clkops; /* root clock don't have any parents */ if (!clk->pclk && !clk->pclk_sel) { list_add(&clk->sibling, &root_clks); /* add clocks with only one parent to parent's children list */ } else if (clk->pclk && !clk->pclk_sel) { list_add(&clk->sibling, &clk->pclk->children); } else { /* add clocks with > 1 parent to 1st parent's children list */ list_add(&clk->sibling, &clk->pclk_sel->pclk_info[0].pclk->children); } spin_unlock_irqrestore(&clocks_lock, flags); /* add clock to arm clockdev framework */ clkdev_add(cl); } /** * propagate_rate - recalculate and propagate all clocks in list head * * Recalculates all root clocks in list head, which if the clock's .recalc is * set correctly, should also propagate their rates. 
*/ static void propagate_rate(struct list_head *lhead) { struct clk *clkp, *_temp; list_for_each_entry_safe(clkp, _temp, lhead, sibling) { if (clkp->recalc) clkp->recalc(clkp); propagate_rate(&clkp->children); } } /* returns current programmed clocks clock info structure */ static struct pclk_info *pclk_info_get(struct clk *clk) { unsigned int mask, i; unsigned long flags; struct pclk_info *info = NULL; spin_lock_irqsave(&clocks_lock, flags); mask = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift) & clk->pclk_sel->pclk_sel_mask; for (i = 0; i < clk->pclk_sel->pclk_count; i++) { if (clk->pclk_sel->pclk_info[i].pclk_mask == mask) info = &clk->pclk_sel->pclk_info[i]; } spin_unlock_irqrestore(&clocks_lock, flags); return info; } /* * Set pclk as cclk's parent and add clock sibling node to current parents * children list */ static void change_parent(struct clk *cclk, struct clk *pclk) { unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); list_del(&cclk->sibling); list_add(&cclk->sibling, &pclk->children); cclk->pclk = pclk; spin_unlock_irqrestore(&clocks_lock, flags); } /* * calculates current programmed rate of pll1 * * In normal mode * rate = (2 * M[15:8] * Fin)/(N * 2^P) * * In Dithered mode * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P) */ void pll1_clk_recalc(struct clk *clk) { struct pll_clk_config *config = clk->private_data; unsigned int num = 2, den = 0, val, mode = 0; unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); mode = (readl(config->mode_reg) >> PLL_MODE_SHIFT) & PLL_MODE_MASK; val = readl(config->cfg_reg); /* calculate denominator */ den = (val >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK; den = 1 << den; den *= (val >> PLL_DIV_N_SHIFT) & PLL_DIV_N_MASK; /* calculate numerator & denominator */ if (!mode) { /* Normal mode */ num *= (val >> PLL_NORM_FDBK_M_SHIFT) & PLL_NORM_FDBK_M_MASK; } else { /* Dithered mode */ num *= (val >> PLL_DITH_FDBK_M_SHIFT) & PLL_DITH_FDBK_M_MASK; den *= 256; } clk->rate = (((clk->pclk->rate/10000) * num) 
/ den) * 10000; spin_unlock_irqrestore(&clocks_lock, flags); } /* calculates current programmed rate of ahb or apb bus */ void bus_clk_recalc(struct clk *clk) { struct bus_clk_config *config = clk->private_data; unsigned int div; unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); div = ((readl(config->reg) >> config->shift) & config->mask) + 1; clk->rate = (unsigned long)clk->pclk->rate / div; spin_unlock_irqrestore(&clocks_lock, flags); } /* * calculates current programmed rate of auxiliary synthesizers * used by: UART, FIRDA * * Fout from synthesizer can be given from two equations: * Fout1 = (Fin * X/Y)/2 * Fout2 = Fin * X/Y * * Selection of eqn 1 or 2 is programmed in register */ void aux_clk_recalc(struct clk *clk) { struct aux_clk_config *config = clk->private_data; struct pclk_info *pclk_info = NULL; unsigned int num = 1, den = 1, val, eqn; unsigned long flags; /* get current programmed parent */ pclk_info = pclk_info_get(clk); if (!pclk_info) { spin_lock_irqsave(&clocks_lock, flags); clk->pclk = NULL; clk->rate = 0; spin_unlock_irqrestore(&clocks_lock, flags); return; } change_parent(clk, pclk_info->pclk); spin_lock_irqsave(&clocks_lock, flags); if (pclk_info->scalable) { val = readl(config->synth_reg); eqn = (val >> AUX_EQ_SEL_SHIFT) & AUX_EQ_SEL_MASK; if (eqn == AUX_EQ1_SEL) den *= 2; /* calculate numerator */ num = (val >> AUX_XSCALE_SHIFT) & AUX_XSCALE_MASK; /* calculate denominator */ den *= (val >> AUX_YSCALE_SHIFT) & AUX_YSCALE_MASK; val = (((clk->pclk->rate/10000) * num) / den) * 10000; } else val = clk->pclk->rate; clk->rate = val; spin_unlock_irqrestore(&clocks_lock, flags); } /* * calculates current programmed rate of gpt synthesizers * Fout from synthesizer can be given from below equations: * Fout= Fin/((2 ^ (N+1)) * (M+1)) */ void gpt_clk_recalc(struct clk *clk) { struct aux_clk_config *config = clk->private_data; struct pclk_info *pclk_info = NULL; unsigned int div = 1, val; unsigned long flags; pclk_info = pclk_info_get(clk); if 
(!pclk_info) { spin_lock_irqsave(&clocks_lock, flags); clk->pclk = NULL; clk->rate = 0; spin_unlock_irqrestore(&clocks_lock, flags); return; } change_parent(clk, pclk_info->pclk); spin_lock_irqsave(&clocks_lock, flags); if (pclk_info->scalable) { val = readl(config->synth_reg); div += (val >> GPT_MSCALE_SHIFT) & GPT_MSCALE_MASK; div *= 1 << (((val >> GPT_NSCALE_SHIFT) & GPT_NSCALE_MASK) + 1); } clk->rate = (unsigned long)clk->pclk->rate / div; spin_unlock_irqrestore(&clocks_lock, flags); } /* * Used for clocks that always have same value as the parent clock divided by a * fixed divisor */ void follow_parent(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); clk->rate = clk->pclk->rate; spin_unlock_irqrestore(&clocks_lock, flags); } /** * recalc_root_clocks - recalculate and propagate all root clocks * * Recalculates all root clocks (clocks with no parent), which if the * clock's .recalc is set correctly, should also propagate their rates. */ void recalc_root_clocks(void) { propagate_rate(&root_clks); }
gpl-2.0
wurikiji/ttFS
ulinux/linux-3.10.61/drivers/i2c/busses/i2c-piix4.c
2034
18676
/* Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl> and Philip Edelbrock <phil@netroedge.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supports: Intel PIIX4, 440MX Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100 ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800 AMD Hudson-2, CZ SMSC Victory66 Note: we assume there can only be one device, with one or more SMBus interfaces. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/dmi.h> #include <linux/acpi.h> #include <linux/io.h> /* PIIX4 SMBus address offsets */ #define SMBHSTSTS (0 + piix4_smba) #define SMBHSLVSTS (1 + piix4_smba) #define SMBHSTCNT (2 + piix4_smba) #define SMBHSTCMD (3 + piix4_smba) #define SMBHSTADD (4 + piix4_smba) #define SMBHSTDAT0 (5 + piix4_smba) #define SMBHSTDAT1 (6 + piix4_smba) #define SMBBLKDAT (7 + piix4_smba) #define SMBSLVCNT (8 + piix4_smba) #define SMBSHDWCMD (9 + piix4_smba) #define SMBSLVEVT (0xA + piix4_smba) #define SMBSLVDAT (0xC + piix4_smba) /* count for request_region */ #define SMBIOSIZE 8 /* PCI Address Constants */ #define SMBBA 0x090 #define SMBHSTCFG 0x0D2 #define SMBSLVC 0x0D3 #define SMBSHDW1 0x0D4 #define SMBSHDW2 0x0D5 #define SMBREV 0x0D6 /* Other settings */ #define MAX_TIMEOUT 500 #define ENABLE_INT9 0 /* PIIX4 constants */ #define PIIX4_QUICK 0x00 #define PIIX4_BYTE 0x04 #define PIIX4_BYTE_DATA 0x08 #define PIIX4_WORD_DATA 0x0C #define PIIX4_BLOCK_DATA 0x14 /* insmod parameters */ /* If force is set to anything different from 0, we forcibly enable the PIIX4. DANGEROUS! */ static int force; module_param (force, int, 0); MODULE_PARM_DESC(force, "Forcibly enable the PIIX4. DANGEROUS!"); /* If force_addr is set to anything different from 0, we forcibly enable the PIIX4 at the given address. VERY DANGEROUS! */ static int force_addr; module_param (force_addr, int, 0); MODULE_PARM_DESC(force_addr, "Forcibly enable the PIIX4 at the given address. 
" "EXTREMELY DANGEROUS!"); static int srvrworks_csb5_delay; static struct pci_driver piix4_driver; static const struct dmi_system_id piix4_dmi_blacklist[] = { { .ident = "Sapphire AM2RD790", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "SAPPHIRE Inc."), DMI_MATCH(DMI_BOARD_NAME, "PC-AM2RD790"), }, }, { .ident = "DFI Lanparty UT 790FX", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "DFI Inc."), DMI_MATCH(DMI_BOARD_NAME, "LP UT 790FX"), }, }, { } }; /* The IBM entry is in a separate table because we only check it on Intel-based systems */ static const struct dmi_system_id piix4_dmi_ibm[] = { { .ident = "IBM", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, }, { }, }; struct i2c_piix4_adapdata { unsigned short smba; }; static int piix4_setup(struct pci_dev *PIIX4_dev, const struct pci_device_id *id) { unsigned char temp; unsigned short piix4_smba; if ((PIIX4_dev->vendor == PCI_VENDOR_ID_SERVERWORKS) && (PIIX4_dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5)) srvrworks_csb5_delay = 1; /* On some motherboards, it was reported that accessing the SMBus caused severe hardware problems */ if (dmi_check_system(piix4_dmi_blacklist)) { dev_err(&PIIX4_dev->dev, "Accessing the SMBus on this system is unsafe!\n"); return -EPERM; } /* Don't access SMBus on IBM systems which get corrupted eeproms */ if (dmi_check_system(piix4_dmi_ibm) && PIIX4_dev->vendor == PCI_VENDOR_ID_INTEL) { dev_err(&PIIX4_dev->dev, "IBM system detected; this module " "may corrupt your serial eeprom! 
Refusing to load " "module!\n"); return -EPERM; } /* Determine the address of the SMBus areas */ if (force_addr) { piix4_smba = force_addr & 0xfff0; force = 0; } else { pci_read_config_word(PIIX4_dev, SMBBA, &piix4_smba); piix4_smba &= 0xfff0; if(piix4_smba == 0) { dev_err(&PIIX4_dev->dev, "SMBus base address " "uninitialized - upgrade BIOS or use " "force_addr=0xaddr\n"); return -ENODEV; } } if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) return -ENODEV; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", piix4_smba); return -EBUSY; } pci_read_config_byte(PIIX4_dev, SMBHSTCFG, &temp); /* If force_addr is set, we program the new address here. Just to make sure, we disable the PIIX4 first. */ if (force_addr) { pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp & 0xfe); pci_write_config_word(PIIX4_dev, SMBBA, piix4_smba); pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 0x01); dev_info(&PIIX4_dev->dev, "WARNING: SMBus interface set to " "new address %04x!\n", piix4_smba); } else if ((temp & 1) == 0) { if (force) { /* This should never need to be done, but has been * noted that many Dell machines have the SMBus * interface on the PIIX4 disabled!? NOTE: This assumes * I/O space and other allocations WERE done by the * Bios! Don't complain if your hardware does weird * things after enabling this. :') Check for Bios * updates before resorting to this. 
*/ pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 1); dev_notice(&PIIX4_dev->dev, "WARNING: SMBus interface has been FORCEFULLY ENABLED!\n"); } else { dev_err(&PIIX4_dev->dev, "Host SMBus controller not enabled!\n"); release_region(piix4_smba, SMBIOSIZE); return -ENODEV; } } if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2)) dev_dbg(&PIIX4_dev->dev, "Using Interrupt 9 for SMBus.\n"); else if ((temp & 0x0E) == 0) dev_dbg(&PIIX4_dev->dev, "Using Interrupt SMI# for SMBus.\n"); else dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration " "(or code out of date)!\n"); pci_read_config_byte(PIIX4_dev, SMBREV, &temp); dev_info(&PIIX4_dev->dev, "SMBus Host Controller at 0x%x, revision %d\n", piix4_smba, temp); return piix4_smba; } static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, const struct pci_device_id *id) { unsigned short piix4_smba; unsigned short smba_idx = 0xcd6; u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c; /* SB800 and later SMBus does not support forcing address */ if (force || force_addr) { dev_err(&PIIX4_dev->dev, "SMBus does not support " "forcing address!\n"); return -EINVAL; } /* Determine the address of the SMBus areas */ if (!request_region(smba_idx, 2, "smba_idx")) { dev_err(&PIIX4_dev->dev, "SMBus base address index region " "0x%x already in use!\n", smba_idx); return -EBUSY; } outb_p(smb_en, smba_idx); smba_en_lo = inb_p(smba_idx + 1); outb_p(smb_en + 1, smba_idx); smba_en_hi = inb_p(smba_idx + 1); release_region(smba_idx, 2); if ((smba_en_lo & 1) == 0) { dev_err(&PIIX4_dev->dev, "Host SMBus controller not enabled!\n"); return -ENODEV; } piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) return -ENODEV; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", piix4_smba); return -EBUSY; } /* Request the SMBus I2C bus config region */ if (!request_region(piix4_smba + 
i2ccfg_offset, 1, "i2ccfg")) { dev_err(&PIIX4_dev->dev, "SMBus I2C bus config region " "0x%x already in use!\n", piix4_smba + i2ccfg_offset); release_region(piix4_smba, SMBIOSIZE); return -EBUSY; } i2ccfg = inb_p(piix4_smba + i2ccfg_offset); release_region(piix4_smba + i2ccfg_offset, 1); if (i2ccfg & 1) dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus.\n"); else dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus.\n"); dev_info(&PIIX4_dev->dev, "SMBus Host Controller at 0x%x, revision %d\n", piix4_smba, i2ccfg >> 4); return piix4_smba; } static int piix4_setup_aux(struct pci_dev *PIIX4_dev, const struct pci_device_id *id, unsigned short base_reg_addr) { /* Set up auxiliary SMBus controllers found on some * AMD chipsets e.g. SP5100 (SB700 derivative) */ unsigned short piix4_smba; /* Read address of auxiliary SMBus controller */ pci_read_config_word(PIIX4_dev, base_reg_addr, &piix4_smba); if ((piix4_smba & 1) == 0) { dev_dbg(&PIIX4_dev->dev, "Auxiliary SMBus controller not enabled\n"); return -ENODEV; } piix4_smba &= 0xfff0; if (piix4_smba == 0) { dev_dbg(&PIIX4_dev->dev, "Auxiliary SMBus base address uninitialized\n"); return -ENODEV; } if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) return -ENODEV; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "Auxiliary SMBus region 0x%x " "already in use!\n", piix4_smba); return -EBUSY; } dev_info(&PIIX4_dev->dev, "Auxiliary SMBus Host Controller at 0x%x\n", piix4_smba); return piix4_smba; } static int piix4_transaction(struct i2c_adapter *piix4_adapter) { struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(piix4_adapter); unsigned short piix4_smba = adapdata->smba; int temp; int result = 0; int timeout = 0; dev_dbg(&piix4_adapter->dev, "Transaction (pre): CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); /* Make sure the SMBus host is ready to start transmitting */ if ((temp 
= inb_p(SMBHSTSTS)) != 0x00) { dev_dbg(&piix4_adapter->dev, "SMBus busy (%02x). " "Resetting...\n", temp); outb_p(temp, SMBHSTSTS); if ((temp = inb_p(SMBHSTSTS)) != 0x00) { dev_err(&piix4_adapter->dev, "Failed! (%02x)\n", temp); return -EBUSY; } else { dev_dbg(&piix4_adapter->dev, "Successful!\n"); } } /* start the transaction by setting bit 6 */ outb_p(inb(SMBHSTCNT) | 0x040, SMBHSTCNT); /* We will always wait for a fraction of a second! (See PIIX4 docs errata) */ if (srvrworks_csb5_delay) /* Extra delay for SERVERWORKS_CSB5 */ msleep(2); else msleep(1); while ((++timeout < MAX_TIMEOUT) && ((temp = inb_p(SMBHSTSTS)) & 0x01)) msleep(1); /* If the SMBus is still busy, we give up */ if (timeout == MAX_TIMEOUT) { dev_err(&piix4_adapter->dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; } if (temp & 0x10) { result = -EIO; dev_err(&piix4_adapter->dev, "Error: Failed bus transaction\n"); } if (temp & 0x08) { result = -EIO; dev_dbg(&piix4_adapter->dev, "Bus collision! SMBus may be " "locked until next hard reset. (sorry!)\n"); /* Clock stops and slave is stuck in mid-transmission */ } if (temp & 0x04) { result = -ENXIO; dev_dbg(&piix4_adapter->dev, "Error: no response!\n"); } if (inb_p(SMBHSTSTS) != 0x00) outb_p(inb(SMBHSTSTS), SMBHSTSTS); if ((temp = inb_p(SMBHSTSTS)) != 0x00) { dev_err(&piix4_adapter->dev, "Failed reset at end of " "transaction (%02x)\n", temp); } dev_dbg(&piix4_adapter->dev, "Transaction (post): CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); return result; } /* Return negative errno on error. 
*/ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap); unsigned short piix4_smba = adapdata->smba; int i, len; int status; switch (size) { case I2C_SMBUS_QUICK: outb_p((addr << 1) | read_write, SMBHSTADD); size = PIIX4_QUICK; break; case I2C_SMBUS_BYTE: outb_p((addr << 1) | read_write, SMBHSTADD); if (read_write == I2C_SMBUS_WRITE) outb_p(command, SMBHSTCMD); size = PIIX4_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb_p((addr << 1) | read_write, SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, SMBHSTDAT0); size = PIIX4_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb_p((addr << 1) | read_write, SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word & 0xff, SMBHSTDAT0); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1); } size = PIIX4_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: outb_p((addr << 1) | read_write, SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; outb_p(len, SMBHSTDAT0); i = inb_p(SMBHSTCNT); /* Reset SMBBLKDAT */ for (i = 1; i <= len; i++) outb_p(data->block[i], SMBBLKDAT); } size = PIIX4_BLOCK_DATA; break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } outb_p((size & 0x1C) + (ENABLE_INT9 & 1), SMBHSTCNT); status = piix4_transaction(adap); if (status) return status; if ((read_write == I2C_SMBUS_WRITE) || (size == PIIX4_QUICK)) return 0; switch (size) { case PIIX4_BYTE: case PIIX4_BYTE_DATA: data->byte = inb_p(SMBHSTDAT0); break; case PIIX4_WORD_DATA: data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8); break; case PIIX4_BLOCK_DATA: data->block[0] = inb_p(SMBHSTDAT0); if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; i = 
inb_p(SMBHSTCNT); /* Reset SMBBLKDAT */ for (i = 1; i <= data->block[0]; i++) data->block[i] = inb_p(SMBBLKDAT); break; } return 0; } static u32 piix4_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = piix4_access, .functionality = piix4_func, }; static DEFINE_PCI_DEVICE_TABLE(piix4_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) }, { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x790b) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1100LD) }, { 0, } }; MODULE_DEVICE_TABLE (pci, piix4_ids); static struct i2c_adapter *piix4_main_adapter; static struct i2c_adapter *piix4_aux_adapter; static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, struct i2c_adapter **padap) { struct i2c_adapter *adap; struct i2c_piix4_adapdata *adapdata; int retval; adap = kzalloc(sizeof(*adap), GFP_KERNEL); if (adap == NULL) { release_region(smba, SMBIOSIZE); return -ENOMEM; } adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adap->algo = &smbus_algorithm; adapdata = 
kzalloc(sizeof(*adapdata), GFP_KERNEL); if (adapdata == NULL) { kfree(adap); release_region(smba, SMBIOSIZE); return -ENOMEM; } adapdata->smba = smba; /* set up the sysfs linkage to our parent device */ adap->dev.parent = &dev->dev; snprintf(adap->name, sizeof(adap->name), "SMBus PIIX4 adapter at %04x", smba); i2c_set_adapdata(adap, adapdata); retval = i2c_add_adapter(adap); if (retval) { dev_err(&dev->dev, "Couldn't register adapter!\n"); kfree(adapdata); kfree(adap); release_region(smba, SMBIOSIZE); return retval; } *padap = adap; return 0; } static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) { int retval; if ((dev->vendor == PCI_VENDOR_ID_ATI && dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && dev->revision >= 0x40) || dev->vendor == PCI_VENDOR_ID_AMD) /* base address location etc changed in SB800 */ retval = piix4_setup_sb800(dev, id); else retval = piix4_setup(dev, id); /* If no main SMBus found, give up */ if (retval < 0) return retval; /* Try to register main SMBus adapter, give up if we can't */ retval = piix4_add_adapter(dev, retval, &piix4_main_adapter); if (retval < 0) return retval; /* Check for auxiliary SMBus on some AMD chipsets */ if (dev->vendor == PCI_VENDOR_ID_ATI && dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && dev->revision < 0x40) { retval = piix4_setup_aux(dev, id, 0x58); if (retval > 0) { /* Try to add the aux adapter if it exists, * piix4_add_adapter will clean up if this fails */ piix4_add_adapter(dev, retval, &piix4_aux_adapter); } } return 0; } static void piix4_adap_remove(struct i2c_adapter *adap) { struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap); if (adapdata->smba) { i2c_del_adapter(adap); release_region(adapdata->smba, SMBIOSIZE); kfree(adapdata); kfree(adap); } } static void piix4_remove(struct pci_dev *dev) { if (piix4_main_adapter) { piix4_adap_remove(piix4_main_adapter); piix4_main_adapter = NULL; } if (piix4_aux_adapter) { piix4_adap_remove(piix4_aux_adapter); piix4_aux_adapter = NULL; } } 
static struct pci_driver piix4_driver = { .name = "piix4_smbus", .id_table = piix4_ids, .probe = piix4_probe, .remove = piix4_remove, }; module_pci_driver(piix4_driver); MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " "Philip Edelbrock <phil@netroedge.com>"); MODULE_DESCRIPTION("PIIX4 SMBus driver"); MODULE_LICENSE("GPL");
gpl-2.0
xdajog/samsung_sources_i927
fs/sysv/itree.c
3058
11647
/*
 *  linux/fs/sysv/itree.c
 *
 *  Handling of indirect blocks' trees.
 *  AV, Sep--Dec 2000
 */

#include <linux/buffer_head.h>
#include <linux/mount.h>
#include <linux/string.h>
#include "sysv.h"

enum {DIRECT = 10, DEPTH = 4};	/* Have triple indirect */

/* Mark an indirect-block buffer dirty, syncing immediately for O_SYNC inodes. */
static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
	mark_buffer_dirty_inode(bh, inode);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
}

/*
 * Translate a file-relative block number into a path of array offsets
 * through the inode's direct/indirect/double/triple-indirect tree.
 * Returns the path depth (0 on out-of-range block).
 */
static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
{
	struct super_block *sb = inode->i_sb;
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	int ptrs_bits = sbi->s_ind_per_block_bits;
	unsigned long	indirect_blocks = sbi->s_ind_per_block,
			double_blocks = sbi->s_ind_per_block_2;
	int n = 0;

	if (block < 0) {
		printk("sysv_block_map: block < 0\n");
	} else if (block < DIRECT) {
		offsets[n++] = block;
	} else if ( (block -= DIRECT) < indirect_blocks) {
		offsets[n++] = DIRECT;
		offsets[n++] = block;
	} else if ((block -= indirect_blocks) < double_blocks) {
		offsets[n++] = DIRECT+1;
		offsets[n++] = block >> ptrs_bits;
		offsets[n++] = block & (indirect_blocks - 1);
	} else if (((block -= double_blocks) >> (ptrs_bits * 2)) < indirect_blocks) {
		offsets[n++] = DIRECT+2;
		offsets[n++] = block >> (ptrs_bits * 2);
		offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1);
		offsets[n++] = block & (indirect_blocks - 1);
	} else {
		/* nothing */;
	}
	return n;
}

/* Convert an on-disk zone number to an absolute device block number. */
static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr)
{
	return sbi->s_block_base + fs32_to_cpu(sbi, nr);
}

/* One step in a lookup chain: where the pointer lives, its cached value,
 * and the buffer (if any) that holds it. */
typedef struct {
	sysv_zone_t     *p;
	sysv_zone_t     key;
	struct buffer_head *bh;
} Indirect;

static DEFINE_RWLOCK(pointers_lock);

static inline void add_chain(Indirect *p, struct buffer_head *bh, sysv_zone_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/* Return true iff no cached key in [from, to] has been changed under us. */
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

static inline sysv_zone_t *block_end(struct buffer_head *bh)
{
	return (sysv_zone_t*)((char*)bh->b_data + bh->b_size);
}

/*
 * Walk the indirection chain for `offsets`, reading indirect blocks as
 * needed.  Returns NULL when the full path resolved, or the last valid
 * Indirect on a hole (*err == 0), a concurrent truncate (-EAGAIN), or a
 * read failure (-EIO).
 *
 * Requires read_lock(&pointers_lock) or write_lock(&pointers_lock)
 */
static Indirect *get_branch(struct inode *inode,
			    int depth,
			    int offsets[],
			    Indirect chain[],
			    int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	add_chain(chain, NULL, SYSV_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		int block = block_to_cpu(SYSV_SB(sb), p->key);
		bh = sb_bread(sb, block);
		if (!bh)
			goto failure;
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

/*
 * Allocate `num` blocks (data block plus any missing indirect blocks)
 * and link them into a detached branch.  On failure everything that was
 * allocated is released and -ENOSPC is returned.
 */
static int alloc_branch(struct inode *inode,
			int num,
			int *offsets,
			Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int n = 0;
	int i;

	branch[0].key = sysv_new_block(inode->i_sb);
	if (branch[0].key) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		int parent;
		/* Allocate the next block */
		branch[n].key = sysv_new_block(inode->i_sb);
		if (!branch[n].key)
			break;
		/*
		 * Get buffer_head for parent block, zero it out and set
		 * the pointer to new one, then send parent to disk.
		 */
		parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key);
		bh = sb_getblk(inode->i_sb, parent);
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].bh = bh;
		branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		dirty_indirect(bh, inode);
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		sysv_free_block(inode->i_sb, branch[i].key);
	return -ENOSPC;
}

/*
 * Atomically attach a freshly allocated branch at *where->p, provided
 * the chain is still intact and the slot is still empty.  Returns
 * -EAGAIN (and frees the branch) if we raced with another writer.
 */
static inline int splice_branch(struct inode *inode,
				Indirect chain[],
				Indirect *where,
				int num)
{
	int i;

	/* Verify that place we are splicing to is still there and vacant */
	write_lock(&pointers_lock);
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;
	*where->p = where->key;
	write_unlock(&pointers_lock);

	inode->i_ctime = CURRENT_TIME_SEC;

	/* had we spliced it onto indirect block? */
	if (where->bh)
		dirty_indirect(where->bh, inode);

	if (IS_SYNC(inode))
		sysv_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		sysv_free_block(inode->i_sb, where[i].key);
	return -EAGAIN;
}

/*
 * get_block callback for the generic buffer/page helpers: map `iblock`
 * of `inode` into bh_result, allocating the missing part of the branch
 * when `create` is set.
 */
static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	struct super_block *sb = inode->i_sb;
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, iblock, offsets);

	if (depth == 0)
		goto out;

reread:
	read_lock(&pointers_lock);
	partial = get_branch(inode, depth, offsets, chain, &err);
	read_unlock(&pointers_lock);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh_result, sb, block_to_cpu(SYSV_SB(sb),
					chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh_result);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/*
 * Find the topmost node of the subtree being truncated that must be
 * detached, detach it under the write lock, and hand the detached top
 * back through *top.  Used only by sysv_truncate().
 */
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[],
				Indirect chain[],
				sysv_zone_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;

	write_lock(&pointers_lock);
	partial = get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p=partial; p>chain && all_zeroes((sysv_zone_t*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/* Free data blocks in [p, q), zeroing the pointers and dirtying the inode. */
static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
{
	for ( ; p < q ; p++) {
		sysv_zone_t nr = *p;
		if (nr) {
			*p = 0;
			sysv_free_block(inode->i_sb, nr);
			mark_inode_dirty(inode);
		}
	}
}

/* Recursively free subtrees rooted at the pointers in [p, q). */
static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
{
	struct buffer_head * bh;
	struct super_block *sb = inode->i_sb;

	if (depth--) {
		for ( ; p < q ; p++) {
			int block;
			sysv_zone_t nr = *p;
			if (!nr)
				continue;
			*p = 0;
			block = block_to_cpu(SYSV_SB(sb), nr);
			bh = sb_bread(sb, block);
			if (!bh)
				continue;
			free_branches(inode, (sysv_zone_t*)bh->b_data,
					block_end(bh), depth);
			bforget(bh);
			sysv_free_block(sb, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

/* Truncate the inode's block tree down to its current i_size. */
void sysv_truncate (struct inode * inode)
{
	sysv_zone_t *i_data = SYSV_I(inode)->i_data;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	sysv_zone_t nr = 0;
	int n;
	long iblock;
	unsigned blocksize;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	     S_ISLNK(inode->i_mode)))
		return;

	blocksize = inode->i_sb->s_blocksize;
	iblock = (inode->i_size + blocksize-1)
					>> inode->i_sb->s_blocksize_bits;

	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (n == 0)
		return;

	if (n == 1) {
		free_data(inode, i_data+offsets[0], i_data + DIRECT);
		goto do_indirects;
	}

	partial = find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			dirty_indirect(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		dirty_indirect(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees (== subtrees deeper than...) */
	while (n < DEPTH) {
		nr = i_data[DIRECT + n - 1];
		if (nr) {
			i_data[DIRECT + n - 1] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, n);
		}
		n++;
	}
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		sysv_sync_inode (inode);
	else
		mark_inode_dirty(inode);
}

/*
 * Count the blocks a file of `size` bytes occupies, including the
 * indirect blocks at every level, for st_blocks reporting.
 */
static unsigned sysv_nblocks(struct super_block *s, loff_t size)
{
	struct sysv_sb_info *sbi = SYSV_SB(s);
	int ptrs_bits = sbi->s_ind_per_block_bits;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + s->s_blocksize - 1) >> s->s_blocksize_bits;
	res = blocks;
	while (--i && blocks > direct) {
		blocks = ((blocks - direct - 1) >> ptrs_bits) + 1;
		res += blocks;
		direct = 1;
	}
	/*
	 * Bug fix: return the accumulated total (data + indirect blocks),
	 * not `blocks`, which by now only holds the count at the deepest
	 * indirection level and made st_blocks far too small.
	 */
	return res;
}

int sysv_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct super_block *s = mnt->mnt_sb;
	generic_fillattr(dentry->d_inode, stat);
	stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
	stat->blksize = s->s_blocksize;
	return 0;
}

static int sysv_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,get_block,wbc);
}

static int sysv_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,get_block);
}

int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, get_block);
}

static int sysv_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep, get_block);
	if (unlikely(ret)) {
		/* Trim any blocks instantiated past EOF by the failed write. */
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);
	}

	return ret;
}

static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,get_block);
}

const struct address_space_operations sysv_aops = {
	.readpage = sysv_readpage,
	.writepage = sysv_writepage,
	.write_begin = sysv_write_begin,
	.write_end = generic_write_end,
	.bmap = sysv_bmap
};
gpl-2.0
szezso/T.E.S.C.O-kernel_vivo
drivers/macintosh/rack-meter.c
3058
15820
/* * RackMac vu-meter driver * * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * Released under the term of the GNU GPL v2. * * Support the CPU-meter LEDs of the Xserve G5 * * TODO: Implement PWM to do variable intensity and provide userland * interface for fun. Also, the CPU-meter could be made nicer by being * a bit less "immediate" but giving instead a more average load over * time. Patches welcome :-) * */ #undef DEBUG #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/kernel_stat.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/dbdma.h> #include <asm/macio.h> #include <asm/keylargo.h> /* Number of samples in a sample buffer */ #define SAMPLE_COUNT 256 /* CPU meter sampling rate in ms */ #define CPU_SAMPLING_RATE 250 struct rackmeter_dma { struct dbdma_cmd cmd[4] ____cacheline_aligned; u32 mark ____cacheline_aligned; u32 buf1[SAMPLE_COUNT] ____cacheline_aligned; u32 buf2[SAMPLE_COUNT] ____cacheline_aligned; } ____cacheline_aligned; struct rackmeter_cpu { struct delayed_work sniffer; struct rackmeter *rm; cputime64_t prev_wall; cputime64_t prev_idle; int zero; } ____cacheline_aligned; struct rackmeter { struct macio_dev *mdev; unsigned int irq; struct device_node *i2s; u8 *ubuf; struct dbdma_regs __iomem *dma_regs; void __iomem *i2s_regs; dma_addr_t dma_buf_p; struct rackmeter_dma *dma_buf_v; int stale_irq; struct rackmeter_cpu cpu[2]; int paused; struct mutex sem; }; /* To be set as a tunable */ static int rackmeter_ignore_nice; /* This GPIO is whacked by the OS X driver when initializing */ #define RACKMETER_MAGIC_GPIO 0x78 /* This is copied from cpufreq_ondemand, maybe we should put it in * a common header somewhere */ static inline cputime64_t get_cpu_idle_time(unsigned int cpu) { 
cputime64_t retval; retval = cputime64_add(kstat_cpu(cpu).cpustat.idle, kstat_cpu(cpu).cpustat.iowait); if (rackmeter_ignore_nice) retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice); return retval; } static void rackmeter_setup_i2s(struct rackmeter *rm) { struct macio_chip *macio = rm->mdev->bus->chip; /* First whack magic GPIO */ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, RACKMETER_MAGIC_GPIO, 5); /* Call feature code to enable the sound channel and the proper * clock sources */ pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, rm->i2s, 0, 1); /* Power i2s and stop i2s clock. We whack MacIO FCRs directly for now. * This is a bit racy, thus we should add new platform functions to * handle that. snd-aoa needs that too */ MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_ENABLE); MACIO_BIC(KEYLARGO_FCR1, KL1_I2S0_CLK_ENABLE_BIT); (void)MACIO_IN32(KEYLARGO_FCR1); udelay(10); /* Then setup i2s. For now, we use the same magic value that * the OS X driver seems to use. We might want to play around * with the clock divisors later */ out_le32(rm->i2s_regs + 0x10, 0x01fa0000); (void)in_le32(rm->i2s_regs + 0x10); udelay(10); /* Fully restart i2s*/ MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT); (void)MACIO_IN32(KEYLARGO_FCR1); udelay(10); } static void rackmeter_set_default_pattern(struct rackmeter *rm) { int i; for (i = 0; i < 16; i++) { if (i < 8) rm->ubuf[i] = (i & 1) * 255; else rm->ubuf[i] = ((~i) & 1) * 255; } } static void rackmeter_do_pause(struct rackmeter *rm, int pause) { struct rackmeter_dma *rdma = rm->dma_buf_v; pr_debug("rackmeter: %s\n", pause ? 
"paused" : "started"); rm->paused = pause; if (pause) { DBDMA_DO_STOP(rm->dma_regs); return; } memset(rdma->buf1, 0, SAMPLE_COUNT & sizeof(u32)); memset(rdma->buf2, 0, SAMPLE_COUNT & sizeof(u32)); rm->dma_buf_v->mark = 0; mb(); out_le32(&rm->dma_regs->cmdptr_hi, 0); out_le32(&rm->dma_regs->cmdptr, rm->dma_buf_p); out_le32(&rm->dma_regs->control, (RUN << 16) | RUN); } static void rackmeter_setup_dbdma(struct rackmeter *rm) { struct rackmeter_dma *db = rm->dma_buf_v; struct dbdma_cmd *cmd = db->cmd; /* Make sure dbdma is reset */ DBDMA_DO_RESET(rm->dma_regs); pr_debug("rackmeter: mark offset=0x%zx\n", offsetof(struct rackmeter_dma, mark)); pr_debug("rackmeter: buf1 offset=0x%zx\n", offsetof(struct rackmeter_dma, buf1)); pr_debug("rackmeter: buf2 offset=0x%zx\n", offsetof(struct rackmeter_dma, buf2)); /* Prepare 4 dbdma commands for the 2 buffers */ memset(cmd, 0, 4 * sizeof(struct dbdma_cmd)); st_le16(&cmd->req_count, 4); st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM); st_le32(&cmd->phy_addr, rm->dma_buf_p + offsetof(struct rackmeter_dma, mark)); st_le32(&cmd->cmd_dep, 0x02000000); cmd++; st_le16(&cmd->req_count, SAMPLE_COUNT * 4); st_le16(&cmd->command, OUTPUT_MORE); st_le32(&cmd->phy_addr, rm->dma_buf_p + offsetof(struct rackmeter_dma, buf1)); cmd++; st_le16(&cmd->req_count, 4); st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM); st_le32(&cmd->phy_addr, rm->dma_buf_p + offsetof(struct rackmeter_dma, mark)); st_le32(&cmd->cmd_dep, 0x01000000); cmd++; st_le16(&cmd->req_count, SAMPLE_COUNT * 4); st_le16(&cmd->command, OUTPUT_MORE | BR_ALWAYS); st_le32(&cmd->phy_addr, rm->dma_buf_p + offsetof(struct rackmeter_dma, buf2)); st_le32(&cmd->cmd_dep, rm->dma_buf_p); rackmeter_do_pause(rm, 0); } static void rackmeter_do_timer(struct work_struct *work) { struct rackmeter_cpu *rcpu = container_of(work, struct rackmeter_cpu, sniffer.work); struct rackmeter *rm = rcpu->rm; unsigned int cpu = smp_processor_id(); cputime64_t cur_jiffies, total_idle_ticks; 
unsigned int total_ticks, idle_ticks; int i, offset, load, cumm, pause; cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); total_ticks = (unsigned int)cputime64_sub(cur_jiffies, rcpu->prev_wall); rcpu->prev_wall = cur_jiffies; total_idle_ticks = get_cpu_idle_time(cpu); idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks, rcpu->prev_idle); rcpu->prev_idle = total_idle_ticks; /* We do a very dumb calculation to update the LEDs for now, * we'll do better once we have actual PWM implemented */ load = (9 * (total_ticks - idle_ticks)) / total_ticks; offset = cpu << 3; cumm = 0; for (i = 0; i < 8; i++) { u8 ub = (load > i) ? 0xff : 0; rm->ubuf[i + offset] = ub; cumm |= ub; } rcpu->zero = (cumm == 0); /* Now check if LEDs are all 0, we can stop DMA */ pause = (rm->cpu[0].zero && rm->cpu[1].zero); if (pause != rm->paused) { mutex_lock(&rm->sem); pause = (rm->cpu[0].zero && rm->cpu[1].zero); rackmeter_do_pause(rm, pause); mutex_unlock(&rm->sem); } schedule_delayed_work_on(cpu, &rcpu->sniffer, msecs_to_jiffies(CPU_SAMPLING_RATE)); } static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm) { unsigned int cpu; /* This driver works only with 1 or 2 CPUs numbered 0 and 1, * but that's really all we have on Apple Xserve. 
It doesn't * play very nice with CPU hotplug neither but we don't do that * on those machines yet */ rm->cpu[0].rm = rm; INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer); rm->cpu[1].rm = rm; INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer); for_each_online_cpu(cpu) { struct rackmeter_cpu *rcpu; if (cpu > 1) continue; rcpu = &rm->cpu[cpu]; rcpu->prev_idle = get_cpu_idle_time(cpu); rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64()); schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer, msecs_to_jiffies(CPU_SAMPLING_RATE)); } } static void rackmeter_stop_cpu_sniffer(struct rackmeter *rm) { cancel_delayed_work_sync(&rm->cpu[0].sniffer); cancel_delayed_work_sync(&rm->cpu[1].sniffer); } static int __devinit rackmeter_setup(struct rackmeter *rm) { pr_debug("rackmeter: setting up i2s..\n"); rackmeter_setup_i2s(rm); pr_debug("rackmeter: setting up default pattern..\n"); rackmeter_set_default_pattern(rm); pr_debug("rackmeter: setting up dbdma..\n"); rackmeter_setup_dbdma(rm); pr_debug("rackmeter: start CPU measurements..\n"); rackmeter_init_cpu_sniffer(rm); printk(KERN_INFO "RackMeter initialized\n"); return 0; } /* XXX FIXME: No PWM yet, this is 0/1 */ static u32 rackmeter_calc_sample(struct rackmeter *rm, unsigned int index) { int led; u32 sample = 0; for (led = 0; led < 16; led++) { sample >>= 1; sample |= ((rm->ubuf[led] >= 0x80) << 15); } return (sample << 17) | (sample >> 15); } static irqreturn_t rackmeter_irq(int irq, void *arg) { struct rackmeter *rm = arg; struct rackmeter_dma *db = rm->dma_buf_v; unsigned int mark, i; u32 *buf; /* Flush PCI buffers with an MMIO read. Maybe we could actually * check the status one day ... 
in case things go wrong, though * this never happened to me */ (void)in_le32(&rm->dma_regs->status); /* Make sure the CPU gets us in order */ rmb(); /* Read mark */ mark = db->mark; if (mark != 1 && mark != 2) { printk(KERN_WARNING "rackmeter: Incorrect DMA mark 0x%08x\n", mark); /* We allow for 3 errors like that (stale DBDMA irqs) */ if (++rm->stale_irq > 3) { printk(KERN_ERR "rackmeter: Too many errors," " stopping DMA\n"); DBDMA_DO_RESET(rm->dma_regs); } return IRQ_HANDLED; } /* Next buffer we need to fill is mark value */ buf = mark == 1 ? db->buf1 : db->buf2; /* Fill it now. This routine converts the 8 bits depth sample array * into the PWM bitmap for each LED. */ for (i = 0; i < SAMPLE_COUNT; i++) buf[i] = rackmeter_calc_sample(rm, i); return IRQ_HANDLED; } static int __devinit rackmeter_probe(struct macio_dev* mdev, const struct of_device_id *match) { struct device_node *i2s = NULL, *np = NULL; struct rackmeter *rm = NULL; struct resource ri2s, rdma; int rc = -ENODEV; pr_debug("rackmeter_probe()\n"); /* Get i2s-a node */ while ((i2s = of_get_next_child(mdev->ofdev.dev.of_node, i2s)) != NULL) if (strcmp(i2s->name, "i2s-a") == 0) break; if (i2s == NULL) { pr_debug(" i2s-a child not found\n"); goto bail; } /* Get lightshow or virtual sound */ while ((np = of_get_next_child(i2s, np)) != NULL) { if (strcmp(np->name, "lightshow") == 0) break; if ((strcmp(np->name, "sound") == 0) && of_get_property(np, "virtual", NULL) != NULL) break; } if (np == NULL) { pr_debug(" lightshow or sound+virtual child not found\n"); goto bail; } /* Create and initialize our instance data */ rm = kzalloc(sizeof(struct rackmeter), GFP_KERNEL); if (rm == NULL) { printk(KERN_ERR "rackmeter: failed to allocate memory !\n"); rc = -ENOMEM; goto bail_release; } rm->mdev = mdev; rm->i2s = i2s; mutex_init(&rm->sem); dev_set_drvdata(&mdev->ofdev.dev, rm); /* Check resources availability. 
We need at least resource 0 and 1 */ #if 0 /* Use that when i2s-a is finally an mdev per-se */ if (macio_resource_count(mdev) < 2 || macio_irq_count(mdev) < 2) { printk(KERN_ERR "rackmeter: found match but lacks resources: %s" " (%d resources, %d interrupts)\n", mdev->ofdev.node->full_name); rc = -ENXIO; goto bail_free; } if (macio_request_resources(mdev, "rackmeter")) { printk(KERN_ERR "rackmeter: failed to request resources: %s\n", mdev->ofdev.node->full_name); rc = -EBUSY; goto bail_free; } rm->irq = macio_irq(mdev, 1); #else rm->irq = irq_of_parse_and_map(i2s, 1); if (rm->irq == NO_IRQ || of_address_to_resource(i2s, 0, &ri2s) || of_address_to_resource(i2s, 1, &rdma)) { printk(KERN_ERR "rackmeter: found match but lacks resources: %s", mdev->ofdev.dev.of_node->full_name); rc = -ENXIO; goto bail_free; } #endif pr_debug(" i2s @0x%08x\n", (unsigned int)ri2s.start); pr_debug(" dma @0x%08x\n", (unsigned int)rdma.start); pr_debug(" irq %d\n", rm->irq); rm->ubuf = (u8 *)__get_free_page(GFP_KERNEL); if (rm->ubuf == NULL) { printk(KERN_ERR "rackmeter: failed to allocate samples page !\n"); rc = -ENOMEM; goto bail_release; } rm->dma_buf_v = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev, sizeof(struct rackmeter_dma), &rm->dma_buf_p, GFP_KERNEL); if (rm->dma_buf_v == NULL) { printk(KERN_ERR "rackmeter: failed to allocate dma buffer !\n"); rc = -ENOMEM; goto bail_free_samples; } #if 0 rm->i2s_regs = ioremap(macio_resource_start(mdev, 0), 0x1000); #else rm->i2s_regs = ioremap(ri2s.start, 0x1000); #endif if (rm->i2s_regs == NULL) { printk(KERN_ERR "rackmeter: failed to map i2s registers !\n"); rc = -ENXIO; goto bail_free_dma; } #if 0 rm->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x100); #else rm->dma_regs = ioremap(rdma.start, 0x100); #endif if (rm->dma_regs == NULL) { printk(KERN_ERR "rackmeter: failed to map dma registers !\n"); rc = -ENXIO; goto bail_unmap_i2s; } rc = rackmeter_setup(rm); if (rc) { printk(KERN_ERR "rackmeter: failed to initialize !\n"); rc = 
-ENXIO; goto bail_unmap_dma; } rc = request_irq(rm->irq, rackmeter_irq, 0, "rackmeter", rm); if (rc != 0) { printk(KERN_ERR "rackmeter: failed to request interrupt !\n"); goto bail_stop_dma; } of_node_put(np); return 0; bail_stop_dma: DBDMA_DO_RESET(rm->dma_regs); bail_unmap_dma: iounmap(rm->dma_regs); bail_unmap_i2s: iounmap(rm->i2s_regs); bail_free_dma: dma_free_coherent(&macio_get_pci_dev(mdev)->dev, sizeof(struct rackmeter_dma), rm->dma_buf_v, rm->dma_buf_p); bail_free_samples: free_page((unsigned long)rm->ubuf); bail_release: #if 0 macio_release_resources(mdev); #endif bail_free: kfree(rm); bail: of_node_put(i2s); of_node_put(np); dev_set_drvdata(&mdev->ofdev.dev, NULL); return rc; } static int __devexit rackmeter_remove(struct macio_dev* mdev) { struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev); /* Stop CPU sniffer timer & work queues */ rackmeter_stop_cpu_sniffer(rm); /* Clear reference to private data */ dev_set_drvdata(&mdev->ofdev.dev, NULL); /* Stop/reset dbdma */ DBDMA_DO_RESET(rm->dma_regs); /* Release the IRQ */ free_irq(rm->irq, rm); /* Unmap registers */ iounmap(rm->dma_regs); iounmap(rm->i2s_regs); /* Free DMA */ dma_free_coherent(&macio_get_pci_dev(mdev)->dev, sizeof(struct rackmeter_dma), rm->dma_buf_v, rm->dma_buf_p); /* Free samples */ free_page((unsigned long)rm->ubuf); #if 0 /* Release resources */ macio_release_resources(mdev); #endif /* Get rid of me */ kfree(rm); return 0; } static int rackmeter_shutdown(struct macio_dev* mdev) { struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev); if (rm == NULL) return -ENODEV; /* Stop CPU sniffer timer & work queues */ rackmeter_stop_cpu_sniffer(rm); /* Stop/reset dbdma */ DBDMA_DO_RESET(rm->dma_regs); return 0; } static struct of_device_id rackmeter_match[] = { { .name = "i2s" }, { } }; static struct macio_driver rackmeter_driver = { .driver = { .name = "rackmeter", .owner = THIS_MODULE, .of_match_table = rackmeter_match, }, .probe = rackmeter_probe, .remove = __devexit_p(rackmeter_remove), 
.shutdown = rackmeter_shutdown, }; static int __init rackmeter_init(void) { pr_debug("rackmeter_init()\n"); return macio_register_driver(&rackmeter_driver); } static void __exit rackmeter_exit(void) { pr_debug("rackmeter_exit()\n"); macio_unregister_driver(&rackmeter_driver); } module_init(rackmeter_init); module_exit(rackmeter_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("RackMeter: Support vu-meter on XServe front panel");
gpl-2.0
andrsbub/linux
net/netfilter/nf_nat_irc.c
3570
3484
/* IRC extension for TCP NAT alteration. * * (C) 2000-2001 by Harald Welte <laforge@gnumonks.org> * (C) 2004 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation * based on a copy of RR's ip_nat_ftp.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/tcp.h> #include <linux/kernel.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_expect.h> #include <linux/netfilter/nf_conntrack_irc.h> MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); MODULE_DESCRIPTION("IRC (DCC) NAT helper"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ip_nat_irc"); static unsigned int help(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned int matchoff, unsigned int matchlen, struct nf_conntrack_expect *exp) { char buffer[sizeof("4294967296 65635")]; struct nf_conn *ct = exp->master; union nf_inet_addr newaddr; u_int16_t port; unsigned int ret; /* Reply comes from server. */ newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; exp->dir = IP_CT_DIR_REPLY; exp->expectfn = nf_nat_follow_master; /* Try to get same port: if not, try to change it. 
*/ for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { int ret; exp->tuple.dst.u.tcp.port = htons(port); ret = nf_ct_expect_related(exp); if (ret == 0) break; else if (ret != -EBUSY) { port = 0; break; } } if (port == 0) { nf_ct_helper_log(skb, ct, "all ports in use"); return NF_DROP; } /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27 * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28 * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26 * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26 * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27 * * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits, * 255.255.255.255==4294967296, 10 digits) * P: bound port (min 1 d, max 5d (65635)) * F: filename (min 1 d ) * S: size (min 1 d ) * 0x01, \n: terminators */ /* AAA = "us", ie. where server normally talks to. */ snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port); pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", buffer, &newaddr.ip, port); ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, matchlen, buffer, strlen(buffer)); if (ret != NF_ACCEPT) { nf_ct_helper_log(skb, ct, "cannot mangle packet"); nf_ct_unexpect_related(exp); } return ret; } static void __exit nf_nat_irc_fini(void) { RCU_INIT_POINTER(nf_nat_irc_hook, NULL); synchronize_rcu(); } static int __init nf_nat_irc_init(void) { BUG_ON(nf_nat_irc_hook != NULL); RCU_INIT_POINTER(nf_nat_irc_hook, help); return 0; } /* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */ static int warn_set(const char *val, struct kernel_param *kp) { printk(KERN_INFO KBUILD_MODNAME ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n"); return 0; } module_param_call(ports, warn_set, NULL, NULL, 0); module_init(nf_nat_irc_init); module_exit(nf_nat_irc_fini);
gpl-2.0
sgp-blackphone/Blackphone-BP1-Kernel
drivers/video/backlight/generic_bl.c
4594
3482
/* * Generic Backlight Driver * * Copyright (c) 2004-2008 Richard Purdie * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/fb.h> #include <linux/backlight.h> static int genericbl_intensity; static struct backlight_device *generic_backlight_device; static struct generic_bl_info *bl_machinfo; /* Flag to signal when the battery is low */ #define GENERICBL_BATTLOW BL_CORE_DRIVER1 static int genericbl_send_intensity(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.state & BL_CORE_FBBLANK) intensity = 0; if (bd->props.state & BL_CORE_SUSPENDED) intensity = 0; if (bd->props.state & GENERICBL_BATTLOW) intensity &= bl_machinfo->limit_mask; bl_machinfo->set_bl_intensity(intensity); genericbl_intensity = intensity; if (bl_machinfo->kick_battery) bl_machinfo->kick_battery(); return 0; } static int genericbl_get_intensity(struct backlight_device *bd) { return genericbl_intensity; } /* * Called when the battery is low to limit the backlight intensity. 
* If limit==0 clear any limit, otherwise limit the intensity */ void genericbl_limit_intensity(int limit) { struct backlight_device *bd = generic_backlight_device; mutex_lock(&bd->ops_lock); if (limit) bd->props.state |= GENERICBL_BATTLOW; else bd->props.state &= ~GENERICBL_BATTLOW; backlight_update_status(generic_backlight_device); mutex_unlock(&bd->ops_lock); } EXPORT_SYMBOL(genericbl_limit_intensity); static const struct backlight_ops genericbl_ops = { .options = BL_CORE_SUSPENDRESUME, .get_brightness = genericbl_get_intensity, .update_status = genericbl_send_intensity, }; static int genericbl_probe(struct platform_device *pdev) { struct backlight_properties props; struct generic_bl_info *machinfo = pdev->dev.platform_data; const char *name = "generic-bl"; struct backlight_device *bd; bl_machinfo = machinfo; if (!machinfo->limit_mask) machinfo->limit_mask = -1; if (machinfo->name) name = machinfo->name; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = machinfo->max_intensity; bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops, &props); if (IS_ERR (bd)) return PTR_ERR (bd); platform_set_drvdata(pdev, bd); bd->props.power = FB_BLANK_UNBLANK; bd->props.brightness = machinfo->default_intensity; backlight_update_status(bd); generic_backlight_device = bd; printk("Generic Backlight Driver Initialized.\n"); return 0; } static int genericbl_remove(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); bd->props.power = 0; bd->props.brightness = 0; backlight_update_status(bd); backlight_device_unregister(bd); printk("Generic Backlight Driver Unloaded\n"); return 0; } static struct platform_driver genericbl_driver = { .probe = genericbl_probe, .remove = genericbl_remove, .driver = { .name = "generic-bl", }, }; module_platform_driver(genericbl_driver); MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); MODULE_DESCRIPTION("Generic Backlight Driver"); 
MODULE_LICENSE("GPL");
gpl-2.0