repo_name
string
path
string
copies
string
size
string
content
string
license
string
forumber/temiz_kernel_g2
drivers/net/wireless/ath/ath5k/debug.c
4776
31602
/* * Copyright (c) 2007-2008 Bruno Randolf <bruno@thinktube.com> * * This file is free software: you may copy, redistribute and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, either version 2 of the License, or (at your * option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * * This file incorporates work covered by the following copyright and * permission notice: * * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting * Copyright (c) 2004-2005 Atheros Communications, Inc. * Copyright (c) 2006 Devicescape Software, Inc. * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include <linux/export.h> #include <linux/moduleparam.h> #include <linux/seq_file.h> #include <linux/list.h> #include "debug.h" #include "ath5k.h" #include "reg.h" #include "base.h" static unsigned int ath5k_debug; module_param_named(debug, ath5k_debug, uint, 0); /* debugfs: registers */ struct reg { const char *name; int addr; }; #define REG_STRUCT_INIT(r) { #r, r } /* just a few random registers, might want to add more */ static const struct reg regs[] = { REG_STRUCT_INIT(AR5K_CR), REG_STRUCT_INIT(AR5K_RXDP), REG_STRUCT_INIT(AR5K_CFG), REG_STRUCT_INIT(AR5K_IER), REG_STRUCT_INIT(AR5K_BCR), REG_STRUCT_INIT(AR5K_RTSD0), REG_STRUCT_INIT(AR5K_RTSD1), REG_STRUCT_INIT(AR5K_TXCFG), REG_STRUCT_INIT(AR5K_RXCFG), REG_STRUCT_INIT(AR5K_RXJLA), REG_STRUCT_INIT(AR5K_MIBC), REG_STRUCT_INIT(AR5K_TOPS), REG_STRUCT_INIT(AR5K_RXNOFRM), REG_STRUCT_INIT(AR5K_TXNOFRM), REG_STRUCT_INIT(AR5K_RPGTO), REG_STRUCT_INIT(AR5K_RFCNT), REG_STRUCT_INIT(AR5K_MISC), REG_STRUCT_INIT(AR5K_QCUDCU_CLKGT), REG_STRUCT_INIT(AR5K_ISR), REG_STRUCT_INIT(AR5K_PISR), 
REG_STRUCT_INIT(AR5K_SISR0), REG_STRUCT_INIT(AR5K_SISR1), REG_STRUCT_INIT(AR5K_SISR2), REG_STRUCT_INIT(AR5K_SISR3), REG_STRUCT_INIT(AR5K_SISR4), REG_STRUCT_INIT(AR5K_IMR), REG_STRUCT_INIT(AR5K_PIMR), REG_STRUCT_INIT(AR5K_SIMR0), REG_STRUCT_INIT(AR5K_SIMR1), REG_STRUCT_INIT(AR5K_SIMR2), REG_STRUCT_INIT(AR5K_SIMR3), REG_STRUCT_INIT(AR5K_SIMR4), REG_STRUCT_INIT(AR5K_DCM_ADDR), REG_STRUCT_INIT(AR5K_DCCFG), REG_STRUCT_INIT(AR5K_CCFG), REG_STRUCT_INIT(AR5K_CPC0), REG_STRUCT_INIT(AR5K_CPC1), REG_STRUCT_INIT(AR5K_CPC2), REG_STRUCT_INIT(AR5K_CPC3), REG_STRUCT_INIT(AR5K_CPCOVF), REG_STRUCT_INIT(AR5K_RESET_CTL), REG_STRUCT_INIT(AR5K_SLEEP_CTL), REG_STRUCT_INIT(AR5K_INTPEND), REG_STRUCT_INIT(AR5K_SFR), REG_STRUCT_INIT(AR5K_PCICFG), REG_STRUCT_INIT(AR5K_GPIOCR), REG_STRUCT_INIT(AR5K_GPIODO), REG_STRUCT_INIT(AR5K_SREV), }; static void *reg_start(struct seq_file *seq, loff_t *pos) { return *pos < ARRAY_SIZE(regs) ? (void *)&regs[*pos] : NULL; } static void reg_stop(struct seq_file *seq, void *p) { /* nothing to do */ } static void *reg_next(struct seq_file *seq, void *p, loff_t *pos) { ++*pos; return *pos < ARRAY_SIZE(regs) ? 
(void *)&regs[*pos] : NULL; } static int reg_show(struct seq_file *seq, void *p) { struct ath5k_hw *ah = seq->private; struct reg *r = p; seq_printf(seq, "%-25s0x%08x\n", r->name, ath5k_hw_reg_read(ah, r->addr)); return 0; } static const struct seq_operations register_seq_ops = { .start = reg_start, .next = reg_next, .stop = reg_stop, .show = reg_show }; static int open_file_registers(struct inode *inode, struct file *file) { struct seq_file *s; int res; res = seq_open(file, &register_seq_ops); if (res == 0) { s = file->private_data; s->private = inode->i_private; } return res; } static const struct file_operations fops_registers = { .open = open_file_registers, .read = seq_read, .llseek = seq_lseek, .release = seq_release, .owner = THIS_MODULE, }; /* debugfs: beacons */ static ssize_t read_file_beacon(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[500]; unsigned int len = 0; unsigned int v; u64 tsf; v = ath5k_hw_reg_read(ah, AR5K_BEACON); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n", "AR5K_BEACON", v, v & AR5K_BEACON_PERIOD, (v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n", "AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP)); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n", "AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT)); v = ath5k_hw_reg_read(ah, AR5K_TIMER0); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n", "AR5K_TIMER0 (TBTT)", v, v); v = ath5k_hw_reg_read(ah, AR5K_TIMER1); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n", "AR5K_TIMER1 (DMA)", v, v >> 3); v = ath5k_hw_reg_read(ah, AR5K_TIMER2); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n", "AR5K_TIMER2 (SWBA)", v, v >> 3); v = ath5k_hw_reg_read(ah, AR5K_TIMER3); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n", 
"AR5K_TIMER3 (ATIM)", v, v); tsf = ath5k_hw_get_tsf64(ah); len += snprintf(buf + len, sizeof(buf) - len, "TSF\t\t0x%016llx\tTU: %08x\n", (unsigned long long)tsf, TSF_TO_TU(tsf)); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_beacon(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[20]; if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) return -EFAULT; if (strncmp(buf, "disable", 7) == 0) { AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); printk(KERN_INFO "debugfs disable beacons\n"); } else if (strncmp(buf, "enable", 6) == 0) { AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); printk(KERN_INFO "debugfs enable beacons\n"); } return count; } static const struct file_operations fops_beacon = { .read = read_file_beacon, .write = write_file_beacon, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: reset */ static ssize_t write_file_reset(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "debug file triggered reset\n"); ieee80211_queue_work(ah->hw, &ah->reset_work); return count; } static const struct file_operations fops_reset = { .write = write_file_reset, .open = simple_open, .owner = THIS_MODULE, .llseek = noop_llseek, }; /* debugfs: debug level */ static const struct { enum ath5k_debug_level level; const char *name; const char *desc; } dbg_info[] = { { ATH5K_DEBUG_RESET, "reset", "reset and initialization" }, { ATH5K_DEBUG_INTR, "intr", "interrupt handling" }, { ATH5K_DEBUG_MODE, "mode", "mode init/setup" }, { ATH5K_DEBUG_XMIT, "xmit", "basic xmit operation" }, { ATH5K_DEBUG_BEACON, "beacon", "beacon handling" }, { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" }, { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" }, { 
ATH5K_DEBUG_LED, "led", "LED management" }, { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, { ATH5K_DEBUG_DMA, "dma", "dma start/stop" }, { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" }, { ATH5K_DEBUG_DESC, "desc", "descriptor chains" }, { ATH5K_DEBUG_ANY, "all", "show all debug levels" }, }; static ssize_t read_file_debug(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[700]; unsigned int len = 0; unsigned int i; len += snprintf(buf + len, sizeof(buf) - len, "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level); for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) { len += snprintf(buf + len, sizeof(buf) - len, "%10s %c 0x%08x - %s\n", dbg_info[i].name, ah->debug.level & dbg_info[i].level ? '+' : ' ', dbg_info[i].level, dbg_info[i].desc); } len += snprintf(buf + len, sizeof(buf) - len, "%10s %c 0x%08x - %s\n", dbg_info[i].name, ah->debug.level == dbg_info[i].level ? '+' : ' ', dbg_info[i].level, dbg_info[i].desc); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_debug(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; unsigned int i; char buf[20]; if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) return -EFAULT; for (i = 0; i < ARRAY_SIZE(dbg_info); i++) { if (strncmp(buf, dbg_info[i].name, strlen(dbg_info[i].name)) == 0) { ah->debug.level ^= dbg_info[i].level; /* toggle bit */ break; } } return count; } static const struct file_operations fops_debug = { .read = read_file_debug, .write = write_file_debug, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: antenna */ static ssize_t read_file_antenna(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[700]; unsigned int len = 0; unsigned int i; unsigned int v; len += snprintf(buf + 
len, sizeof(buf) - len, "antenna mode\t%d\n", ah->ah_ant_mode); len += snprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n", ah->ah_def_ant); len += snprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n", ah->ah_tx_ant); len += snprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n"); for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) { len += snprintf(buf + len, sizeof(buf) - len, "[antenna %d]\t%d\t%d\n", i, ah->stats.antenna_rx[i], ah->stats.antenna_tx[i]); } len += snprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n", ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]); v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA); len += snprintf(buf + len, sizeof(buf) - len, "\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v); v = ath5k_hw_reg_read(ah, AR5K_STA_ID1); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n", (v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_STA_ID1_DESC_ANTENNA\t%d\n", (v & AR5K_STA_ID1_DESC_ANTENNA) != 0); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n", (v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n", (v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0); v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL); len += snprintf(buf + len, sizeof(buf) - len, "\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n", (v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0); v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_PHY_RESTART_DIV_GC\t\t%x\n", (v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S); v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n", (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0); v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0); len += snprintf(buf + len, sizeof(buf) - len, "\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v); v = ath5k_hw_reg_read(ah, 
AR5K_PHY_ANT_SWITCH_TABLE_1); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_antenna(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; unsigned int i; char buf[20]; if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) return -EFAULT; if (strncmp(buf, "diversity", 9) == 0) { ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); printk(KERN_INFO "ath5k debug: enable diversity\n"); } else if (strncmp(buf, "fixed-a", 7) == 0) { ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A); printk(KERN_INFO "ath5k debugfs: fixed antenna A\n"); } else if (strncmp(buf, "fixed-b", 7) == 0) { ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B); printk(KERN_INFO "ath5k debug: fixed antenna B\n"); } else if (strncmp(buf, "clear", 5) == 0) { for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) { ah->stats.antenna_rx[i] = 0; ah->stats.antenna_tx[i] = 0; } printk(KERN_INFO "ath5k debug: cleared antenna stats\n"); } return count; } static const struct file_operations fops_antenna = { .read = read_file_antenna, .write = write_file_antenna, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: misc */ static ssize_t read_file_misc(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[700]; unsigned int len = 0; u32 filt = ath5k_hw_get_rx_filter(ah); len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n", ah->bssidmask); len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ", filt); if (filt & AR5K_RX_FILTER_UCAST) len += snprintf(buf + len, sizeof(buf) - len, " UCAST"); if (filt & AR5K_RX_FILTER_MCAST) len += snprintf(buf + len, sizeof(buf) - len, " MCAST"); if (filt & AR5K_RX_FILTER_BCAST) len += snprintf(buf + len, 
sizeof(buf) - len, " BCAST"); if (filt & AR5K_RX_FILTER_CONTROL) len += snprintf(buf + len, sizeof(buf) - len, " CONTROL"); if (filt & AR5K_RX_FILTER_BEACON) len += snprintf(buf + len, sizeof(buf) - len, " BEACON"); if (filt & AR5K_RX_FILTER_PROM) len += snprintf(buf + len, sizeof(buf) - len, " PROM"); if (filt & AR5K_RX_FILTER_XRPOLL) len += snprintf(buf + len, sizeof(buf) - len, " XRPOLL"); if (filt & AR5K_RX_FILTER_PROBEREQ) len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ"); if (filt & AR5K_RX_FILTER_PHYERR_5212) len += snprintf(buf + len, sizeof(buf) - len, " PHYERR-5212"); if (filt & AR5K_RX_FILTER_RADARERR_5212) len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5212"); if (filt & AR5K_RX_FILTER_PHYERR_5211) snprintf(buf + len, sizeof(buf) - len, " PHYERR-5211"); if (filt & AR5K_RX_FILTER_RADARERR_5211) len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5211"); len += snprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n", ath_opmode_to_string(ah->opmode), ah->opmode); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_misc = { .read = read_file_misc, .open = simple_open, .owner = THIS_MODULE, }; /* debugfs: frameerrors */ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; struct ath5k_statistics *st = &ah->stats; char buf[700]; unsigned int len = 0; int i; len += snprintf(buf + len, sizeof(buf) - len, "RX\n---------------------\n"); len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n", st->rxerr_crc, st->rx_all_count > 0 ? st->rxerr_crc * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n", st->rxerr_phy, st->rx_all_count > 0 ? 
st->rxerr_phy * 100 / st->rx_all_count : 0); for (i = 0; i < 32; i++) { if (st->rxerr_phy_code[i]) len += snprintf(buf + len, sizeof(buf) - len, " phy_err[%u]\t%u\n", i, st->rxerr_phy_code[i]); } len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n", st->rxerr_fifo, st->rx_all_count > 0 ? st->rxerr_fifo * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n", st->rxerr_decrypt, st->rx_all_count > 0 ? st->rxerr_decrypt * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n", st->rxerr_mic, st->rx_all_count > 0 ? st->rxerr_mic * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n", st->rxerr_proc, st->rx_all_count > 0 ? st->rxerr_proc * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n", st->rxerr_jumbo, st->rx_all_count > 0 ? st->rxerr_jumbo * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n", st->rx_all_count); len += snprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n", st->rx_bytes_count); len += snprintf(buf + len, sizeof(buf) - len, "\nTX\n---------------------\n"); len += snprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n", st->txerr_retry, st->tx_all_count > 0 ? st->txerr_retry * 100 / st->tx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n", st->txerr_fifo, st->tx_all_count > 0 ? st->txerr_fifo * 100 / st->tx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n", st->txerr_filt, st->tx_all_count > 0 ? 
st->txerr_filt * 100 / st->tx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n", st->tx_all_count); len += snprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n", st->tx_bytes_count); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_frameerrors(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; struct ath5k_statistics *st = &ah->stats; char buf[20]; if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) return -EFAULT; if (strncmp(buf, "clear", 5) == 0) { st->rxerr_crc = 0; st->rxerr_phy = 0; st->rxerr_fifo = 0; st->rxerr_decrypt = 0; st->rxerr_mic = 0; st->rxerr_proc = 0; st->rxerr_jumbo = 0; st->rx_all_count = 0; st->txerr_retry = 0; st->txerr_fifo = 0; st->txerr_filt = 0; st->tx_all_count = 0; printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n"); } return count; } static const struct file_operations fops_frameerrors = { .read = read_file_frameerrors, .write = write_file_frameerrors, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: ani */ static ssize_t read_file_ani(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; struct ath5k_statistics *st = &ah->stats; struct ath5k_ani_state *as = &ah->ani_state; char buf[700]; unsigned int len = 0; len += snprintf(buf + len, sizeof(buf) - len, "HW has PHY error counters:\t%s\n", ah->ah_capabilities.cap_has_phyerr_counters ? 
"yes" : "no"); len += snprintf(buf + len, sizeof(buf) - len, "HW max spur immunity level:\t%d\n", as->max_spur_level); len += snprintf(buf + len, sizeof(buf) - len, "\nANI state\n--------------------------------------------\n"); len += snprintf(buf + len, sizeof(buf) - len, "operating mode:\t\t\t"); switch (as->ani_mode) { case ATH5K_ANI_MODE_OFF: len += snprintf(buf + len, sizeof(buf) - len, "OFF\n"); break; case ATH5K_ANI_MODE_MANUAL_LOW: len += snprintf(buf + len, sizeof(buf) - len, "MANUAL LOW\n"); break; case ATH5K_ANI_MODE_MANUAL_HIGH: len += snprintf(buf + len, sizeof(buf) - len, "MANUAL HIGH\n"); break; case ATH5K_ANI_MODE_AUTO: len += snprintf(buf + len, sizeof(buf) - len, "AUTO\n"); break; default: len += snprintf(buf + len, sizeof(buf) - len, "??? (not good)\n"); break; } len += snprintf(buf + len, sizeof(buf) - len, "noise immunity level:\t\t%d\n", as->noise_imm_level); len += snprintf(buf + len, sizeof(buf) - len, "spur immunity level:\t\t%d\n", as->spur_level); len += snprintf(buf + len, sizeof(buf) - len, "firstep level:\t\t\t%d\n", as->firstep_level); len += snprintf(buf + len, sizeof(buf) - len, "OFDM weak signal detection:\t%s\n", as->ofdm_weak_sig ? "on" : "off"); len += snprintf(buf + len, sizeof(buf) - len, "CCK weak signal detection:\t%s\n", as->cck_weak_sig ? "on" : "off"); len += snprintf(buf + len, sizeof(buf) - len, "\nMIB INTERRUPTS:\t\t%u\n", st->mib_intr); len += snprintf(buf + len, sizeof(buf) - len, "beacon RSSI average:\t%d\n", (int)ewma_read(&ah->ah_beacon_rssi_avg)); #define CC_PRINT(_struct, _field) \ _struct._field, \ _struct.cycles > 0 ? 
\ _struct._field * 100 / _struct.cycles : 0 len += snprintf(buf + len, sizeof(buf) - len, "profcnt tx\t\t%u\t(%d%%)\n", CC_PRINT(as->last_cc, tx_frame)); len += snprintf(buf + len, sizeof(buf) - len, "profcnt rx\t\t%u\t(%d%%)\n", CC_PRINT(as->last_cc, rx_frame)); len += snprintf(buf + len, sizeof(buf) - len, "profcnt busy\t\t%u\t(%d%%)\n", CC_PRINT(as->last_cc, rx_busy)); #undef CC_PRINT len += snprintf(buf + len, sizeof(buf) - len, "profcnt cycles\t\t%u\n", as->last_cc.cycles); len += snprintf(buf + len, sizeof(buf) - len, "listen time\t\t%d\tlast: %d\n", as->listen_time, as->last_listen); len += snprintf(buf + len, sizeof(buf) - len, "OFDM errors\t\t%u\tlast: %u\tsum: %u\n", as->ofdm_errors, as->last_ofdm_errors, as->sum_ofdm_errors); len += snprintf(buf + len, sizeof(buf) - len, "CCK errors\t\t%u\tlast: %u\tsum: %u\n", as->cck_errors, as->last_cck_errors, as->sum_cck_errors); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_PHYERR_CNT1\t%x\t(=%d)\n", ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1), ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1))); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_PHYERR_CNT2\t%x\t(=%d)\n", ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2), ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2))); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_ani(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[20]; if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) return -EFAULT; if (strncmp(buf, "sens-low", 8) == 0) { ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH); } else if (strncmp(buf, "sens-high", 9) == 0) { ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_LOW); } else if (strncmp(buf, "ani-off", 7) == 0) { ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF); } else if (strncmp(buf, "ani-on", 6) == 0) { ath5k_ani_init(ah, 
ATH5K_ANI_MODE_AUTO); } else if (strncmp(buf, "noise-low", 9) == 0) { ath5k_ani_set_noise_immunity_level(ah, 0); } else if (strncmp(buf, "noise-high", 10) == 0) { ath5k_ani_set_noise_immunity_level(ah, ATH5K_ANI_MAX_NOISE_IMM_LVL); } else if (strncmp(buf, "spur-low", 8) == 0) { ath5k_ani_set_spur_immunity_level(ah, 0); } else if (strncmp(buf, "spur-high", 9) == 0) { ath5k_ani_set_spur_immunity_level(ah, ah->ani_state.max_spur_level); } else if (strncmp(buf, "fir-low", 7) == 0) { ath5k_ani_set_firstep_level(ah, 0); } else if (strncmp(buf, "fir-high", 8) == 0) { ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL); } else if (strncmp(buf, "ofdm-off", 8) == 0) { ath5k_ani_set_ofdm_weak_signal_detection(ah, false); } else if (strncmp(buf, "ofdm-on", 7) == 0) { ath5k_ani_set_ofdm_weak_signal_detection(ah, true); } else if (strncmp(buf, "cck-off", 7) == 0) { ath5k_ani_set_cck_weak_signal_detection(ah, false); } else if (strncmp(buf, "cck-on", 6) == 0) { ath5k_ani_set_cck_weak_signal_detection(ah, true); } return count; } static const struct file_operations fops_ani = { .read = read_file_ani, .write = write_file_ani, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: queues etc */ static ssize_t read_file_queue(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[700]; unsigned int len = 0; struct ath5k_txq *txq; struct ath5k_buf *bf, *bf0; int i, n; len += snprintf(buf + len, sizeof(buf) - len, "available txbuffers: %d\n", ah->txbuf_len); for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { txq = &ah->txqs[i]; len += snprintf(buf + len, sizeof(buf) - len, "%02d: %ssetup\n", i, txq->setup ? 
"" : "not "); if (!txq->setup) continue; n = 0; spin_lock_bh(&txq->lock); list_for_each_entry_safe(bf, bf0, &txq->q, list) n++; spin_unlock_bh(&txq->lock); len += snprintf(buf + len, sizeof(buf) - len, " len: %d bufs: %d\n", txq->txq_len, n); len += snprintf(buf + len, sizeof(buf) - len, " stuck: %d\n", txq->txq_stuck); } if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_queue(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[20]; if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) return -EFAULT; if (strncmp(buf, "start", 5) == 0) ieee80211_wake_queues(ah->hw); else if (strncmp(buf, "stop", 4) == 0) ieee80211_stop_queues(ah->hw); return count; } static const struct file_operations fops_queue = { .read = read_file_queue, .write = write_file_queue, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; void ath5k_debug_init_device(struct ath5k_hw *ah) { struct dentry *phydir; ah->debug.level = ath5k_debug; phydir = debugfs_create_dir("ath5k", ah->hw->wiphy->debugfsdir); if (!phydir) return; debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, ah, &fops_debug); debugfs_create_file("registers", S_IRUSR, phydir, ah, &fops_registers); debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, ah, &fops_beacon); debugfs_create_file("reset", S_IWUSR, phydir, ah, &fops_reset); debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, ah, &fops_antenna); debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc); debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah, &fops_frameerrors); debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, ah, &fops_ani); debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, ah, &fops_queue); debugfs_create_bool("32khz_clock", S_IWUSR | S_IRUSR, phydir, &ah->ah_use_32khz_clock); } /* functions used in other places */ void 
ath5k_debug_dump_bands(struct ath5k_hw *ah) { unsigned int b, i; if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS))) return; BUG_ON(!ah->sbands); for (b = 0; b < IEEE80211_NUM_BANDS; b++) { struct ieee80211_supported_band *band = &ah->sbands[b]; char bname[6]; switch (band->band) { case IEEE80211_BAND_2GHZ: strcpy(bname, "2 GHz"); break; case IEEE80211_BAND_5GHZ: strcpy(bname, "5 GHz"); break; default: printk(KERN_DEBUG "Band not supported: %d\n", band->band); return; } printk(KERN_DEBUG "Band %s: channels %d, rates %d\n", bname, band->n_channels, band->n_bitrates); printk(KERN_DEBUG " channels:\n"); for (i = 0; i < band->n_channels; i++) printk(KERN_DEBUG " %3d %d %.4x %.4x\n", ieee80211_frequency_to_channel( band->channels[i].center_freq), band->channels[i].center_freq, band->channels[i].hw_value, band->channels[i].flags); printk(KERN_DEBUG " rates:\n"); for (i = 0; i < band->n_bitrates; i++) printk(KERN_DEBUG " %4d %.4x %.4x %.4x\n", band->bitrates[i].bitrate, band->bitrates[i].hw_value, band->bitrates[i].flags, band->bitrates[i].hw_value_short); } } static inline void ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done, struct ath5k_rx_status *rs) { struct ath5k_desc *ds = bf->desc; struct ath5k_hw_all_rx_desc *rd = &ds->ud.ds_rx; printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link, ds->ds_data, rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1, rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1, !done ? ' ' : (rs->rs_status == 0) ? 
'*' : '!'); } void ath5k_debug_printrxbuffs(struct ath5k_hw *ah) { struct ath5k_desc *ds; struct ath5k_buf *bf; struct ath5k_rx_status rs = {}; int status; if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC))) return; printk(KERN_DEBUG "rxdp %x, rxlink %p\n", ath5k_hw_get_rxdp(ah), ah->rxlink); spin_lock_bh(&ah->rxbuflock); list_for_each_entry(bf, &ah->rxbuf, list) { ds = bf->desc; status = ah->ah_proc_rx_desc(ah, ds, &rs); if (!status) ath5k_debug_printrxbuf(bf, status == 0, &rs); } spin_unlock_bh(&ah->rxbuflock); } void ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf) { struct ath5k_desc *ds = bf->desc; struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212; struct ath5k_tx_status ts = {}; int done; if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC))) return; done = ah->ah_proc_tx_desc(ah, bf->desc, &ts); printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x " "%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link, ds->ds_data, td->tx_ctl.tx_control_0, td->tx_ctl.tx_control_1, td->tx_ctl.tx_control_2, td->tx_ctl.tx_control_3, td->tx_stat.tx_status_0, td->tx_stat.tx_status_1, done ? ' ' : (ts.ts_status == 0) ? '*' : '!'); }
gpl-2.0
codeworkx/android_kernel_rockchip_rk30xx
mm/net/ceph/ceph_hash.c
8104
2831
#include <linux/ceph/types.h>
#include <linux/module.h>

/*
 * Robert Jenkin's hash function.
 * http://burtleburtle.net/bob/hash/evahash.html
 * This is in the public domain.
 *
 * Reversibly scrambles the three-word internal state; every output bit
 * of the original lookup hash depends on every input bit.
 */
#define mix(x, y, z)						\
	do {							\
		x = x - y;  x = x - z;  x = x ^ (z >> 13);	\
		y = y - z;  y = y - x;  y = y ^ (x << 8);	\
		z = z - x;  z = z - y;  z = z ^ (y >> 13);	\
		x = x - y;  x = x - z;  x = x ^ (z >> 12);	\
		y = y - z;  y = y - x;  y = y ^ (x << 16);	\
		z = z - x;  z = z - y;  z = z ^ (y >> 5);	\
		x = x - y;  x = x - z;  x = x ^ (z >> 3);	\
		y = y - z;  y = y - x;  y = y ^ (x << 10);	\
		z = z - x;  z = z - y;  z = z ^ (y >> 15);	\
	} while (0)

/* Assemble the next four input bytes as a little-endian 32-bit word. */
static __u32 ceph_rj_word(const unsigned char *p)
{
	return p[0] + ((__u32)p[1] << 8) + ((__u32)p[2] << 16) +
	       ((__u32)p[3] << 24);
}

/*
 * ceph_str_hash_rjenkins - hash @length bytes of @str with Jenkins' mix
 *
 * The state starts from the golden-ratio constant; the total length is
 * folded into the third word so strings of different lengths hash
 * differently even when they share a prefix of trailing NULs.
 */
unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
{
	const unsigned char *pos = (const unsigned char *)str;
	__u32 x = 0x9e3779b9;	/* golden ratio; an arbitrary seed */
	__u32 y = 0x9e3779b9;
	__u32 z = 0;		/* running state */
	__u32 left = length;	/* key bytes not yet mixed in */

	/* Consume the input twelve bytes (three words) at a time. */
	while (left >= 12) {
		x += ceph_rj_word(pos);
		y += ceph_rj_word(pos + 4);
		z += ceph_rj_word(pos + 8);
		mix(x, y, z);
		pos += 12;
		left -= 12;
	}

	/* Fold in the trailing 0..11 bytes; every case falls through. */
	z += length;
	switch (left) {
	case 11: z += (__u32)pos[10] << 24;
	case 10: z += (__u32)pos[9] << 16;
	case 9:  z += (__u32)pos[8] << 8;
		/* the low byte of z is reserved for the length */
	case 8:  y += (__u32)pos[7] << 24;
	case 7:  y += (__u32)pos[6] << 16;
	case 6:  y += (__u32)pos[5] << 8;
	case 5:  y += pos[4];
	case 4:  x += (__u32)pos[3] << 24;
	case 3:  x += (__u32)pos[2] << 16;
	case 2:  x += (__u32)pos[1] << 8;
	case 1:  x += pos[0];
		/* case 0: nothing left to add */
	}
	mix(x, y, z);

	return z;
}

/*
 * linux dcache hash
 */
unsigned ceph_str_hash_linux(const char *str, unsigned length)
{
	unsigned long hash = 0;
	unsigned i;

	for (i = 0; i < length; i++) {
		unsigned char c = (unsigned char)str[i];

		hash = (hash + (c << 4) + (c >> 4)) * 11;
	}
	return hash;
}

/* Dispatch on @type; an unknown type yields (unsigned)-1. */
unsigned ceph_str_hash(int type, const char *s, unsigned len)
{
	if (type == CEPH_STR_HASH_LINUX)
		return ceph_str_hash_linux(s, len);
	if (type == CEPH_STR_HASH_RJENKINS)
		return ceph_str_hash_rjenkins(s, len);
	return -1;
}
EXPORT_SYMBOL(ceph_str_hash);

/* Human-readable name of a hash type, "unknown" for anything else. */
const char *ceph_str_hash_name(int type)
{
	if (type == CEPH_STR_HASH_LINUX)
		return "linux";
	if (type == CEPH_STR_HASH_RJENKINS)
		return "rjenkins";
	return "unknown";
}
EXPORT_SYMBOL(ceph_str_hash_name);
gpl-2.0
gabry3795/android_kernel_huawei_mt7_l09
sound/core/hrtimer.c
10152
4151
/*
 * ALSA timer back-end using hrtimer
 * Copyright (C) 2008 Takashi Iwai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hrtimer.h>
#include <sound/core.h>
#include <sound/timer.h>

MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA hrtimer backend");
MODULE_LICENSE("GPL");

MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_HRTIMER));

#define NANO_SEC 1000000000UL	/* 10^9 in sec */

/* hrtimer resolution in ns, probed once at module init */
static unsigned int resolution;

/* Per-open instance state, hung off snd_timer->private_data. */
struct snd_hrtimer {
	struct snd_timer *timer;	/* owning ALSA timer */
	struct hrtimer hrt;		/* the backing hrtimer */
	atomic_t running;		/* 1 while armed via start() */
};

/*
 * Expiry callback: if still running, advance the hrtimer by the
 * programmed period (t->sticks * resolution ns) and report the number
 * of elapsed periods to the ALSA timer core.  The running flag is
 * re-checked after snd_timer_interrupt() because stop() may have been
 * called from the interrupt handling itself.
 */
static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
	struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
	struct snd_timer *t = stime->timer;
	unsigned long oruns;

	if (!atomic_read(&stime->running))
		return HRTIMER_NORESTART;

	oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
	snd_timer_interrupt(stime->timer, t->sticks * oruns);

	if (!atomic_read(&stime->running))
		return HRTIMER_NORESTART;
	return HRTIMER_RESTART;
}

/* Allocate and initialize the per-timer state (timer not yet armed). */
static int snd_hrtimer_open(struct snd_timer *t)
{
	struct snd_hrtimer *stime;

	stime = kmalloc(sizeof(*stime), GFP_KERNEL);
	if (!stime)
		return -ENOMEM;
	hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	stime->timer = t;
	stime->hrt.function = snd_hrtimer_callback;
	atomic_set(&stime->running, 0);
	t->private_data = stime;
	return 0;
}

/* Cancel any pending expiry and free the per-timer state. */
static int snd_hrtimer_close(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	if (stime) {
		hrtimer_cancel(&stime->hrt);
		kfree(stime);
		t->private_data = NULL;
	}
	return 0;
}

/*
 * (Re)arm the timer: clear the running flag so a concurrent callback
 * bails out, cancel any pending expiry, then start a fresh period and
 * mark running again.
 */
static int snd_hrtimer_start(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	atomic_set(&stime->running, 0);
	hrtimer_cancel(&stime->hrt);
	hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
		      HRTIMER_MODE_REL);
	atomic_set(&stime->running, 1);
	return 0;
}

/*
 * Stop only clears the running flag; the next callback invocation sees
 * it and returns HRTIMER_NORESTART.  NOTE(review): the hrtimer itself
 * is cancelled only in close()/start(), so one more (ignored) expiry
 * may fire after stop() -- confirm this is the intended lifecycle.
 */
static int snd_hrtimer_stop(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	atomic_set(&stime->running, 0);
	return 0;
}

static struct snd_timer_hardware hrtimer_hw = {
	.flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_TASKLET,
	.open = snd_hrtimer_open,
	.close = snd_hrtimer_close,
	.start = snd_hrtimer_start,
	.stop = snd_hrtimer_stop,
};

/*
 * entry functions
 */

static struct snd_timer *mytimer;

/*
 * Probe the CLOCK_MONOTONIC hrtimer resolution (must be sub-second and
 * non-zero), then create and register the global ALSA hrtimer.
 */
static int __init snd_hrtimer_init(void)
{
	struct snd_timer *timer;
	struct timespec tp;
	int err;

	hrtimer_get_res(CLOCK_MONOTONIC, &tp);
	if (tp.tv_sec > 0 || !tp.tv_nsec) {
		snd_printk(KERN_ERR
			   "snd-hrtimer: Invalid resolution %u.%09u",
			   (unsigned)tp.tv_sec, (unsigned)tp.tv_nsec);
		return -EINVAL;
	}
	resolution = tp.tv_nsec;

	/* Create a new timer and set up the fields */
	err = snd_timer_global_new("hrtimer", SNDRV_TIMER_GLOBAL_HRTIMER,
				   &timer);
	if (err < 0)
		return err;

	timer->module = THIS_MODULE;
	strcpy(timer->name, "HR timer");
	timer->hw = hrtimer_hw;
	timer->hw.resolution = resolution;
	timer->hw.ticks = NANO_SEC / resolution;

	err = snd_timer_global_register(timer);
	if (err < 0) {
		snd_timer_global_free(timer);
		return err;
	}
	mytimer = timer; /* remember this */
	return 0;
}

/* Unregister and free the global timer on module unload. */
static void __exit snd_hrtimer_exit(void)
{
	if (mytimer) {
		snd_timer_global_free(mytimer);
		mytimer = NULL;
	}
}

module_init(snd_hrtimer_init);
module_exit(snd_hrtimer_exit);
gpl-2.0
mtk00874/kernel-mediatek
block/blk-settings.c
169
27046
/* * Functions related to setting various queue properties from drivers */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ #include <linux/gcd.h> #include <linux/lcm.h> #include <linux/jiffies.h> #include <linux/gfp.h> #include "blk.h" unsigned long blk_max_low_pfn; EXPORT_SYMBOL(blk_max_low_pfn); unsigned long blk_max_pfn; /** * blk_queue_prep_rq - set a prepare_request function for queue * @q: queue * @pfn: prepare_request function * * It's possible for a queue to register a prepare_request callback which * is invoked before the request is handed to the request_fn. The goal of * the function is to prepare a request for I/O, it can be used to build a * cdb from the request data for instance. * */ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) { q->prep_rq_fn = pfn; } EXPORT_SYMBOL(blk_queue_prep_rq); /** * blk_queue_unprep_rq - set an unprepare_request function for queue * @q: queue * @ufn: unprepare_request function * * It's possible for a queue to register an unprepare_request callback * which is invoked before the request is finally completed. The goal * of the function is to deallocate any data that was allocated in the * prepare_request callback. * */ void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) { q->unprep_rq_fn = ufn; } EXPORT_SYMBOL(blk_queue_unprep_rq); /** * blk_queue_merge_bvec - set a merge_bvec function for queue * @q: queue * @mbfn: merge_bvec_fn * * Usually queues have static limitations on the max sectors or segments that * we can put in a request. Stacking drivers may have some settings that * are dynamic, and thus we have to query the queue whether it is ok to * add a new bio_vec to a bio at a given offset or not. If the block device * has such limitations, it needs to register a merge_bvec_fn to control * the size of bio's sent to it. 
Note that a block device *must* allow a * single page to be added to an empty bio. The block device driver may want * to use the bio_split() function to deal with these bio's. By default * no merge_bvec_fn is defined for a queue, and only the fixed limits are * honored. */ void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) { q->merge_bvec_fn = mbfn; } EXPORT_SYMBOL(blk_queue_merge_bvec); void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) { q->softirq_done_fn = fn; } EXPORT_SYMBOL(blk_queue_softirq_done); void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) { q->rq_timeout = timeout; } EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) { q->rq_timed_out_fn = fn; } EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out); void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn) { q->lld_busy_fn = fn; } EXPORT_SYMBOL_GPL(blk_queue_lld_busy); /** * blk_set_default_limits - reset limits to default values * @lim: the queue_limits structure to reset * * Description: * Returns a queue_limit struct to its default state. 
*/ void blk_set_default_limits(struct queue_limits *lim) { lim->max_segments = BLK_MAX_SEGMENTS; lim->max_integrity_segments = 0; lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; lim->max_write_same_sectors = 0; lim->max_discard_sectors = 0; lim->discard_granularity = 0; lim->discard_alignment = 0; lim->discard_misaligned = 0; lim->discard_zeroes_data = 0; lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); lim->alignment_offset = 0; lim->io_opt = 0; lim->misaligned = 0; lim->cluster = 1; } EXPORT_SYMBOL(blk_set_default_limits); /** * blk_set_stacking_limits - set default limits for stacking devices * @lim: the queue_limits structure to reset * * Description: * Returns a queue_limit struct to its default state. Should be used * by stacking drivers like DM that have no internal limits. */ void blk_set_stacking_limits(struct queue_limits *lim) { blk_set_default_limits(lim); /* Inherit limits from component devices */ lim->discard_zeroes_data = 1; lim->max_segments = USHRT_MAX; lim->max_hw_sectors = UINT_MAX; lim->max_segment_size = UINT_MAX; lim->max_sectors = UINT_MAX; lim->max_write_same_sectors = UINT_MAX; } EXPORT_SYMBOL(blk_set_stacking_limits); /** * blk_queue_make_request - define an alternate make_request function for a device * @q: the request queue for the device to be affected * @mfn: the alternate make_request function * * Description: * The normal way for &struct bios to be passed to a device * driver is for them to be collected into requests on a request * queue, and then to allow the device driver to select requests * off that queue when it is ready. This works well for many block * devices. 
However some block devices (typically virtual devices * such as md or lvm) do not benefit from the processing on the * request queue, and are served best by having the requests passed * directly to them. This can be achieved by providing a function * to blk_queue_make_request(). * * Caveat: * The driver that does this *must* be able to deal appropriately * with buffers in "highmemory". This can be accomplished by either calling * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling * blk_queue_bounce() to create a buffer in normal memory. **/ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) { /* * set defaults */ q->nr_requests = BLKDEV_MAX_RQ; q->make_request_fn = mfn; blk_queue_dma_alignment(q, 511); blk_queue_congestion_threshold(q); q->nr_batching = BLK_BATCH_REQ; blk_set_default_limits(&q->limits); /* * by default assume old behaviour and bounce for any highmem page */ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); } EXPORT_SYMBOL(blk_queue_make_request); /** * blk_queue_bounce_limit - set bounce buffer limit for queue * @q: the request queue for the device * @max_addr: the maximum address the device can handle * * Description: * Different hardware can have different requirements as to what pages * it can do I/O directly to. A low level driver can call * blk_queue_bounce_limit to have lower memory pages allocated as bounce * buffers for doing I/O to pages residing above @max_addr. **/ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr) { unsigned long b_pfn = max_addr >> PAGE_SHIFT; int dma = 0; q->bounce_gfp = GFP_NOIO; #if BITS_PER_LONG == 64 /* * Assume anything <= 4GB can be handled by IOMMU. Actually * some IOMMUs can handle everything, but I don't know of a * way to test this here. 
*/ if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) dma = 1; q->limits.bounce_pfn = max(max_low_pfn, b_pfn); #else if (b_pfn < blk_max_low_pfn) dma = 1; q->limits.bounce_pfn = b_pfn; #endif if (dma) { init_emergency_isa_pool(); q->bounce_gfp = GFP_NOIO | GFP_DMA; q->limits.bounce_pfn = b_pfn; } } EXPORT_SYMBOL(blk_queue_bounce_limit); /** * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request * @limits: the queue limits * @max_hw_sectors: max hardware sectors in the usual 512b unit * * Description: * Enables a low level driver to set a hard upper limit, * max_hw_sectors, on the size of requests. max_hw_sectors is set by * the device driver based upon the combined capabilities of I/O * controller and storage device. * * max_sectors is a soft limit imposed by the block layer for * filesystem type requests. This value can be overridden on a * per-device basis in /sys/block/<device>/queue/max_sectors_kb. * The soft limit can not exceed max_hw_sectors. **/ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors) { if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); printk(KERN_INFO "%s: set to minimum %d\n", __func__, max_hw_sectors); } limits->max_hw_sectors = max_hw_sectors; limits->max_sectors = min_t(unsigned int, max_hw_sectors, BLK_DEF_MAX_SECTORS); } EXPORT_SYMBOL(blk_limits_max_hw_sectors); /** * blk_queue_max_hw_sectors - set max sectors for a request for this queue * @q: the request queue for the device * @max_hw_sectors: max hardware sectors in the usual 512b unit * * Description: * See description for blk_limits_max_hw_sectors(). 
**/ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) { blk_limits_max_hw_sectors(&q->limits, max_hw_sectors); } EXPORT_SYMBOL(blk_queue_max_hw_sectors); /** * blk_queue_max_discard_sectors - set max sectors for a single discard * @q: the request queue for the device * @max_discard_sectors: maximum number of sectors to discard **/ void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors) { q->limits.max_discard_sectors = max_discard_sectors; } EXPORT_SYMBOL(blk_queue_max_discard_sectors); /** * blk_queue_max_write_same_sectors - set max sectors for a single write same * @q: the request queue for the device * @max_write_same_sectors: maximum number of sectors to write per command **/ void blk_queue_max_write_same_sectors(struct request_queue *q, unsigned int max_write_same_sectors) { q->limits.max_write_same_sectors = max_write_same_sectors; } EXPORT_SYMBOL(blk_queue_max_write_same_sectors); /** * blk_queue_max_segments - set max hw segments for a request for this queue * @q: the request queue for the device * @max_segments: max number of segments * * Description: * Enables a low level driver to set an upper limit on the number of * hw data segments in a request. 
**/ void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) { if (!max_segments) { max_segments = 1; printk(KERN_INFO "%s: set to minimum %d\n", __func__, max_segments); } q->limits.max_segments = max_segments; } EXPORT_SYMBOL(blk_queue_max_segments); /** * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg * @q: the request queue for the device * @max_size: max size of segment in bytes * * Description: * Enables a low level driver to set an upper limit on the size of a * coalesced segment **/ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) { if (max_size < PAGE_CACHE_SIZE) { max_size = PAGE_CACHE_SIZE; printk(KERN_INFO "%s: set to minimum %d\n", __func__, max_size); } q->limits.max_segment_size = max_size; } EXPORT_SYMBOL(blk_queue_max_segment_size); /** * blk_queue_logical_block_size - set logical block size for the queue * @q: the request queue for the device * @size: the logical block size, in bytes * * Description: * This should be set to the lowest possible block size that the * storage device can address. The default of 512 covers most * hardware. **/ void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) { q->limits.logical_block_size = size; if (q->limits.physical_block_size < size) q->limits.physical_block_size = size; if (q->limits.io_min < q->limits.physical_block_size) q->limits.io_min = q->limits.physical_block_size; } EXPORT_SYMBOL(blk_queue_logical_block_size); /** * blk_queue_physical_block_size - set physical block size for the queue * @q: the request queue for the device * @size: the physical block size, in bytes * * Description: * This should be set to the lowest possible sector size that the * hardware can operate on without reverting to read-modify-write * operations. 
*/ void blk_queue_physical_block_size(struct request_queue *q, unsigned int size) { q->limits.physical_block_size = size; if (q->limits.physical_block_size < q->limits.logical_block_size) q->limits.physical_block_size = q->limits.logical_block_size; if (q->limits.io_min < q->limits.physical_block_size) q->limits.io_min = q->limits.physical_block_size; } EXPORT_SYMBOL(blk_queue_physical_block_size); /** * blk_queue_alignment_offset - set physical block alignment offset * @q: the request queue for the device * @offset: alignment offset in bytes * * Description: * Some devices are naturally misaligned to compensate for things like * the legacy DOS partition table 63-sector offset. Low-level drivers * should call this function for devices whose first sector is not * naturally aligned. */ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) { q->limits.alignment_offset = offset & (q->limits.physical_block_size - 1); q->limits.misaligned = 0; } EXPORT_SYMBOL(blk_queue_alignment_offset); /** * blk_limits_io_min - set minimum request size for a device * @limits: the queue limits * @min: smallest I/O size in bytes * * Description: * Some devices have an internal block size bigger than the reported * hardware sector size. This function can be used to signal the * smallest I/O the device can perform without incurring a performance * penalty. 
*/ void blk_limits_io_min(struct queue_limits *limits, unsigned int min) { limits->io_min = min; if (limits->io_min < limits->logical_block_size) limits->io_min = limits->logical_block_size; if (limits->io_min < limits->physical_block_size) limits->io_min = limits->physical_block_size; } EXPORT_SYMBOL(blk_limits_io_min); /** * blk_queue_io_min - set minimum request size for the queue * @q: the request queue for the device * @min: smallest I/O size in bytes * * Description: * Storage devices may report a granularity or preferred minimum I/O * size which is the smallest request the device can perform without * incurring a performance penalty. For disk drives this is often the * physical block size. For RAID arrays it is often the stripe chunk * size. A properly aligned multiple of minimum_io_size is the * preferred request size for workloads where a high number of I/O * operations is desired. */ void blk_queue_io_min(struct request_queue *q, unsigned int min) { blk_limits_io_min(&q->limits, min); } EXPORT_SYMBOL(blk_queue_io_min); /** * blk_limits_io_opt - set optimal request size for a device * @limits: the queue limits * @opt: smallest I/O size in bytes * * Description: * Storage devices may report an optimal I/O size, which is the * device's preferred unit for sustained I/O. This is rarely reported * for disk drives. For RAID arrays it is usually the stripe width or * the internal track size. A properly aligned multiple of * optimal_io_size is the preferred request size for workloads where * sustained throughput is desired. */ void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt) { limits->io_opt = opt; } EXPORT_SYMBOL(blk_limits_io_opt); /** * blk_queue_io_opt - set optimal request size for the queue * @q: the request queue for the device * @opt: optimal request size in bytes * * Description: * Storage devices may report an optimal I/O size, which is the * device's preferred unit for sustained I/O. This is rarely reported * for disk drives. 
For RAID arrays it is usually the stripe width or * the internal track size. A properly aligned multiple of * optimal_io_size is the preferred request size for workloads where * sustained throughput is desired. */ void blk_queue_io_opt(struct request_queue *q, unsigned int opt) { blk_limits_io_opt(&q->limits, opt); } EXPORT_SYMBOL(blk_queue_io_opt); /** * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers * @t: the stacking driver (top) * @b: the underlying device (bottom) **/ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) { blk_stack_limits(&t->limits, &b->limits, 0); } EXPORT_SYMBOL(blk_queue_stack_limits); /** * blk_stack_limits - adjust queue_limits for stacked devices * @t: the stacking driver limits (top device) * @b: the underlying queue limits (bottom, component device) * @start: first data sector within component device * * Description: * This function is used by stacking drivers like MD and DM to ensure * that all component devices have compatible block sizes and * alignments. The stacking driver must provide a queue_limits * struct (top) and then iteratively call the stacking function for * all component (bottom) devices. The stacking function will * attempt to combine the values and ensure proper alignment. * * Returns 0 if the top and bottom queue_limits are compatible. The * top device's block sizes and alignment offsets may be adjusted to * ensure alignment with the bottom device. If no compatible sizes * and alignments exist, -1 is returned and the resulting top * queue_limits will have the misaligned flag set to indicate that * the alignment_offset is undefined. 
*/ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t start) { unsigned int top, bottom, alignment, ret = 0; t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); t->max_write_same_sectors = min(t->max_write_same_sectors, b->max_write_same_sectors); t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); t->max_segments = min_not_zero(t->max_segments, b->max_segments); t->max_integrity_segments = min_not_zero(t->max_integrity_segments, b->max_integrity_segments); t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); t->misaligned |= b->misaligned; alignment = queue_limit_alignment_offset(b, start); /* Bottom device has different alignment. Check that it is * compatible with the current top alignment. */ if (t->alignment_offset != alignment) { top = max(t->physical_block_size, t->io_min) + t->alignment_offset; bottom = max(b->physical_block_size, b->io_min) + alignment; /* Verify that top and bottom intervals line up */ if (max(top, bottom) & (min(top, bottom) - 1)) { t->misaligned = 1; ret = -1; } } t->logical_block_size = max(t->logical_block_size, b->logical_block_size); t->physical_block_size = max(t->physical_block_size, b->physical_block_size); t->io_min = max(t->io_min, b->io_min); t->io_opt = lcm(t->io_opt, b->io_opt); t->cluster &= b->cluster; t->discard_zeroes_data &= b->discard_zeroes_data; /* Physical block size a multiple of the logical block size? */ if (t->physical_block_size & (t->logical_block_size - 1)) { t->physical_block_size = t->logical_block_size; t->misaligned = 1; ret = -1; } /* Minimum I/O a multiple of the physical block size? */ if (t->io_min & (t->physical_block_size - 1)) { t->io_min = t->physical_block_size; t->misaligned = 1; ret = -1; } /* Optimal I/O a multiple of the physical block size? 
*/ if (t->io_opt & (t->physical_block_size - 1)) { t->io_opt = 0; t->misaligned = 1; ret = -1; } t->raid_partial_stripes_expensive = max(t->raid_partial_stripes_expensive, b->raid_partial_stripes_expensive); /* Find lowest common alignment_offset */ t->alignment_offset = lcm(t->alignment_offset, alignment) & (max(t->physical_block_size, t->io_min) - 1); /* Verify that new alignment_offset is on a logical block boundary */ if (t->alignment_offset & (t->logical_block_size - 1)) { t->misaligned = 1; ret = -1; } /* Discard alignment and granularity */ if (b->discard_granularity) { alignment = queue_limit_discard_alignment(b, start); if (t->discard_granularity != 0 && t->discard_alignment != alignment) { top = t->discard_granularity + t->discard_alignment; bottom = b->discard_granularity + alignment; /* Verify that top and bottom intervals line up */ if ((max(top, bottom) % min(top, bottom)) != 0) t->discard_misaligned = 1; } t->max_discard_sectors = min_not_zero(t->max_discard_sectors, b->max_discard_sectors); t->discard_granularity = max(t->discard_granularity, b->discard_granularity); t->discard_alignment = lcm(t->discard_alignment, alignment) % t->discard_granularity; } return ret; } EXPORT_SYMBOL(blk_stack_limits); /** * bdev_stack_limits - adjust queue limits for stacked drivers * @t: the stacking driver limits (top device) * @bdev: the component block_device (bottom) * @start: first data sector within component device * * Description: * Merges queue limits for a top device and a block_device. Returns * 0 if alignment didn't change. Returns -1 if adding the bottom * device caused misalignment. 
*/ int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, sector_t start) { struct request_queue *bq = bdev_get_queue(bdev); start += get_start_sect(bdev); return blk_stack_limits(t, &bq->limits, start); } EXPORT_SYMBOL(bdev_stack_limits); /** * disk_stack_limits - adjust queue limits for stacked drivers * @disk: MD/DM gendisk (top) * @bdev: the underlying block device (bottom) * @offset: offset to beginning of data within component device * * Description: * Merges the limits for a top level gendisk and a bottom level * block_device. */ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, sector_t offset) { struct request_queue *t = disk->queue; if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; disk_name(disk, 0, top); bdevname(bdev, bottom); printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", top, bottom); } } EXPORT_SYMBOL(disk_stack_limits); /** * blk_queue_dma_pad - set pad mask * @q: the request queue for the device * @mask: pad mask * * Set dma pad mask. * * Appending pad buffer to a request modifies the last entry of a * scatter list such that it includes the pad buffer. **/ void blk_queue_dma_pad(struct request_queue *q, unsigned int mask) { q->dma_pad_mask = mask; } EXPORT_SYMBOL(blk_queue_dma_pad); /** * blk_queue_update_dma_pad - update pad mask * @q: the request queue for the device * @mask: pad mask * * Update dma pad mask. * * Appending pad buffer to a request modifies the last entry of a * scatter list such that it includes the pad buffer. **/ void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask) { if (mask > q->dma_pad_mask) q->dma_pad_mask = mask; } EXPORT_SYMBOL(blk_queue_update_dma_pad); /** * blk_queue_dma_drain - Set up a drain buffer for excess dma. 
* @q: the request queue for the device * @dma_drain_needed: fn which returns non-zero if drain is necessary * @buf: physically contiguous buffer * @size: size of the buffer in bytes * * Some devices have excess DMA problems and can't simply discard (or * zero fill) the unwanted piece of the transfer. They have to have a * real area of memory to transfer it into. The use case for this is * ATAPI devices in DMA mode. If the packet command causes a transfer * bigger than the transfer size some HBAs will lock up if there * aren't DMA elements to contain the excess transfer. What this API * does is adjust the queue so that the buf is always appended * silently to the scatterlist. * * Note: This routine adjusts max_hw_segments to make room for appending * the drain buffer. If you call blk_queue_max_segments() after calling * this routine, you must set the limit to one fewer than your device * can support otherwise there won't be room for the drain buffer. */ int blk_queue_dma_drain(struct request_queue *q, dma_drain_needed_fn *dma_drain_needed, void *buf, unsigned int size) { if (queue_max_segments(q) < 2) return -EINVAL; /* make room for appending the drain */ blk_queue_max_segments(q, queue_max_segments(q) - 1); q->dma_drain_needed = dma_drain_needed; q->dma_drain_buffer = buf; q->dma_drain_size = size; return 0; } EXPORT_SYMBOL_GPL(blk_queue_dma_drain); /** * blk_queue_segment_boundary - set boundary rules for segment merging * @q: the request queue for the device * @mask: the memory boundary mask **/ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) { if (mask < PAGE_CACHE_SIZE - 1) { mask = PAGE_CACHE_SIZE - 1; printk(KERN_INFO "%s: set to minimum %lx\n", __func__, mask); } q->limits.seg_boundary_mask = mask; } EXPORT_SYMBOL(blk_queue_segment_boundary); /** * blk_queue_dma_alignment - set dma length and memory alignment * @q: the request queue for the device * @mask: alignment mask * * description: * set required memory and length 
alignment for direct dma transactions. * this is used when building direct io requests for the queue. * **/ void blk_queue_dma_alignment(struct request_queue *q, int mask) { q->dma_alignment = mask; } EXPORT_SYMBOL(blk_queue_dma_alignment); /** * blk_queue_update_dma_alignment - update dma length and memory alignment * @q: the request queue for the device * @mask: alignment mask * * description: * update required memory and length alignment for direct dma transactions. * If the requested alignment is larger than the current alignment, then * the current queue alignment is updated to the new value, otherwise it * is left alone. The design of this is to allow multiple objects * (driver, device, transport etc) to set their respective * alignments without having them interfere. * **/ void blk_queue_update_dma_alignment(struct request_queue *q, int mask) { BUG_ON(mask > PAGE_SIZE); if (mask > q->dma_alignment) q->dma_alignment = mask; } EXPORT_SYMBOL(blk_queue_update_dma_alignment); /** * blk_queue_flush - configure queue's cache flush capability * @q: the request queue for the device * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA * * Tell block layer cache flush capability of @q. If it supports * flushing, REQ_FLUSH should be set. If it supports bypassing * write cache for individual writes, REQ_FUA should be set. */ void blk_queue_flush(struct request_queue *q, unsigned int flush) { WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA)); if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA))) flush &= ~REQ_FUA; q->flush_flags = flush & (REQ_FLUSH | REQ_FUA); } EXPORT_SYMBOL_GPL(blk_queue_flush); void blk_queue_flush_queueable(struct request_queue *q, bool queueable) { q->flush_not_queueable = !queueable; } EXPORT_SYMBOL_GPL(blk_queue_flush_queueable); static int __init blk_settings_init(void) { blk_max_low_pfn = max_low_pfn - 1; blk_max_pfn = max_pfn - 1; return 0; } subsys_initcall(blk_settings_init);
gpl-2.0
john1117/u-boot-odroid
board/eltec/bab7xx/flash.c
425
12762
/* * (C) Copyright 2000 * Wolfgang Denk, DENX Software Engineering, wd@denx.de. * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ /* * 07-10-2002 Frank Gottschling: added 29F032 flash (ELPPC). * fixed monitor protection part * * 09-18-2001 Andreas Heppel: Reduced the code in here to the usage * of AMD's 29F040 and 29F016 flashes, since the BAB7xx does use * any other. 
*/ #include <common.h> #include <asm/processor.h> #include <asm/pci_io.h> flash_info_t flash_info[CONFIG_SYS_MAX_FLASH_BANKS]; /* info for FLASH chips */ ulong flash_get_size (vu_long *addr, flash_info_t *info); static int write_word (flash_info_t *info, ulong dest, ulong data); /*flash command address offsets*/ #define ADDR0 (0x555) #define ADDR1 (0x2AA) #define ADDR3 (0x001) #define FLASH_WORD_SIZE unsigned char /*----------------------------------------------------------------------------*/ unsigned long flash_init (void) { unsigned long size1, size2; int i; /* Init: no FLASHes known */ for (i=0; i<CONFIG_SYS_MAX_FLASH_BANKS; ++i) { flash_info[i].flash_id = FLASH_UNKNOWN; } /* initialise 1st flash */ size1 = flash_get_size((vu_long *)FLASH_BASE0_PRELIM, &flash_info[0]); if (flash_info[0].flash_id == FLASH_UNKNOWN) { printf ("## Unknown FLASH on Bank 0 - Size = 0x%08lx = %ld MB\n", size1, size1<<20); } /* initialise 2nd flash */ size2 = flash_get_size((vu_long *)FLASH_BASE1_PRELIM, &flash_info[1]); if (flash_info[1].flash_id == FLASH_UNKNOWN) { printf ("## Unknown FLASH on Bank 1 - Size = 0x%08lx = %ld MB\n", size2, size2<<20); } /* monitor protection ON by default */ if (size1 == 512*1024) { (void)flash_protect(FLAG_PROTECT_SET, FLASH_BASE0_PRELIM, FLASH_BASE0_PRELIM+monitor_flash_len-1, &flash_info[0]); } if (size2 == 512*1024) { (void)flash_protect(FLAG_PROTECT_SET, FLASH_BASE1_PRELIM, FLASH_BASE1_PRELIM+monitor_flash_len-1, &flash_info[1]); } if (size2 == 4*1024*1024) { (void)flash_protect(FLAG_PROTECT_SET, CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FLASH_BASE+monitor_flash_len-1, &flash_info[1]); } return (size1 + size2); } /*----------------------------------------------------------------------------*/ void flash_print_info (flash_info_t *info) { int i; int k; int size; int erased; volatile unsigned long *flash; if (info->flash_id == FLASH_UNKNOWN) { printf ("missing or unknown FLASH type\n"); flash_init(); } if (info->flash_id == FLASH_UNKNOWN) { printf ("missing 
or unknown FLASH type\n"); return; } switch (info->flash_id & FLASH_VENDMASK) { case FLASH_MAN_AMD: printf ("AMD "); break; default: printf ("Unknown Vendor "); break; } switch (info->flash_id & FLASH_TYPEMASK) { case AMD_ID_F040B: printf ("AM29F040B (4 Mbit)\n"); break; case AMD_ID_F016D: printf ("AM29F016D (16 Mbit)\n"); break; case AMD_ID_F032B: printf ("AM29F032B (32 Mbit)\n"); break; default: printf ("Unknown Chip Type\n"); break; } if (info->size >= (1 << 20)) { printf (" Size: %ld MB in %d Sectors\n", info->size >> 20, info->sector_count); } else { printf (" Size: %ld kB in %d Sectors\n", info->size >> 10, info->sector_count); } printf (" Sector Start Addresses:"); for (i=0; i<info->sector_count; ++i) { /* * Check if whole sector is erased */ if (i != (info->sector_count-1)) size = info->start[i+1] - info->start[i]; else size = info->start[0] + info->size - info->start[i]; erased = 1; flash = (volatile unsigned long *)info->start[i]; size = size >> 2; /* divide by 4 for longword access */ for (k=0; k<size; k++) { if (*flash++ != 0xffffffff) { erased = 0; break; } } if ((i % 5) == 0) printf ("\n "); printf (" %08lX%s%s", info->start[i], erased ? " E" : " ", info->protect[i] ? "RO " : " "); } printf ("\n"); } /*----------------------------------------------------------------------------*/ /* * The following code cannot be run from FLASH! 
 */
/*
 * flash_get_size - probe an AMD flash chip via the auto-select command
 * sequence, fill in @info (id, sector count, size, sector map, per-sector
 * protection) and return the size in bytes (0 if no/unknown flash).
 */
ulong flash_get_size (vu_long *addr, flash_info_t *info)
{
    short i;
    ulong vendor, devid;
    ulong base = (ulong)addr;
    volatile unsigned char *caddr = (unsigned char *)addr;

#ifdef DEBUG
    printf("flash_get_size for address 0x%lx: \n", (unsigned long)caddr);
#endif

    /* Write auto select command: read Manufacturer ID */
    caddr[0] = 0xF0;	/* reset bank */
    udelay(10);
    eieio();

    /* AMD embedded-algorithm unlock sequence 0x555/0x2AA, then 0x90 */
    caddr[0x555] = 0xAA;
    udelay(10);
    caddr[0x2AA] = 0x55;
    udelay(10);
    caddr[0x555] = 0x90;
    udelay(10);

    vendor = caddr[0];
    devid = caddr[1];
#ifdef DEBUG
    printf("Manufacturer: 0x%lx\n", vendor);
#endif
    vendor &= 0xff;
    devid &= 0xff;

    /* We accept only two AMD types */
    switch (vendor) {
    case (FLASH_WORD_SIZE)AMD_MANUFACT:
	info->flash_id = FLASH_MAN_AMD;
	break;
    default:
	info->flash_id = FLASH_UNKNOWN;
	info->sector_count = 0;
	info->size = 0;
	return (0);	/* no or unknown flash */
    }

    switch (devid) {
    case (FLASH_WORD_SIZE)AMD_ID_F040B:
	info->flash_id |= AMD_ID_F040B;
	info->sector_count = 8;
	info->size = 0x00080000;
	break;	/* => 0.5 MB */
    case (FLASH_WORD_SIZE)AMD_ID_F016D:
	info->flash_id |= AMD_ID_F016D;
	info->sector_count = 32;
	info->size = 0x00200000;
	break;	/* => 2 MB */
    case (FLASH_WORD_SIZE)AMD_ID_F032B:
	info->flash_id |= AMD_ID_F032B;
	info->sector_count = 64;
	info->size = 0x00400000;
	break;	/* => 4 MB */
    default:
	info->flash_id = FLASH_UNKNOWN;
	return (0);	/* => no or unknown flash */
    }

#ifdef DEBUG
    printf("flash id 0x%lx; sector count 0x%x, size 0x%lx\n",
	   info->flash_id, info->sector_count, info->size);
#endif

    /* check for protected sectors */
    for (i = 0; i < info->sector_count; i++) {
	/* sector base address (all supported chips have uniform sectors) */
	info->start[i] = base + i * (info->size / info->sector_count);
	/* read sector protection at sector address, (A7 .. A0) = 0x02 */
	/* D0 = 1 if protected */
	caddr = (volatile unsigned char *)(info->start[i]);
	info->protect[i] = caddr[2] & 1;
    }

    /*
     * Prevent writes to uninitialized FLASH.
     */
    if (info->flash_id != FLASH_UNKNOWN) {
	caddr = (volatile unsigned char *)info->start[0];
	caddr[0] = 0xF0;	/* reset bank back to read-array mode */
    }

    return (info->size);
}

/*----------------------------------------------------------------------------*/

/*
 * flash_erase - erase sectors s_first..s_last of one bank.
 * Protected sectors are skipped; returns 0 on success, 1 on error/timeout.
 */
int flash_erase (flash_info_t *info, int s_first, int s_last)
{
    volatile FLASH_WORD_SIZE *addr = (FLASH_WORD_SIZE *)(info->start[0]);
    int flag, prot, sect, l_sect;
    ulong start, now, last;
    int rc = 0;

    if ((s_first < 0) || (s_first > s_last)) {
	if (info->flash_id == FLASH_UNKNOWN) {
	    printf ("- missing\n");
	} else {
	    printf ("- no sectors to erase\n");
	}
	return 1;
    }

    if ((info->flash_id == FLASH_UNKNOWN) ||
	(info->flash_id > FLASH_AMD_COMP)) {
	printf ("Can't erase unknown flash type - aborted\n");
	return 1;
    }

    /* count protected sectors in the requested range */
    prot = 0;
    for (sect=s_first; sect<=s_last; ++sect) {
	if (info->protect[sect]) {
	    prot++;
	}
    }

    if (prot) {
	printf ("- Warning: %d protected sectors will not be erased!\n", prot);
    } else {
	printf ("\n");
    }

    l_sect = -1;	/* index of the last sector actually erased */

    /* Disable interrupts which might cause a timeout here */
    flag = disable_interrupts();

    /* AMD sector-erase setup sequence (AA/55/80/AA/55) */
    addr[ADDR0] = (FLASH_WORD_SIZE)0x00AA00AA;
    addr[ADDR1] = (FLASH_WORD_SIZE)0x00550055;
    addr[ADDR0] = (FLASH_WORD_SIZE)0x00800080;
    addr[ADDR0] = (FLASH_WORD_SIZE)0x00AA00AA;
    addr[ADDR1] = (FLASH_WORD_SIZE)0x00550055;

    /* Start erase on unprotected sectors */
    for (sect = s_first; sect<=s_last; sect++) {
	if (info->protect[sect] == 0) { /* not protected */
	    addr = (FLASH_WORD_SIZE *)(info->start[sect]);
	    /*
	     * NOTE(review): FLASH_MAN_SST can never be set here, since
	     * flash_get_size() only accepts AMD parts -- dead branch kept
	     * for parity with other board ports; verify before removing.
	     */
	    if (info->flash_id & FLASH_MAN_SST) {
		addr[ADDR0] = (FLASH_WORD_SIZE)0x00AA00AA;
		addr[ADDR1] = (FLASH_WORD_SIZE)0x00550055;
		addr[ADDR0] = (FLASH_WORD_SIZE)0x00800080;
		addr[ADDR0] = (FLASH_WORD_SIZE)0x00AA00AA;
		addr[ADDR1] = (FLASH_WORD_SIZE)0x00550055;
		addr[0] = (FLASH_WORD_SIZE)0x00500050;	/* block erase */
		udelay(30000);	/* wait 30 ms */
	    } else
		addr[0] = (FLASH_WORD_SIZE)0x00300030;	/* sector erase */
	    l_sect = sect;
	}
    }

    /* re-enable interrupts if necessary */
    if (flag)
	enable_interrupts();

    /* wait at least 80us - let's wait 1 ms */
    udelay (1000);

    /*
     * We wait for the last triggered sector
     */
    if (l_sect < 0)
	goto DONE;

    start = get_timer (0);
    last = start;
    addr = (FLASH_WORD_SIZE *)(info->start[l_sect]);
    /* DQ7 data polling: reads as written 0x80 once erase completes */
    while ((addr[0] & (FLASH_WORD_SIZE)0x00800080) !=
	   (FLASH_WORD_SIZE)0x00800080) {
	if ((now = get_timer(start)) > CONFIG_SYS_FLASH_ERASE_TOUT) {
	    printf ("Timeout\n");
	    return 1;
	}
	/* show that we're waiting */
	if ((now - last) > 1000) {	/* every second */
	    serial_putc ('.');
	    last = now;
	}
    }

DONE:
    /* reset to read mode */
    addr = (FLASH_WORD_SIZE *)info->start[0];
    addr[0] = (FLASH_WORD_SIZE)0x00F000F0;	/* reset bank */

    printf (" done\n");
    return rc;
}

/*----------------------------------------------------------------------------*/
/*
 * Copy memory to flash, returns:
 * 0 - OK
 * 1 - write timeout
 * 2 - Flash not erased
 */
int write_buff (flash_info_t *info, uchar *src, ulong addr, ulong cnt)
{
    ulong cp, wp, data;
    int i, l, rc;

    wp = (addr & ~3);	/* get lower word aligned address */

    /*
     * handle unaligned start bytes
     */
    if ((l = addr - wp) != 0) {
	data = 0;
	/* pre-load existing bytes before the start address */
	for (i=0, cp=wp; i<l; ++i, ++cp) {
	    data = (data << 8) | (*(uchar *)cp);
	}
	for (; i<4 && cnt>0; ++i) {
	    data = (data << 8) | *src++;
	    --cnt;
	    ++cp;
	}
	/* if source exhausted, fill rest of the word from flash */
	for (; cnt==0 && i<4; ++i, ++cp) {
	    data = (data << 8) | (*(uchar *)cp);
	}

	if ((rc = write_word(info, wp, data)) != 0) {
	    return (rc);
	}
	wp += 4;
    }

    /*
     * handle word aligned part
     */
    while (cnt >= 4) {
	data = 0;
	for (i=0; i<4; ++i) {
	    data = (data << 8) | *src++;
	}
	if ((rc = write_word(info, wp, data)) != 0) {
	    return (rc);
	}
	wp += 4;
	cnt -= 4;
    }

    if (cnt == 0) {
	return (0);
    }

    /*
     * handle unaligned tail bytes
     */
    data = 0;
    for (i=0, cp=wp; i<4 && cnt>0; ++i, ++cp) {
	data = (data << 8) | *src++;
	--cnt;
    }
    /* pad the final word with existing flash contents */
    for (; i<4; ++i, ++cp) {
	data = (data << 8) | (*(uchar *)cp);
    }

    return (write_word(info, wp, data));
}

/*----------------------------------------------------------------------------*/
/* Write a word to Flash, returns:
 * 0 - OK
 * 1 - write timeout
 * 2 - Flash not erased
 */
static int write_word (flash_info_t *info, ulong dest, ulong data)
{
    volatile FLASH_WORD_SIZE *addr2 = (FLASH_WORD_SIZE *)(info->start[0]);
    volatile FLASH_WORD_SIZE *dest2 = (FLASH_WORD_SIZE *)dest;
    volatile FLASH_WORD_SIZE *data2 = (FLASH_WORD_SIZE *)&data;
    ulong start;
    int flag;
    int i;

    /* Check if Flash is (sufficiently) erased: programming can only
     * clear bits, so every 1-bit in data must already be 1 in flash */
    if ((*((volatile FLASH_WORD_SIZE *)dest) &
	 (FLASH_WORD_SIZE)data) != (FLASH_WORD_SIZE)data) {
	return (2);
    }

    /* Disable interrupts which might cause a timeout here */
    flag = disable_interrupts();

    /* program one FLASH_WORD_SIZE unit at a time (4 bytes total) */
    for (i=0; i<4/sizeof(FLASH_WORD_SIZE); i++) {
	addr2[ADDR0] = (FLASH_WORD_SIZE)0x00AA00AA;
	addr2[ADDR1] = (FLASH_WORD_SIZE)0x00550055;
	addr2[ADDR0] = (FLASH_WORD_SIZE)0x00A000A0;	/* program command */

	dest2[i] = data2[i];

	/* re-enable interrupts if necessary */
	if (flag)
	    enable_interrupts();

	/* data polling for D7 */
	start = get_timer (0);
	while ((dest2[i] & (FLASH_WORD_SIZE)0x00800080) !=
	       (data2[i] & (FLASH_WORD_SIZE)0x00800080)) {
	    if (get_timer(start) > CONFIG_SYS_FLASH_WRITE_TOUT) {
		return (1);
	    }
	}
    }

    return (0);
}

/*----------------------------------------------------------------------------*/
gpl-2.0
cjdoucette/XIA-for-Linux
drivers/tty/n_hdlc.c
681
27767
/* generic HDLC line discipline for Linux * * Written by Paul Fulghum paulkf@microgate.com * for Microgate Corporation * * Microgate and SyncLink are registered trademarks of Microgate Corporation * * Adapted from ppp.c, written by Michael Callahan <callahan@maths.ox.ac.uk>, * Al Longyear <longyear@netcom.com>, * Paul Mackerras <Paul.Mackerras@cs.anu.edu.au> * * Original release 01/11/99 * * This code is released under the GNU General Public License (GPL) * * This module implements the tty line discipline N_HDLC for use with * tty device drivers that support bit-synchronous HDLC communications. * * All HDLC data is frame oriented which means: * * 1. tty write calls represent one complete transmit frame of data * The device driver should accept the complete frame or none of * the frame (busy) in the write method. Each write call should have * a byte count in the range of 2-65535 bytes (2 is min HDLC frame * with 1 addr byte and 1 ctrl byte). The max byte count of 65535 * should include any crc bytes required. For example, when using * CCITT CRC32, 4 crc bytes are required, so the maximum size frame * the application may transmit is limited to 65531 bytes. For CCITT * CRC16, the maximum application frame size would be 65533. * * * 2. receive callbacks from the device driver represents * one received frame. The device driver should bypass * the tty flip buffer and call the line discipline receive * callback directly to avoid fragmenting or concatenating * multiple frames into a single receive callback. * * The HDLC line discipline queues the receive frames in separate * buffers so complete receive frames can be returned by the * tty read calls. * * 3. tty read calls returns an entire frame of data or nothing. * * 4. all send and receive data is considered raw. No processing * or translation is performed by the line discipline, regardless * of the tty flags * * 5. 
When line discipline is queried for the amount of receive * data available (FIOC), 0 is returned if no data available, * otherwise the count of the next available frame is returned. * (instead of the sum of all received frame counts). * * These conventions allow the standard tty programming interface * to be used for synchronous HDLC applications when used with * this line discipline (or another line discipline that is frame * oriented such as N_PPP). * * The SyncLink driver (synclink.c) implements both asynchronous * (using standard line discipline N_TTY) and synchronous HDLC * (using N_HDLC) communications, with the latter using the above * conventions. * * This implementation is very basic and does not maintain * any statistics. The main point is to enforce the raw data * and frame orientation of HDLC communications. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

/* magic value stored in struct n_hdlc to validate the discipline state */
#define HDLC_MAGIC 0x239e

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>

#undef VERSION
#define VERSION(major,minor,patch) (((((major)<<8)+(minor))<<8)+(patch))

#include <linux/poll.h>
#include <linux/in.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h>	/* used in new tty drivers */
#include <linux/signal.h>	/* used in new tty drivers */
#include <linux/if.h>
#include <linux/bitops.h>

#include <asm/termios.h>
#include <asm/uaccess.h>

/*
 * Buffers for individual HDLC frames
 */
#define MAX_HDLC_FRAME_SIZE 65535
#define DEFAULT_RX_BUF_COUNT 10
#define MAX_RX_BUF_COUNT 60
#define DEFAULT_TX_BUF_COUNT 3

/* one queued HDLC frame; buf[] is really maxframe bytes (see below) */
struct n_hdlc_buf {
	struct n_hdlc_buf *link;	/* singly-linked list pointer */
	int count;			/* number of valid bytes in buf */
	char buf[1];			/* frame data (over-allocated) */
};

#define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)

/* spinlock-protected FIFO of n_hdlc_buf frames */
struct n_hdlc_buf_list {
	struct n_hdlc_buf *head;
	struct n_hdlc_buf *tail;
	int count;
	spinlock_t spinlock;
};

/**
 * struct n_hdlc - per device instance data structure
 * @magic - magic value for structure
 * @flags - miscellaneous control flags
 * @tty - ptr to TTY structure
 * @backup_tty - TTY to use if tty gets closed
 * @tbusy - reentrancy flag for tx wakeup code
 * @woke_up - tx wakeup arrived while tbusy was set; retry send loop
 * @tbuf - currently transmitting tx buffer
 * @tx_buf_list - list of pending transmit frame buffers
 * @rx_buf_list - list of received frame buffers
 * @tx_free_buf_list - list unused transmit frame buffers
 * @rx_free_buf_list - list unused received frame buffers
 */
struct n_hdlc {
	int			magic;
	__u32			flags;
	struct tty_struct	*tty;
	struct tty_struct	*backup_tty;
	int			tbusy;
	int			woke_up;
	struct n_hdlc_buf	*tbuf;
	struct n_hdlc_buf_list	tx_buf_list;
	struct n_hdlc_buf_list	rx_buf_list;
	struct n_hdlc_buf_list	tx_free_buf_list;
	struct n_hdlc_buf_list	rx_free_buf_list;
};

/*
 * HDLC buffer list manipulation functions
 */
static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list);
static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
			   struct n_hdlc_buf *buf);
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);

/* Local functions */

static struct n_hdlc *n_hdlc_alloc (void);

/* debug level can be set by insmod for debugging purposes */
#define DEBUG_LEVEL_INFO	1
static int debuglevel;

/* max frame size for memory allocations */
static int maxframe = 4096;

/* TTY callbacks */

static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
			   __u8 __user *buf, size_t nr);
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
			    const unsigned char *buf, size_t nr);
static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
			    unsigned int cmd, unsigned long arg);
static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
				    poll_table *wait);
static int n_hdlc_tty_open(struct tty_struct *tty);
static void n_hdlc_tty_close(struct tty_struct *tty);
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
			       char *fp, int count);
static void n_hdlc_tty_wakeup(struct tty_struct *tty);

#define bset(p,b)	((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))

#define tty2n_hdlc(tty)	((struct n_hdlc *) ((tty)->disc_data))
#define n_hdlc2tty(n_hdlc)	((n_hdlc)->tty)

/* Move every queued rx frame back onto the rx free list. */
static void flush_rx_queue(struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
	struct n_hdlc_buf *buf;

	while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
		n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
}

/* Discard all pending tx frames, including the in-flight tbuf. */
static void flush_tx_queue(struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
	struct n_hdlc_buf *buf;
	unsigned long flags;

	while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
		n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
	/*
	 * NOTE(review): tbuf is also manipulated by n_hdlc_send_frames();
	 * kernels of this vintage had a race here (CVE-2017-2636, fixed
	 * upstream by removing tbuf) -- verify this tree carries the fix.
	 */
 	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
	if (n_hdlc->tbuf) {
		n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
		n_hdlc->tbuf = NULL;
	}
	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
}

static struct tty_ldisc_ops n_hdlc_ldisc = {
	.owner		= THIS_MODULE,
	.magic		= TTY_LDISC_MAGIC,
	.name		= "hdlc",
	.open		= n_hdlc_tty_open,
	.close		= n_hdlc_tty_close,
	.read		= n_hdlc_tty_read,
	.write		= n_hdlc_tty_write,
	.ioctl		= n_hdlc_tty_ioctl,
	.poll		= n_hdlc_tty_poll,
	.receive_buf	= n_hdlc_tty_receive,
	.write_wakeup	= n_hdlc_tty_wakeup,
	.flush_buffer   = flush_rx_queue,
};

/**
 * n_hdlc_release - release an n_hdlc per device line discipline info structure
 * @n_hdlc - per device line discipline info structure
 */
static void n_hdlc_release(struct n_hdlc *n_hdlc)
{
	struct tty_struct *tty = n_hdlc2tty (n_hdlc);
	struct n_hdlc_buf *buf;

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_release() called\n",__FILE__,__LINE__);

	/* Ensure that the n_hdlcd process is not hanging on select()/poll() */
	wake_up_interruptible (&tty->read_wait);
	wake_up_interruptible (&tty->write_wait);

	if (tty->disc_data == n_hdlc)
		tty->disc_data = NULL;	/* Break the tty->n_hdlc link */

	/* Release transmit and receive buffers */
	for(;;) {
		buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
		if (buf) {
			kfree(buf);
		} else
			break;
	}
	for(;;) {
		buf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
		if (buf) {
			kfree(buf);
		} else
			break;
	}
	for(;;) {
		buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
		if (buf) {
			kfree(buf);
		} else
			break;
	}
	for(;;) {
		buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
		if (buf) {
			kfree(buf);
		} else
			break;
	}
	/* tbuf was never on any list, free it separately */
	kfree(n_hdlc->tbuf);
	kfree(n_hdlc);

}	/* end of n_hdlc_release() */

/**
 * n_hdlc_tty_close - line discipline close
 * @tty - pointer to tty info structure
 *
 * Called when the line discipline is changed to something
 * else, the tty is closed, or the tty detects a hangup.
 */
static void n_hdlc_tty_close(struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc (tty);

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_close() called\n",__FILE__,__LINE__);

	if (n_hdlc != NULL) {
		if (n_hdlc->magic != HDLC_MAGIC) {
			printk (KERN_WARNING"n_hdlc: trying to close unopened tty!\n");
			return;
		}
#if defined(TTY_NO_WRITE_SPLIT)
		clear_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
#endif
		tty->disc_data = NULL;
		if (tty == n_hdlc->backup_tty)
			n_hdlc->backup_tty = NULL;
		if (tty != n_hdlc->tty)
			return;
		/* fall back to the backup tty if one exists, else tear down */
		if (n_hdlc->backup_tty) {
			n_hdlc->tty = n_hdlc->backup_tty;
		} else {
			n_hdlc_release (n_hdlc);
		}
	}

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_close() success\n",__FILE__,__LINE__);

}	/* end of n_hdlc_tty_close() */

/**
 * n_hdlc_tty_open - called when line discipline changed to n_hdlc
 * @tty - pointer to tty info structure
 *
 * Returns 0 if success, otherwise error code
 */
static int n_hdlc_tty_open (struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc (tty);

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_open() called (device=%s)\n",
		__FILE__,__LINE__,
		tty->name);

	/* There should not be an existing table for this slot. */
	if (n_hdlc) {
		printk (KERN_ERR"n_hdlc_tty_open:tty already associated!\n" );
		return -EEXIST;
	}

	n_hdlc = n_hdlc_alloc();
	if (!n_hdlc) {
		printk (KERN_ERR "n_hdlc_alloc failed\n");
		return -ENFILE;
	}

	tty->disc_data = n_hdlc;
	n_hdlc->tty = tty;
	/* accept full-size frames from the driver in one callback */
	tty->receive_room = 65536;

#if defined(TTY_NO_WRITE_SPLIT)
	/* change tty_io write() to not split large writes into 8K chunks */
	set_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
#endif

	/* flush receive data from driver */
	tty_driver_flush_buffer(tty);

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_open() success\n",__FILE__,__LINE__);

	return 0;

}	/* end of n_tty_hdlc_open() */

/**
 * n_hdlc_send_frames - send frames on pending send buffer list
 * @n_hdlc - pointer to ldisc instance data
 * @tty - pointer to tty instance data
 *
 * Send frames on pending send buffer list until the driver does not accept a
 * frame (busy) this function is called after adding a frame to the send buffer
 * list and by the tty wakeup callback.
 */
static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
{
	register int actual;
	unsigned long flags;
	struct n_hdlc_buf *tbuf;

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_send_frames() called\n",__FILE__,__LINE__);
 check_again:

	/* tbusy makes this function non-reentrant; a wakeup that arrives
	 * while we are busy sets woke_up so we loop once more at the end */
	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
	if (n_hdlc->tbusy) {
		n_hdlc->woke_up = 1;
		spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
		return;
	}
	n_hdlc->tbusy = 1;
	n_hdlc->woke_up = 0;
	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);

	/* get current transmit buffer or get new transmit */
	/* buffer from list of pending transmit buffers */

	tbuf = n_hdlc->tbuf;
	if (!tbuf)
		tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);

	while (tbuf) {
		if (debuglevel >= DEBUG_LEVEL_INFO)
			printk("%s(%d)sending frame %p, count=%d\n",
				__FILE__,__LINE__,tbuf,tbuf->count);

		/* Send the next block of data to device */
		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
		actual = tty->ops->write(tty, tbuf->buf, tbuf->count);

		/* rollback was possible and has been done */
		if (actual == -ERESTARTSYS) {
			n_hdlc->tbuf = tbuf;
			break;
		}
		/* if transmit error, throw frame away by */
		/* pretending it was accepted by driver */
		if (actual < 0)
			actual = tbuf->count;

		if (actual == tbuf->count) {
			if (debuglevel >= DEBUG_LEVEL_INFO)
				printk("%s(%d)frame %p completed\n",
					__FILE__,__LINE__,tbuf);

			/* free current transmit buffer */
			n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);

			/* this tx buffer is done */
			n_hdlc->tbuf = NULL;

			/* wait up sleeping writers */
			wake_up_interruptible(&tty->write_wait);

			/* get next pending transmit buffer */
			tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
		} else {
			if (debuglevel >= DEBUG_LEVEL_INFO)
				printk("%s(%d)frame %p pending\n",
					__FILE__,__LINE__,tbuf);

			/* buffer not accepted by driver */
			/* set this buffer as pending buffer */
			n_hdlc->tbuf = tbuf;
			break;
		}
	}

	if (!tbuf)
		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);

	/* Clear the re-entry flag */
	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
	n_hdlc->tbusy = 0;
	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);

	if (n_hdlc->woke_up)
		goto check_again;

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_send_frames() exit\n",__FILE__,__LINE__);

}	/* end of n_hdlc_send_frames() */

/**
 * n_hdlc_tty_wakeup - Callback for transmit wakeup
 * @tty - pointer to associated tty instance data
 *
 * Called when low level device driver can accept more send data.
 */
static void n_hdlc_tty_wakeup(struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc(tty);

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_wakeup() called\n",__FILE__,__LINE__);

	if (!n_hdlc)
		return;

	if (tty != n_hdlc->tty) {
		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
		return;
	}

	n_hdlc_send_frames (n_hdlc, tty);

}	/* end of n_hdlc_tty_wakeup() */

/**
 * n_hdlc_tty_receive - Called by tty driver when receive data is available
 * @tty - pointer to tty instance data
 * @data - pointer to received data
 * @flags - pointer to flags for data
 * @count - count of received data in bytes
 *
 * Called by tty low level driver when receive data is available. Data is
 * interpreted as one HDLC frame.
 */
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
			       char *flags, int count)
{
	register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
	register struct n_hdlc_buf *buf;

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_receive() called count=%d\n",
			__FILE__,__LINE__, count);

	/* This can happen if stuff comes in on the backup tty */
	if (!n_hdlc || tty != n_hdlc->tty)
		return;

	/* verify line is using HDLC discipline */
	if (n_hdlc->magic != HDLC_MAGIC) {
		printk("%s(%d) line not using HDLC discipline\n",
			__FILE__,__LINE__);
		return;
	}

	/* oversized frames are dropped whole, never truncated */
	if ( count>maxframe ) {
		if (debuglevel >= DEBUG_LEVEL_INFO)
			printk("%s(%d) rx count>maxframesize, data discarded\n",
			       __FILE__,__LINE__);
		return;
	}

	/* get a free HDLC buffer */
	buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
	if (!buf) {
		/* no buffers in free list, attempt to allocate another rx buffer */
		/* unless the maximum count has been reached */
		if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
			buf = kmalloc(N_HDLC_BUF_SIZE, GFP_ATOMIC);
	}

	if (!buf) {
		if (debuglevel >= DEBUG_LEVEL_INFO)
			printk("%s(%d) no more rx buffers, data discarded\n",
			       __FILE__,__LINE__);
		return;
	}

	/* copy received data to HDLC buffer */
	memcpy(buf->buf,data,count);
	buf->count=count;

	/* add HDLC buffer to list of received frames */
	n_hdlc_buf_put(&n_hdlc->rx_buf_list, buf);

	/* wake up any blocked reads and perform async signalling */
	wake_up_interruptible (&tty->read_wait);
	if (n_hdlc->tty->fasync != NULL)
		kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);

}	/* end of n_hdlc_tty_receive() */

/**
 * n_hdlc_tty_read - Called to retrieve one frame of data (if available)
 * @tty - pointer to tty instance data
 * @file - pointer to open file object
 * @buf - pointer to returned data buffer
 * @nr - size of returned data buffer
 *
 * Returns the number of bytes returned or error code.
 */
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
			   __u8 __user *buf, size_t nr)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
	int ret = 0;
	struct n_hdlc_buf *rbuf;
	DECLARE_WAITQUEUE(wait, current);

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__);

	/* Validate the pointers */
	if (!n_hdlc)
		return -EIO;

	/* verify user access to buffer */
	if (!access_ok(VERIFY_WRITE, buf, nr)) {
		printk(KERN_WARNING "%s(%d) n_hdlc_tty_read() can't verify user "
		"buffer\n", __FILE__, __LINE__);
		return -EFAULT;
	}

	add_wait_queue(&tty->read_wait, &wait);

	for (;;) {
		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
			ret = -EIO;
			break;
		}
		if (tty_hung_up_p(file))
			break;

		set_current_state(TASK_INTERRUPTIBLE);

		rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
		if (rbuf) {
			if (rbuf->count > nr) {
				/* too large for caller's buffer */
				ret = -EOVERFLOW;
			} else {
				if (copy_to_user(buf, rbuf->buf, rbuf->count))
					ret = -EFAULT;
				else
					ret = rbuf->count;
			}

			/* recycle the buffer unless the free pool is full */
			if (n_hdlc->rx_free_buf_list.count >
			    DEFAULT_RX_BUF_COUNT)
				kfree(rbuf);
			else
				n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
			break;
		}

		/* no data */
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		schedule();

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}

	remove_wait_queue(&tty->read_wait, &wait);
	__set_current_state(TASK_RUNNING);

	return ret;

}	/* end of n_hdlc_tty_read() */

/**
 * n_hdlc_tty_write - write a single frame of data to device
 * @tty - pointer to associated tty device instance data
 * @file - pointer to file object data
 * @data - pointer to transmit data (one frame)
 * @count - size of transmit frame in bytes
 *
 * Returns the number of bytes written (or error code).
 */
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
			    const unsigned char *data, size_t count)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
	int error = 0;
	DECLARE_WAITQUEUE(wait, current);
	struct n_hdlc_buf *tbuf;

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_write() called count=%Zd\n",
			__FILE__,__LINE__,count);

	/* Verify pointers */
	if (!n_hdlc)
		return -EIO;

	if (n_hdlc->magic != HDLC_MAGIC)
		return -EIO;

	/* verify frame size */
	if (count > maxframe ) {
		if (debuglevel & DEBUG_LEVEL_INFO)
			printk (KERN_WARNING
				"n_hdlc_tty_write: truncating user packet "
				"from %lu to %d\n", (unsigned long) count,
				maxframe );
		count = maxframe;
	}

	add_wait_queue(&tty->write_wait, &wait);

	/* block until a free tx buffer is available (unless O_NONBLOCK) */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
		if (tbuf)
			break;

		if (file->f_flags & O_NONBLOCK) {
			error = -EAGAIN;
			break;
		}
		schedule();

		/* ldisc may have been torn down while we slept */
		n_hdlc = tty2n_hdlc (tty);
		if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
		    tty != n_hdlc->tty) {
			printk("n_hdlc_tty_write: %p invalid after wait!\n", n_hdlc);
			error = -EIO;
			break;
		}

		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);

	if (!error) {
		/* Retrieve the user's buffer */
		memcpy(tbuf->buf, data, count);

		/* Send the data */
		tbuf->count = error = count;
		n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
		n_hdlc_send_frames(n_hdlc,tty);
	}

	return error;

}	/* end of n_hdlc_tty_write() */

/**
 * n_hdlc_tty_ioctl - process IOCTL system call for the tty device.
 * @tty - pointer to tty instance data
 * @file - pointer to open file object for device
 * @cmd - IOCTL command code
 * @arg - argument for IOCTL call (cmd dependent)
 *
 * Returns command dependent result.
 */
static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
			    unsigned int cmd, unsigned long arg)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
	int error = 0;
	int count;
	unsigned long flags;

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
			__FILE__,__LINE__,cmd);

	/* Verify the status of the device */
	if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC)
		return -EBADF;

	switch (cmd) {
	case FIONREAD:
		/* report count of read data available */
		/* in next available frame (if any) */
		spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
		if (n_hdlc->rx_buf_list.head)
			count = n_hdlc->rx_buf_list.head->count;
		else
			count = 0;
		spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
		error = put_user(count, (int __user *)arg);
		break;

	case TIOCOUTQ:
		/* get the pending tx byte count in the driver */
		count = tty_chars_in_buffer(tty);
		/* add size of next output frame in queue */
		spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
		if (n_hdlc->tx_buf_list.head)
			count += n_hdlc->tx_buf_list.head->count;
		spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
		error = put_user(count, (int __user *)arg);
		break;

	case TCFLSH:
		switch (arg) {
		case TCIOFLUSH:
		case TCOFLUSH:
			flush_tx_queue(tty);
		}
		/* fall through to default */

	default:
		error = n_tty_ioctl_helper(tty, file, cmd, arg);
		break;
	}
	return error;

}	/* end of n_hdlc_tty_ioctl() */

/**
 * n_hdlc_tty_poll - TTY callback for poll system call
 * @tty - pointer to tty instance data
 * @filp - pointer to open file object for device
 * @poll_table - wait queue for operations
 *
 * Determine which operations (read/write) will not block and return info
 * to caller.
 * Returns a bit mask containing info on which ops will not block.
*/ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, poll_table *wait) { struct n_hdlc *n_hdlc = tty2n_hdlc (tty); unsigned int mask = 0; if (debuglevel >= DEBUG_LEVEL_INFO) printk("%s(%d)n_hdlc_tty_poll() called\n",__FILE__,__LINE__); if (n_hdlc && n_hdlc->magic == HDLC_MAGIC && tty == n_hdlc->tty) { /* queue current process into any wait queue that */ /* may awaken in the future (read and write) */ poll_wait(filp, &tty->read_wait, wait); poll_wait(filp, &tty->write_wait, wait); /* set bits for operations that won't block */ if (n_hdlc->rx_buf_list.head) mask |= POLLIN | POLLRDNORM; /* readable */ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) mask |= POLLHUP; if (tty_hung_up_p(filp)) mask |= POLLHUP; if (!tty_is_writelocked(tty) && n_hdlc->tx_free_buf_list.head) mask |= POLLOUT | POLLWRNORM; /* writable */ } return mask; } /* end of n_hdlc_tty_poll() */ /** * n_hdlc_alloc - allocate an n_hdlc instance data structure * * Returns a pointer to newly created structure if success, otherwise %NULL */ static struct n_hdlc *n_hdlc_alloc(void) { struct n_hdlc_buf *buf; int i; struct n_hdlc *n_hdlc = kzalloc(sizeof(*n_hdlc), GFP_KERNEL); if (!n_hdlc) return NULL; n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list); n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list); n_hdlc_buf_list_init(&n_hdlc->rx_buf_list); n_hdlc_buf_list_init(&n_hdlc->tx_buf_list); /* allocate free rx buffer list */ for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) { buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL); if (buf) n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,buf); else if (debuglevel >= DEBUG_LEVEL_INFO) printk("%s(%d)n_hdlc_alloc(), kalloc() failed for rx buffer %d\n",__FILE__,__LINE__, i); } /* allocate free tx buffer list */ for(i=0;i<DEFAULT_TX_BUF_COUNT;i++) { buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL); if (buf) n_hdlc_buf_put(&n_hdlc->tx_free_buf_list,buf); else if (debuglevel >= DEBUG_LEVEL_INFO) printk("%s(%d)n_hdlc_alloc(), kalloc() failed for tx buffer %d\n",__FILE__,__LINE__, i); } /* 
Initialize the control block */ n_hdlc->magic = HDLC_MAGIC; n_hdlc->flags = 0; return n_hdlc; } /* end of n_hdlc_alloc() */ /** * n_hdlc_buf_list_init - initialize specified HDLC buffer list * @list - pointer to buffer list */ static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list) { memset(list, 0, sizeof(*list)); spin_lock_init(&list->spinlock); } /* end of n_hdlc_buf_list_init() */ /** * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list * @list - pointer to buffer list * @buf - pointer to buffer */ static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, struct n_hdlc_buf *buf) { unsigned long flags; spin_lock_irqsave(&list->spinlock,flags); buf->link=NULL; if (list->tail) list->tail->link = buf; else list->head = buf; list->tail = buf; (list->count)++; spin_unlock_irqrestore(&list->spinlock,flags); } /* end of n_hdlc_buf_put() */ /** * n_hdlc_buf_get - remove and return an HDLC buffer from list * @list - pointer to HDLC buffer list * * Remove and return an HDLC buffer from the head of the specified HDLC buffer * list. * Returns a pointer to HDLC buffer if available, otherwise %NULL. 
*/ static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list) { unsigned long flags; struct n_hdlc_buf *buf; spin_lock_irqsave(&list->spinlock,flags); buf = list->head; if (buf) { list->head = buf->link; (list->count)--; } if (!list->head) list->tail = NULL; spin_unlock_irqrestore(&list->spinlock,flags); return buf; } /* end of n_hdlc_buf_get() */ static char hdlc_banner[] __initdata = KERN_INFO "HDLC line discipline maxframe=%u\n"; static char hdlc_register_ok[] __initdata = KERN_INFO "N_HDLC line discipline registered.\n"; static char hdlc_register_fail[] __initdata = KERN_ERR "error registering line discipline: %d\n"; static int __init n_hdlc_init(void) { int status; /* range check maxframe arg */ if (maxframe < 4096) maxframe = 4096; else if (maxframe > 65535) maxframe = 65535; printk(hdlc_banner, maxframe); status = tty_register_ldisc(N_HDLC, &n_hdlc_ldisc); if (!status) printk(hdlc_register_ok); else printk(hdlc_register_fail, status); return status; } /* end of init_module() */ static char hdlc_unregister_ok[] __exitdata = KERN_INFO "N_HDLC: line discipline unregistered\n"; static char hdlc_unregister_fail[] __exitdata = KERN_ERR "N_HDLC: can't unregister line discipline (err = %d)\n"; static void __exit n_hdlc_exit(void) { /* Release tty registration of line discipline */ int status = tty_unregister_ldisc(N_HDLC); if (status) printk(hdlc_unregister_fail, status); else printk(hdlc_unregister_ok); } module_init(n_hdlc_init); module_exit(n_hdlc_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Paul Fulghum paulkf@microgate.com"); module_param(debuglevel, int, 0); module_param(maxframe, int, 0); MODULE_ALIAS_LDISC(N_HDLC);
gpl-2.0
dtaht/cerowrt-3.10
target/linux/ar71xx/files/arch/mips/ath79/mach-wrt400n.c
681
3935
/* * Linksys WRT400N board support * * Copyright (C) 2009-2012 Gabor Juhos <juhosg@openwrt.org> * Copyright (C) 2009 Imre Kaloz <kaloz@openwrt.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <asm/mach-ath79/ath79.h> #include "dev-ap9x-pci.h" #include "dev-eth.h" #include "dev-gpio-buttons.h" #include "dev-leds-gpio.h" #include "dev-m25p80.h" #include "machtypes.h" #define WRT400N_GPIO_LED_POWER 1 #define WRT400N_GPIO_LED_WPS_BLUE 4 #define WRT400N_GPIO_LED_WPS_AMBER 5 #define WRT400N_GPIO_LED_WLAN 6 #define WRT400N_GPIO_BTN_RESET 8 #define WRT400N_GPIO_BTN_WLSEC 3 #define WRT400N_KEYS_POLL_INTERVAL 20 /* msecs */ #define WRT400N_KEYS_DEBOUNE_INTERVAL (3 * WRT400N_KEYS_POLL_INTERVAL) #define WRT400N_MAC_ADDR_OFFSET 0x120c #define WRT400N_CALDATA0_OFFSET 0x1000 #define WRT400N_CALDATA1_OFFSET 0x5000 static struct mtd_partition wrt400n_partitions[] = { { .name = "uboot", .offset = 0, .size = 0x030000, .mask_flags = MTD_WRITEABLE, }, { .name = "env", .offset = 0x030000, .size = 0x010000, .mask_flags = MTD_WRITEABLE, }, { .name = "linux", .offset = 0x040000, .size = 0x140000, }, { .name = "rootfs", .offset = 0x180000, .size = 0x630000, }, { .name = "nvram", .offset = 0x7b0000, .size = 0x010000, .mask_flags = MTD_WRITEABLE, }, { .name = "factory", .offset = 0x7c0000, .size = 0x010000, .mask_flags = MTD_WRITEABLE, }, { .name = "language", .offset = 0x7d0000, .size = 0x020000, .mask_flags = MTD_WRITEABLE, }, { .name = "caldata", .offset = 0x7f0000, .size = 0x010000, .mask_flags = MTD_WRITEABLE, }, { .name = "firmware", .offset = 0x040000, .size = 0x770000, } }; static struct flash_platform_data wrt400n_flash_data = { .parts = wrt400n_partitions, .nr_parts = ARRAY_SIZE(wrt400n_partitions), }; static struct gpio_led wrt400n_leds_gpio[] __initdata = { { .name = 
"wrt400n:blue:wps", .gpio = WRT400N_GPIO_LED_WPS_BLUE, .active_low = 1, }, { .name = "wrt400n:amber:wps", .gpio = WRT400N_GPIO_LED_WPS_AMBER, .active_low = 1, }, { .name = "wrt400n:blue:wlan", .gpio = WRT400N_GPIO_LED_WLAN, .active_low = 1, }, { .name = "wrt400n:blue:power", .gpio = WRT400N_GPIO_LED_POWER, .active_low = 0, .default_trigger = "default-on", } }; static struct gpio_keys_button wrt400n_gpio_keys[] __initdata = { { .desc = "reset", .type = EV_KEY, .code = KEY_RESTART, .debounce_interval = WRT400N_KEYS_DEBOUNE_INTERVAL, .gpio = WRT400N_GPIO_BTN_RESET, .active_low = 1, }, { .desc = "wlsec", .type = EV_KEY, .code = KEY_WPS_BUTTON, .debounce_interval = WRT400N_KEYS_DEBOUNE_INTERVAL, .gpio = WRT400N_GPIO_BTN_WLSEC, .active_low = 1, } }; static void __init wrt400n_setup(void) { u8 *art = (u8 *) KSEG1ADDR(0x1fff0000); u8 *mac = art + WRT400N_MAC_ADDR_OFFSET; ath79_register_mdio(0, 0x0); ath79_init_mac(ath79_eth0_data.mac_addr, mac, 1); ath79_eth0_data.phy_if_mode = PHY_INTERFACE_MODE_RMII; ath79_eth0_data.speed = SPEED_100; ath79_eth0_data.duplex = DUPLEX_FULL; ath79_init_mac(ath79_eth1_data.mac_addr, mac, 2); ath79_eth1_data.phy_if_mode = PHY_INTERFACE_MODE_RMII; ath79_eth1_data.phy_mask = 0x10; ath79_register_eth(0); ath79_register_eth(1); ath79_register_m25p80(&wrt400n_flash_data); ath79_register_leds_gpio(-1, ARRAY_SIZE(wrt400n_leds_gpio), wrt400n_leds_gpio); ath79_register_gpio_keys_polled(-1, WRT400N_KEYS_POLL_INTERVAL, ARRAY_SIZE(wrt400n_gpio_keys), wrt400n_gpio_keys); ap94_pci_init(art + WRT400N_CALDATA0_OFFSET, NULL, art + WRT400N_CALDATA1_OFFSET, NULL); } MIPS_MACHINE(ATH79_MACH_WRT400N, "WRT400N", "Linksys WRT400N", wrt400n_setup);
gpl-2.0
nuxeh/linux
drivers/edac/i82875p_edac.c
681
15090
/* * Intel D82875P Memory Controller kernel module * (C) 2003 Linux Networx (http://lnxi.com) * This file may be distributed under the terms of the * GNU General Public License. * * Written by Thayne Harbaugh * Contributors: * Wang Zhenyu at intel.com * * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $ * * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/edac.h> #include "edac_core.h" #define I82875P_REVISION " Ver: 2.0.2" #define EDAC_MOD_STR "i82875p_edac" #define i82875p_printk(level, fmt, arg...) \ edac_printk(level, "i82875p", fmt, ##arg) #define i82875p_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg) #ifndef PCI_DEVICE_ID_INTEL_82875_0 #define PCI_DEVICE_ID_INTEL_82875_0 0x2578 #endif /* PCI_DEVICE_ID_INTEL_82875_0 */ #ifndef PCI_DEVICE_ID_INTEL_82875_6 #define PCI_DEVICE_ID_INTEL_82875_6 0x257e #endif /* PCI_DEVICE_ID_INTEL_82875_6 */ /* four csrows in dual channel, eight in single channel */ #define I82875P_NR_DIMMS 8 #define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans)) /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ #define I82875P_EAP 0x58 /* Error Address Pointer (32b) * * 31:12 block address * 11:0 reserved */ #define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b) * * 7:0 DRAM ECC Syndrome */ #define I82875P_DES 0x5d /* DRAM Error Status (8b) * * 7:1 reserved * 0 Error channel 0/1 */ #define I82875P_ERRSTS 0xc8 /* Error Status Register (16b) * * 15:10 reserved * 9 non-DRAM lock error (ndlock) * 8 Sftwr Generated SMI * 7 ECC UE * 6 reserved * 5 MCH detects unimplemented cycle * 4 AGP access outside GA * 3 Invalid AGP access * 2 Invalid GA translation table * 1 Unsupported AGP command * 0 ECC CE */ #define I82875P_ERRCMD 0xca /* Error Command (16b) * * 15:10 reserved * 9 SERR on non-DRAM lock * 8 SERR on ECC UE * 
7 SERR on ECC CE * 6 target abort on high exception * 5 detect unimplemented cyc * 4 AGP access outside of GA * 3 SERR on invalid AGP access * 2 invalid translation table * 1 SERR on unsupported AGP command * 0 reserved */ /* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */ #define I82875P_PCICMD6 0x04 /* PCI Command Register (16b) * * 15:10 reserved * 9 fast back-to-back - ro 0 * 8 SERR enable - ro 0 * 7 addr/data stepping - ro 0 * 6 parity err enable - ro 0 * 5 VGA palette snoop - ro 0 * 4 mem wr & invalidate - ro 0 * 3 special cycle - ro 0 * 2 bus master - ro 0 * 1 mem access dev6 - 0(dis),1(en) * 0 IO access dev3 - 0(dis),1(en) */ #define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b) * * 31:12 mem base addr [31:12] * 11:4 address mask - ro 0 * 3 prefetchable - ro 0(non),1(pre) * 2:1 mem type - ro 0 * 0 mem space - ro 0 */ /* Intel 82875p MMIO register space - device 0 function 0 - MMR space */ #define I82875P_DRB_SHIFT 26 /* 64MiB grain */ #define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8) * * 7 reserved * 6:0 64MiB row boundary addr */ #define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8) * * 7 reserved * 6:4 row attr row 1 * 3 reserved * 2:0 row attr row 0 * * 000 = 4KiB * 001 = 8KiB * 010 = 16KiB * 011 = 32KiB */ #define I82875P_DRC 0x68 /* DRAM Controller Mode (32b) * * 31:30 reserved * 29 init complete * 28:23 reserved * 22:21 nr chan 00=1,01=2 * 20 reserved * 19:18 Data Integ Mode 00=none,01=ecc * 17:11 reserved * 10:8 refresh mode * 7 reserved * 6:4 mode select * 3:2 reserved * 1:0 DRAM type 01=DDR */ enum i82875p_chips { I82875P = 0, }; struct i82875p_pvt { struct pci_dev *ovrfl_pdev; void __iomem *ovrfl_window; }; struct i82875p_dev_info { const char *ctl_name; }; struct i82875p_error_info { u16 errsts; u32 eap; u8 des; u8 derrsyn; u16 errsts2; }; static const struct i82875p_dev_info i82875p_devs[] = { [I82875P] = { .ctl_name = "i82875p"}, }; static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has * 
already registered driver */ static struct edac_pci_ctl_info *i82875p_pci; static void i82875p_get_error_info(struct mem_ctl_info *mci, struct i82875p_error_info *info) { struct pci_dev *pdev; pdev = to_pci_dev(mci->pdev); /* * This is a mess because there is no atomic way to read all the * registers at once and the registers can transition from CE being * overwritten by UE. */ pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts); if (!(info->errsts & 0x0081)) return; pci_read_config_dword(pdev, I82875P_EAP, &info->eap); pci_read_config_byte(pdev, I82875P_DES, &info->des); pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn); pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2); /* * If the error is the same then we can for both reads then * the first set of reads is valid. If there is a change then * there is a CE no info and the second set of reads is valid * and should be UE info. */ if ((info->errsts ^ info->errsts2) & 0x0081) { pci_read_config_dword(pdev, I82875P_EAP, &info->eap); pci_read_config_byte(pdev, I82875P_DES, &info->des); pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn); } pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081); } static int i82875p_process_error_info(struct mem_ctl_info *mci, struct i82875p_error_info *info, int handle_errors) { int row, multi_chan; multi_chan = mci->csrows[0]->nr_channels - 1; if (!(info->errsts & 0x0081)) return 0; if (!handle_errors) return 1; if ((info->errsts ^ info->errsts2) & 0x0081) { edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, "UE overwrote CE", ""); info->errsts = info->errsts2; } info->eap >>= PAGE_SHIFT; row = edac_mc_find_csrow_by_page(mci, info->eap); if (info->errsts & 0x0080) edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, info->eap, 0, 0, row, -1, -1, "i82875p UE", ""); else edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, info->eap, 0, info->derrsyn, row, multi_chan ? 
(info->des & 0x1) : 0, -1, "i82875p CE", ""); return 1; } static void i82875p_check(struct mem_ctl_info *mci) { struct i82875p_error_info info; edac_dbg(1, "MC%d\n", mci->mc_idx); i82875p_get_error_info(mci, &info); i82875p_process_error_info(mci, &info, 1); } /* Return 0 on success or 1 on failure. */ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window) { struct pci_dev *dev; void __iomem *window; *ovrfl_pdev = NULL; *ovrfl_window = NULL; dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); if (dev == NULL) { /* Intel tells BIOS developers to hide device 6 which * configures the overflow device access containing * the DRBs - this is where we expose device 6. * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm */ pci_write_bits8(pdev, 0xf4, 0x2, 0x2); dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); if (dev == NULL) return 1; pci_bus_assign_resources(dev->bus); pci_bus_add_device(dev); } *ovrfl_pdev = dev; if (pci_enable_device(dev)) { i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow " "device\n", __func__); return 1; } if (pci_request_regions(dev, pci_name(dev))) { #ifdef CORRECT_BIOS goto fail0; #endif } /* cache is irrelevant for PCI bus reads/writes */ window = pci_ioremap_bar(dev, 0); if (window == NULL) { i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", __func__); goto fail1; } *ovrfl_window = window; return 0; fail1: pci_release_regions(dev); #ifdef CORRECT_BIOS fail0: pci_disable_device(dev); #endif /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ return 1; } /* Return 1 if dual channel mode is active. Else return 0. 
*/ static inline int dual_channel_active(u32 drc) { return (drc >> 21) & 0x1; } static void i82875p_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, void __iomem * ovrfl_window, u32 drc) { struct csrow_info *csrow; struct dimm_info *dimm; unsigned nr_chans = dual_channel_active(drc) + 1; unsigned long last_cumul_size; u8 value; u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ u32 cumul_size, nr_pages; int index, j; drc_ddim = (drc >> 18) & 0x1; last_cumul_size = 0; /* The dram row boundary (DRB) reg values are boundary address * for each DRAM row with a granularity of 32 or 64MB (single/dual * channel operation). DRB regs are cumulative; therefore DRB7 will * contain the total memory contained in all eight rows. */ for (index = 0; index < mci->nr_csrows; index++) { csrow = mci->csrows[index]; value = readb(ovrfl_window + I82875P_DRB + index); cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); if (cumul_size == last_cumul_size) continue; /* not populated */ csrow->first_page = last_cumul_size; csrow->last_page = cumul_size - 1; nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; for (j = 0; j < nr_chans; j++) { dimm = csrow->channels[j]->dimm; dimm->nr_pages = nr_pages / nr_chans; dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ dimm->mtype = MEM_DDR; dimm->dtype = DEV_UNKNOWN; dimm->edac_mode = drc_ddim ? 
EDAC_SECDED : EDAC_NONE; } } } static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) { int rc = -ENODEV; struct mem_ctl_info *mci; struct edac_mc_layer layers[2]; struct i82875p_pvt *pvt; struct pci_dev *ovrfl_pdev; void __iomem *ovrfl_window; u32 drc; u32 nr_chans; struct i82875p_error_info discard; edac_dbg(0, "\n"); if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) return -ENODEV; drc = readl(ovrfl_window + I82875P_DRC); nr_chans = dual_channel_active(drc) + 1; layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = I82875P_NR_CSROWS(nr_chans); layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; layers[1].size = nr_chans; layers[1].is_virt_csrow = false; mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); if (!mci) { rc = -ENOMEM; goto fail0; } edac_dbg(3, "init mci\n"); mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_UNKNOWN; mci->mod_name = EDAC_MOD_STR; mci->mod_ver = I82875P_REVISION; mci->ctl_name = i82875p_devs[dev_idx].ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = i82875p_check; mci->ctl_page_to_phys = NULL; edac_dbg(3, "init pvt\n"); pvt = (struct i82875p_pvt *)mci->pvt_info; pvt->ovrfl_pdev = ovrfl_pdev; pvt->ovrfl_window = ovrfl_window; i82875p_init_csrows(mci, pdev, ovrfl_window, drc); i82875p_get_error_info(mci, &discard); /* clear counters */ /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. 
*/ if (edac_mc_add_mc(mci)) { edac_dbg(3, "failed edac_mc_add_mc()\n"); goto fail1; } /* allocating generic PCI control info */ i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); if (!i82875p_pci) { printk(KERN_WARNING "%s(): Unable to create PCI control\n", __func__); printk(KERN_WARNING "%s(): PCI error report via EDAC not setup\n", __func__); } /* get this far and it's successful */ edac_dbg(3, "success\n"); return 0; fail1: edac_mc_free(mci); fail0: iounmap(ovrfl_window); pci_release_regions(ovrfl_pdev); pci_disable_device(ovrfl_pdev); /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ return rc; } /* returns count (>= 0), or negative on error */ static int i82875p_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc; edac_dbg(0, "\n"); i82875p_printk(KERN_INFO, "i82875p init one\n"); if (pci_enable_device(pdev) < 0) return -EIO; rc = i82875p_probe1(pdev, ent->driver_data); if (mci_pdev == NULL) mci_pdev = pci_dev_get(pdev); return rc; } static void i82875p_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct i82875p_pvt *pvt = NULL; edac_dbg(0, "\n"); if (i82875p_pci) edac_pci_release_generic_ctl(i82875p_pci); if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; pvt = (struct i82875p_pvt *)mci->pvt_info; if (pvt->ovrfl_window) iounmap(pvt->ovrfl_window); if (pvt->ovrfl_pdev) { #ifdef CORRECT_BIOS pci_release_regions(pvt->ovrfl_pdev); #endif /*CORRECT_BIOS */ pci_disable_device(pvt->ovrfl_pdev); pci_dev_put(pvt->ovrfl_pdev); } edac_mc_free(mci); } static const struct pci_device_id i82875p_pci_tbl[] = { { PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I82875P}, { 0, } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); static struct pci_driver i82875p_driver = { .name = EDAC_MOD_STR, .probe = i82875p_init_one, .remove = i82875p_remove_one, .id_table = i82875p_pci_tbl, }; static int __init i82875p_init(void) { int pci_rc; edac_dbg(3, "\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); pci_rc = pci_register_driver(&i82875p_driver); if (pci_rc < 0) goto fail0; if (mci_pdev == NULL) { mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_0, NULL); if (!mci_pdev) { edac_dbg(0, "875p pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); if (pci_rc < 0) { edac_dbg(0, "875p init fail\n"); pci_rc = -ENODEV; goto fail1; } } return 0; fail1: pci_unregister_driver(&i82875p_driver); fail0: if (mci_pdev != NULL) pci_dev_put(mci_pdev); return pci_rc; } static void __exit i82875p_exit(void) { edac_dbg(3, "\n"); i82875p_remove_one(mci_pdev); pci_dev_put(mci_pdev); pci_unregister_driver(&i82875p_driver); } module_init(i82875p_init); module_exit(i82875p_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
nasser-embedded/linux
drivers/net/wireless/wl12xx/wl1251_init.c
937
9246
/* * This file is part of wl1251 * * Copyright (C) 2009 Nokia Corporation * * Contact: Kalle Valo <kalle.valo@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include "wl1251_init.h" #include "wl12xx_80211.h" #include "wl1251_acx.h" #include "wl1251_cmd.h" #include "wl1251_reg.h" int wl1251_hw_init_hwenc_config(struct wl1251 *wl) { int ret; ret = wl1251_acx_feature_cfg(wl); if (ret < 0) { wl1251_warning("couldn't set feature config"); return ret; } ret = wl1251_acx_default_key(wl, wl->default_key); if (ret < 0) { wl1251_warning("couldn't set default key"); return ret; } return 0; } int wl1251_hw_init_templates_config(struct wl1251 *wl) { int ret; u8 partial_vbm[PARTIAL_VBM_MAX]; /* send empty templates for fw memory reservation */ ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, NULL, sizeof(struct wl12xx_probe_req_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, NULL, sizeof(struct wl12xx_null_data_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_PS_POLL, NULL, sizeof(struct wl12xx_ps_poll_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, NULL, sizeof (struct wl12xx_qos_null_data_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, NULL, sizeof (struct 
wl12xx_probe_resp_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_BEACON, NULL, sizeof (struct wl12xx_beacon_template)); if (ret < 0) return ret; /* tim templates, first reserve space then allocate an empty one */ memset(partial_vbm, 0, PARTIAL_VBM_MAX); ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, PARTIAL_VBM_MAX, 0); if (ret < 0) return ret; ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, 1, 0); if (ret < 0) return ret; return 0; } int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter) { int ret; ret = wl1251_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF); if (ret < 0) return ret; ret = wl1251_acx_rx_config(wl, config, filter); if (ret < 0) return ret; return 0; } int wl1251_hw_init_phy_config(struct wl1251 *wl) { int ret; ret = wl1251_acx_pd_threshold(wl); if (ret < 0) return ret; ret = wl1251_acx_slot(wl, DEFAULT_SLOT_TIME); if (ret < 0) return ret; ret = wl1251_acx_group_address_tbl(wl); if (ret < 0) return ret; ret = wl1251_acx_service_period_timeout(wl); if (ret < 0) return ret; ret = wl1251_acx_rts_threshold(wl, RTS_THRESHOLD_DEF); if (ret < 0) return ret; return 0; } int wl1251_hw_init_beacon_filter(struct wl1251 *wl) { int ret; /* disable beacon filtering at this stage */ ret = wl1251_acx_beacon_filter_opt(wl, false); if (ret < 0) return ret; ret = wl1251_acx_beacon_filter_table(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_pta(struct wl1251 *wl) { int ret; ret = wl1251_acx_sg_enable(wl); if (ret < 0) return ret; ret = wl1251_acx_sg_cfg(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_energy_detection(struct wl1251 *wl) { int ret; ret = wl1251_acx_cca_threshold(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_beacon_broadcast(struct wl1251 *wl) { int ret; ret = wl1251_acx_bcn_dtim_options(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_power_auth(struct wl1251 *wl) { return wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM); } int 
wl1251_hw_init_mem_config(struct wl1251 *wl) { int ret; ret = wl1251_acx_mem_cfg(wl); if (ret < 0) return ret; wl->target_mem_map = kzalloc(sizeof(struct wl1251_acx_mem_map), GFP_KERNEL); if (!wl->target_mem_map) { wl1251_error("couldn't allocate target memory map"); return -ENOMEM; } /* we now ask for the firmware built memory map */ ret = wl1251_acx_mem_map(wl, wl->target_mem_map, sizeof(struct wl1251_acx_mem_map)); if (ret < 0) { wl1251_error("couldn't retrieve firmware memory map"); kfree(wl->target_mem_map); wl->target_mem_map = NULL; return ret; } return 0; } static int wl1251_hw_init_txq_fill(u8 qid, struct acx_tx_queue_qos_config *config, u32 num_blocks) { config->qid = qid; switch (qid) { case QOS_AC_BE: config->high_threshold = (QOS_TX_HIGH_BE_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_BE_DEF * num_blocks) / 100; break; case QOS_AC_BK: config->high_threshold = (QOS_TX_HIGH_BK_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_BK_DEF * num_blocks) / 100; break; case QOS_AC_VI: config->high_threshold = (QOS_TX_HIGH_VI_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_VI_DEF * num_blocks) / 100; break; case QOS_AC_VO: config->high_threshold = (QOS_TX_HIGH_VO_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_VO_DEF * num_blocks) / 100; break; default: wl1251_error("Invalid TX queue id: %d", qid); return -EINVAL; } return 0; } static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl) { struct acx_tx_queue_qos_config *config; struct wl1251_acx_mem_map *wl_mem_map = wl->target_mem_map; int ret, i; wl1251_debug(DEBUG_ACX, "acx tx queue config"); config = kzalloc(sizeof(*config), GFP_KERNEL); if (!config) { ret = -ENOMEM; goto out; } for (i = 0; i < MAX_NUM_OF_AC; i++) { ret = wl1251_hw_init_txq_fill(i, config, wl_mem_map->num_tx_mem_blocks); if (ret < 0) goto out; ret = wl1251_cmd_configure(wl, ACX_TX_QUEUE_CFG, config, sizeof(*config)); if (ret < 0) goto out; } wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, 
AIFS_DIFS, TXOP_BE); wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK); wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI); wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO); out: kfree(config); return ret; } static int wl1251_hw_init_data_path_config(struct wl1251 *wl) { int ret; /* asking for the data path parameters */ wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp), GFP_KERNEL); if (!wl->data_path) { wl1251_error("Couldnt allocate data path parameters"); return -ENOMEM; } ret = wl1251_acx_data_path_params(wl, wl->data_path); if (ret < 0) { kfree(wl->data_path); wl->data_path = NULL; return ret; } return 0; } int wl1251_hw_init(struct wl1251 *wl) { struct wl1251_acx_mem_map *wl_mem_map; int ret; ret = wl1251_hw_init_hwenc_config(wl); if (ret < 0) return ret; /* Template settings */ ret = wl1251_hw_init_templates_config(wl); if (ret < 0) return ret; /* Default memory configuration */ ret = wl1251_hw_init_mem_config(wl); if (ret < 0) return ret; /* Default data path configuration */ ret = wl1251_hw_init_data_path_config(wl); if (ret < 0) goto out_free_memmap; /* RX config */ ret = wl1251_hw_init_rx_config(wl, RX_CFG_PROMISCUOUS | RX_CFG_TSF, RX_FILTER_OPTION_DEF); /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS, RX_FILTER_OPTION_FILTER_ALL); */ if (ret < 0) goto out_free_data_path; /* TX queues config */ ret = wl1251_hw_init_tx_queue_config(wl); if (ret < 0) goto out_free_data_path; /* PHY layer config */ ret = wl1251_hw_init_phy_config(wl); if (ret < 0) goto out_free_data_path; /* Initialize connection monitoring thresholds */ ret = wl1251_acx_conn_monit_params(wl); if (ret < 0) goto out_free_data_path; /* Beacon filtering */ ret = wl1251_hw_init_beacon_filter(wl); if (ret < 0) goto out_free_data_path; /* Bluetooth WLAN coexistence */ ret = wl1251_hw_init_pta(wl); if (ret < 0) goto out_free_data_path; /* Energy detection */ ret = wl1251_hw_init_energy_detection(wl); if (ret < 0) goto out_free_data_path; 
/* Beacons and boradcast settings */ ret = wl1251_hw_init_beacon_broadcast(wl); if (ret < 0) goto out_free_data_path; /* Enable data path */ ret = wl1251_cmd_data_path(wl, wl->channel, 1); if (ret < 0) goto out_free_data_path; /* Default power state */ ret = wl1251_hw_init_power_auth(wl); if (ret < 0) goto out_free_data_path; wl_mem_map = wl->target_mem_map; wl1251_info("%d tx blocks at 0x%x, %d rx blocks at 0x%x", wl_mem_map->num_tx_mem_blocks, wl->data_path->tx_control_addr, wl_mem_map->num_rx_mem_blocks, wl->data_path->rx_control_addr); return 0; out_free_data_path: kfree(wl->data_path); out_free_memmap: kfree(wl->target_mem_map); return ret; }
gpl-2.0
dwindsor/linux-stable
drivers/net/ethernet/freescale/xgmac_mdio.c
937
7936
/* * QorIQ 10G MDIO Controller * * Copyright 2012 Freescale Semiconductor, Inc. * * Authors: Andy Fleming <afleming@freescale.com> * Timur Tabi <timur@freescale.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/phy.h> #include <linux/mdio.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/of_mdio.h> /* Number of microseconds to wait for a register to respond */ #define TIMEOUT 1000 struct tgec_mdio_controller { __be32 reserved[12]; __be32 mdio_stat; /* MDIO configuration and status */ __be32 mdio_ctl; /* MDIO control */ __be32 mdio_data; /* MDIO data */ __be32 mdio_addr; /* MDIO address */ } __packed; #define MDIO_STAT_ENC BIT(6) #define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) #define MDIO_STAT_BSY BIT(0) #define MDIO_STAT_RD_ER BIT(1) #define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) #define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) #define MDIO_CTL_PRE_DIS BIT(10) #define MDIO_CTL_SCAN_EN BIT(11) #define MDIO_CTL_POST_INC BIT(14) #define MDIO_CTL_READ BIT(15) #define MDIO_DATA(x) (x & 0xffff) #define MDIO_DATA_BSY BIT(31) struct mdio_fsl_priv { struct tgec_mdio_controller __iomem *mdio_base; bool is_little_endian; }; static u32 xgmac_read32(void __iomem *regs, bool is_little_endian) { if (is_little_endian) return ioread32(regs); else return ioread32be(regs); } static void xgmac_write32(u32 value, void __iomem *regs, bool is_little_endian) { if (is_little_endian) iowrite32(value, regs); else iowrite32be(value, regs); } /* * Wait until the MDIO bus is free */ static int xgmac_wait_until_free(struct device *dev, struct tgec_mdio_controller __iomem *regs, bool is_little_endian) { unsigned int timeout; /* Wait till the bus is free */ timeout = TIMEOUT; while 
((xgmac_read32(&regs->mdio_stat, is_little_endian) & MDIO_STAT_BSY) && timeout) { cpu_relax(); timeout--; } if (!timeout) { dev_err(dev, "timeout waiting for bus to be free\n"); return -ETIMEDOUT; } return 0; } /* * Wait till the MDIO read or write operation is complete */ static int xgmac_wait_until_done(struct device *dev, struct tgec_mdio_controller __iomem *regs, bool is_little_endian) { unsigned int timeout; /* Wait till the MDIO write is complete */ timeout = TIMEOUT; while ((xgmac_read32(&regs->mdio_stat, is_little_endian) & MDIO_STAT_BSY) && timeout) { cpu_relax(); timeout--; } if (!timeout) { dev_err(dev, "timeout waiting for operation to complete\n"); return -ETIMEDOUT; } return 0; } /* * Write value to the PHY for this device to the register at regnum,waiting * until the write is done before it returns. All PHY configuration has to be * done through the TSEC1 MIIM regs. */ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) { struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; struct tgec_mdio_controller __iomem *regs = priv->mdio_base; uint16_t dev_addr; u32 mdio_ctl, mdio_stat; int ret; bool endian = priv->is_little_endian; mdio_stat = xgmac_read32(&regs->mdio_stat, endian); if (regnum & MII_ADDR_C45) { /* Clause 45 (ie 10G) */ dev_addr = (regnum >> 16) & 0x1f; mdio_stat |= MDIO_STAT_ENC; } else { /* Clause 22 (ie 1G) */ dev_addr = regnum & 0x1f; mdio_stat &= ~MDIO_STAT_ENC; } xgmac_write32(mdio_stat, &regs->mdio_stat, endian); ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; /* Set the port and dev addr */ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian); /* Set the register address */ if (regnum & MII_ADDR_C45) { xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian); ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; } /* Write the value to the register */ xgmac_write32(MDIO_DATA(value), 
&regs->mdio_data, endian); ret = xgmac_wait_until_done(&bus->dev, regs, endian); if (ret) return ret; return 0; } /* * Reads from register regnum in the PHY for device dev, returning the value. * Clears miimcom first. All PHY configuration has to be done through the * TSEC1 MIIM regs. */ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; struct tgec_mdio_controller __iomem *regs = priv->mdio_base; uint16_t dev_addr; uint32_t mdio_stat; uint32_t mdio_ctl; uint16_t value; int ret; bool endian = priv->is_little_endian; mdio_stat = xgmac_read32(&regs->mdio_stat, endian); if (regnum & MII_ADDR_C45) { dev_addr = (regnum >> 16) & 0x1f; mdio_stat |= MDIO_STAT_ENC; } else { dev_addr = regnum & 0x1f; mdio_stat &= ~MDIO_STAT_ENC; } xgmac_write32(mdio_stat, &regs->mdio_stat, endian); ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; /* Set the Port and Device Addrs */ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian); /* Set the register address */ if (regnum & MII_ADDR_C45) { xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian); ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; } /* Initiate the read */ xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian); ret = xgmac_wait_until_done(&bus->dev, regs, endian); if (ret) return ret; /* Return all Fs if nothing was there */ if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) { dev_err(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); return 0xffff; } value = xgmac_read32(&regs->mdio_data, endian) & 0xffff; dev_dbg(&bus->dev, "read %04x\n", value); return value; } static int xgmac_mdio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; struct resource res; struct mdio_fsl_priv *priv; int ret; ret = 
of_address_to_resource(np, 0, &res); if (ret) { dev_err(&pdev->dev, "could not obtain address\n"); return ret; } bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv)); if (!bus) return -ENOMEM; bus->name = "Freescale XGMAC MDIO Bus"; bus->read = xgmac_mdio_read; bus->write = xgmac_mdio_write; bus->parent = &pdev->dev; snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); /* Set the PHY base address */ priv = bus->priv; priv->mdio_base = of_iomap(np, 0); if (!priv->mdio_base) { ret = -ENOMEM; goto err_ioremap; } if (of_get_property(pdev->dev.of_node, "little-endian", NULL)) priv->is_little_endian = true; else priv->is_little_endian = false; ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "cannot register MDIO bus\n"); goto err_registration; } platform_set_drvdata(pdev, bus); return 0; err_registration: iounmap(priv->mdio_base); err_ioremap: mdiobus_free(bus); return ret; } static int xgmac_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); iounmap(bus->priv); mdiobus_free(bus); return 0; } static const struct of_device_id xgmac_mdio_match[] = { { .compatible = "fsl,fman-xmdio", }, { .compatible = "fsl,fman-memac-mdio", }, {}, }; MODULE_DEVICE_TABLE(of, xgmac_mdio_match); static struct platform_driver xgmac_mdio_driver = { .driver = { .name = "fsl-fman_xmdio", .of_match_table = xgmac_mdio_match, }, .probe = xgmac_mdio_probe, .remove = xgmac_mdio_remove, }; module_platform_driver(xgmac_mdio_driver); MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller"); MODULE_LICENSE("GPL v2");
gpl-2.0
ShinySide/HispAsian_Kernel_NH7
drivers/media/video/msm-bayer/mercury/msm_mercury_platform.c
1193
5276
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/android_pmem.h> #include <mach/clk.h> #include <mach/camera.h> #include <mach/msm_subsystem_map.h> #include "msm_mercury_platform.h" #include "msm_mercury_sync.h" #include "msm_mercury_common.h" #include "msm_mercury_hw.h" struct ion_client *mercury_client; static struct msm_cam_clk_info mercury_jpegd_clk_info[] = { {"core_clk", 200000000}, {"iface_clk", -1} }; void msm_mercury_platform_p2v(struct file *file, struct ion_handle **ionhandle) { #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION ion_unmap_iommu(mercury_client, *ionhandle, CAMERA_DOMAIN, GEN_POOL); ion_free(mercury_client, *ionhandle); *ionhandle = NULL; #elif CONFIG_ANDROID_PMEM put_pmem_file(file); #endif } uint32_t msm_mercury_platform_v2p(int fd, uint32_t len, struct file **file_p, struct ion_handle **ionhandle) { unsigned long paddr; unsigned long size; int rc; #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION *ionhandle = ion_import_dma_buf(mercury_client, fd); if (IS_ERR_OR_NULL(*ionhandle)) return 0; rc = ion_map_iommu(mercury_client, *ionhandle, CAMERA_DOMAIN, GEN_POOL, SZ_4K, 0, &paddr, (unsigned long *)&size, 0, 0); #elif CONFIG_ANDROID_PMEM unsigned long kvstart; rc = get_pmem_file(fd, &paddr, &kvstart, &size, file_p); #else rc = 0; paddr = 0; size = 0; #endif if (rc < 0) { MCR_PR_ERR("%s: get_pmem_file fd %d error %d\n", __func__, fd, rc); goto error1; } /* validate user input */ if (len > size) { MCR_PR_ERR("%s: invalid 
offset + len\n", __func__); goto error1; } return paddr; error1: #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION ion_free(mercury_client, *ionhandle); #endif return 0; } int msm_mercury_platform_init(struct platform_device *pdev, struct resource **mem, void **base, int *irq, irqreturn_t (*handler) (int, void *), void *context) { int rc = 0; int mercury_irq; struct resource *mercury_mem, *mercury_io, *mercury_irq_res; void *mercury_base; struct msm_mercury_device *pmercury_dev = (struct msm_mercury_device *) context; MCR_DBG("%s:%d]\n", __func__, __LINE__); mercury_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mercury_mem) { MCR_PR_ERR("%s: no mem resource?\n", __func__); return -ENODEV; } mercury_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!mercury_irq_res) { MCR_PR_ERR("no irq resource?\n"); return -ENODEV; } mercury_irq = mercury_irq_res->start; mercury_io = request_mem_region(mercury_mem->start, resource_size(mercury_mem), pdev->name); if (!mercury_io) { MCR_PR_ERR("%s: region already claimed\n", __func__); return -EBUSY; } MCR_DBG("%s:%d]\n", __func__, __LINE__); mercury_base = ioremap(mercury_mem->start, resource_size(mercury_mem)); if (!mercury_base) { rc = -ENOMEM; MCR_PR_ERR("%s: ioremap failed\n", __func__); goto fail1; } MCR_DBG("%s:%d]\n", __func__, __LINE__); rc = msm_cam_clk_enable(&pmercury_dev->pdev->dev, mercury_jpegd_clk_info, pmercury_dev->mercury_clk, ARRAY_SIZE(mercury_jpegd_clk_info), 1); if (rc < 0) MCR_PR_ERR("%s:%d] rc = %d\n", __func__, __LINE__, rc); MCR_DBG("%s:%d]\n", __func__, __LINE__); msm_mercury_hw_init(mercury_base, resource_size(mercury_mem)); rc = request_irq(mercury_irq, handler, IRQF_TRIGGER_RISING, "mercury", context); if (rc) { MCR_PR_ERR("%s: request_irq failed, %d\n", __func__, mercury_irq); goto fail3; } MCR_DBG("%s:%d]\n", __func__, __LINE__); *mem = mercury_mem; *base = mercury_base; *irq = mercury_irq; MCR_DBG("%s:%d]\n", __func__, __LINE__); #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION mercury_client = 
msm_ion_client_create(-1, "camera/mercury"); #endif MCR_PR_ERR("%s:%d] success\n", __func__, __LINE__); return rc; fail3: MCR_DBG("%s:%d]\n", __func__, __LINE__); msm_cam_clk_enable(&pmercury_dev->pdev->dev, mercury_jpegd_clk_info, pmercury_dev->mercury_clk, ARRAY_SIZE(mercury_jpegd_clk_info), 0); MCR_DBG("%s:%d]\n", __func__, __LINE__); iounmap(mercury_base); fail1: MCR_DBG("%s:%d]\n", __func__, __LINE__); release_mem_region(mercury_mem->start, resource_size(mercury_mem)); MCR_DBG("%s:%d]\n", __func__, __LINE__); return rc; } int msm_mercury_platform_release(struct resource *mem, void *base, int irq, void *context) { int result = 0; struct msm_mercury_device *pmercury_dev = (struct msm_mercury_device *) context; free_irq(irq, context); msm_cam_clk_enable(&pmercury_dev->pdev->dev, mercury_jpegd_clk_info, pmercury_dev->mercury_clk, ARRAY_SIZE(mercury_jpegd_clk_info), 0); iounmap(base); release_mem_region(mem->start, resource_size(mem)); #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION ion_client_destroy(mercury_client); #endif MCR_DBG("%s:%d] success\n", __func__, __LINE__); return result; }
gpl-2.0
MingquanLiang/linux
arch/powerpc/platforms/85xx/bsc913x_qds.c
1449
1723
/* * BSC913xQDS Board Setup * * Author: * Harninder Rai <harninder.rai@freescale.com> * Priyanka Jain <Priyanka.Jain@freescale.com> * * Copyright 2014 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/of_platform.h> #include <linux/pci.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <asm/udbg.h> #include "mpc85xx.h" #include "smp.h" void __init bsc913x_qds_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); if (!mpic) pr_err("bsc913x: Failed to allocate MPIC structure\n"); else mpic_init(mpic); } /* * Setup the architecture */ static void __init bsc913x_qds_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("bsc913x_qds_setup_arch()", 0); #if defined(CONFIG_SMP) mpc85xx_smp_init(); #endif pr_info("bsc913x board from Freescale Semiconductor\n"); } machine_device_initcall(bsc9132_qds, mpc85xx_common_publish_devices); /* * Called very early, device-tree isn't unflattened */ static int __init bsc9132_qds_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,bsc9132qds"); } define_machine(bsc9132_qds) { .name = "BSC9132 QDS", .probe = bsc9132_qds_probe, .setup_arch = bsc913x_qds_setup_arch, .init_IRQ = bsc913x_qds_pic_init, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
SM-G920P/arter-kernel
drivers/staging/ozwpan/ozcdev.c
1705
13715
/* ----------------------------------------------------------------------------- * Copyright (c) 2011 Ozmo Inc * Released under the GNU General Public License Version 2 (GPLv2). * ----------------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/uaccess.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/poll.h> #include <linux/sched.h> #include "ozconfig.h" #include "ozprotocol.h" #include "oztrace.h" #include "ozappif.h" #include "ozeltbuf.h" #include "ozpd.h" #include "ozproto.h" #include "ozevent.h" #include "ozcdev.h" /*------------------------------------------------------------------------------ */ #define OZ_RD_BUF_SZ 256 struct oz_cdev { dev_t devnum; struct cdev cdev; wait_queue_head_t rdq; spinlock_t lock; u8 active_addr[ETH_ALEN]; struct oz_pd *active_pd; }; /* Per PD context for the serial service stored in the PD. */ struct oz_serial_ctx { atomic_t ref_count; u8 tx_seq_num; u8 rx_seq_num; u8 rd_buf[OZ_RD_BUF_SZ]; int rd_in; int rd_out; }; /*------------------------------------------------------------------------------ */ static struct oz_cdev g_cdev; static struct class *g_oz_class; /*------------------------------------------------------------------------------ * Context: process and softirq */ static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd) { struct oz_serial_ctx *ctx; spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]); ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1]; if (ctx) atomic_inc(&ctx->ref_count); spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]); return ctx; } /*------------------------------------------------------------------------------ * Context: softirq or process */ static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx) { if (atomic_dec_and_test(&ctx->ref_count)) { oz_trace("Dealloc serial context.\n"); kfree(ctx); } } 
/*------------------------------------------------------------------------------ * Context: process */ static int oz_cdev_open(struct inode *inode, struct file *filp) { struct oz_cdev *dev; oz_trace("oz_cdev_open()\n"); oz_trace("major = %d minor = %d\n", imajor(inode), iminor(inode)); dev = container_of(inode->i_cdev, struct oz_cdev, cdev); filp->private_data = dev; return 0; } /*------------------------------------------------------------------------------ * Context: process */ static int oz_cdev_release(struct inode *inode, struct file *filp) { oz_trace("oz_cdev_release()\n"); return 0; } /*------------------------------------------------------------------------------ * Context: process */ static ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count, loff_t *fpos) { int n; int ix; struct oz_pd *pd; struct oz_serial_ctx *ctx; spin_lock_bh(&g_cdev.lock); pd = g_cdev.active_pd; if (pd) oz_pd_get(pd); spin_unlock_bh(&g_cdev.lock); if (pd == NULL) return -1; ctx = oz_cdev_claim_ctx(pd); if (ctx == NULL) goto out2; n = ctx->rd_in - ctx->rd_out; if (n < 0) n += OZ_RD_BUF_SZ; if (count > n) count = n; ix = ctx->rd_out; n = OZ_RD_BUF_SZ - ix; if (n > count) n = count; if (copy_to_user(buf, &ctx->rd_buf[ix], n)) { count = 0; goto out1; } ix += n; if (ix == OZ_RD_BUF_SZ) ix = 0; if (n < count) { if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) { count = 0; goto out1; } ix = count-n; } ctx->rd_out = ix; out1: oz_cdev_release_ctx(ctx); out2: oz_pd_put(pd); return count; } /*------------------------------------------------------------------------------ * Context: process */ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count, loff_t *fpos) { struct oz_pd *pd; struct oz_elt_buf *eb; struct oz_elt_info *ei; struct oz_elt *elt; struct oz_app_hdr *app_hdr; struct oz_serial_ctx *ctx; if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr)) return -EINVAL; spin_lock_bh(&g_cdev.lock); pd = g_cdev.active_pd; if (pd) 
oz_pd_get(pd); spin_unlock_bh(&g_cdev.lock); if (pd == NULL) return -1; eb = &pd->elt_buff; ei = oz_elt_info_alloc(eb); if (ei == NULL) { count = 0; goto out; } elt = (struct oz_elt *)ei->data; app_hdr = (struct oz_app_hdr *)(elt+1); elt->length = sizeof(struct oz_app_hdr) + count; elt->type = OZ_ELT_APP_DATA; ei->app_id = OZ_APPID_SERIAL; ei->length = elt->length + sizeof(struct oz_elt); app_hdr->app_id = OZ_APPID_SERIAL; if (copy_from_user(app_hdr+1, buf, count)) goto out; spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]); ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1]; if (ctx) { app_hdr->elt_seq_num = ctx->tx_seq_num++; if (ctx->tx_seq_num == 0) ctx->tx_seq_num = 1; spin_lock(&eb->lock); if (oz_queue_elt_info(eb, 0, 0, ei) == 0) ei = NULL; spin_unlock(&eb->lock); } spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]); out: if (ei) { count = 0; spin_lock_bh(&eb->lock); oz_elt_info_free(eb, ei); spin_unlock_bh(&eb->lock); } oz_pd_put(pd); return count; } /*------------------------------------------------------------------------------ * Context: process */ static int oz_set_active_pd(const u8 *addr) { int rc = 0; struct oz_pd *pd; struct oz_pd *old_pd; pd = oz_pd_find(addr); if (pd) { spin_lock_bh(&g_cdev.lock); memcpy(g_cdev.active_addr, addr, ETH_ALEN); old_pd = g_cdev.active_pd; g_cdev.active_pd = pd; spin_unlock_bh(&g_cdev.lock); if (old_pd) oz_pd_put(old_pd); } else { if (is_zero_ether_addr(addr)) { spin_lock_bh(&g_cdev.lock); pd = g_cdev.active_pd; g_cdev.active_pd = NULL; memset(g_cdev.active_addr, 0, sizeof(g_cdev.active_addr)); spin_unlock_bh(&g_cdev.lock); if (pd) oz_pd_put(pd); } else { rc = -1; } } return rc; } /*------------------------------------------------------------------------------ * Context: process */ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int rc = 0; if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC) return -ENOTTY; if (_IOC_NR(cmd) > OZ_IOCTL_MAX) return -ENOTTY; if (_IOC_DIR(cmd) & _IOC_READ) rc = 
!access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); else if (_IOC_DIR(cmd) & _IOC_WRITE) rc = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); if (rc) return -EFAULT; switch (cmd) { case OZ_IOCTL_GET_PD_LIST: { struct oz_pd_list list; oz_trace("OZ_IOCTL_GET_PD_LIST\n"); memset(&list, 0, sizeof(list)); list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS); if (copy_to_user((void __user *)arg, &list, sizeof(list))) return -EFAULT; } break; case OZ_IOCTL_SET_ACTIVE_PD: { u8 addr[ETH_ALEN]; oz_trace("OZ_IOCTL_SET_ACTIVE_PD\n"); if (copy_from_user(addr, (void __user *)arg, ETH_ALEN)) return -EFAULT; rc = oz_set_active_pd(addr); } break; case OZ_IOCTL_GET_ACTIVE_PD: { u8 addr[ETH_ALEN]; oz_trace("OZ_IOCTL_GET_ACTIVE_PD\n"); spin_lock_bh(&g_cdev.lock); memcpy(addr, g_cdev.active_addr, ETH_ALEN); spin_unlock_bh(&g_cdev.lock); if (copy_to_user((void __user *)arg, addr, ETH_ALEN)) return -EFAULT; } break; case OZ_IOCTL_ADD_BINDING: case OZ_IOCTL_REMOVE_BINDING: { struct oz_binding_info b; if (copy_from_user(&b, (void __user *)arg, sizeof(struct oz_binding_info))) { return -EFAULT; } /* Make sure name is null terminated. 
*/ b.name[OZ_MAX_BINDING_LEN-1] = 0; if (cmd == OZ_IOCTL_ADD_BINDING) oz_binding_add(b.name); else oz_binding_remove(b.name); } break; } return rc; } /*------------------------------------------------------------------------------ * Context: process */ static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait) { unsigned int ret = 0; struct oz_cdev *dev = filp->private_data; oz_trace("Poll called wait = %p\n", wait); spin_lock_bh(&dev->lock); if (dev->active_pd) { struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd); if (ctx) { if (ctx->rd_in != ctx->rd_out) ret |= POLLIN | POLLRDNORM; oz_cdev_release_ctx(ctx); } } spin_unlock_bh(&dev->lock); if (wait) poll_wait(filp, &dev->rdq, wait); return ret; } /*------------------------------------------------------------------------------ */ static const struct file_operations oz_fops = { .owner = THIS_MODULE, .open = oz_cdev_open, .release = oz_cdev_release, .read = oz_cdev_read, .write = oz_cdev_write, .unlocked_ioctl = oz_cdev_ioctl, .poll = oz_cdev_poll }; /*------------------------------------------------------------------------------ * Context: process */ int oz_cdev_register(void) { int err; struct device *dev; memset(&g_cdev, 0, sizeof(g_cdev)); err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan"); if (err < 0) goto out3; oz_trace("Alloc dev number %d:%d\n", MAJOR(g_cdev.devnum), MINOR(g_cdev.devnum)); cdev_init(&g_cdev.cdev, &oz_fops); g_cdev.cdev.owner = THIS_MODULE; g_cdev.cdev.ops = &oz_fops; spin_lock_init(&g_cdev.lock); init_waitqueue_head(&g_cdev.rdq); err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1); if (err < 0) { oz_trace("Failed to add cdev\n"); goto out2; } g_oz_class = class_create(THIS_MODULE, "ozmo_wpan"); if (IS_ERR(g_oz_class)) { oz_trace("Failed to register ozmo_wpan class\n"); goto out1; } dev = device_create(g_oz_class, NULL, g_cdev.devnum, NULL, "ozwpan"); if (IS_ERR(dev)) { oz_trace("Failed to create sysfs entry for cdev\n"); goto out1; } return 0; out1: 
cdev_del(&g_cdev.cdev); out2: unregister_chrdev_region(g_cdev.devnum, 1); out3: return err; } /*------------------------------------------------------------------------------ * Context: process */ int oz_cdev_deregister(void) { cdev_del(&g_cdev.cdev); unregister_chrdev_region(g_cdev.devnum, 1); if (g_oz_class) { device_destroy(g_oz_class, g_cdev.devnum); class_destroy(g_oz_class); } return 0; } /*------------------------------------------------------------------------------ * Context: process */ int oz_cdev_init(void) { oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_SERIAL, NULL, 0); oz_app_enable(OZ_APPID_SERIAL, 1); return 0; } /*------------------------------------------------------------------------------ * Context: process */ void oz_cdev_term(void) { oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_SERIAL, NULL, 0); oz_app_enable(OZ_APPID_SERIAL, 0); } /*------------------------------------------------------------------------------ * Context: softirq-serialized */ int oz_cdev_start(struct oz_pd *pd, int resume) { struct oz_serial_ctx *ctx; struct oz_serial_ctx *old_ctx; oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_SERIAL, NULL, resume); if (resume) { oz_trace("Serial service resumed.\n"); return 0; } ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC); if (ctx == NULL) return -ENOMEM; atomic_set(&ctx->ref_count, 1); ctx->tx_seq_num = 1; spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]); old_ctx = pd->app_ctx[OZ_APPID_SERIAL-1]; if (old_ctx) { spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]); kfree(ctx); } else { pd->app_ctx[OZ_APPID_SERIAL-1] = ctx; spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]); } spin_lock(&g_cdev.lock); if ((g_cdev.active_pd == NULL) && (memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) { oz_pd_get(pd); g_cdev.active_pd = pd; oz_trace("Active PD arrived.\n"); } spin_unlock(&g_cdev.lock); oz_trace("Serial service started.\n"); return 0; } /*------------------------------------------------------------------------------ * Context: softirq or process 
*/ void oz_cdev_stop(struct oz_pd *pd, int pause) { struct oz_serial_ctx *ctx; oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_SERIAL, NULL, pause); if (pause) { oz_trace("Serial service paused.\n"); return; } spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]); ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1]; pd->app_ctx[OZ_APPID_SERIAL-1] = NULL; spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]); if (ctx) oz_cdev_release_ctx(ctx); spin_lock(&g_cdev.lock); if (pd == g_cdev.active_pd) g_cdev.active_pd = NULL; else pd = NULL; spin_unlock(&g_cdev.lock); if (pd) { oz_pd_put(pd); oz_trace("Active PD departed.\n"); } oz_trace("Serial service stopped.\n"); } /*------------------------------------------------------------------------------ * Context: softirq-serialized */ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt) { struct oz_serial_ctx *ctx; struct oz_app_hdr *app_hdr; u8 *data; int len; int space; int copy_sz; int ix; ctx = oz_cdev_claim_ctx(pd); if (ctx == NULL) { oz_trace("Cannot claim serial context.\n"); return; } app_hdr = (struct oz_app_hdr *)(elt+1); /* If sequence number is non-zero then check it is not a duplicate. */ if (app_hdr->elt_seq_num != 0) { if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) { /* Reject duplicate element. 
*/ oz_trace("Duplicate element:%02x %02x\n", app_hdr->elt_seq_num, ctx->rx_seq_num); goto out; } } ctx->rx_seq_num = app_hdr->elt_seq_num; len = elt->length - sizeof(struct oz_app_hdr); data = ((u8 *)(elt+1)) + sizeof(struct oz_app_hdr); if (len <= 0) goto out; space = ctx->rd_out - ctx->rd_in - 1; if (space < 0) space += OZ_RD_BUF_SZ; if (len > space) { oz_trace("Not enough space:%d %d\n", len, space); len = space; } ix = ctx->rd_in; copy_sz = OZ_RD_BUF_SZ - ix; if (copy_sz > len) copy_sz = len; memcpy(&ctx->rd_buf[ix], data, copy_sz); len -= copy_sz; ix += copy_sz; if (ix == OZ_RD_BUF_SZ) ix = 0; if (len) { memcpy(ctx->rd_buf, data+copy_sz, len); ix = len; } ctx->rd_in = ix; wake_up(&g_cdev.rdq); out: oz_cdev_release_ctx(ctx); }
gpl-2.0
SciAps/android-dm3730-kernel
arch/arm/mach-ixp4xx/gtwx5715-setup.c
2473
4619
/* * arch/arm/mach-ixp4xx/gtwx5715-setup.c * * Gemtek GTWX5715 (Linksys WRV54G) board setup * * Copyright (C) 2004 George T. Joseph * Derived from Coyote * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <linux/init.h> #include <linux/device.h> #include <linux/serial.h> #include <linux/tty.h> #include <linux/serial_8250.h> #include <asm/types.h> #include <asm/setup.h> #include <asm/memory.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> /* GPIO 5,6,7 and 12 are hard wired to the Kendin KS8995M Switch and operate as an SPI type interface. The details of the interface are available on Kendin/Micrel's web site. */ #define GTWX5715_KSSPI_SELECT 5 #define GTWX5715_KSSPI_TXD 6 #define GTWX5715_KSSPI_CLOCK 7 #define GTWX5715_KSSPI_RXD 12 /* The "reset" button is wired to GPIO 3. The GPIO is brought "low" when the button is pushed. 
*/ #define GTWX5715_BUTTON_GPIO 3 /* Board Label Front Label LED1 Power LED2 Wireless-G LED3 not populated but could be LED4 Internet LED5 - LED8 Controlled by KS8995M Switch LED9 DMZ */ #define GTWX5715_LED1_GPIO 2 #define GTWX5715_LED2_GPIO 9 #define GTWX5715_LED3_GPIO 8 #define GTWX5715_LED4_GPIO 1 #define GTWX5715_LED9_GPIO 4 /* * Xscale UART registers are 32 bits wide with only the least * significant 8 bits having any meaning. From a configuration * perspective, this means 2 things... * * Setting .regshift = 2 so that the standard 16550 registers * line up on every 4th byte. * * Shifting the register start virtual address +3 bytes when * compiled big-endian. Since register writes are done on a * single byte basis, if the shift isn't done the driver will * write the value into the most significant byte of the register, * which is ignored, instead of the least significant. */ #ifdef __ARMEB__ #define REG_OFFSET 3 #else #define REG_OFFSET 0 #endif /* * Only the second or "console" uart is connected on the gtwx5715. 
*/ static struct resource gtwx5715_uart_resources[] = { { .start = IXP4XX_UART2_BASE_PHYS, .end = IXP4XX_UART2_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, }, { .start = IRQ_IXP4XX_UART2, .end = IRQ_IXP4XX_UART2, .flags = IORESOURCE_IRQ, }, { }, }; static struct plat_serial8250_port gtwx5715_uart_platform_data[] = { { .mapbase = IXP4XX_UART2_BASE_PHYS, .membase = (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { }, }; static struct platform_device gtwx5715_uart_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = gtwx5715_uart_platform_data, }, .num_resources = 2, .resource = gtwx5715_uart_resources, }; static struct flash_platform_data gtwx5715_flash_data = { .map_name = "cfi_probe", .width = 2, }; static struct resource gtwx5715_flash_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device gtwx5715_flash = { .name = "IXP4XX-Flash", .id = 0, .dev = { .platform_data = &gtwx5715_flash_data, }, .num_resources = 1, .resource = &gtwx5715_flash_resource, }; static struct platform_device *gtwx5715_devices[] __initdata = { &gtwx5715_uart_device, &gtwx5715_flash, }; static void __init gtwx5715_init(void) { ixp4xx_sys_init(); gtwx5715_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); gtwx5715_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_8M - 1; platform_add_devices(gtwx5715_devices, ARRAY_SIZE(gtwx5715_devices)); } MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)") /* Maintainer: George Joseph */ .map_io = ixp4xx_map_io, .init_irq = ixp4xx_init_irq, .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = gtwx5715_init, MACHINE_END
gpl-2.0
zeroblade1984/msm8939-yureka-kk
drivers/bluetooth/btsdio.c
2473
8423
/* * * Generic Bluetooth SDIO driver * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * Copyright (C) 2007 Marcel Holtmann <marcel@holtmann.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio_func.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "0.1" static const struct sdio_device_id btsdio_table[] = { /* Generic Bluetooth Type-A SDIO device */ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_A) }, /* Generic Bluetooth Type-B SDIO device */ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) }, /* Generic Bluetooth AMP controller */ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(sdio, btsdio_table); struct btsdio_data { struct hci_dev *hdev; struct sdio_func *func; struct work_struct work; struct sk_buff_head txq; }; #define REG_RDAT 0x00 /* Receiver Data */ #define REG_TDAT 0x00 /* Transmitter Data */ #define REG_PC_RRT 0x10 /* Read Packet Control */ #define REG_PC_WRT 0x11 /* Write Packet Control */ #define REG_RTC_STAT 0x12 /* Retry Control Status */ #define REG_RTC_SET 0x12 /* Retry Control Set */ 
#define REG_INTRD 0x13 /* Interrupt Indication */ #define REG_CL_INTRD 0x13 /* Interrupt Clear */ #define REG_EN_INTRD 0x14 /* Interrupt Enable */ #define REG_MD_STAT 0x20 /* Bluetooth Mode Status */ static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb) { int err; BT_DBG("%s", data->hdev->name); /* Prepend Type-A header */ skb_push(skb, 4); skb->data[0] = (skb->len & 0x0000ff); skb->data[1] = (skb->len & 0x00ff00) >> 8; skb->data[2] = (skb->len & 0xff0000) >> 16; skb->data[3] = bt_cb(skb)->pkt_type; err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len); if (err < 0) { skb_pull(skb, 4); sdio_writeb(data->func, 0x01, REG_PC_WRT, NULL); return err; } data->hdev->stat.byte_tx += skb->len; kfree_skb(skb); return 0; } static void btsdio_work(struct work_struct *work) { struct btsdio_data *data = container_of(work, struct btsdio_data, work); struct sk_buff *skb; int err; BT_DBG("%s", data->hdev->name); sdio_claim_host(data->func); while ((skb = skb_dequeue(&data->txq))) { err = btsdio_tx_packet(data, skb); if (err < 0) { data->hdev->stat.err_tx++; skb_queue_head(&data->txq, skb); break; } } sdio_release_host(data->func); } static int btsdio_rx_packet(struct btsdio_data *data) { u8 hdr[4] __attribute__ ((aligned(4))); struct sk_buff *skb; int err, len; BT_DBG("%s", data->hdev->name); err = sdio_readsb(data->func, hdr, REG_RDAT, 4); if (err < 0) return err; len = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16); if (len < 4 || len > 65543) return -EILSEQ; skb = bt_skb_alloc(len - 4, GFP_KERNEL); if (!skb) { /* Out of memory. Prepare a read retry and just * return with the expectation that the next time * we're called we'll have more memory. 
*/ return -ENOMEM; } skb_put(skb, len - 4); err = sdio_readsb(data->func, skb->data, REG_RDAT, len - 4); if (err < 0) { kfree_skb(skb); return err; } data->hdev->stat.byte_rx += len; skb->dev = (void *) data->hdev; bt_cb(skb)->pkt_type = hdr[3]; err = hci_recv_frame(skb); if (err < 0) return err; sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL); return 0; } static void btsdio_interrupt(struct sdio_func *func) { struct btsdio_data *data = sdio_get_drvdata(func); int intrd; BT_DBG("%s", data->hdev->name); intrd = sdio_readb(func, REG_INTRD, NULL); if (intrd & 0x01) { sdio_writeb(func, 0x01, REG_CL_INTRD, NULL); if (btsdio_rx_packet(data) < 0) { data->hdev->stat.err_rx++; sdio_writeb(data->func, 0x01, REG_PC_RRT, NULL); } } } static int btsdio_open(struct hci_dev *hdev) { struct btsdio_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s", hdev->name); if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) return 0; sdio_claim_host(data->func); err = sdio_enable_func(data->func); if (err < 0) { clear_bit(HCI_RUNNING, &hdev->flags); goto release; } err = sdio_claim_irq(data->func, btsdio_interrupt); if (err < 0) { sdio_disable_func(data->func); clear_bit(HCI_RUNNING, &hdev->flags); goto release; } if (data->func->class == SDIO_CLASS_BT_B) sdio_writeb(data->func, 0x00, REG_MD_STAT, NULL); sdio_writeb(data->func, 0x01, REG_EN_INTRD, NULL); release: sdio_release_host(data->func); return err; } static int btsdio_close(struct hci_dev *hdev) { struct btsdio_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) return 0; sdio_claim_host(data->func); sdio_writeb(data->func, 0x00, REG_EN_INTRD, NULL); sdio_release_irq(data->func); sdio_disable_func(data->func); sdio_release_host(data->func); return 0; } static int btsdio_flush(struct hci_dev *hdev) { struct btsdio_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); skb_queue_purge(&data->txq); return 0; } static int btsdio_send_frame(struct sk_buff *skb) { struct 
hci_dev *hdev = (struct hci_dev *) skb->dev; struct btsdio_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); if (!test_bit(HCI_RUNNING, &hdev->flags)) return -EBUSY; switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; default: return -EILSEQ; } skb_queue_tail(&data->txq, skb); schedule_work(&data->work); return 0; } static int btsdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { struct btsdio_data *data; struct hci_dev *hdev; struct sdio_func_tuple *tuple = func->tuples; int err; BT_DBG("func %p id %p class 0x%04x", func, id, func->class); while (tuple) { BT_DBG("code 0x%x size %d", tuple->code, tuple->size); tuple = tuple->next; } data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->func = func; INIT_WORK(&data->work, btsdio_work); skb_queue_head_init(&data->txq); hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; hdev->bus = HCI_SDIO; hci_set_drvdata(hdev, data); if (id->class == SDIO_CLASS_BT_AMP) hdev->dev_type = HCI_AMP; else hdev->dev_type = HCI_BREDR; data->hdev = hdev; SET_HCIDEV_DEV(hdev, &func->dev); hdev->open = btsdio_open; hdev->close = btsdio_close; hdev->flush = btsdio_flush; hdev->send = btsdio_send_frame; err = hci_register_dev(hdev); if (err < 0) { hci_free_dev(hdev); return err; } sdio_set_drvdata(func, data); return 0; } static void btsdio_remove(struct sdio_func *func) { struct btsdio_data *data = sdio_get_drvdata(func); struct hci_dev *hdev; BT_DBG("func %p", func); if (!data) return; hdev = data->hdev; sdio_set_drvdata(func, NULL); hci_unregister_dev(hdev); hci_free_dev(hdev); } static struct sdio_driver btsdio_driver = { .name = "btsdio", .probe = btsdio_probe, .remove = btsdio_remove, .id_table = btsdio_table, }; static int __init btsdio_init(void) { BT_INFO("Generic Bluetooth SDIO driver ver %s", VERSION); return 
sdio_register_driver(&btsdio_driver); } static void __exit btsdio_exit(void) { sdio_unregister_driver(&btsdio_driver); } module_init(btsdio_init); module_exit(btsdio_exit); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
gpl-2.0
ambikadash/linux-fqt
drivers/isdn/mISDN/core.c
2473
8995
/* * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/slab.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/mISDNif.h> #include "core.h" static u_int debug; MODULE_AUTHOR("Karsten Keil"); MODULE_LICENSE("GPL"); module_param(debug, uint, S_IRUGO | S_IWUSR); static u64 device_ids; #define MAX_DEVICE_ID 63 static LIST_HEAD(Bprotocols); static DEFINE_RWLOCK(bp_lock); static void mISDN_dev_release(struct device *dev) { /* nothing to do: the device is part of its parent's data structure */ } static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return -ENODEV; return sprintf(buf, "%d\n", mdev->id); } static DEVICE_ATTR_RO(id); static ssize_t nrbchan_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return -ENODEV; return sprintf(buf, "%d\n", mdev->nrbchan); } static DEVICE_ATTR_RO(nrbchan); static ssize_t d_protocols_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return -ENODEV; return sprintf(buf, "%d\n", mdev->Dprotocols); } static DEVICE_ATTR_RO(d_protocols); static ssize_t b_protocols_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return -ENODEV; return sprintf(buf, "%d\n", mdev->Bprotocols | get_all_Bprotocols()); } static 
DEVICE_ATTR_RO(b_protocols); static ssize_t protocol_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return -ENODEV; return sprintf(buf, "%d\n", mdev->D.protocol); } static DEVICE_ATTR_RO(protocol); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { strcpy(buf, dev_name(dev)); return strlen(buf); } static DEVICE_ATTR_RO(name); #if 0 /* hangs */ static ssize_t name_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int err = 0; char *out = kmalloc(count + 1, GFP_KERNEL); if (!out) return -ENOMEM; memcpy(out, buf, count); if (count && out[count - 1] == '\n') out[--count] = 0; if (count) err = device_rename(dev, out); kfree(out); return (err < 0) ? err : count; } static DEVICE_ATTR_RW(name); #endif static ssize_t channelmap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); char *bp = buf; int i; for (i = 0; i <= mdev->nrbchan; i++) *bp++ = test_channelmap(i, mdev->channelmap) ? 
'1' : '0'; return bp - buf; } static DEVICE_ATTR_RO(channelmap); static struct attribute *mISDN_attrs[] = { &dev_attr_id.attr, &dev_attr_d_protocols.attr, &dev_attr_b_protocols.attr, &dev_attr_protocol.attr, &dev_attr_channelmap.attr, &dev_attr_nrbchan.attr, &dev_attr_name.attr, NULL, }; ATTRIBUTE_GROUPS(mISDN); static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return 0; if (add_uevent_var(env, "nchans=%d", mdev->nrbchan)) return -ENOMEM; return 0; } static void mISDN_class_release(struct class *cls) { /* do nothing, it's static */ } static struct class mISDN_class = { .name = "mISDN", .owner = THIS_MODULE, .dev_uevent = mISDN_uevent, .dev_groups = mISDN_groups, .dev_release = mISDN_dev_release, .class_release = mISDN_class_release, }; static int _get_mdevice(struct device *dev, const void *id) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return 0; if (mdev->id != *(const u_int *)id) return 0; return 1; } struct mISDNdevice *get_mdevice(u_int id) { return dev_to_mISDN(class_find_device(&mISDN_class, NULL, &id, _get_mdevice)); } static int _get_mdevice_count(struct device *dev, void *cnt) { *(int *)cnt += 1; return 0; } int get_mdevice_count(void) { int cnt = 0; class_for_each_device(&mISDN_class, NULL, &cnt, _get_mdevice_count); return cnt; } static int get_free_devid(void) { u_int i; for (i = 0; i <= MAX_DEVICE_ID; i++) if (!test_and_set_bit(i, (u_long *)&device_ids)) break; if (i > MAX_DEVICE_ID) return -EBUSY; return i; } int mISDN_register_device(struct mISDNdevice *dev, struct device *parent, char *name) { int err; err = get_free_devid(); if (err < 0) goto error1; dev->id = err; device_initialize(&dev->dev); if (name && name[0]) dev_set_name(&dev->dev, "%s", name); else dev_set_name(&dev->dev, "mISDN%d", dev->id); if (debug & DEBUG_CORE) printk(KERN_DEBUG "mISDN_register %s %d\n", dev_name(&dev->dev), dev->id); err = create_stack(dev); if (err) goto error1; 
dev->dev.class = &mISDN_class; dev->dev.platform_data = dev; dev->dev.parent = parent; dev_set_drvdata(&dev->dev, dev); err = device_add(&dev->dev); if (err) goto error3; return 0; error3: delete_stack(dev); return err; error1: return err; } EXPORT_SYMBOL(mISDN_register_device); void mISDN_unregister_device(struct mISDNdevice *dev) { if (debug & DEBUG_CORE) printk(KERN_DEBUG "mISDN_unregister %s %d\n", dev_name(&dev->dev), dev->id); /* sysfs_remove_link(&dev->dev.kobj, "device"); */ device_del(&dev->dev); dev_set_drvdata(&dev->dev, NULL); test_and_clear_bit(dev->id, (u_long *)&device_ids); delete_stack(dev); put_device(&dev->dev); } EXPORT_SYMBOL(mISDN_unregister_device); u_int get_all_Bprotocols(void) { struct Bprotocol *bp; u_int m = 0; read_lock(&bp_lock); list_for_each_entry(bp, &Bprotocols, list) m |= bp->Bprotocols; read_unlock(&bp_lock); return m; } struct Bprotocol * get_Bprotocol4mask(u_int m) { struct Bprotocol *bp; read_lock(&bp_lock); list_for_each_entry(bp, &Bprotocols, list) if (bp->Bprotocols & m) { read_unlock(&bp_lock); return bp; } read_unlock(&bp_lock); return NULL; } struct Bprotocol * get_Bprotocol4id(u_int id) { u_int m; if (id < ISDN_P_B_START || id > 63) { printk(KERN_WARNING "%s id not in range %d\n", __func__, id); return NULL; } m = 1 << (id & ISDN_P_B_MASK); return get_Bprotocol4mask(m); } int mISDN_register_Bprotocol(struct Bprotocol *bp) { u_long flags; struct Bprotocol *old; if (debug & DEBUG_CORE) printk(KERN_DEBUG "%s: %s/%x\n", __func__, bp->name, bp->Bprotocols); old = get_Bprotocol4mask(bp->Bprotocols); if (old) { printk(KERN_WARNING "register duplicate protocol old %s/%x new %s/%x\n", old->name, old->Bprotocols, bp->name, bp->Bprotocols); return -EBUSY; } write_lock_irqsave(&bp_lock, flags); list_add_tail(&bp->list, &Bprotocols); write_unlock_irqrestore(&bp_lock, flags); return 0; } EXPORT_SYMBOL(mISDN_register_Bprotocol); void mISDN_unregister_Bprotocol(struct Bprotocol *bp) { u_long flags; if (debug & DEBUG_CORE) 
printk(KERN_DEBUG "%s: %s/%x\n", __func__, bp->name, bp->Bprotocols); write_lock_irqsave(&bp_lock, flags); list_del(&bp->list); write_unlock_irqrestore(&bp_lock, flags); } EXPORT_SYMBOL(mISDN_unregister_Bprotocol); static const char *msg_no_channel = "<no channel>"; static const char *msg_no_stack = "<no stack>"; static const char *msg_no_stackdev = "<no stack device>"; const char *mISDNDevName4ch(struct mISDNchannel *ch) { if (!ch) return msg_no_channel; if (!ch->st) return msg_no_stack; if (!ch->st->dev) return msg_no_stackdev; return dev_name(&ch->st->dev->dev); }; EXPORT_SYMBOL(mISDNDevName4ch); static int mISDNInit(void) { int err; printk(KERN_INFO "Modular ISDN core version %d.%d.%d\n", MISDN_MAJOR_VERSION, MISDN_MINOR_VERSION, MISDN_RELEASE); mISDN_init_clock(&debug); mISDN_initstack(&debug); err = class_register(&mISDN_class); if (err) goto error1; err = mISDN_inittimer(&debug); if (err) goto error2; err = l1_init(&debug); if (err) goto error3; err = Isdnl2_Init(&debug); if (err) goto error4; err = misdn_sock_init(&debug); if (err) goto error5; return 0; error5: Isdnl2_cleanup(); error4: l1_cleanup(); error3: mISDN_timer_cleanup(); error2: class_unregister(&mISDN_class); error1: return err; } static void mISDN_cleanup(void) { misdn_sock_cleanup(); Isdnl2_cleanup(); l1_cleanup(); mISDN_timer_cleanup(); class_unregister(&mISDN_class); printk(KERN_DEBUG "mISDNcore unloaded\n"); } module_init(mISDNInit); module_exit(mISDN_cleanup);
gpl-2.0
carepack/android_kernel_google_msm
fs/udf/inode.c
3241
63150
/* * inode.c * * PURPOSE * Inode handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1998 Dave Boynton * (C) 1998-2004 Ben Fennema * (C) 1999-2000 Stelias Computing Inc * * HISTORY * * 10/04/98 dgb Added rudimentary directory functions * 10/07/98 Fully working udf_block_map! It works! * 11/25/98 bmap altered to better support extents * 12/06/98 blf partition support in udf_iget, udf_block_map * and udf_read_inode * 12/12/98 rewrote udf_block_map to handle next extents and descs across * block boundaries (which is not actually allowed) * 12/20/98 added support for strategy 4096 * 03/07/99 rewrote udf_block_map (again) * New funcs, inode_bmap, udf_next_aext * 04/19/99 Support for writing device EA's for major/minor # */ #include "udfdecl.h" #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/slab.h> #include <linux/crc-itu-t.h> #include <linux/mpage.h> #include "udf_i.h" #include "udf_sb.h" MODULE_AUTHOR("Ben Fennema"); MODULE_DESCRIPTION("Universal Disk Format Filesystem"); MODULE_LICENSE("GPL"); #define EXTENT_MERGE_SIZE 5 static umode_t udf_convert_permissions(struct fileEntry *); static int udf_update_inode(struct inode *, int); static void udf_fill_inode(struct inode *, struct buffer_head *); static int udf_sync_inode(struct inode *inode); static int udf_alloc_i_data(struct inode *inode, size_t size); static sector_t inode_getblk(struct inode *, sector_t, int *, int *); static int8_t udf_insert_aext(struct inode *, struct extent_position, struct kernel_lb_addr, uint32_t); static void udf_split_extents(struct inode *, int *, int, int, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_prealloc_extents(struct 
inode *, int, int, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_merge_extents(struct inode *, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_update_extents(struct inode *, struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int, struct extent_position *); static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); void udf_evict_inode(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); int want_delete = 0; if (!inode->i_nlink && !is_bad_inode(inode)) { want_delete = 1; udf_setsize(inode, 0); udf_update_inode(inode, IS_SYNC(inode)); } else truncate_inode_pages(&inode->i_data, 0); invalidate_inode_buffers(inode); end_writeback(inode); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && inode->i_size != iinfo->i_lenExtents) { udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n", inode->i_ino, inode->i_mode, (unsigned long long)inode->i_size, (unsigned long long)iinfo->i_lenExtents); } kfree(iinfo->i_ext.i_data); iinfo->i_ext.i_data = NULL; if (want_delete) { udf_free_inode(inode); } } static int udf_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, udf_get_block, wbc); } static int udf_readpage(struct file *file, struct page *page) { return mpage_readpage(page, udf_get_block); } static int udf_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, udf_get_block); } static int udf_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block); if (unlikely(ret)) { struct inode *inode = mapping->host; struct udf_inode_info *iinfo = UDF_I(inode); loff_t isize = inode->i_size; if (pos + len > isize) { 
truncate_pagecache(inode, pos + len, isize); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); } } } return ret; } static sector_t udf_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, udf_get_block); } const struct address_space_operations udf_aops = { .readpage = udf_readpage, .readpages = udf_readpages, .writepage = udf_writepage, .write_begin = udf_write_begin, .write_end = generic_write_end, .bmap = udf_bmap, }; /* * Expand file stored in ICB to a normal one-block-file * * This function requires i_data_sem for writing and releases it. * This function requires i_mutex held */ int udf_expand_file_adinicb(struct inode *inode) { struct page *page; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); int err; struct writeback_control udf_wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = 1, }; if (!iinfo->i_lenAlloc) { if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; up_write(&iinfo->i_data_sem); mark_inode_dirty(inode); return 0; } /* * Release i_data_sem so that we can lock a page - page lock ranks * above i_data_sem. i_mutex still protects us against file changes. 
*/ up_write(&iinfo->i_data_sem); page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); if (!page) return -ENOMEM; if (!PageUptodate(page)) { kaddr = kmap(page); memset(kaddr + iinfo->i_lenAlloc, 0x00, PAGE_CACHE_SIZE - iinfo->i_lenAlloc); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, iinfo->i_lenAlloc); flush_dcache_page(page); SetPageUptodate(page); kunmap(page); } down_write(&iinfo->i_data_sem); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; up_write(&iinfo->i_data_sem); err = inode->i_data.a_ops->writepage(page, &udf_wbc); if (err) { /* Restore everything back so that we don't lose data... */ lock_page(page); kaddr = kmap(page); down_write(&iinfo->i_data_sem); memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); kunmap(page); unlock_page(page); iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; inode->i_data.a_ops = &udf_adinicb_aops; up_write(&iinfo->i_data_sem); } page_cache_release(page); mark_inode_dirty(inode); return err; } struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block, int *err) { int newblock; struct buffer_head *dbh = NULL; struct kernel_lb_addr eloc; uint8_t alloctype; struct extent_position epos; struct udf_fileident_bh sfibh, dfibh; loff_t f_pos = udf_ext0_offset(inode); int size = udf_ext0_offset(inode) + inode->i_size; struct fileIdentDesc cfi, *sfi, *dfi; struct udf_inode_info *iinfo = UDF_I(inode); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) alloctype = ICBTAG_FLAG_AD_SHORT; else alloctype = ICBTAG_FLAG_AD_LONG; if (!inode->i_size) { iinfo->i_alloc_type = alloctype; mark_inode_dirty(inode); return NULL; } /* alloc block, and copy data to it */ *block = udf_new_block(inode->i_sb, inode, 
iinfo->i_location.partitionReferenceNum, iinfo->i_location.logicalBlockNum, err); if (!(*block)) return NULL; newblock = udf_get_pblock(inode->i_sb, *block, iinfo->i_location.partitionReferenceNum, 0); if (!newblock) return NULL; dbh = udf_tgetblk(inode->i_sb, newblock); if (!dbh) return NULL; lock_buffer(dbh); memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(dbh); unlock_buffer(dbh); mark_buffer_dirty_inode(dbh, inode); sfibh.soffset = sfibh.eoffset = f_pos & (inode->i_sb->s_blocksize - 1); sfibh.sbh = sfibh.ebh = NULL; dfibh.soffset = dfibh.eoffset = 0; dfibh.sbh = dfibh.ebh = dbh; while (f_pos < size) { iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL); if (!sfi) { brelse(dbh); return NULL; } iinfo->i_alloc_type = alloctype; sfi->descTag.tagLocation = cpu_to_le32(*block); dfibh.soffset = dfibh.eoffset; dfibh.eoffset += (sfibh.eoffset - sfibh.soffset); dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset); if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse, sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse))) { iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; brelse(dbh); return NULL; } } mark_buffer_dirty_inode(dbh, inode); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; eloc.logicalBlockNum = *block; eloc.partitionReferenceNum = iinfo->i_location.partitionReferenceNum; iinfo->i_lenExtents = inode->i_size; epos.bh = NULL; epos.block = iinfo->i_location; epos.offset = udf_file_entry_alloc_offset(inode); udf_add_aext(inode, &epos, &eloc, inode->i_size, 0); /* UniqueID stuff */ brelse(epos.bh); mark_inode_dirty(inode); return dbh; } static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { int err, new; sector_t phys = 0; struct udf_inode_info *iinfo; if (!create) { phys = udf_block_map(inode, block); if (phys) map_bh(bh_result, inode->i_sb, phys); return 0; } err = -EIO; 
new = 0; iinfo = UDF_I(inode); down_write(&iinfo->i_data_sem); if (block == iinfo->i_next_alloc_block + 1) { iinfo->i_next_alloc_block++; iinfo->i_next_alloc_goal++; } phys = inode_getblk(inode, block, &err, &new); if (!phys) goto abort; if (new) set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, phys); abort: up_write(&iinfo->i_data_sem); return err; } static struct buffer_head *udf_getblk(struct inode *inode, long block, int create, int *err) { struct buffer_head *bh; struct buffer_head dummy; dummy.b_state = 0; dummy.b_blocknr = -1000; *err = udf_get_block(inode, block, &dummy, create); if (!*err && buffer_mapped(&dummy)) { bh = sb_getblk(inode->i_sb, dummy.b_blocknr); if (buffer_new(&dummy)) { lock_buffer(bh); memset(bh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(bh); unlock_buffer(bh); mark_buffer_dirty_inode(bh, inode); } return bh; } return NULL; } /* Extend the file by 'blocks' blocks, return the number of extents added */ static int udf_do_extend_file(struct inode *inode, struct extent_position *last_pos, struct kernel_long_ad *last_ext, sector_t blocks) { sector_t add; int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); struct super_block *sb = inode->i_sb; struct kernel_lb_addr prealloc_loc = {}; int prealloc_len = 0; struct udf_inode_info *iinfo; int err; /* The previous extent is fake and we should not extend by anything * - there's nothing to do... */ if (!blocks && fake) return 0; iinfo = UDF_I(inode); /* Round the last extent up to a multiple of block size */ if (last_ext->extLength & (sb->s_blocksize - 1)) { last_ext->extLength = (last_ext->extLength & UDF_EXTENT_FLAG_MASK) | (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1)); iinfo->i_lenExtents = (iinfo->i_lenExtents + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); } /* Last extent are just preallocated blocks? 
*/ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) { /* Save the extent so that we can reattach it to the end */ prealloc_loc = last_ext->extLocation; prealloc_len = last_ext->extLength; /* Mark the extent as a hole */ last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); last_ext->extLocation.logicalBlockNum = 0; last_ext->extLocation.partitionReferenceNum = 0; } /* Can we merge with the previous extent? */ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) { add = ((1 << 30) - sb->s_blocksize - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits; if (add > blocks) add = blocks; blocks -= add; last_ext->extLength += add << sb->s_blocksize_bits; } if (fake) { udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); count++; } else udf_write_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); /* Managed to do everything necessary? */ if (!blocks) goto out; /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */ last_ext->extLocation.logicalBlockNum = 0; last_ext->extLocation.partitionReferenceNum = 0; add = (1 << (30-sb->s_blocksize_bits)) - 1; last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits); /* Create enough extents to cover the whole hole */ while (blocks > add) { blocks -= add; err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) return err; count++; } if (blocks) { last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (blocks << sb->s_blocksize_bits); err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) return err; count++; } out: /* Do we have some preallocated blocks saved? 
*/ if (prealloc_len) { err = udf_add_aext(inode, last_pos, &prealloc_loc, prealloc_len, 1); if (err) return err; last_ext->extLocation = prealloc_loc; last_ext->extLength = prealloc_len; count++; } /* last_pos should point to the last written extent... */ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) last_pos->offset -= sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) last_pos->offset -= sizeof(struct long_ad); else return -EIO; return count; } static int udf_extend_file(struct inode *inode, loff_t newsize) { struct extent_position epos; struct kernel_lb_addr eloc; uint32_t elen; int8_t etype; struct super_block *sb = inode->i_sb; sector_t first_block = newsize >> sb->s_blocksize_bits, offset; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); struct kernel_long_ad extent; int err; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else BUG(); etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); /* File has extent covering the new size (could happen when extending * inside a block)? */ if (etype != -1) return 0; if (newsize & (sb->s_blocksize - 1)) offset++; /* Extended file just to the boundary of the last file block? */ if (offset == 0) return 0; /* Truncate is extending the file by 'offset' blocks */ if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) || (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { /* File has no extents at all or has empty last * indirect extent! Create a fake extent... 
 */
	/* (tail of udf_extend_file) Extend with a fresh unrecorded,
	 * unallocated extent. */
	extent.extLocation.logicalBlockNum = 0;
	extent.extLocation.partitionReferenceNum = 0;
	extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
	} else {
		/* Re-read the last extent so udf_do_extend_file() can grow
		 * it in place; restore the type bits into extLength. */
		epos.offset -= adsize;
		etype = udf_next_aext(inode, &epos, &extent.extLocation,
				      &extent.extLength, 0);
		extent.extLength |= etype << 30;
	}
	err = udf_do_extend_file(inode, &epos, &extent, offset);
	if (err < 0)
		goto out;
	err = 0;
	iinfo->i_lenExtents = newsize;
out:
	brelse(epos.bh);
	return err;
}

/*
 * Map logical block 'block' of the inode to a physical block, allocating
 * a new block (and possibly extending the file past EOF) when necessary.
 *
 * On success returns the physical block number; *new is set to 1 when a
 * block was freshly allocated.  On failure returns 0 and stores a negative
 * errno in *err.
 */
static sector_t inode_getblk(struct inode *inode, sector_t block,
			     int *err, int *new)
{
	/* NOTE(review): function-local static that is assigned below
	 * ("last_block = block;") but never read anywhere in this file as
	 * visible here — looks like dead state, and being static it is also
	 * shared across all mounts/threads.  Confirm against callers before
	 * removing. */
	static sector_t last_block;
	struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
	struct extent_position prev_epos, cur_epos, next_epos;
	int count = 0, startnum = 0, endnum = 0;
	uint32_t elen = 0, tmpelen;
	struct kernel_lb_addr eloc, tmpeloc;
	int c = 1;
	loff_t lbcount = 0, b_off = 0;
	uint32_t newblocknum, newblock;
	sector_t offset = 0;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(inode);
	int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
	int lastblock = 0;

	*err = 0;
	*new = 0;
	prev_epos.offset = udf_file_entry_alloc_offset(inode);
	prev_epos.block = iinfo->i_location;
	prev_epos.bh = NULL;
	cur_epos = next_epos = prev_epos;
	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;

	/* find the extent which contains the block we are looking for.
	   alternate between laarr[0] and laarr[1] for locations of the
	   current extent, and the previous extent */
	do {
		/* Keep three rolling positions (prev/cur/next); transfer the
		 * buffer_head references as the window slides forward. */
		if (prev_epos.bh != cur_epos.bh) {
			brelse(prev_epos.bh);
			get_bh(cur_epos.bh);
			prev_epos.bh = cur_epos.bh;
		}
		if (cur_epos.bh != next_epos.bh) {
			brelse(cur_epos.bh);
			get_bh(next_epos.bh);
			cur_epos.bh = next_epos.bh;
		}

		lbcount += elen;

		prev_epos.block = cur_epos.block;
		cur_epos.block = next_epos.block;

		prev_epos.offset = cur_epos.offset;
		cur_epos.offset = next_epos.offset;

		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
		if (etype == -1)
			break;

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		/* Remember the end of the last materialized extent as an
		 * allocation goal. */
		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				 inode->i_sb->s_blocksize_bits);

		count++;
	} while (lbcount + elen <= b_off);

	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;
	/*
	 * Move prev_epos and cur_epos into indirect extent if we are at
	 * the pointer to it
	 */
	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);

	/* if the extent is allocated and recorded, return the block
	   if the extent is not a multiple of the blocksize, round up */

	if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
		if (elen & (inode->i_sb->s_blocksize - 1)) {
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				 ~(inode->i_sb->s_blocksize - 1));
			udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
		}
		brelse(prev_epos.bh);
		brelse(cur_epos.bh);
		brelse(next_epos.bh);
		newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
		return newblock;
	}

	last_block = block;
	/* Are we beyond EOF? */
	if (etype == -1) {
		int ret;

		if (count) {
			if (c)
				laarr[0] = laarr[1];
			startnum = 1;
		} else {
			/* Create a fake extent when there's not one */
			memset(&laarr[0].extLocation, 0x00,
			       sizeof(struct kernel_lb_addr));
			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
			/* Will udf_do_extend_file() create real extent from
			   a fake one? */
			startnum = (offset > 0);
		}
		/* Create extents for the hole between EOF and offset */
		ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
		if (ret < 0) {
			brelse(prev_epos.bh);
			brelse(cur_epos.bh);
			brelse(next_epos.bh);
			*err = ret;
			return 0;
		}
		c = 0;
		offset = 0;
		count += ret;
		/* We are not covered by a preallocated extent? */
		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
						EXT_NOT_RECORDED_ALLOCATED) {
			/* Is there any real extent? - otherwise we overwrite
			 * the fake one... */
			if (count)
				c = !c;
			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
				inode->i_sb->s_blocksize;
			memset(&laarr[c].extLocation, 0x00,
			       sizeof(struct kernel_lb_addr));
			count++;
			endnum++;
		}
		endnum = c + 1;
		lastblock = 1;
	} else {
		endnum = startnum = ((count > 2) ? 2 : count);

		/* if the current extent is in position 0,
		   swap it with the previous */
		if (!c && count != 1) {
			laarr[2] = laarr[0];
			laarr[0] = laarr[1];
			laarr[1] = laarr[2];
			c = 1;
		}

		/* if the current block is located in an extent,
		   read the next extent */
		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
		if (etype != -1) {
			laarr[c + 1].extLength = (etype << 30) | elen;
			laarr[c + 1].extLocation = eloc;
			count++;
			startnum++;
			endnum++;
		} else
			lastblock = 1;
	}

	/* if the current extent is not recorded but allocated, get the
	 * block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	else { /* otherwise, allocate a new block */
		/* Prefer the goal cached from the last allocation, then the
		 * end of the last real extent, then the ICB location. */
		if (iinfo->i_next_alloc_block == block)
			goal = iinfo->i_next_alloc_goal;

		if (!goal) {
			if (!(goal = pgoal)) /* XXX: what was intended here? */
				goal = iinfo->i_location.logicalBlockNum + 1;
		}

		newblocknum = udf_new_block(inode->i_sb, inode,
				iinfo->i_location.partitionReferenceNum,
				goal, err);
		if (!newblocknum) {
			brelse(prev_epos.bh);
			*err = -ENOSPC;
			return 0;
		}
		iinfo->i_lenExtents += inode->i_sb->s_blocksize;
	}

	/* if the extent the requsted block is located in contains multiple
	 * blocks, split the extent into at most three extents. blocks prior
	 * to requested block, requested block, and blocks after requested
	 * block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

#ifdef UDF_PREALLOCATE
	/* We preallocate blocks only for regular files. It also makes sense
	 * for directories but there's a problem when to drop the
	 * preallocation. We might use some delayed work for that but I feel
	 * it's overengineering for a filesystem like UDF. */
	if (S_ISREG(inode->i_mode))
		udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
#endif

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	 * of extents is greater than the old number, and deleting extents if
	 * the new number of extents is less than the old number */
	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);

	brelse(prev_epos.bh);

	newblock = udf_get_pblock(inode->i_sb, newblocknum,
				iinfo->i_location.partitionReferenceNum, 0);
	if (!newblock) {
		*err = -EIO;
		return 0;
	}
	*new = 1;
	iinfo->i_next_alloc_block = block;
	iinfo->i_next_alloc_goal = newblocknum;
	inode->i_ctime = current_fs_time(inode->i_sb);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);

	return newblock;
}

/*
 * Split the extent laarr[*c] containing the requested block into up to
 * three pieces: blocks before the requested one, the requested block
 * itself (recorded+allocated at newblocknum), and blocks after it.
 * *c and *endnum are updated to reflect the new layout.
 */
static void udf_split_extents(struct inode *inode, int *c, int offset,
			      int newblocknum,
			      struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			      int *endnum)
{
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
	    (laarr[*c].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
		int curr = *c;
		/* Extent length in blocks, rounded up. */
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			    blocksize - 1) >> blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		/* Make room in laarr for the split pieces. */
		if (blen == 1)
			;
		else if (!offset || blen == offset + 1) {
			laarr[curr + 2] = laarr[curr + 1];
			laarr[curr + 1] = laarr[curr];
		} else {
			laarr[curr + 3] = laarr[curr + 1];
			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
		}

		if (offset) {
			/* Blocks before the requested one.  If they were
			 * allocated (but unrecorded), free them and leave an
			 * unallocated extent of the same length instead. */
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
				udf_free_blocks(inode->i_sb, inode,
						&laarr[curr].extLocation,
						0, offset);
				laarr[curr].extLength =
					EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.
						partitionReferenceNum = 0;
			} else
				laarr[curr].extLength = (etype << 30) |
					(offset << blocksize_bits);
			curr++;
			(*c)++;
			(*endnum)++;
		}

		/* The requested block itself: one recorded+allocated block. */
		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I(inode)->i_location.partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			blocksize;
		curr++;

		/* Blocks after the requested one keep the original type. */
		if (blen != offset + 1) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum +=
								offset + 1;
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << blocksize_bits);
			curr++;
			(*endnum)++;
		}
	}
}

/*
 * Preallocate up to UDF_DEFAULT_PREALLOC_BLOCKS blocks after the extent
 * laarr[c] (only when writing at the last block, or when a preallocated
 * extent already follows), and fold the new allocation into laarr.
 */
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
				 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
				 int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c + 1)) {
		if (!lastblock)
			return;
		else
			start = c;
	} else {
		/* An unrecorded-allocated extent already follows; count its
		 * blocks as already-preallocated (currlength). */
		if ((laarr[c + 1].extLength >> 30) ==
					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			start = c + 1;
			length = currlength =
				(((laarr[c + 1].extLength &
					UDF_EXTENT_LENGTH_MASK) +
				  inode->i_sb->s_blocksize - 1) >>
				 inode->i_sb->s_blocksize_bits);
		} else
			start = c;
	}

	/* Count how many following blocks could absorb the preallocation. */
	for (i = start + 1; i <= *endnum; i++) {
		if (i == *endnum) {
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		} else if ((laarr[i].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
			length += (((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
				    inode->i_sb->s_blocksize - 1) >>
				   inode->i_sb->s_blocksize_bits);
		} else
			break;
	}

	if (length) {
		/* First block right after the 'start' extent. */
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			  inode->i_sb->s_blocksize - 1) >>
			 inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
				laarr[start].extLocation.partitionReferenceNum,
				next,
				(UDF_DEFAULT_PREALLOC_BLOCKS > length ?
				 length : UDF_DEFAULT_PREALLOC_BLOCKS) -
				currlength);
		if (numalloc) {
			if (start == (c + 1))
				/* Grow the existing preallocated extent. */
				laarr[start].extLength +=
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
			else {
				/* Insert a fresh unrecorded-allocated extent
				 * right after laarr[c].
				 * NOTE(review): memmove size uses
				 * struct long_ad while laarr holds
				 * struct kernel_long_ad — presumably the same
				 * size; confirm. */
				memmove(&laarr[c + 2], &laarr[c + 1],
					sizeof(struct long_ad) *
					(*endnum - (c + 1)));
				(*endnum)++;
				laarr[c + 1].extLocation.logicalBlockNum = next;
				laarr[c + 1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.
							partitionReferenceNum;
				laarr[c + 1].extLength =
					EXT_NOT_RECORDED_ALLOCATED |
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
				start = c + 1;
			}

			/* Trim the following unallocated extents by the number
			 * of blocks we just materialized. */
			for (i = start + 1; numalloc && i < *endnum; i++) {
				int elen = ((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
					    inode->i_sb->s_blocksize - 1) >>
					inode->i_sb->s_blocksize_bits;

				if (elen > numalloc) {
					laarr[i].extLength -=
						(numalloc <<
						 inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				} else {
					numalloc -= elen;
					if (*endnum > (i + 1))
						memmove(&laarr[i],
							&laarr[i + 1],
							sizeof(struct long_ad) *
							(*endnum - (i + 1)));
					i--;
					(*endnum)--;
				}
			}
			UDF_I(inode)->i_lenExtents +=
				numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}

/*
 * Merge adjacent extents in laarr that are physically contiguous and of
 * the same type, and normalize unrecorded-allocated extents, shrinking
 * *endnum accordingly.
 */
static void udf_merge_extents(struct inode *inode,
			      struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			      int *endnum)
{
	int i;
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	for (i = 0; i < (*endnum - 1); i++) {
		struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
		struct kernel_long_ad *lip1 /*l[i plus 1]*/ =
							&laarr[i + 1];

		/* Same extent type, and either both unrecorded-unallocated or
		 * physically adjacent on disk: candidates for merging. */
		if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
		    (((li->extLength >> 30) ==
			(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
		     ((lip1->extLocation.logicalBlockNum -
		       li->extLocation.logicalBlockNum) ==
			(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			  blocksize - 1) >> blocksize_bits)))) {

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
				/* Combined length would overflow the extent
				 * length field: fill li to the maximum
				 * representable length and push the remainder
				 * into lip1. */
				lip1->extLength = (lip1->extLength -
						   (li->extLength &
						    UDF_EXTENT_LENGTH_MASK) +
						   UDF_EXTENT_LENGTH_MASK) &
						   ~(blocksize - 1);
				li->extLength = (li->extLength &
						 UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) -
						blocksize;
				lip1->extLocation.logicalBlockNum =
					li->extLocation.logicalBlockNum +
					((li->extLength &
						UDF_EXTENT_LENGTH_MASK) >>
						blocksize_bits);
			} else {
				/* Merge lip1 into li and close the gap in
				 * the array. */
				li->extLength = lip1->extLength +
					(((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					  blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if (((li->extLength >> 30) ==
				(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			   ((lip1->extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
			/* Unrecorded-allocated followed by unallocated: free
			 * the allocated blocks and merge as unallocated. */
			udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
					((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
				/* Same overflow handling as above. */
				lip1->extLength = (lip1->extLength -
						   (li->extLength &
						    UDF_EXTENT_LENGTH_MASK) +
						   UDF_EXTENT_LENGTH_MASK) &
						   ~(blocksize - 1);
				li->extLength = (li->extLength &
						 UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) -
						blocksize;
			} else {
				li->extLength = lip1->extLength +
					(((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					  blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if ((li->extLength >> 30) ==
					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			/* Lone unrecorded-allocated extent: release the disk
			 * blocks and demote it to unallocated. */
			udf_free_blocks(inode->i_sb, inode,
					&li->extLocation, 0,
					((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;
			li->extLength = (li->extLength &
						UDF_EXTENT_LENGTH_MASK) |
						EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}

/*
 * Write the merged extent list laarr back on disk starting at *epos:
 * delete surplus on-disk extents when the list shrank (startnum > endnum),
 * insert new ones when it grew, then overwrite the remainder in place.
 */
static void udf_update_extents(struct inode *inode,
			       struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			       int startnum, int endnum,
			       struct extent_position *epos)
{
	int start = 0, i;
	struct kernel_lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum) {
		for (i = 0; i < (startnum - endnum); i++)
			udf_delete_aext(inode, *epos, laarr[i].extLocation,
					laarr[i].extLength);
	} else if (startnum < endnum) {
		for (i = 0; i < (endnum - startnum); i++) {
			udf_insert_aext(inode, *epos, laarr[i].extLocation,
					laarr[i].extLength);
			udf_next_aext(inode, epos, &laarr[i].extLocation,
				      &laarr[i].extLength, 1);
			start++;
		}
	}

	for (i = start; i < endnum; i++) {
		/* Advance epos without consuming (inc=0), then overwrite. */
		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
		udf_write_aext(inode, epos, &laarr[i].extLocation,
			       laarr[i].extLength, 1);
	}
}

/*
 * Read (and optionally create) logical block 'block' of the inode and
 * return an up-to-date buffer_head, or NULL with *err set on failure.
 * The caller must brelse() the returned buffer.
 */
struct buffer_head *udf_bread(struct inode *inode, int block,
			      int create, int *err)
{
	struct buffer_head *bh = NULL;

	bh = udf_getblk(inode, block, create, err);
	if (!bh)
		return NULL;

	if (buffer_uptodate(bh))
		return bh;

	ll_rw_block(READ, 1, &bh);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);
	*err = -EIO;
	return NULL;
}

/*
 * Change the size of the inode to newsize, growing (extend/expand) or
 * shrinking (truncate) as needed.  Only regular files, directories and
 * symlinks are accepted; append-only/immutable inodes are rejected.
 * Returns 0 or a negative errno.
 */
int udf_setsize(struct inode *inode, loff_t newsize)
{
	int err;
	struct udf_inode_info *iinfo;
	int bsize = 1 << inode->i_blkbits;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	iinfo = UDF_I(inode);
	if (newsize > inode->i_size) {
		down_write(&iinfo->i_data_sem);
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			/* Data no longer fits inside the ICB: convert to
			 * extent-based allocation first. */
			if (bsize <
			    (udf_file_entry_alloc_offset(inode) + newsize)) {
				err = udf_expand_file_adinicb(inode);
				if (err)
					return err;
				/* NOTE(review): i_data_sem is re-acquired
				 * here, so udf_expand_file_adinicb()
				 * presumably drops it — confirm in its
				 * definition (not visible in this chunk). */
				down_write(&iinfo->i_data_sem);
			} else
				iinfo->i_lenAlloc = newsize;
		}
		err = udf_extend_file(inode, newsize);
		if (err) {
			up_write(&iinfo->i_data_sem);
			return err;
		}
		truncate_setsize(inode, newsize);
		up_write(&iinfo->i_data_sem);
	} else {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			/* In-ICB data: just zero the tail beyond newsize. */
			down_write(&iinfo->i_data_sem);
			memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
			       0x00, bsize - newsize -
			       udf_file_entry_alloc_offset(inode));
			iinfo->i_lenAlloc = newsize;
			truncate_setsize(inode, newsize);
			up_write(&iinfo->i_data_sem);
			goto update_time;
		}
		err = block_truncate_page(inode->i_mapping, newsize,
					  udf_get_block);
		if (err)
			return err;
		down_write(&iinfo->i_data_sem);
		truncate_setsize(inode, newsize);
		udf_truncate_extents(inode);
		up_write(&iinfo->i_data_sem);
	}
update_time:
	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return 0;
}

/*
 * Read the on-disk (Extended) File Entry for the inode and populate the
 * in-core inode via udf_fill_inode().  Follows strategy-4096 indirect
 * entries by recursing on the redirected ICB location.  Marks the inode
 * bad on any read/identification failure.
 */
static void __udf_read_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	uint16_t ident;
	struct udf_inode_info *iinfo = UDF_I(inode);

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_no = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
	bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
	if (!bh) {
		udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
		make_bad_inode(inode);
		return;
	}

	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
	    ident != TAG_IDENT_USE) {
		udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
			inode->i_ino, ident);
		brelse(bh);
		make_bad_inode(inode);
		return;
	}

	fe = (struct fileEntry *)bh->b_data;

	if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
		struct buffer_head *ibh;

		/* Strategy 4096: an Indirect Entry may redirect us to the
		 * most recent File Entry in the chain. */
		ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
					&ident);
		if (ident == TAG_IDENT_IE && ibh) {
			struct buffer_head *nbh = NULL;
			struct kernel_lb_addr loc;
			struct indirectEntry *ie;

			ie = (struct indirectEntry *)ibh->b_data;
			loc = lelb_to_cpu(ie->indirectICB.extLocation);

			if (ie->indirectICB.extLength &&
				(nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
							&ident))) {
				if (ident == TAG_IDENT_FE ||
					ident == TAG_IDENT_EFE) {
					/* Re-read from the redirected ICB. */
					memcpy(&iinfo->i_location,
						&loc,
						sizeof(struct kernel_lb_addr));
					brelse(bh);
					brelse(ibh);
					brelse(nbh);
					__udf_read_inode(inode);
					return;
				}
				brelse(nbh);
			}
		}
		brelse(ibh);
	} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
		udf_err(inode->i_sb, "unsupported strategy type: %d\n",
			le16_to_cpu(fe->icbTag.strategyType));
		brelse(bh);
		make_bad_inode(inode);
		return;
	}
	udf_fill_inode(inode, bh);

	brelse(bh);
}

/*
 * Populate the in-core inode from the (Extended) File Entry or Unallocated
 * Space Entry held in bh: allocation type, in-ICB data copy, uid/gid/mode,
 * link count, sizes, timestamps, and the file-type-specific operations.
 */
static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
{
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	int offset;
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	struct udf_inode_info *iinfo = UDF_I(inode);
	unsigned int link_count;

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (fe->icbTag.strategyType == cpu_to_le16(4))
		iinfo->i_strat4096 = 0;
	else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
		iinfo->i_strat4096 = 1;

	iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
							ICBTAG_FLAG_AD_MASK;
	iinfo->i_unique = 0;
	iinfo->i_lenEAttr = 0;
	iinfo->i_lenExtents = 0;
	iinfo->i_lenAlloc = 0;
	iinfo->i_next_alloc_block = 0;
	iinfo->i_next_alloc_goal = 0;
	/* Copy the body of the descriptor (after the fixed header) into
	 * i_ext.i_data; which header applies depends on the tag ident. */
	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
		iinfo->i_efe = 1;
		iinfo->i_use = 0;
		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry))) {
			make_bad_inode(inode);
			return;
		}
		memcpy(iinfo->i_ext.i_data,
		       bh->b_data + sizeof(struct extendedFileEntry),
		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 0;
		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
						sizeof(struct fileEntry))) {
			make_bad_inode(inode);
			return;
		}
		memcpy(iinfo->i_ext.i_data,
		       bh->b_data + sizeof(struct fileEntry),
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
		/* Unallocated Space Entry carries no ownership/time data;
		 * done once its allocation descriptors are copied. */
		iinfo->i_efe = 0;
		iinfo->i_use = 1;
		iinfo->i_lenAlloc = le32_to_cpu(
				((struct unallocSpaceEntry *)bh->b_data)->
				 lengthAllocDescs);
		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry))) {
			make_bad_inode(inode);
			return;
		}
		memcpy(iinfo->i_ext.i_data,
		       bh->b_data + sizeof(struct unallocSpaceEntry),
		       inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		return;
	}

	/* uid/gid: on-disk values unless mount options override them. */
	read_lock(&sbi->s_cred_lock);
	inode->i_uid = le32_to_cpu(fe->uid);
	if (inode->i_uid == -1 ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;

	inode->i_gid = le32_to_cpu(fe->gid);
	if (inode->i_gid == -1 ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;

	/* Mode: mount-option fmode/dmode override the on-disk permissions. */
	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
			sbi->s_fmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_fmode;
	else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
			sbi->s_dmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_dmode;
	else
		inode->i_mode = udf_convert_permissions(fe);
	inode->i_mode &= ~sbi->s_umask;
	read_unlock(&sbi->s_cred_lock);

	link_count = le16_to_cpu(fe->fileLinkCount);
	if (!link_count)
		link_count = 1;
	set_nlink(inode, link_count);

	inode->i_size = le64_to_cpu(fe->informationLength);
	iinfo->i_lenExtents = inode->i_size;

	/* Timestamps/lengths live at different offsets in FE vs. EFE;
	 * fall back to the volume record time when a stamp is invalid. */
	if (iinfo->i_efe == 0) {
		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
			inode->i_atime = sbi->s_record_time;

		if (!udf_disk_stamp_to_time(&inode->i_mtime,
					    fe->modificationTime))
			inode->i_mtime = sbi->s_record_time;

		if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
			inode->i_ctime = sbi->s_record_time;

		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
		offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr;
	} else {
		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
		    (inode->i_sb->s_blocksize_bits - 9);

		if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
			inode->i_atime = sbi->s_record_time;

		if (!udf_disk_stamp_to_time(&inode->i_mtime,
					    efe->modificationTime))
			inode->i_mtime = sbi->s_record_time;

		if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
			iinfo->i_crtime = sbi->s_record_time;

		if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
			inode->i_ctime = sbi->s_record_time;

		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
		offset = sizeof(struct extendedFileEntry) +
							iinfo->i_lenEAttr;
	}

	/* Wire up the operations matching the on-disk file type. */
	switch (fe->icbTag.fileType) {
	case ICBTAG_FILE_TYPE_DIRECTORY:
		inode->i_op = &udf_dir_inode_operations;
		inode->i_fop = &udf_dir_operations;
		inode->i_mode |= S_IFDIR;
		inc_nlink(inode);
		break;
	case ICBTAG_FILE_TYPE_REALTIME:
	case ICBTAG_FILE_TYPE_REGULAR:
	case ICBTAG_FILE_TYPE_UNDEF:
	case ICBTAG_FILE_TYPE_VAT20:
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
			inode->i_data.a_ops = &udf_adinicb_aops;
		else
			inode->i_data.a_ops = &udf_aops;
		inode->i_op = &udf_file_inode_operations;
		inode->i_fop = &udf_file_operations;
		inode->i_mode |= S_IFREG;
		break;
	case ICBTAG_FILE_TYPE_BLOCK:
		inode->i_mode |= S_IFBLK;
		break;
	case ICBTAG_FILE_TYPE_CHAR:
		inode->i_mode |= S_IFCHR;
		break;
	case ICBTAG_FILE_TYPE_FIFO:
		init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
		break;
	case ICBTAG_FILE_TYPE_SOCKET:
		init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
		break;
	case ICBTAG_FILE_TYPE_SYMLINK:
		inode->i_data.a_ops = &udf_symlink_aops;
		inode->i_op = &udf_symlink_inode_operations;
		inode->i_mode = S_IFLNK | S_IRWXUGO;
		break;
	case ICBTAG_FILE_TYPE_MAIN:
		udf_debug("METADATA FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_MIRROR:
		udf_debug("METADATA MIRROR FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_BITMAP:
		udf_debug("METADATA BITMAP FILE-----\n");
		break;
	default:
		udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
			inode->i_ino, fe->icbTag.fileType);
		make_bad_inode(inode);
		return;
	}
	/* Device nodes carry major/minor in a deviceSpec extended attr. */
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (dsea) {
			init_special_inode(inode, inode->i_mode,
				MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
				      le32_to_cpu(dsea->minorDeviceIdent)));
			/* Developer ID ??? */
		} else
			make_bad_inode(inode);
	}
}

/*
 * Allocate the in-core buffer that mirrors the descriptor body
 * (iinfo->i_ext.i_data).  Returns 0 or -ENOMEM.
 */
static int udf_alloc_i_data(struct inode *inode, size_t size)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
	if (!iinfo->i_ext.i_data) {
		udf_err(inode->i_sb, "(ino %ld) no free memory\n",
			inode->i_ino);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Convert UDF on-disk permission and ICB flag bits into a POSIX mode.
 * UDF stores owner/group/other permissions in separate nibble-spaced
 * groups; shift them down into the rwx bit positions.
 */
static umode_t udf_convert_permissions(struct fileEntry *fe)
{
	umode_t mode;
	uint32_t permissions;
	uint32_t flags;

	permissions = le32_to_cpu(fe->permissions);
	flags = le16_to_cpu(fe->icbTag.flags);

	mode = ((permissions) & S_IRWXO) |
		((permissions >> 2) & S_IRWXG) |
		((permissions >> 4) & S_IRWXU) |
		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);

	return mode;
}

/* Writeback entry point: synchronous only for WB_SYNC_ALL. */
int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

/* Force a synchronous write of the inode's on-disk descriptor. */
static int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, 1);
}

/*
 * Serialize the in-core inode back into its on-disk (Extended) File Entry
 * or Unallocated Space Entry, recompute the descriptor tag CRC/checksum,
 * and mark (or synchronously write) the buffer.  Returns 0 or -errno.
 */
static int udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint64_t lb_recorded;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int err = 0;
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	struct udf_inode_info *iinfo = UDF_I(inode);

	bh = udf_tgetblk(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
	if (!bh) {
		udf_debug("getblk failure\n");
		return -ENOMEM;
	}

	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (iinfo->i_use) {
		/* Unallocated Space Entry: just the allocation descriptors
		 * plus a freshly-computed tag. */
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		use->descTag.tagIdent =
				cpu_to_le16(TAG_IDENT_USE);
		use->descTag.tagLocation =
				cpu_to_le32(iinfo->i_location.logicalBlockNum);
		crclen = sizeof(struct unallocSpaceEntry) +
				iinfo->i_lenAlloc - sizeof(struct tag);
		use->descTag.descCRCLength = cpu_to_le16(crclen);
		use->descTag.descCRC =
			cpu_to_le16(crc_itu_t(0,
					      (char *)use + sizeof(struct tag),
					      crclen));
		use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);

		goto out;
	}

	/* uid/gid: optionally "forget" (write -1) per mount options. */
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(-1);
	else
		fe->uid = cpu_to_le32(inode->i_uid);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(-1);
	else
		fe->gid = cpu_to_le32(inode->i_gid);

	/* Re-spread POSIX rwx bits into UDF's nibble-spaced groups and
	 * keep the on-disk delete/chattr permission bits intact. */
	udfperms = ((inode->i_mode & S_IRWXO)) |
		   ((inode->i_mode & S_IRWXG) << 2) |
		   ((inode->i_mode & S_IRWXU) << 4);

	udfperms |= (le32_to_cpu(fe->permissions) &
		    (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
		     FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
		     FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
	fe->permissions = cpu_to_le32(udfperms);

	/* Directories don't count their "." link on disk. */
	if (S_ISDIR(inode->i_mode))
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Device nodes: store major/minor in a deviceSpec extended
		 * attribute, creating it if absent. */
		struct regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (!dsea) {
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
						     sizeof(struct deviceSpec) +
						     sizeof(struct regid),
						     12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(
						sizeof(struct deviceSpec) +
						sizeof(struct regid));
			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
		}
		eid = (struct regid *)dsea->impUse;
		memset(eid, 0, sizeof(struct regid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		lb_recorded = 0; /* No extents => no blocks! */
	else
		lb_recorded =
			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
			(blocksize_bits - 9);

	if (iinfo->i_efe == 0) {
		/* Plain File Entry layout. */
		memcpy(bh->b_data + sizeof(struct fileEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
		memset(&(fe->impIdent), 0, sizeof(struct regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	} else {
		/* Extended File Entry layout: additionally has objectSize,
		 * createTime, etc. */
		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
		efe->objectSize = cpu_to_le64(inode->i_size);
		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		/* Clamp crtime so it never exceeds atime/mtime/ctime. */
		if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
		    (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
		     iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec))
			iinfo->i_crtime = inode->i_atime;

		if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec ||
		    (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec &&
		     iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec))
			iinfo->i_crtime = inode->i_mtime;

		if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec ||
		    (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec &&
		     iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
			iinfo->i_crtime = inode->i_ctime;

		udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
		udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);

		memset(&(efe->impIdent), 0, sizeof(struct regid));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}
	if (iinfo->i_strat4096) {
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	} else {
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	/* Rebuild the ICB flags: allocation type + setuid/setgid/sticky,
	 * preserving any other flag bits already present. */
	icbflags = iinfo->i_alloc_type |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ?
				ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				  ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	/* Descriptor version: 3 for UDF >= 2.00, else 2. */
	if (sbi->s_udfrev >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
	fe->descTag.tagLocation = cpu_to_le32(
					iinfo->i_location.logicalBlockNum);
	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe +
						  sizeof(struct tag),
						  crclen));
	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);

out:
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_write_io_error(bh)) {
			udf_warn(inode->i_sb,
				 "IO error syncing udf inode [%08lx]\n",
				 inode->i_ino);
			err = -EIO;
		}
	}
	brelse(bh);

	return err;
}

/*
 * Look up (or read in) the inode at logical address *ino.  Returns a
 * referenced inode, or NULL when the read failed or the address lies
 * outside the partition.
 */
struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);

	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		memcpy(&UDF_I(inode)->i_location, ino,
		       sizeof(struct kernel_lb_addr));
		__udf_read_inode(inode);
		unlock_new_inode(inode);
	}

	if (is_bad_inode(inode))
		goto out_iput;

	/* Sanity-check the block number against the partition length. */
	if (ino->logicalBlockNum >= UDF_SB(sb)->
			s_partmaps[ino->partitionReferenceNum].s_partition_len) {
		udf_debug("block=%d, partition=%d out of range\n",
			  ino->logicalBlockNum, ino->partitionReferenceNum);
		make_bad_inode(inode);
		goto out_iput;
	}

	return inode;

out_iput:
	iput(inode);
	return NULL;
}

/*
 * Append the extent (eloc, elen) at position *epos, allocating and
 * chaining a new Allocation Extent Descriptor block when the current
 * descriptor area is full.  Returns 0 or a negative errno.
 */
int udf_add_aext(struct inode *inode, struct extent_position *epos,
		 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	struct short_ad *sad = NULL;
	struct long_ad *lad = NULL;
	struct allocExtDesc *aed;
	uint8_t *ptr;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh)
		ptr =
			iinfo->i_ext.i_data + epos->offset -
				udf_file_entry_alloc_offset(inode) +
				iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	/* Not enough room for this descriptor plus the continuation entry:
	 * allocate a new AED block and chain to it. */
	if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
		unsigned char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		struct kernel_lb_addr obloc = epos->block;

		epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
						obloc.partitionReferenceNum,
						obloc.logicalBlockNum, &err);
		if (!epos->block.logicalBlockNum)
			return -ENOSPC;
		nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
								 &epos->block,
								 0));
		if (!nbh)
			return -EIO;
		lock_buffer(nbh);
		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(nbh);
		unlock_buffer(nbh);
		mark_buffer_dirty_inode(nbh, inode);

		aed = (struct allocExtDesc *)(nbh->b_data);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
			aed->previousAllocExtLocation =
					cpu_to_le32(obloc.logicalBlockNum);
		if (epos->offset + adsize > inode->i_sb->s_blocksize) {
			/* No room even for one more descriptor: move the last
			 * one into the new block to make room for the
			 * continuation pointer. */
			loffset = epos->offset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = ptr - adsize;
			dptr = nbh->b_data + sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			epos->offset = sizeof(struct allocExtDesc) + adsize;
		} else {
			/* Room for exactly one: the continuation pointer goes
			 * where the next descriptor would have. */
			loffset = epos->offset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = ptr;
			epos->offset = sizeof(struct allocExtDesc);

			if (epos->bh) {
				aed = (struct allocExtDesc *)epos->bh->b_data;
				le32_add_cpu(&aed->lengthAllocDescs, adsize);
			} else {
				iinfo->i_lenAlloc += adsize;
				mark_inode_dirty(inode);
			}
		}
		if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
				    epos->block.logicalBlockNum,
				    sizeof(struct tag));
		else
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
				    epos->block.logicalBlockNum,
				    sizeof(struct tag));
		/* Write the continuation descriptor pointing at the new
		 * AED block. */
		switch (iinfo->i_alloc_type) {
		case ICBTAG_FLAG_AD_SHORT:
			sad = (struct short_ad *)sptr;
			sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
						     inode->i_sb->s_blocksize);
			sad->extPosition =
				cpu_to_le32(epos->block.logicalBlockNum);
			break;
		case ICBTAG_FLAG_AD_LONG:
			lad = (struct long_ad *)sptr;
			lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
						     inode->i_sb->s_blocksize);
			lad->extLocation = cpu_to_lelb(epos->block);
			memset(lad->impUse, 0x00, sizeof(lad->impUse));
			break;
		}
		if (epos->bh) {
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(epos->bh->b_data, loffset);
			else
				udf_update_tag(epos->bh->b_data,
						sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(epos->bh, inode);
			brelse(epos->bh);
		} else {
			mark_inode_dirty(inode);
		}
		epos->bh = nbh;
	}

	udf_write_aext(inode, epos, eloc, elen, inc);

	/* Account for the newly-added descriptor. */
	if (!epos->bh) {
		iinfo->i_lenAlloc += adsize;
		mark_inode_dirty(inode);
	} else {
		aed = (struct allocExtDesc *)epos->bh->b_data;
		le32_add_cpu(&aed->lengthAllocDescs, adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
			udf_update_tag(epos->bh->b_data,
					epos->offset + (inc ? 0 : adsize));
		else
			udf_update_tag(epos->bh->b_data,
					sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(epos->bh, inode);
	}

	return 0;
}

/*
 * Overwrite the allocation descriptor at *epos with (eloc, elen); when
 * inc is set, advance epos past the descriptor just written.  Unknown
 * allocation types are silently ignored.
 */
void udf_write_aext(struct inode *inode, struct extent_position *epos,
		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh)
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = (struct short_ad *)ptr;
		sad->extLength = cpu_to_le32(elen);
		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
		adsize = sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = (struct long_ad *)ptr;
		lad->extLength = cpu_to_le32(elen);
		lad->extLocation = cpu_to_lelb(*eloc);
		memset(lad->impUse, 0x00, sizeof(lad->impUse));
		adsize = sizeof(struct long_ad);
		break;
	default:
		return;
	}

	if (epos->bh) {
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
			struct allocExtDesc *aed =
				(struct allocExtDesc *)epos->bh->b_data;
			udf_update_tag(epos->bh->b_data,
				       le32_to_cpu(aed->lengthAllocDescs) +
				       sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(epos->bh, inode);
	} else {
		mark_inode_dirty(inode);
	}

	if (inc)
		epos->offset += adsize;
}

/*
 * Fetch the next allocation descriptor at *epos, transparently following
 * "next extent" (AED continuation) pointers into their blocks.  Returns
 * the extent type, or -1 at end of list / read failure.
 */
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int8_t etype;

	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
	       (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
		int block;
		epos->block = *eloc;
		epos->offset = sizeof(struct allocExtDesc);
		brelse(epos->bh);
		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
		epos->bh = udf_tread(inode->i_sb, block);
		if (!epos->bh) {
			udf_debug("reading block %d failed!\n", block);
			return -1;
		}
	}

	return etype;
}

/*
 * Decode the allocation descriptor at *epos (short_ad or long_ad) into
 * (eloc, elen) and return its extent type; -1 on end of area or
 * unsupported allocation type.  Advances epos->offset when inc is set.
 */
int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t *elen, int inc) { int alen; int8_t etype; uint8_t *ptr; struct short_ad *sad; struct long_ad *lad; struct udf_inode_info *iinfo = UDF_I(inode); if (!epos->bh) { if (!epos->offset) epos->offset = udf_file_entry_alloc_offset(inode); ptr = iinfo->i_ext.i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; alen = udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc; } else { if (!epos->offset) epos->offset = sizeof(struct allocExtDesc); ptr = epos->bh->b_data + epos->offset; alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)-> lengthAllocDescs); } switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc); if (!sad) return -1; etype = le32_to_cpu(sad->extLength) >> 30; eloc->logicalBlockNum = le32_to_cpu(sad->extPosition); eloc->partitionReferenceNum = iinfo->i_location.partitionReferenceNum; *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK; break; case ICBTAG_FLAG_AD_LONG: lad = udf_get_filelongad(ptr, alen, &epos->offset, inc); if (!lad) return -1; etype = le32_to_cpu(lad->extLength) >> 30; *eloc = lelb_to_cpu(lad->extLocation); *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK; break; default: udf_debug("alloc_type = %d unsupported\n", iinfo->i_alloc_type); return -1; } return etype; } static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos, struct kernel_lb_addr neloc, uint32_t nelen) { struct kernel_lb_addr oeloc; uint32_t oelen; int8_t etype; if (epos.bh) get_bh(epos.bh); while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) { udf_write_aext(inode, &epos, &neloc, nelen, 1); neloc = oeloc; nelen = (etype << 30) | oelen; } udf_add_aext(inode, &epos, &neloc, nelen, 1); brelse(epos.bh); return (nelen >> 30); } int8_t udf_delete_aext(struct inode *inode, struct extent_position epos, struct kernel_lb_addr eloc, uint32_t elen) { struct 
extent_position oepos; int adsize; int8_t etype; struct allocExtDesc *aed; struct udf_inode_info *iinfo; if (epos.bh) { get_bh(epos.bh); get_bh(epos.bh); } iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else adsize = 0; oepos = epos; if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1) return -1; while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1); if (oepos.bh != epos.bh) { oepos.block = epos.block; brelse(oepos.bh); get_bh(epos.bh); oepos.bh = epos.bh; oepos.offset = epos.offset - adsize; } } memset(&eloc, 0x00, sizeof(struct kernel_lb_addr)); elen = 0; if (epos.bh != oepos.bh) { udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1); udf_write_aext(inode, &oepos, &eloc, elen, 1); udf_write_aext(inode, &oepos, &eloc, elen, 1); if (!oepos.bh) { iinfo->i_lenAlloc -= (adsize * 2); mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)oepos.bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize)); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(oepos.bh->b_data, oepos.offset - (2 * adsize)); else udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(oepos.bh, inode); } } else { udf_write_aext(inode, &oepos, &eloc, elen, 1); if (!oepos.bh) { iinfo->i_lenAlloc -= adsize; mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)oepos.bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, -adsize); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(oepos.bh->b_data, epos.offset - adsize); else udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(oepos.bh, inode); } } brelse(epos.bh); brelse(oepos.bh); return (elen >> 30); } int8_t inode_bmap(struct 
inode *inode, sector_t block, struct extent_position *pos, struct kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset) { unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits; int8_t etype; struct udf_inode_info *iinfo; iinfo = UDF_I(inode); pos->offset = 0; pos->block = iinfo->i_location; pos->bh = NULL; *elen = 0; do { etype = udf_next_aext(inode, pos, eloc, elen, 1); if (etype == -1) { *offset = (bcount - lbcount) >> blocksize_bits; iinfo->i_lenExtents = lbcount; return -1; } lbcount += *elen; } while (lbcount <= bcount); *offset = (bcount + *elen - lbcount) >> blocksize_bits; return etype; } long udf_block_map(struct inode *inode, sector_t block) { struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; int ret; down_read(&UDF_I(inode)->i_data_sem); if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset); else ret = 0; up_read(&UDF_I(inode)->i_data_sem); brelse(epos.bh); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV)) return udf_fixed_to_variable(ret); else return ret; }
gpl-2.0
qiyei2015/firefly-rk3288-pad-kernel-linux3.10
arch/parisc/kernel/binfmt_elf32.c
4265
3588
/* * Support for 32-bit Linux/Parisc ELF binaries on 64 bit kernels * * Copyright (C) 2000 John Marvin * Copyright (C) 2000 Hewlett Packard Co. * * Heavily inspired from various other efforts to do the same thing * (ia64,sparc64/mips64) */ /* Make sure include/asm-parisc/elf.h does the right thing */ #define ELF_CLASS ELFCLASS32 #define ELF_CORE_COPY_REGS(dst, pt) \ memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \ { int i; \ for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \ for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \ } \ dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \ dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \ dst[44] = (elf_greg_t) pt->sar; dst[45] = (elf_greg_t) pt->iir; \ dst[46] = (elf_greg_t) pt->isr; dst[47] = (elf_greg_t) pt->ior; \ dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \ dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \ dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \ dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \ dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \ dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \ dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \ dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15); typedef unsigned int elf_greg_t; #include <linux/spinlock.h> #include <asm/processor.h> #include <linux/module.h> #include <linux/elfcore.h> #include <linux/compat.h> /* struct compat_timeval */ #define elf_prstatus elf_prstatus32 struct elf_prstatus32 { struct elf_siginfo pr_info; /* Info associated with signal */ short pr_cursig; /* Current signal */ unsigned int pr_sigpend; /* Set of pending signals */ unsigned int pr_sighold; /* Set of held signals */ pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; struct compat_timeval pr_utime; /* User time */ struct compat_timeval 
pr_stime; /* System time */ struct compat_timeval pr_cutime; /* Cumulative user time */ struct compat_timeval pr_cstime; /* Cumulative system time */ elf_gregset_t pr_reg; /* GP registers */ int pr_fpvalid; /* True if math co-processor being used. */ }; #define elf_prpsinfo elf_prpsinfo32 struct elf_prpsinfo32 { char pr_state; /* numeric process state */ char pr_sname; /* char for pr_state */ char pr_zomb; /* zombie */ char pr_nice; /* nice val */ unsigned int pr_flag; /* flags */ u16 pr_uid; u16 pr_gid; pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; /* Lots missing */ char pr_fname[16]; /* filename of executable */ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; #define init_elf_binfmt init_elf32_binfmt #define ELF_PLATFORM ("PARISC32\0") /* * We should probably use this macro to set a flag somewhere to indicate * this is a 32 on 64 process. We could use PER_LINUX_32BIT, or we * could set a processor dependent flag in the thread_struct. */ #undef SET_PERSONALITY #define SET_PERSONALITY(ex) \ set_thread_flag(TIF_32BIT); \ current->thread.map_base = DEFAULT_MAP_BASE32; \ current->thread.task_size = DEFAULT_TASK_SIZE32 \ #undef cputime_to_timeval #define cputime_to_timeval cputime_to_compat_timeval static __inline__ void cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) { unsigned long jiffies = cputime_to_jiffies(cputime); value->tv_usec = (jiffies % HZ) * (1000000L / HZ); value->tv_sec = jiffies / HZ; } #include "../../../fs/binfmt_elf.c"
gpl-2.0
kogone/android_kernel_lge_hammerhead
drivers/gpu/drm/exynos/exynos_drm_encoder.c
5033
12709
/* exynos_drm_encoder.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * Authors: * Inki Dae <inki.dae@samsung.com> * Joonyoung Shim <jy0922.shim@samsung.com> * Seung-Woo Kim <sw0312.kim@samsung.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include "drmP.h" #include "drm_crtc_helper.h" #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" #include "exynos_drm_encoder.h" #define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\ drm_encoder) /* * exynos specific encoder structure. * * @drm_encoder: encoder object. * @manager: specific encoder has its own manager to control a hardware * appropriately and we can access a hardware drawing on this manager. * @dpms: store the encoder dpms value. 
*/ struct exynos_drm_encoder { struct drm_encoder drm_encoder; struct exynos_drm_manager *manager; int dpms; }; static void exynos_drm_display_power(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder == encoder) { struct exynos_drm_display_ops *display_ops = manager->display_ops; DRM_DEBUG_KMS("connector[%d] dpms[%d]\n", connector->base.id, mode); if (display_ops && display_ops->power_on) display_ops->power_on(manager->dev, mode); } } } static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); struct exynos_drm_manager_ops *manager_ops = manager->ops; struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); if (exynos_encoder->dpms == mode) { DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); return; } mutex_lock(&dev->struct_mutex); switch (mode) { case DRM_MODE_DPMS_ON: if (manager_ops && manager_ops->apply) manager_ops->apply(manager->dev); exynos_drm_display_power(encoder, mode); exynos_encoder->dpms = mode; break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: exynos_drm_display_power(encoder, mode); exynos_encoder->dpms = mode; break; default: DRM_ERROR("unspecified mode %d\n", mode); break; } mutex_unlock(&dev->struct_mutex); } static bool exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); struct exynos_drm_manager_ops *manager_ops = manager->ops; DRM_DEBUG_KMS("%s\n", 
__FILE__); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder == encoder) if (manager_ops && manager_ops->mode_fixup) manager_ops->mode_fixup(manager->dev, connector, mode, adjusted_mode); } return true; } static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); struct exynos_drm_manager_ops *manager_ops = manager->ops; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; struct exynos_drm_overlay *overlay = get_exynos_drm_overlay(dev, encoder->crtc); DRM_DEBUG_KMS("%s\n", __FILE__); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder == encoder) { if (manager_ops && manager_ops->mode_set) manager_ops->mode_set(manager->dev, adjusted_mode); if (overlay_ops && overlay_ops->mode_set) overlay_ops->mode_set(manager->dev, overlay); } } } static void exynos_drm_encoder_prepare(struct drm_encoder *encoder) { DRM_DEBUG_KMS("%s\n", __FILE__); /* drm framework doesn't check NULL. 
*/ } static void exynos_drm_encoder_commit(struct drm_encoder *encoder) { struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); struct exynos_drm_manager_ops *manager_ops = manager->ops; DRM_DEBUG_KMS("%s\n", __FILE__); if (manager_ops && manager_ops->commit) manager_ops->commit(manager->dev); } static struct drm_crtc * exynos_drm_encoder_get_crtc(struct drm_encoder *encoder) { return encoder->crtc; } static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { .dpms = exynos_drm_encoder_dpms, .mode_fixup = exynos_drm_encoder_mode_fixup, .mode_set = exynos_drm_encoder_mode_set, .prepare = exynos_drm_encoder_prepare, .commit = exynos_drm_encoder_commit, .get_crtc = exynos_drm_encoder_get_crtc, }; static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) { struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); DRM_DEBUG_KMS("%s\n", __FILE__); exynos_encoder->manager->pipe = -1; drm_encoder_cleanup(encoder); kfree(exynos_encoder); } static struct drm_encoder_funcs exynos_encoder_funcs = { .destroy = exynos_drm_encoder_destroy, }; static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder) { struct drm_encoder *clone; struct drm_device *dev = encoder->dev; struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); struct exynos_drm_display_ops *display_ops = exynos_encoder->manager->display_ops; unsigned int clone_mask = 0; int cnt = 0; list_for_each_entry(clone, &dev->mode_config.encoder_list, head) { switch (display_ops->type) { case EXYNOS_DISPLAY_TYPE_LCD: case EXYNOS_DISPLAY_TYPE_HDMI: case EXYNOS_DISPLAY_TYPE_VIDI: clone_mask |= (1 << (cnt++)); break; default: continue; } } return clone_mask; } void exynos_drm_encoder_setup(struct drm_device *dev) { struct drm_encoder *encoder; DRM_DEBUG_KMS("%s\n", __FILE__); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) encoder->possible_clones = exynos_drm_encoder_clones(encoder); } struct drm_encoder * 
exynos_drm_encoder_create(struct drm_device *dev, struct exynos_drm_manager *manager, unsigned int possible_crtcs) { struct drm_encoder *encoder; struct exynos_drm_encoder *exynos_encoder; DRM_DEBUG_KMS("%s\n", __FILE__); if (!manager || !possible_crtcs) return NULL; if (!manager->dev) return NULL; exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL); if (!exynos_encoder) { DRM_ERROR("failed to allocate encoder\n"); return NULL; } exynos_encoder->dpms = DRM_MODE_DPMS_OFF; exynos_encoder->manager = manager; encoder = &exynos_encoder->drm_encoder; encoder->possible_crtcs = possible_crtcs; DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(dev, encoder, &exynos_encoder_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs); DRM_DEBUG_KMS("encoder has been created\n"); return encoder; } struct exynos_drm_manager *exynos_drm_get_manager(struct drm_encoder *encoder) { return to_exynos_encoder(encoder)->manager; } void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data, void (*fn)(struct drm_encoder *, void *)) { struct drm_device *dev = crtc->dev; struct drm_encoder *encoder; struct exynos_drm_private *private = dev->dev_private; struct exynos_drm_manager *manager; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { /* * if crtc is detached from encoder, check pipe, * otherwise check crtc attached to encoder */ if (!encoder->crtc) { manager = to_exynos_encoder(encoder)->manager; if (manager->pipe < 0 || private->crtc[manager->pipe] != crtc) continue; } else { if (encoder->crtc != crtc) continue; } fn(encoder, data); } } void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_manager_ops *manager_ops = manager->ops; int crtc = *(int *)data; if (manager->pipe == -1) manager->pipe = crtc; if (manager_ops->enable_vblank) manager_ops->enable_vblank(manager->dev); } 
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_manager_ops *manager_ops = manager->ops; int crtc = *(int *)data; if (manager->pipe == -1) manager->pipe = crtc; if (manager_ops->disable_vblank) manager_ops->disable_vblank(manager->dev); } void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; int zpos = DEFAULT_ZPOS; if (data) zpos = *(int *)data; if (overlay_ops && overlay_ops->commit) overlay_ops->commit(manager->dev, zpos); } void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; int crtc = *(int *)data; int zpos = DEFAULT_ZPOS; DRM_DEBUG_KMS("%s\n", __FILE__); /* * when crtc is detached from encoder, this pipe is used * to select manager operation */ manager->pipe = crtc; exynos_drm_encoder_crtc_plane_commit(encoder, &zpos); } void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data) { struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); int mode = *(int *)data; DRM_DEBUG_KMS("%s\n", __FILE__); exynos_drm_encoder_dpms(encoder, mode); exynos_encoder->dpms = mode; } void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data) { struct drm_device *dev = encoder->dev; struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); struct exynos_drm_manager *manager = exynos_encoder->manager; struct exynos_drm_manager_ops *manager_ops = manager->ops; struct drm_connector *connector; int mode = *(int *)data; DRM_DEBUG_KMS("%s\n", __FILE__); if (manager_ops && manager_ops->dpms) manager_ops->dpms(manager->dev, mode); /* * set current dpms mode to the connector connected to * current encoder. 
connector->dpms would be checked * at drm_helper_connector_dpms() */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder == encoder) connector->dpms = mode; /* * if this condition is ok then it means that the crtc is already * detached from encoder and last function for detaching is properly * done, so clear pipe from manager to prevent repeated call. */ if (mode > DRM_MODE_DPMS_ON) { if (!encoder->crtc) manager->pipe = -1; } } void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; struct exynos_drm_overlay *overlay = data; if (overlay_ops && overlay_ops->mode_set) overlay_ops->mode_set(manager->dev, overlay); } void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; int zpos = DEFAULT_ZPOS; DRM_DEBUG_KMS("\n"); if (data) zpos = *(int *)data; if (overlay_ops && overlay_ops->disable) overlay_ops->disable(manager->dev, zpos); }
gpl-2.0
TeamBliss-Devices/android_kernel_asus_flo
drivers/hwmon/ntc_thermistor.c
5033
11455
/* * ntc_thermistor.c - NTC Thermistors * * Copyright (C) 2010 Samsung Electronics * MyungJoo Ham <myungjoo.ham@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/slab.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/math64.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/platform_data/ntc_thermistor.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> struct ntc_compensation { int temp_C; unsigned int ohm; }; /* * A compensation table should be sorted by the values of .ohm * in descending order. 
* The following compensation tables are from the specification of Murata NTC * Thermistors Datasheet */ const struct ntc_compensation ncpXXwb473[] = { { .temp_C = -40, .ohm = 1747920 }, { .temp_C = -35, .ohm = 1245428 }, { .temp_C = -30, .ohm = 898485 }, { .temp_C = -25, .ohm = 655802 }, { .temp_C = -20, .ohm = 483954 }, { .temp_C = -15, .ohm = 360850 }, { .temp_C = -10, .ohm = 271697 }, { .temp_C = -5, .ohm = 206463 }, { .temp_C = 0, .ohm = 158214 }, { .temp_C = 5, .ohm = 122259 }, { .temp_C = 10, .ohm = 95227 }, { .temp_C = 15, .ohm = 74730 }, { .temp_C = 20, .ohm = 59065 }, { .temp_C = 25, .ohm = 47000 }, { .temp_C = 30, .ohm = 37643 }, { .temp_C = 35, .ohm = 30334 }, { .temp_C = 40, .ohm = 24591 }, { .temp_C = 45, .ohm = 20048 }, { .temp_C = 50, .ohm = 16433 }, { .temp_C = 55, .ohm = 13539 }, { .temp_C = 60, .ohm = 11209 }, { .temp_C = 65, .ohm = 9328 }, { .temp_C = 70, .ohm = 7798 }, { .temp_C = 75, .ohm = 6544 }, { .temp_C = 80, .ohm = 5518 }, { .temp_C = 85, .ohm = 4674 }, { .temp_C = 90, .ohm = 3972 }, { .temp_C = 95, .ohm = 3388 }, { .temp_C = 100, .ohm = 2902 }, { .temp_C = 105, .ohm = 2494 }, { .temp_C = 110, .ohm = 2150 }, { .temp_C = 115, .ohm = 1860 }, { .temp_C = 120, .ohm = 1615 }, { .temp_C = 125, .ohm = 1406 }, }; const struct ntc_compensation ncpXXwl333[] = { { .temp_C = -40, .ohm = 1610154 }, { .temp_C = -35, .ohm = 1130850 }, { .temp_C = -30, .ohm = 802609 }, { .temp_C = -25, .ohm = 575385 }, { .temp_C = -20, .ohm = 416464 }, { .temp_C = -15, .ohm = 304219 }, { .temp_C = -10, .ohm = 224193 }, { .temp_C = -5, .ohm = 166623 }, { .temp_C = 0, .ohm = 124850 }, { .temp_C = 5, .ohm = 94287 }, { .temp_C = 10, .ohm = 71747 }, { .temp_C = 15, .ohm = 54996 }, { .temp_C = 20, .ohm = 42455 }, { .temp_C = 25, .ohm = 33000 }, { .temp_C = 30, .ohm = 25822 }, { .temp_C = 35, .ohm = 20335 }, { .temp_C = 40, .ohm = 16115 }, { .temp_C = 45, .ohm = 12849 }, { .temp_C = 50, .ohm = 10306 }, { .temp_C = 55, .ohm = 8314 }, { .temp_C = 60, .ohm = 6746 }, { .temp_C = 
65, .ohm = 5503 }, { .temp_C = 70, .ohm = 4513 }, { .temp_C = 75, .ohm = 3721 }, { .temp_C = 80, .ohm = 3084 }, { .temp_C = 85, .ohm = 2569 }, { .temp_C = 90, .ohm = 2151 }, { .temp_C = 95, .ohm = 1809 }, { .temp_C = 100, .ohm = 1529 }, { .temp_C = 105, .ohm = 1299 }, { .temp_C = 110, .ohm = 1108 }, { .temp_C = 115, .ohm = 949 }, { .temp_C = 120, .ohm = 817 }, { .temp_C = 125, .ohm = 707 }, }; struct ntc_data { struct device *hwmon_dev; struct ntc_thermistor_platform_data *pdata; const struct ntc_compensation *comp; struct device *dev; int n_comp; char name[PLATFORM_NAME_SIZE]; }; static inline u64 div64_u64_safe(u64 dividend, u64 divisor) { if (divisor == 0 && dividend == 0) return 0; if (divisor == 0) return UINT_MAX; return div64_u64(dividend, divisor); } static unsigned int get_ohm_of_thermistor(struct ntc_data *data, unsigned int uV) { struct ntc_thermistor_platform_data *pdata = data->pdata; u64 mV = uV / 1000; u64 pmV = pdata->pullup_uV / 1000; u64 N, puO, pdO; puO = pdata->pullup_ohm; pdO = pdata->pulldown_ohm; if (mV == 0) { if (pdata->connect == NTC_CONNECTED_POSITIVE) return UINT_MAX; return 0; } if (mV >= pmV) return (pdata->connect == NTC_CONNECTED_POSITIVE) ? 
0 : UINT_MAX; if (pdata->connect == NTC_CONNECTED_POSITIVE && puO == 0) N = div64_u64_safe(pdO * (pmV - mV), mV); else if (pdata->connect == NTC_CONNECTED_GROUND && pdO == 0) N = div64_u64_safe(puO * mV, pmV - mV); else if (pdata->connect == NTC_CONNECTED_POSITIVE) N = div64_u64_safe(pdO * puO * (pmV - mV), puO * mV - pdO * (pmV - mV)); else N = div64_u64_safe(pdO * puO * mV, pdO * (pmV - mV) - puO * mV); return (unsigned int) N; } static int lookup_comp(struct ntc_data *data, unsigned int ohm, int *i_low, int *i_high) { int start, end, mid = -1; /* Do a binary search on compensation table */ start = 0; end = data->n_comp; while (end > start) { mid = start + (end - start) / 2; if (data->comp[mid].ohm < ohm) end = mid; else if (data->comp[mid].ohm > ohm) start = mid + 1; else break; } if (mid == 0) { if (data->comp[mid].ohm > ohm) { *i_high = mid; *i_low = mid + 1; return 0; } else { *i_low = mid; *i_high = -1; return -EINVAL; } } if (mid == (data->n_comp - 1)) { if (data->comp[mid].ohm <= ohm) { *i_low = mid; *i_high = mid - 1; return 0; } else { *i_low = -1; *i_high = mid; return -EINVAL; } } if (data->comp[mid].ohm <= ohm) { *i_low = mid; *i_high = mid - 1; } else { *i_low = mid + 1; *i_high = mid; } return 0; } static int get_temp_mC(struct ntc_data *data, unsigned int ohm, int *temp) { int low, high; int ret; ret = lookup_comp(data, ohm, &low, &high); if (ret) { /* Unable to use linear approximation */ if (low != -1) *temp = data->comp[low].temp_C * 1000; else if (high != -1) *temp = data->comp[high].temp_C * 1000; else return ret; } else { *temp = data->comp[low].temp_C * 1000 + ((data->comp[high].temp_C - data->comp[low].temp_C) * 1000 * ((int)ohm - (int)data->comp[low].ohm)) / ((int)data->comp[high].ohm - (int)data->comp[low].ohm); } return 0; } static int ntc_thermistor_read(struct ntc_data *data, int *temp) { int ret; int read_ohm, read_uV; unsigned int ohm = 0; if (data->pdata->read_ohm) { read_ohm = data->pdata->read_ohm(); if (read_ohm < 0) return 
read_ohm; ohm = (unsigned int)read_ohm; } if (data->pdata->read_uV) { read_uV = data->pdata->read_uV(); if (read_uV < 0) return read_uV; ohm = get_ohm_of_thermistor(data, (unsigned int)read_uV); } ret = get_temp_mC(data, ohm, temp); if (ret) { dev_dbg(data->dev, "Sensor reading function not available.\n"); return ret; } return 0; } static ssize_t ntc_show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct ntc_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static ssize_t ntc_show_type(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "4\n"); } static ssize_t ntc_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct ntc_data *data = dev_get_drvdata(dev); int temp, ret; ret = ntc_thermistor_read(data, &temp); if (ret) return ret; return sprintf(buf, "%d\n", temp); } static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, ntc_show_type, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ntc_show_temp, NULL, 0); static DEVICE_ATTR(name, S_IRUGO, ntc_show_name, NULL); static struct attribute *ntc_attributes[] = { &dev_attr_name.attr, &sensor_dev_attr_temp1_type.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, NULL, }; static const struct attribute_group ntc_attr_group = { .attrs = ntc_attributes, }; static int __devinit ntc_thermistor_probe(struct platform_device *pdev) { struct ntc_data *data; struct ntc_thermistor_platform_data *pdata = pdev->dev.platform_data; int ret = 0; if (!pdata) { dev_err(&pdev->dev, "No platform init data supplied.\n"); return -ENODEV; } /* Either one of the two is required. */ if (!pdata->read_uV && !pdata->read_ohm) { dev_err(&pdev->dev, "Both read_uV and read_ohm missing." 
"Need either one of the two.\n"); return -EINVAL; } if (pdata->read_uV && pdata->read_ohm) { dev_warn(&pdev->dev, "Only one of read_uV and read_ohm " "is needed; ignoring read_uV.\n"); pdata->read_uV = NULL; } if (pdata->read_uV && (pdata->pullup_uV == 0 || (pdata->pullup_ohm == 0 && pdata->connect == NTC_CONNECTED_GROUND) || (pdata->pulldown_ohm == 0 && pdata->connect == NTC_CONNECTED_POSITIVE) || (pdata->connect != NTC_CONNECTED_POSITIVE && pdata->connect != NTC_CONNECTED_GROUND))) { dev_err(&pdev->dev, "Required data to use read_uV not " "supplied.\n"); return -EINVAL; } data = kzalloc(sizeof(struct ntc_data), GFP_KERNEL); if (!data) return -ENOMEM; data->dev = &pdev->dev; data->pdata = pdata; strncpy(data->name, pdev->id_entry->name, PLATFORM_NAME_SIZE); switch (pdev->id_entry->driver_data) { case TYPE_NCPXXWB473: data->comp = ncpXXwb473; data->n_comp = ARRAY_SIZE(ncpXXwb473); break; case TYPE_NCPXXWL333: data->comp = ncpXXwl333; data->n_comp = ARRAY_SIZE(ncpXXwl333); break; default: dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n", pdev->id_entry->driver_data, pdev->id_entry->name); ret = -EINVAL; goto err; } platform_set_drvdata(pdev, data); ret = sysfs_create_group(&data->dev->kobj, &ntc_attr_group); if (ret) { dev_err(data->dev, "unable to create sysfs files\n"); goto err; } data->hwmon_dev = hwmon_device_register(data->dev); if (IS_ERR_OR_NULL(data->hwmon_dev)) { dev_err(data->dev, "unable to register as hwmon device.\n"); ret = -EINVAL; goto err_after_sysfs; } dev_info(&pdev->dev, "Thermistor %s:%d (type: %s/%lu) successfully probed.\n", pdev->name, pdev->id, pdev->id_entry->name, pdev->id_entry->driver_data); return 0; err_after_sysfs: sysfs_remove_group(&data->dev->kobj, &ntc_attr_group); err: kfree(data); return ret; } static int __devexit ntc_thermistor_remove(struct platform_device *pdev) { struct ntc_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&data->dev->kobj, &ntc_attr_group); 
platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static const struct platform_device_id ntc_thermistor_id[] = { { "ncp15wb473", TYPE_NCPXXWB473 }, { "ncp18wb473", TYPE_NCPXXWB473 }, { "ncp21wb473", TYPE_NCPXXWB473 }, { "ncp03wb473", TYPE_NCPXXWB473 }, { "ncp15wl333", TYPE_NCPXXWL333 }, { }, }; static struct platform_driver ntc_thermistor_driver = { .driver = { .name = "ntc-thermistor", .owner = THIS_MODULE, }, .probe = ntc_thermistor_probe, .remove = __devexit_p(ntc_thermistor_remove), .id_table = ntc_thermistor_id, }; module_platform_driver(ntc_thermistor_driver); MODULE_DESCRIPTION("NTC Thermistor Driver"); MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ntc-thermistor");
gpl-2.0
majunggil/nexus7_android-tegra3-grouper-3.1-kitkat-mr1_dm-verity
drivers/scsi/bfa/bfa_hw_ct.c
9897
4137
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include "bfad_drv.h" #include "bfa_modules.h" #include "bfi_reg.h" BFA_TRC_FILE(HAL, IOCFC_CT); /* * Dummy interrupt handler for handling spurious interrupt during chip-reinit. */ static void bfa_hwct_msix_dummy(struct bfa_s *bfa, int vec) { } void bfa_hwct_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); int fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK); } else { bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); } } void bfa_hwct2_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); bfa_regs->intr_status = (kva + CT2_HOSTFN_INT_STATUS); bfa_regs->intr_mask = (kva + CT2_HOSTFN_INTR_MASK); } void bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq) { u32 r32; r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); } /* * Actions to respond RME Interrupt for Catapult ASIC: * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx()) * - Acknowledge by writing to RME Queue Control register * - Update CI */ void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) { u32 r32; r32 = 
readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); mmiowb(); } /* * Actions to respond RME Interrupt for Catapult2 ASIC: * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx()) * - Update CI */ void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) { bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); mmiowb(); } void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, u32 *num_vecs, u32 *max_vec_bit) { *msix_vecs_bmap = (1 << BFI_MSIX_CT_MAX) - 1; *max_vec_bit = (1 << (BFI_MSIX_CT_MAX - 1)); *num_vecs = BFI_MSIX_CT_MAX; } /* * Setup MSI-X vector for catapult */ void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs) { WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX)); bfa_trc(bfa, nvecs); bfa->msix.nvecs = nvecs; bfa_hwct_msix_uninstall(bfa); } void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa) { if (bfa->msix.nvecs == 0) return; if (bfa->msix.nvecs == 1) bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all; else bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err; } void bfa_hwct_msix_queue_install(struct bfa_s *bfa) { int i; if (bfa->msix.nvecs == 0) return; if (bfa->msix.nvecs == 1) { for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++) bfa->msix.handler[i] = bfa_msix_all; return; } for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++) bfa->msix.handler[i] = bfa_msix_reqq; for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++) bfa->msix.handler[i] = bfa_msix_rspq; } void bfa_hwct_msix_uninstall(struct bfa_s *bfa) { int i; for (i = 0; i < BFI_MSIX_CT_MAX; i++) bfa->msix.handler[i] = bfa_hwct_msix_dummy; } /* * Enable MSI-X vectors */ void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) { bfa_trc(bfa, 0); bfa_ioc_isr_mode_set(&bfa->ioc, msix); } void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) { *start = 
BFI_MSIX_RME_QMIN_CT; *end = BFI_MSIX_RME_QMAX_CT; }
gpl-2.0
OneB1t/android_kernel_alcatel_alto5
arch/powerpc/oprofile/op_model_pa6t.c
11689
5937
/* * Copyright (C) 2006-2007 PA Semi, Inc * * Author: Shashi Rao, PA Semi * * Maintained by: Olof Johansson <olof@lixom.net> * * Based on arch/powerpc/oprofile/op_model_power4.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/oprofile.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/percpu.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/oprofile_impl.h> #include <asm/reg.h> static unsigned char oprofile_running; /* mmcr values are set in pa6t_reg_setup, used in pa6t_cpu_setup */ static u64 mmcr0_val; static u64 mmcr1_val; /* inited in pa6t_reg_setup */ static u64 reset_value[OP_MAX_COUNTER]; static inline u64 ctr_read(unsigned int i) { switch (i) { case 0: return mfspr(SPRN_PA6T_PMC0); case 1: return mfspr(SPRN_PA6T_PMC1); case 2: return mfspr(SPRN_PA6T_PMC2); case 3: return mfspr(SPRN_PA6T_PMC3); case 4: return mfspr(SPRN_PA6T_PMC4); case 5: return mfspr(SPRN_PA6T_PMC5); default: printk(KERN_ERR "ctr_read called with bad arg %u\n", i); return 0; } } static inline void ctr_write(unsigned int i, u64 val) { switch (i) { case 0: mtspr(SPRN_PA6T_PMC0, val); break; case 1: mtspr(SPRN_PA6T_PMC1, val); break; case 2: mtspr(SPRN_PA6T_PMC2, val); break; case 3: mtspr(SPRN_PA6T_PMC3, val); break; case 4: mtspr(SPRN_PA6T_PMC4, val); break; case 5: mtspr(SPRN_PA6T_PMC5, val); break; default: printk(KERN_ERR "ctr_write called with bad arg %u\n", 
i); break; } } /* precompute the values to stuff in the hardware registers */ static int pa6t_reg_setup(struct op_counter_config *ctr, struct op_system_config *sys, int num_ctrs) { int pmc; /* * adjust the mmcr0.en[0-5] and mmcr0.inten[0-5] values obtained from the * event_mappings file by turning off the counters that the user doesn't * care about * * setup user and kernel profiling */ for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) if (!ctr[pmc].enabled) { sys->mmcr0 &= ~(0x1UL << pmc); sys->mmcr0 &= ~(0x1UL << (pmc+12)); pr_debug("turned off counter %u\n", pmc); } if (sys->enable_kernel) sys->mmcr0 |= PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN; else sys->mmcr0 &= ~(PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN); if (sys->enable_user) sys->mmcr0 |= PA6T_MMCR0_PREN; else sys->mmcr0 &= ~PA6T_MMCR0_PREN; /* * The performance counter event settings are given in the mmcr0 and * mmcr1 values passed from the user in the op_system_config * structure (sys variable). */ mmcr0_val = sys->mmcr0; mmcr1_val = sys->mmcr1; pr_debug("mmcr0_val inited to %016lx\n", sys->mmcr0); pr_debug("mmcr1_val inited to %016lx\n", sys->mmcr1); for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) { /* counters are 40 bit. Move to cputable at some point? 
*/ reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count; pr_debug("reset_value for pmc%u inited to 0x%llx\n", pmc, reset_value[pmc]); } return 0; } /* configure registers on this cpu */ static int pa6t_cpu_setup(struct op_counter_config *ctr) { u64 mmcr0 = mmcr0_val; u64 mmcr1 = mmcr1_val; /* Default is all PMCs off */ mmcr0 &= ~(0x3FUL); mtspr(SPRN_PA6T_MMCR0, mmcr0); /* program selected programmable events in */ mtspr(SPRN_PA6T_MMCR1, mmcr1); pr_debug("setup on cpu %d, mmcr0 %016lx\n", smp_processor_id(), mfspr(SPRN_PA6T_MMCR0)); pr_debug("setup on cpu %d, mmcr1 %016lx\n", smp_processor_id(), mfspr(SPRN_PA6T_MMCR1)); return 0; } static int pa6t_start(struct op_counter_config *ctr) { int i; /* Hold off event counting until rfid */ u64 mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS; for (i = 0; i < cur_cpu_spec->num_pmcs; i++) if (ctr[i].enabled) ctr_write(i, reset_value[i]); else ctr_write(i, 0UL); mtspr(SPRN_PA6T_MMCR0, mmcr0); oprofile_running = 1; pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0); return 0; } static void pa6t_stop(void) { u64 mmcr0; /* freeze counters */ mmcr0 = mfspr(SPRN_PA6T_MMCR0); mmcr0 |= PA6T_MMCR0_FCM0; mtspr(SPRN_PA6T_MMCR0, mmcr0); oprofile_running = 0; pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0); } /* handle the perfmon overflow vector */ static void pa6t_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) { unsigned long pc = mfspr(SPRN_PA6T_SIAR); int is_kernel = is_kernel_addr(pc); u64 val; int i; u64 mmcr0; /* disable perfmon counting until rfid */ mmcr0 = mfspr(SPRN_PA6T_MMCR0); mtspr(SPRN_PA6T_MMCR0, mmcr0 | PA6T_MMCR0_HANDDIS); /* Record samples. We've got one global bit for whether a sample * was taken, so add it for any counter that triggered overflow. 
*/ for (i = 0; i < cur_cpu_spec->num_pmcs; i++) { val = ctr_read(i); if (val & (0x1UL << 39)) { /* Overflow bit set */ if (oprofile_running && ctr[i].enabled) { if (mmcr0 & PA6T_MMCR0_SIARLOG) oprofile_add_ext_sample(pc, regs, i, is_kernel); ctr_write(i, reset_value[i]); } else { ctr_write(i, 0UL); } } } /* Restore mmcr0 to a good known value since the PMI changes it */ mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS; mtspr(SPRN_PA6T_MMCR0, mmcr0); } struct op_powerpc_model op_model_pa6t = { .reg_setup = pa6t_reg_setup, .cpu_setup = pa6t_cpu_setup, .start = pa6t_start, .stop = pa6t_stop, .handle_interrupt = pa6t_handle_interrupt, };
gpl-2.0
zaclimon/android_kernel_samsung_hugo
drivers/ide/ide-sysfs.c
12457
2835
#include <linux/kernel.h> #include <linux/ide.h> char *ide_media_string(ide_drive_t *drive) { switch (drive->media) { case ide_disk: return "disk"; case ide_cdrom: return "cdrom"; case ide_tape: return "tape"; case ide_floppy: return "floppy"; case ide_optical: return "optical"; default: return "UNKNOWN"; } } static ssize_t media_show(struct device *dev, struct device_attribute *attr, char *buf) { ide_drive_t *drive = to_ide_device(dev); return sprintf(buf, "%s\n", ide_media_string(drive)); } static ssize_t drivename_show(struct device *dev, struct device_attribute *attr, char *buf) { ide_drive_t *drive = to_ide_device(dev); return sprintf(buf, "%s\n", drive->name); } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { ide_drive_t *drive = to_ide_device(dev); return sprintf(buf, "ide:m-%s\n", ide_media_string(drive)); } static ssize_t model_show(struct device *dev, struct device_attribute *attr, char *buf) { ide_drive_t *drive = to_ide_device(dev); return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_PROD]); } static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, char *buf) { ide_drive_t *drive = to_ide_device(dev); return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_FW_REV]); } static ssize_t serial_show(struct device *dev, struct device_attribute *attr, char *buf) { ide_drive_t *drive = to_ide_device(dev); return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_SERNO]); } struct device_attribute ide_dev_attrs[] = { __ATTR_RO(media), __ATTR_RO(drivename), __ATTR_RO(modalias), __ATTR_RO(model), __ATTR_RO(firmware), __ATTR(serial, 0400, serial_show, NULL), __ATTR(unload_heads, 0644, ide_park_show, ide_park_store), __ATTR_NULL }; static ssize_t store_delete_devices(struct device *portdev, struct device_attribute *attr, const char *buf, size_t n) { ide_hwif_t *hwif = dev_get_drvdata(portdev); if (strncmp(buf, "1", n)) return -EINVAL; ide_port_unregister_devices(hwif); return n; }; static 
DEVICE_ATTR(delete_devices, S_IWUSR, NULL, store_delete_devices); static ssize_t store_scan(struct device *portdev, struct device_attribute *attr, const char *buf, size_t n) { ide_hwif_t *hwif = dev_get_drvdata(portdev); if (strncmp(buf, "1", n)) return -EINVAL; ide_port_unregister_devices(hwif); ide_port_scan(hwif); return n; }; static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); static struct device_attribute *ide_port_attrs[] = { &dev_attr_delete_devices, &dev_attr_scan, NULL }; int ide_sysfs_register_port(ide_hwif_t *hwif) { int i, uninitialized_var(rc); for (i = 0; ide_port_attrs[i]; i++) { rc = device_create_file(hwif->portdev, ide_port_attrs[i]); if (rc) break; } return rc; }
gpl-2.0
qpzm1258/shooterct-ics-3.0.16
drivers/video/via/via_utility.c
12969
6019
/* * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/via-core.h> #include "global.h" void viafb_get_device_support_state(u32 *support_state) { *support_state = CRT_Device; if (viaparinfo->chip_info->tmds_chip_info.tmds_chip_name == VT1632_TMDS) *support_state |= DVI_Device; if (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name == VT1631_LVDS) *support_state |= LCD_Device; } void viafb_get_device_connect_state(u32 *connect_state) { bool mobile = false; *connect_state = CRT_Device; if (viafb_dvi_sense()) *connect_state |= DVI_Device; viafb_lcd_get_mobile_state(&mobile); if (mobile) *connect_state |= LCD_Device; } bool viafb_lcd_get_support_expand_state(u32 xres, u32 yres) { unsigned int support_state = 0; switch (viafb_lcd_panel_id) { case LCD_PANEL_ID0_640X480: if ((xres < 640) && (yres < 480)) support_state = true; break; case LCD_PANEL_ID1_800X600: if ((xres < 800) && (yres < 600)) support_state = true; break; case LCD_PANEL_ID2_1024X768: if ((xres < 1024) && (yres < 768)) support_state = true; break; case LCD_PANEL_ID3_1280X768: if ((xres < 1280) && (yres < 768)) support_state = true; break; case LCD_PANEL_ID4_1280X1024: if ((xres < 1280) && (yres < 1024)) support_state = 
true; break; case LCD_PANEL_ID5_1400X1050: if ((xres < 1400) && (yres < 1050)) support_state = true; break; case LCD_PANEL_ID6_1600X1200: if ((xres < 1600) && (yres < 1200)) support_state = true; break; case LCD_PANEL_ID7_1366X768: if ((xres < 1366) && (yres < 768)) support_state = true; break; case LCD_PANEL_ID8_1024X600: if ((xres < 1024) && (yres < 600)) support_state = true; break; case LCD_PANEL_ID9_1280X800: if ((xres < 1280) && (yres < 800)) support_state = true; break; case LCD_PANEL_IDA_800X480: if ((xres < 800) && (yres < 480)) support_state = true; break; case LCD_PANEL_IDB_1360X768: if ((xres < 1360) && (yres < 768)) support_state = true; break; case LCD_PANEL_IDC_480X640: if ((xres < 480) && (yres < 640)) support_state = true; break; default: support_state = false; break; } return support_state; } /*====================================================================*/ /* Gamma Function Implementation*/ /*====================================================================*/ void viafb_set_gamma_table(int bpp, unsigned int *gamma_table) { int i, sr1a; int active_device_amount = 0; int device_status = viafb_DeviceStatus; for (i = 0; i < sizeof(viafb_DeviceStatus) * 8; i++) { if (device_status & 1) active_device_amount++; device_status >>= 1; } /* 8 bpp mode can't adjust gamma */ if (bpp == 8) return ; /* Enable Gamma */ switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CLE266: case UNICHROME_K400: viafb_write_reg_mask(SR16, VIASR, 0x80, BIT7); break; case UNICHROME_K800: case UNICHROME_PM800: case UNICHROME_CN700: case UNICHROME_CX700: case UNICHROME_K8M890: case UNICHROME_P4M890: case UNICHROME_P4M900: viafb_write_reg_mask(CR33, VIACR, 0x80, BIT7); break; } sr1a = (unsigned int)viafb_read_reg(VIASR, SR1A); viafb_write_reg_mask(SR1A, VIASR, 0x0, BIT0); /* Fill IGA1 Gamma Table */ outb(0, LUT_INDEX_WRITE); for (i = 0; i < 256; i++) { outb(gamma_table[i] >> 16, LUT_DATA); outb(gamma_table[i] >> 8 & 0xFF, LUT_DATA); outb(gamma_table[i] & 0xFF, 
LUT_DATA); } /* If adjust Gamma value in SAMM, fill IGA1, IGA2 Gamma table simultaneous. */ /* Switch to IGA2 Gamma Table */ if ((active_device_amount > 1) && !((viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) && (viaparinfo->chip_info->gfx_chip_revision < 15))) { viafb_write_reg_mask(SR1A, VIASR, 0x01, BIT0); viafb_write_reg_mask(CR6A, VIACR, 0x02, BIT1); /* Fill IGA2 Gamma Table */ outb(0, LUT_INDEX_WRITE); for (i = 0; i < 256; i++) { outb(gamma_table[i] >> 16, LUT_DATA); outb(gamma_table[i] >> 8 & 0xFF, LUT_DATA); outb(gamma_table[i] & 0xFF, LUT_DATA); } } viafb_write_reg(SR1A, VIASR, sr1a); } void viafb_get_gamma_table(unsigned int *gamma_table) { unsigned char color_r, color_g, color_b; unsigned char sr1a = 0; int i; /* Enable Gamma */ switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CLE266: case UNICHROME_K400: viafb_write_reg_mask(SR16, VIASR, 0x80, BIT7); break; case UNICHROME_K800: case UNICHROME_PM800: case UNICHROME_CN700: case UNICHROME_CX700: case UNICHROME_K8M890: case UNICHROME_P4M890: case UNICHROME_P4M900: viafb_write_reg_mask(CR33, VIACR, 0x80, BIT7); break; } sr1a = viafb_read_reg(VIASR, SR1A); viafb_write_reg_mask(SR1A, VIASR, 0x0, BIT0); /* Reading gamma table to get color value */ outb(0, LUT_INDEX_READ); for (i = 0; i < 256; i++) { color_r = inb(LUT_DATA); color_g = inb(LUT_DATA); color_b = inb(LUT_DATA); gamma_table[i] = ((((u32) color_r) << 16) | (((u16) color_g) << 8)) | color_b; } viafb_write_reg(SR1A, VIASR, sr1a); } void viafb_get_gamma_support_state(int bpp, unsigned int *support_state) { if (bpp == 8) *support_state = None_Device; else *support_state = CRT_Device | DVI_Device | LCD_Device; }
gpl-2.0
krizky82/Xperia-2011-Kernel-2.6.32.X
net/ipv6/raw.c
426
31438
/* * RAW sockets for IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Adapted from linux/net/ipv4/raw.c * * Fixes: * Hideaki YOSHIFUJI : sin6_scope_id support * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance) * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/icmpv6.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <linux/skbuff.h> #include <asm/uaccess.h> #include <asm/ioctls.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <net/protocol.h> #include <net/ip6_route.h> #include <net/ip6_checksum.h> #include <net/addrconf.h> #include <net/transp_v6.h> #include <net/udp.h> #include <net/inet_common.h> #include <net/tcp_states.h> #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) #include <net/mip6.h> #endif #include <linux/mroute6.h> #include <net/raw.h> #include <net/rawv6.h> #include <net/xfrm.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> static struct raw_hashinfo raw_v6_hashinfo = { .lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock), }; static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, unsigned short num, struct in6_addr *loc_addr, struct in6_addr *rmt_addr, int dif) { struct hlist_node *node; int is_multicast = ipv6_addr_is_multicast(loc_addr); sk_for_each_from(sk, node) if (inet_sk(sk)->num == num) { struct ipv6_pinfo *np = inet6_sk(sk); if 
(!net_eq(sock_net(sk), net)) continue; if (!ipv6_addr_any(&np->daddr) && !ipv6_addr_equal(&np->daddr, rmt_addr)) continue; if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) continue; if (!ipv6_addr_any(&np->rcv_saddr)) { if (ipv6_addr_equal(&np->rcv_saddr, loc_addr)) goto found; if (is_multicast && inet6_mc_check(sk, loc_addr, rmt_addr)) goto found; continue; } goto found; } sk = NULL; found: return sk; } /* * 0 - deliver * 1 - block */ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb) { struct icmp6hdr *icmph; struct raw6_sock *rp = raw6_sk(sk); if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) { __u32 *data = &rp->filter.data[0]; int bit_nr; icmph = (struct icmp6hdr *) skb->data; bit_nr = icmph->icmp6_type; return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0; } return 0; } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) static int (*mh_filter)(struct sock *sock, struct sk_buff *skb); int rawv6_mh_filter_register(int (*filter)(struct sock *sock, struct sk_buff *skb)) { rcu_assign_pointer(mh_filter, filter); return 0; } EXPORT_SYMBOL(rawv6_mh_filter_register); int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock, struct sk_buff *skb)) { rcu_assign_pointer(mh_filter, NULL); synchronize_rcu(); return 0; } EXPORT_SYMBOL(rawv6_mh_filter_unregister); #endif /* * demultiplex raw sockets. * (should consider queueing the skb in the sock receive_queue * without calling rawv6.c) * * Caller owns SKB so we must make clones. 
*/ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) { struct in6_addr *saddr; struct in6_addr *daddr; struct sock *sk; int delivered = 0; __u8 hash; struct net *net; saddr = &ipv6_hdr(skb)->saddr; daddr = saddr + 1; hash = nexthdr & (MAX_INET_PROTOS - 1); read_lock(&raw_v6_hashinfo.lock); sk = sk_head(&raw_v6_hashinfo.ht[hash]); if (sk == NULL) goto out; net = dev_net(skb->dev); sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif); while (sk) { int filtered; delivered = 1; switch (nexthdr) { case IPPROTO_ICMPV6: filtered = icmpv6_filter(sk, skb); break; #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case IPPROTO_MH: { /* XXX: To validate MH only once for each packet, * this is placed here. It should be after checking * xfrm policy, however it doesn't. The checking xfrm * policy is placed in rawv6_rcv() because it is * required for each socket. */ int (*filter)(struct sock *sock, struct sk_buff *skb); filter = rcu_dereference(mh_filter); filtered = filter ? filter(sk, skb) : 0; break; } #endif default: filtered = 0; break; } if (filtered < 0) break; if (filtered == 0) { struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); /* Not releasing hash table! */ if (clone) { nf_reset(clone); rawv6_rcv(sk, clone); } } sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr, IP6CB(skb)->iif); } out: read_unlock(&raw_v6_hashinfo.lock); return delivered; } int raw6_local_deliver(struct sk_buff *skb, int nexthdr) { struct sock *raw_sk; raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]); if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) raw_sk = NULL; return raw_sk != NULL; } /* This cleans up af_inet6 a bit. 
-DaveM */ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr; __be32 v4addr = 0; int addr_type; int err; if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; addr_type = ipv6_addr_type(&addr->sin6_addr); /* Raw sockets are IPv6 only */ if (addr_type == IPV6_ADDR_MAPPED) return(-EADDRNOTAVAIL); lock_sock(sk); err = -EINVAL; if (sk->sk_state != TCP_CLOSE) goto out; /* Check if the address belongs to the host. */ if (addr_type != IPV6_ADDR_ANY) { struct net_device *dev = NULL; if (addr_type & IPV6_ADDR_LINKLOCAL) { if (addr_len >= sizeof(struct sockaddr_in6) && addr->sin6_scope_id) { /* Override any existing binding, if another * one is supplied by user. */ sk->sk_bound_dev_if = addr->sin6_scope_id; } /* Binding to link-local address requires an interface */ if (!sk->sk_bound_dev_if) goto out; dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if); if (!dev) { err = -ENODEV; goto out; } } /* ipv4 addr of the socket is invalid. Only the * unspecified and mapped address have a v4 equivalent. */ v4addr = LOOPBACK4_IPV6; if (!(addr_type & IPV6_ADDR_MULTICAST)) { err = -EADDRNOTAVAIL; if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr, dev, 0)) { if (dev) dev_put(dev); goto out; } } if (dev) dev_put(dev); } inet->rcv_saddr = inet->saddr = v4addr; ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); if (!(addr_type & IPV6_ADDR_MULTICAST)) ipv6_addr_copy(&np->saddr, &addr->sin6_addr); err = 0; out: release_sock(sk); return err; } static void rawv6_err(struct sock *sk, struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); int err; int harderr; /* Report error on raw socket, if: 1. User requested recverr. 2. Socket is connected (otherwise the error indication is useless without recverr and error is hard. 
*/ if (!np->recverr && sk->sk_state != TCP_ESTABLISHED) return; harderr = icmpv6_err_convert(type, code, &err); if (type == ICMPV6_PKT_TOOBIG) harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); if (np->recverr) { u8 *payload = skb->data; if (!inet->hdrincl) payload += offset; ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload); } if (np->recverr || harderr) { sk->sk_err = err; sk->sk_error_report(sk); } } void raw6_icmp_error(struct sk_buff *skb, int nexthdr, u8 type, u8 code, int inner_offset, __be32 info) { struct sock *sk; int hash; struct in6_addr *saddr, *daddr; struct net *net; hash = nexthdr & (RAW_HTABLE_SIZE - 1); read_lock(&raw_v6_hashinfo.lock); sk = sk_head(&raw_v6_hashinfo.ht[hash]); if (sk != NULL) { /* Note: ipv6_hdr(skb) != skb->data */ struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data; saddr = &ip6h->saddr; daddr = &ip6h->daddr; net = dev_net(skb->dev); while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr, IP6CB(skb)->iif))) { rawv6_err(sk, skb, NULL, type, code, inner_offset, info); sk = sk_next(sk); } } read_unlock(&raw_v6_hashinfo.lock); } static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) { if ((raw6_sk(sk)->checksum || sk->sk_filter) && skb_checksum_complete(skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } /* Charge it to the socket. */ if (sock_queue_rcv_skb(sk,skb)<0) { atomic_inc(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } return 0; } /* * This is next to useless... * if we demultiplex in network layer we don't need the extra call * just to queue the skb... 
* maybe we could have the network decide upon a hint if it * should call raw_rcv for demultiplexing */ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) { struct inet_sock *inet = inet_sk(sk); struct raw6_sock *rp = raw6_sk(sk); if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } if (!rp->checksum) skb->ip_summed = CHECKSUM_UNNECESSARY; if (skb->ip_summed == CHECKSUM_COMPLETE) { skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len, inet->num, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } if (!skb_csum_unnecessary(skb)) skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len, inet->num, 0)); if (inet->hdrincl) { if (skb_checksum_complete(skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } } rawv6_rcv_skb(sk, skb); return 0; } /* * This should be easy, if there is something there * we return it, otherwise we block. 
*/ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name; struct sk_buff *skb; size_t copied; int err; if (flags & MSG_OOB) return -EOPNOTSUPP; if (addr_len) *addr_len=sizeof(*sin6); if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } if (skb_csum_unnecessary(skb)) { err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else if (msg->msg_flags&MSG_TRUNC) { if (__skb_checksum_complete(skb)) goto csum_copy_err; err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else { err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (err) goto out_free; /* Copy the address. */ if (sin6) { sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr); sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = 0; if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin6->sin6_scope_id = IP6CB(skb)->iif; } sock_recv_timestamp(msg, sk, skb); if (np->rxopt.all) datagram_recv_ctl(sk, msg, skb); err = copied; if (flags & MSG_TRUNC) err = skb->len; out_free: skb_free_datagram(sk, skb); out: return err; csum_copy_err: skb_kill_datagram(sk, skb, flags); /* Error for blocking case is chosen to masquerade as some normal condition. */ err = (flags&MSG_DONTWAIT) ? 
-EAGAIN : -EHOSTUNREACH; atomic_inc(&sk->sk_drops); goto out; } static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct raw6_sock *rp) { struct sk_buff *skb; int err = 0; int offset; int len; int total_len; __wsum tmp_csum; __sum16 csum; if (!rp->checksum) goto send; if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; offset = rp->offset; total_len = inet_sk(sk)->cork.length - (skb_network_header(skb) - skb->data); if (offset >= total_len - 1) { err = -EINVAL; ip6_flush_pending_frames(sk); goto out; } /* should be check HW csum miyazawa */ if (skb_queue_len(&sk->sk_write_queue) == 1) { /* * Only one fragment on the socket. */ tmp_csum = skb->csum; } else { struct sk_buff *csum_skb = NULL; tmp_csum = 0; skb_queue_walk(&sk->sk_write_queue, skb) { tmp_csum = csum_add(tmp_csum, skb->csum); if (csum_skb) continue; len = skb->len - skb_transport_offset(skb); if (offset >= len) { offset -= len; continue; } csum_skb = skb; } skb = csum_skb; } offset += skb_transport_offset(skb); if (skb_copy_bits(skb, offset, &csum, 2)) BUG(); /* in case cksum was not initialized */ if (unlikely(csum)) tmp_csum = csum_sub(tmp_csum, csum_unfold(csum)); csum = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst, total_len, fl->proto, tmp_csum); if (csum == 0 && fl->proto == IPPROTO_UDP) csum = CSUM_MANGLED_0; if (skb_store_bits(skb, offset, &csum, 2)) BUG(); send: err = ip6_push_pending_frames(sk); out: return err; } static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, struct flowi *fl, struct rt6_info *rt, unsigned int flags) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *iph; struct sk_buff *skb; int err; if (length > rt->u.dst.dev->mtu) { ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu); return -EMSGSIZE; } if (flags&MSG_PROBE) goto out; skb = sock_alloc_send_skb(sk, length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); 
skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; skb_dst_set(skb, dst_clone(&rt->u.dst)); skb_put(skb, length); skb_reset_network_header(skb); iph = ipv6_hdr(skb); skb->ip_summed = CHECKSUM_NONE; skb->transport_header = skb->network_header; err = memcpy_fromiovecend((void *)iph, from, 0, length); if (err) goto error_fault; IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) goto error; out: return 0; error_fault: err = -EFAULT; kfree_skb(skb); error: IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); if (err == -ENOBUFS && !np->recverr) err = 0; return err; } static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg) { struct iovec *iov; u8 __user *type = NULL; u8 __user *code = NULL; u8 len = 0; int probed = 0; int i; if (!msg->msg_iov) return 0; for (i = 0; i < msg->msg_iovlen; i++) { iov = &msg->msg_iov[i]; if (!iov) continue; switch (fl->proto) { case IPPROTO_ICMPV6: /* check if one-byte field is readable or not. */ if (iov->iov_base && iov->iov_len < 1) break; if (!type) { type = iov->iov_base; /* check if code field is readable or not. */ if (iov->iov_len > 1) code = type + 1; } else if (!code) code = iov->iov_base; if (type && code) { if (get_user(fl->fl_icmp_type, type) || get_user(fl->fl_icmp_code, code)) return -EFAULT; probed = 1; } break; case IPPROTO_MH: if (iov->iov_base && iov->iov_len < 1) break; /* check if type field is readable or not. 
*/ if (iov->iov_len > 2 - len) { u8 __user *p = iov->iov_base; if (get_user(fl->fl_mh_type, &p[2 - len])) return -EFAULT; probed = 1; } else len += iov->iov_len; break; default: probed = 1; break; } if (probed) break; } return 0; } static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct ipv6_txoptions opt_space; struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name; struct in6_addr *daddr, *final_p = NULL, final; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct raw6_sock *rp = raw6_sk(sk); struct ipv6_txoptions *opt = NULL; struct ip6_flowlabel *flowlabel = NULL; struct dst_entry *dst = NULL; struct flowi fl; int addr_len = msg->msg_namelen; int hlimit = -1; int tclass = -1; u16 proto; int err; /* Rough check on arithmetic overflow, better check is made in ip6_append_data(). */ if (len > INT_MAX) return -EMSGSIZE; /* Mirror BSD error message compatibility */ if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* * Get and verify the address. */ memset(&fl, 0, sizeof(fl)); fl.mark = sk->sk_mark; if (sin6) { if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (sin6->sin6_family && sin6->sin6_family != AF_INET6) return(-EAFNOSUPPORT); /* port is the proto value [0..255] carried in nexthdr */ proto = ntohs(sin6->sin6_port); if (!proto) proto = inet->num; else if (proto != inet->num) return(-EINVAL); if (proto > 255) return(-EINVAL); daddr = &sin6->sin6_addr; if (np->sndflow) { fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) { flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel); if (flowlabel == NULL) return -EINVAL; daddr = &flowlabel->dst; } } /* * Otherwise it will be difficult to maintain * sk->sk_dst_cache. 
*/ if (sk->sk_state == TCP_ESTABLISHED && ipv6_addr_equal(daddr, &np->daddr)) daddr = &np->daddr; if (addr_len >= sizeof(struct sockaddr_in6) && sin6->sin6_scope_id && ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL) fl.oif = sin6->sin6_scope_id; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; proto = inet->num; daddr = &np->daddr; fl.fl6_flowlabel = np->flow_label; } if (fl.oif == 0) fl.oif = sk->sk_bound_dev_if; if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass); if (err < 0) { fl6_sock_release(flowlabel); return err; } if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel); if (flowlabel == NULL) return -EINVAL; } if (!(opt->opt_nflen|opt->opt_flen)) opt = NULL; } if (opt == NULL) opt = np->opt; if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); fl.proto = proto; err = rawv6_probe_proto_opt(&fl, msg); if (err) goto out; if (!ipv6_addr_any(daddr)) ipv6_addr_copy(&fl.fl6_dst, daddr); else fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) ipv6_addr_copy(&fl.fl6_src, &np->saddr); /* merge ip6_build_xmit from ip6_output */ if (opt && opt->srcrt) { struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; ipv6_addr_copy(&final, &fl.fl6_dst); ipv6_addr_copy(&fl.fl6_dst, rt0->addr); final_p = &final; } if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) fl.oif = np->mcast_oif; security_sk_classify_flow(sk, &fl); err = ip6_dst_lookup(sk, &dst, &fl); if (err) goto out; if (final_p) ipv6_addr_copy(&fl.fl6_dst, final_p); err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT); if (err < 0) { if (err == -EREMOTE) err = ip6_dst_blackhole(sk, &dst, &fl); if (err < 0) goto out; } if (hlimit < 0) { if 
(ipv6_addr_is_multicast(&fl.fl6_dst)) hlimit = np->mcast_hops; else hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); } if (tclass < 0) tclass = np->tclass; if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: if (inet->hdrincl) { err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags); } else { lock_sock(sk); err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, msg->msg_flags); if (err) ip6_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) err = rawv6_push_pending_frames(sk, &fl, rp); release_sock(sk); } done: dst_release(dst); out: fl6_sock_release(flowlabel); return err<0?err:len; do_confirm: dst_confirm(dst); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto done; } static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, char __user *optval, int optlen) { switch (optname) { case ICMPV6_FILTER: if (optlen > sizeof(struct icmp6_filter)) optlen = sizeof(struct icmp6_filter); if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) return -EFAULT; return 0; default: return -ENOPROTOOPT; } return 0; } static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int len; switch (optname) { case ICMPV6_FILTER: if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; if (len > sizeof(struct icmp6_filter)) len = sizeof(struct icmp6_filter); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &raw6_sk(sk)->filter, len)) return -EFAULT; return 0; default: return -ENOPROTOOPT; } return 0; } static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct raw6_sock *rp = raw6_sk(sk); int val; if (get_user(val, (int __user *)optval)) return -EFAULT; switch (optname) { case IPV6_CHECKSUM: if (inet_sk(sk)->num == IPPROTO_ICMPV6 && level == 
IPPROTO_IPV6) { /* * RFC3542 tells that IPV6_CHECKSUM socket * option in the IPPROTO_IPV6 level is not * allowed on ICMPv6 sockets. * If you want to set it, use IPPROTO_RAW * level IPV6_CHECKSUM socket option * (Linux extension). */ return -EINVAL; } /* You may get strange result with a positive odd offset; RFC2292bis agrees with me. */ if (val > 0 && (val&1)) return(-EINVAL); if (val < 0) { rp->checksum = 0; } else { rp->checksum = 1; rp->offset = val; } return 0; break; default: return(-ENOPROTOOPT); } } static int rawv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { switch(level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_seticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return ipv6_setsockopt(sk, level, optname, optval, optlen); } return do_rawv6_setsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { switch (level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_seticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); } return do_rawv6_setsockopt(sk, level, optname, optval, optlen); } #endif static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct raw6_sock *rp = raw6_sk(sk); int val, len; if (get_user(len,optlen)) return -EFAULT; switch (optname) { case IPV6_CHECKSUM: /* * We allow getsockopt() for IPPROTO_IPV6-level * IPV6_CHECKSUM socket option on ICMPv6 sockets * since RFC3542 is silent about it. 
*/ if (rp->checksum == 0) val = -1; else val = rp->offset; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, sizeof(int), len); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval,&val,len)) return -EFAULT; return 0; } static int rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { switch(level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_geticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return ipv6_getsockopt(sk, level, optname, optval, optlen); } return do_rawv6_getsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { switch (level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_geticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); } return do_rawv6_getsockopt(sk, level, optname, optval, optlen); } #endif static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch(cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) amount = skb->tail - skb->transport_header; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } default: #ifdef CONFIG_IPV6_MROUTE return ip6mr_ioctl(sk, cmd, (void __user *)arg); #else return -ENOIOCTLCMD; #endif } } static void rawv6_close(struct sock *sk, long timeout) { if (inet_sk(sk)->num == IPPROTO_RAW) ip6_ra_control(sk, -1); ip6mr_sk_done(sk); sk_common_release(sk); 
} static void raw6_destroy(struct sock *sk) { lock_sock(sk); ip6_flush_pending_frames(sk); release_sock(sk); inet6_destroy_sock(sk); } static int rawv6_init_sk(struct sock *sk) { struct raw6_sock *rp = raw6_sk(sk); switch (inet_sk(sk)->num) { case IPPROTO_ICMPV6: rp->checksum = 1; rp->offset = 2; break; case IPPROTO_MH: rp->checksum = 1; rp->offset = 4; break; default: break; } return(0); } struct proto rawv6_prot = { .name = "RAWv6", .owner = THIS_MODULE, .close = rawv6_close, .destroy = raw6_destroy, .connect = ip6_datagram_connect, .disconnect = udp_disconnect, .ioctl = rawv6_ioctl, .init = rawv6_init_sk, .setsockopt = rawv6_setsockopt, .getsockopt = rawv6_getsockopt, .sendmsg = rawv6_sendmsg, .recvmsg = rawv6_recvmsg, .bind = rawv6_bind, .backlog_rcv = rawv6_rcv_skb, .hash = raw_hash_sk, .unhash = raw_unhash_sk, .obj_size = sizeof(struct raw6_sock), .h.raw_hash = &raw_v6_hashinfo, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_rawv6_setsockopt, .compat_getsockopt = compat_rawv6_getsockopt, #endif }; #ifdef CONFIG_PROC_FS static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) { struct ipv6_pinfo *np = inet6_sk(sp); struct in6_addr *dest, *src; __u16 destp, srcp; dest = &np->daddr; src = &np->rcv_saddr; destp = 0; srcp = inet_sk(sp)->num; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); } static int raw6_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, " sl " "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" " uid timeout inode ref pointer drops\n"); else 
raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); return 0; } static const struct seq_operations raw6_seq_ops = { .start = raw_seq_start, .next = raw_seq_next, .stop = raw_seq_stop, .show = raw6_seq_show, }; static int raw6_seq_open(struct inode *inode, struct file *file) { return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops); } static const struct file_operations raw6_seq_fops = { .owner = THIS_MODULE, .open = raw6_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static int raw6_init_net(struct net *net) { if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops)) return -ENOMEM; return 0; } static void raw6_exit_net(struct net *net) { proc_net_remove(net, "raw6"); } static struct pernet_operations raw6_net_ops = { .init = raw6_init_net, .exit = raw6_exit_net, }; int __init raw6_proc_init(void) { return register_pernet_subsys(&raw6_net_ops); } void raw6_proc_exit(void) { unregister_pernet_subsys(&raw6_net_ops); } #endif /* CONFIG_PROC_FS */ /* Same as inet6_dgram_ops, sans udp_poll. 
*/ static const struct proto_ops inet6_sockraw_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, .connect = inet_dgram_connect, /* ok */ .socketpair = sock_no_socketpair, /* a do nothing */ .accept = sock_no_accept, /* a do nothing */ .getname = inet6_getname, .poll = datagram_poll, /* ok */ .ioctl = inet6_ioctl, /* must change */ .listen = sock_no_listen, /* ok */ .shutdown = inet_shutdown, /* ok */ .setsockopt = sock_common_setsockopt, /* ok */ .getsockopt = sock_common_getsockopt, /* ok */ .sendmsg = inet_sendmsg, /* ok */ .recvmsg = sock_common_recvmsg, /* ok */ .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; static struct inet_protosw rawv6_protosw = { .type = SOCK_RAW, .protocol = IPPROTO_IP, /* wild card */ .prot = &rawv6_prot, .ops = &inet6_sockraw_ops, .capability = CAP_NET_RAW, .no_check = UDP_CSUM_DEFAULT, .flags = INET_PROTOSW_REUSE, }; int __init rawv6_init(void) { int ret; ret = inet6_register_protosw(&rawv6_protosw); if (ret) goto out; out: return ret; } void rawv6_exit(void) { inet6_unregister_protosw(&rawv6_protosw); }
gpl-2.0
SeKwonLee/pmfs
arch/arm/mach-omap2/am33xx-restart.c
938
1053
/* * am33xx-restart.c - Code common to all AM33xx machines. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/reboot.h> #include "common.h" #include "prm-regbits-33xx.h" #include "prm33xx.h" /** * am3xx_restart - trigger a software restart of the SoC * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c * @cmd: passed from the userspace program rebooting the system (if provided) * * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. */ void am33xx_restart(enum reboot_mode mode, const char *cmd) { /* TODO: Handle mode and cmd if necessary */ am33xx_prm_rmw_reg_bits(AM33XX_GLOBAL_WARM_SW_RST_MASK, AM33XX_GLOBAL_WARM_SW_RST_MASK, AM33XX_PRM_DEVICE_MOD, AM33XX_PRM_RSTCTRL_OFFSET); /* OCP barrier */ (void)am33xx_prm_read_reg(AM33XX_PRM_DEVICE_MOD, AM33XX_PRM_RSTCTRL_OFFSET); }
gpl-2.0
jasonzhong/linux
drivers/usb/host/ehci-pci.c
938
12594
/*
 * EHCI HCD (Host Controller Driver) PCI Bus Glue.
 *
 * Copyright (c) 2000-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "ehci.h"
#include "pci-quirks.h"

#define DRIVER_DESC "EHCI PCI platform driver"

static const char hcd_name[] = "ehci-pci";

/* defined here to avoid adding to pci_ids.h for single instance use */
#define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70

/*-------------------------------------------------------------------------*/
#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC		0x0939

/* True iff @pdev is the EHCI controller embedded in an Intel Quark X1000. */
static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
{
	return pdev->vendor == PCI_VENDOR_ID_INTEL &&
		pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC;
}

/*
 * This is the list of PCI IDs for the devices that have EHCI USB class and
 * specific drivers for that. One of the example is a ChipIdea device installed
 * on some Intel MID platforms.
 */
static const struct pci_device_id bypass_pci_id_table[] = {
	/* ChipIdea on Intel MID platform */
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0811), },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006), },
	{}
};

/* True iff @pdev should be left to a more specific driver (see table). */
static inline bool is_bypassed_id(struct pci_dev *pdev)
{
	return !!pci_match_id(bypass_pci_id_table, pdev);
}

/*
 * 0x84 is the offset of in/out threshold register,
 * and it is the same offset as the register of 'hostpc'.
 */
#define	intel_quark_x1000_insnreg01	hostpc

/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */
#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD	0x007f007f

/*
 * Re-apply PCI-level tweaks after the controller has been powered up.
 * Called after powerup, by probe or system-pm "wakeup".
 *
 * Always returns 0: a pci_set_mwi() failure is non-fatal since
 * Memory-Write-Invalidate support is optional.
 */
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
{
	int			retval;

	/* we expect static quirk code to handle the "extended capabilities"
	 * (currently just BIOS handoff) allowed starting with EHCI 0.96
	 */

	/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
	retval = pci_set_mwi(pdev);
	if (!retval)
		ehci_dbg(ehci, "MWI active\n");

	/* Reset the threshold limit */
	if (is_intel_quark_x1000(pdev)) {
		/*
		 * For the Intel QUARK X1000, raise the I/O threshold to the
		 * maximum usable value in order to improve performance.
		 */
		ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD,
			ehci->regs->intel_quark_x1000_insnreg01);
	}

	return 0;
}

/*
 * Vendor-specific quirk handling plus generic EHCI bring-up.
 * Called during probe() after chip reset completes.
 *
 * Returns 0 on success or a negative errno (e.g. -EIO for known-broken
 * AMD8111 parts, or whatever ehci_setup() reports).
 */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	u32			temp;
	int			retval;

	ehci->caps = hcd->regs;

	/*
	 * ehci_init() causes memory for DMA transfers to be
	 * allocated. Thus, any vendor-specific workarounds based on
	 * limiting the type of memory used for DMA transfers must
	 * happen before ehci_setup() is called.
	 *
	 * Most other workarounds can be done either before or after
	 * init and reset; they are located here too.
	 */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TOSHIBA_2:
		/* celleb's companion chip */
		if (pdev->device == 0x01b5) {
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
			ehci->big_endian_mmio = 1;
#else
			ehci_warn(ehci,
				  "unsupported big endian Toshiba quirk\n");
#endif
		}
		break;
	case PCI_VENDOR_ID_NVIDIA:
		/* NVidia reports that certain chips don't handle
		 * QH, ITD, or SITD addresses above 2GB.  (But TD,
		 * data buffer, and periodic schedule are normal.)
		 */
		switch (pdev->device) {
		case 0x003c:	/* MCP04 */
		case 0x005b:	/* CK804 */
		case 0x00d8:	/* CK8 */
		case 0x00e8:	/* CK8S */
			/* constrain consistent DMA below 2GB (31-bit mask) */
			if (pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(31)) < 0)
				ehci_warn(ehci, "can't enable NVidia "
					"workaround for >2GB RAM\n");
			break;

		/* Some NForce2 chips have problems with selective suspend;
		 * fixed in newer silicon.
		 */
		case 0x0068:
			if (pdev->revision < 0xa4)
				ehci->no_selective_suspend = 1;
			break;
		}
		break;
	case PCI_VENDOR_ID_INTEL:
		if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB)
			hcd->has_tt = 1;
		break;
	case PCI_VENDOR_ID_TDI:
		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI)
			hcd->has_tt = 1;
		break;
	case PCI_VENDOR_ID_AMD:
		/* AMD PLL quirk */
		if (usb_amd_find_chipset_info())
			ehci->amd_pll_fix = 1;
		/* AMD8111 EHCI doesn't work, according to AMD errata */
		if (pdev->device == 0x7463) {
			ehci_info(ehci, "ignoring AMD8111 (errata)\n");
			retval = -EIO;
			goto done;
		}

		/*
		 * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
		 * read/write memory space which does not belong to it when
		 * there is NULL pointer with T-bit set to 1 in the frame list
		 * table. To avoid the issue, the frame list link pointer
		 * should always contain a valid pointer to a inactive qh.
		 */
		if (pdev->device == 0x7808) {
			ehci->use_dummy_qh = 1;
			ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
		}
		break;
	case PCI_VENDOR_ID_VIA:
		if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) {
			u8 tmp;

			/* The VT6212 defaults to a 1 usec EHCI sleep time
			 * which hogs the PCI bus *badly*. Setting bit 5 of
			 * 0x4B makes that sleep time use the conventional
			 * 10 usec.
			 */
			pci_read_config_byte(pdev, 0x4b, &tmp);
			if (tmp & 0x20)
				break;	/* already configured */
			pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
		}
		break;
	case PCI_VENDOR_ID_ATI:
		/* AMD PLL quirk */
		if (usb_amd_find_chipset_info())
			ehci->amd_pll_fix = 1;

		/*
		 * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
		 * read/write memory space which does not belong to it when
		 * there is NULL pointer with T-bit set to 1 in the frame list
		 * table. To avoid the issue, the frame list link pointer
		 * should always contain a valid pointer to a inactive qh.
		 */
		if (pdev->device == 0x4396) {
			ehci->use_dummy_qh = 1;
			ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
		}
		/* SB600 and old version of SB700 have a bug in EHCI controller,
		 * which causes usb devices lose response in some cases.
		 */
		if ((pdev->device == 0x4386 || pdev->device == 0x4396) &&
				usb_amd_hang_symptom_quirk()) {
			u8 tmp;
			ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n");
			pci_read_config_byte(pdev, 0x53, &tmp);
			pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
		}
		break;
	case PCI_VENDOR_ID_NETMOS:
		/* MosChip frame-index-register bug */
		ehci_info(ehci, "applying MosChip frame-index workaround\n");
		ehci->frame_index_bug = 1;
		break;
	}

	/* optional debug port, normally in the first BAR */
	temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
	if (temp) {
		pci_read_config_dword(pdev, temp, &temp);
		temp >>= 16;
		/* capability dword bits 29:28+13 encode BAR/offset; value 1
		 * in the BAR field means the port lives in the first BAR */
		if (((temp >> 13) & 7) == 1) {
			u32 hcs_params = ehci_readl(ehci,
						    &ehci->caps->hcs_params);

			temp &= 0x1fff;
			ehci->debug = hcd->regs + temp;
			temp = ehci_readl(ehci, &ehci->debug->control);
			ehci_info(ehci, "debug port %d%s\n",
				  HCS_DEBUG_PORT(hcs_params),
				  (temp & DBGP_ENABLED) ? " IN USE" : "");
			if (!(temp & DBGP_ENABLED))
				ehci->debug = NULL;
		}
	}

	retval = ehci_setup(hcd);
	if (retval)
		return retval;

	/* These workarounds need to be applied after ehci_setup() */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NEC:
		ehci->need_io_watchdog = 0;
		break;
	case PCI_VENDOR_ID_INTEL:
		ehci->need_io_watchdog = 0;
		break;
	case PCI_VENDOR_ID_NVIDIA:
		switch (pdev->device) {
		/* MCP89 chips on the MacBookAir3,1 give EPROTO when
		 * fetching device descriptors unless LPM is disabled.
		 * There are also intermittent problems enumerating
		 * devices with PPCD enabled.
		 */
		case 0x0d9d:
			ehci_info(ehci, "disable ppcd for nvidia mcp89\n");
			ehci->has_ppcd = 0;
			ehci->command &= ~CMD_PPCEE;
			break;
		}
		break;
	}

	/* at least the Genesys GL880S needs fixup here */
	temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
	temp &= 0x0f;
	if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
		ehci_dbg(ehci, "bogus port configuration: "
			"cc=%d x pcc=%d < ports=%d\n",
			HCS_N_CC(ehci->hcs_params),
			HCS_N_PCC(ehci->hcs_params),
			HCS_N_PORTS(ehci->hcs_params));

		switch (pdev->vendor) {
		case 0x17a0:		/* GENESYS */
			/* GL880S: should be PORTS=2 */
			temp |= (ehci->hcs_params & ~0xf);
			ehci->hcs_params = temp;
			break;
		case PCI_VENDOR_ID_NVIDIA:
			/* NF4: should be PCC=10 */
			break;
		}
	}

	/* Serial Bus Release Number is at PCI 0x60 offset */
	if (pdev->vendor == PCI_VENDOR_ID_STMICRO
	    && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
		;	/* ConneXT has no sbrn register */
	else
		pci_read_config_byte(pdev, 0x60, &ehci->sbrn);

	/* Keep this around for a while just in case some EHCI
	 * implementation uses legacy PCI PM support.  This test
	 * can be removed on 17 Dec 2009 if the dev_warn() hasn't
	 * been triggered by then.
	 */
	if (!device_can_wakeup(&pdev->dev)) {
		u16	port_wake;

		pci_read_config_word(pdev, 0x62, &port_wake);
		if (port_wake & 0x0001) {
			dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
			device_set_wakeup_capable(&pdev->dev, 1);
		}
	}

#ifdef	CONFIG_PM
	if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
		ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif

	retval = ehci_pci_reinit(ehci, pdev);
done:
	return retval;
}

/*-------------------------------------------------------------------------*/

#ifdef	CONFIG_PM

/* suspend/resume, section 4.3 */

/* These routines rely on the PCI bus glue
 * to handle powerdown and wakeup, and currently also on
 * transceivers that don't need any software attention to set up
 * the right sort of wakeup.
 * Also they depend on separate root hub suspend/resume.
 */

/*
 * Resume the controller; if the generic resume reports trouble the
 * PCI-level tweaks are re-applied.  Always reports success to the
 * PCI PM core.
 */
static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);

	if (ehci_resume(hcd, hibernated) != 0)
		(void) ehci_pci_reinit(ehci, pdev);
	return 0;
}

#else

#define ehci_suspend		NULL
#define ehci_pci_resume		NULL
#endif	/* CONFIG_PM */

static struct hc_driver __read_mostly ehci_pci_hc_driver;

static const struct ehci_driver_overrides pci_overrides __initconst = {
	.reset =		ehci_pci_setup,
};

/*-------------------------------------------------------------------------*/

/* Probe entry: reject devices claimed by more specific drivers. */
static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (is_bypassed_id(pdev))
		return -ENODEV;
	return usb_hcd_pci_probe(pdev, id);
}

/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids [] = { {
	/* handle any USB 2.0 EHCI controller */
	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
	.driver_data =	(unsigned long) &ehci_pci_hc_driver,
	},
	{ PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST),
	  .driver_data = (unsigned long) &ehci_pci_hc_driver,
	},
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);

/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver ehci_pci_driver = {
	.name =		(char *) hcd_name,
	.id_table =	pci_ids,

	.probe =	ehci_pci_probe,
	.remove =	usb_hcd_pci_remove,
	.shutdown =	usb_hcd_pci_shutdown,

#ifdef CONFIG_PM
	.driver =	{
		.pm =	&usb_hcd_pci_pm_ops
	},
#endif
};

static int __init ehci_pci_init(void)
{
	if (usb_disabled())
		return -ENODEV;

	pr_info("%s: " DRIVER_DESC "\n", hcd_name);

	ehci_init_driver(&ehci_pci_hc_driver, &pci_overrides);

	/* Entries for the PCI suspend/resume callbacks are special */
	ehci_pci_hc_driver.pci_suspend = ehci_suspend;
	ehci_pci_hc_driver.pci_resume = ehci_pci_resume;

	return pci_register_driver(&ehci_pci_driver);
}
module_init(ehci_pci_init);

static void __exit ehci_pci_cleanup(void)
{
	pci_unregister_driver(&ehci_pci_driver);
}
module_exit(ehci_pci_cleanup);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_AUTHOR("Alan Stern");
MODULE_LICENSE("GPL");
gpl-2.0
garwynn/SC02E_MA6_Kernel
arch/arm/mach-ixp4xx/coyote-setup.c
2474
3218
/*
 * arch/arm/mach-ixp4xx/coyote-setup.c
 *
 * Board setup for ADI Engineering and IXDGP425 boards
 *
 * Copyright (C) 2003-2005 MontaVista Software, Inc.
 *
 * Author: Deepak Saxena <dsaxena@plexity.net>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>

#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>

/* CompactFlash/IDE window on expansion-bus chip select 3 */
#define COYOTE_IDE_BASE_PHYS	IXP4XX_EXP_BUS_BASE(3)
#define COYOTE_IDE_BASE_VIRT	0xFFFE1000
#define COYOTE_IDE_REGION_SIZE	0x1000

#define COYOTE_IDE_DATA_PORT	0xFFFE10E0
#define COYOTE_IDE_CTRL_PORT	0xFFFE10FC
#define COYOTE_IDE_ERROR_PORT	0xFFFE10E2
#define IRQ_COYOTE_IDE		IRQ_IXP4XX_GPIO5

/* 16-bit CFI NOR flash probed by the IXP4XX-Flash platform driver */
static struct flash_platform_data coyote_flash_data = {
	.map_name	= "cfi_probe",
	.width		= 2,
};

/* start/end filled in at runtime by coyote_init() */
static struct resource coyote_flash_resource = {
	.flags		= IORESOURCE_MEM,
};

static struct platform_device coyote_flash = {
	.name		= "IXP4XX-Flash",
	.id		= 0,
	.dev		= {
		.platform_data = &coyote_flash_data,
	},
	.num_resources	= 1,
	.resource	= &coyote_flash_resource,
};

static struct resource coyote_uart_resource = {
	.start	= IXP4XX_UART2_BASE_PHYS,
	.end	= IXP4XX_UART2_BASE_PHYS + 0x0fff,
	.flags	= IORESOURCE_MEM,
};

/*
 * Default console UART is UART2 (the Coyote wiring); coyote_init()
 * repoints this entry at UART1 when running on an IXDPG425.
 */
static struct plat_serial8250_port coyote_uart_data[] = {
	{
		.mapbase	= IXP4XX_UART2_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART2,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	},
	{ },	/* terminator */
};

static struct platform_device coyote_uart = {
	.name		= "serial8250",
	.id		= PLAT8250_DEV_PLATFORM,
	.dev			= {
		.platform_data	= coyote_uart_data,
	},
	.num_resources	= 1,
	.resource	= &coyote_uart_resource,
};

static struct platform_device *coyote_devices[] __initdata = {
	&coyote_flash,
	&coyote_uart
};

/*
 * Board init: locate the flash window, make CS0 writable, fix up the
 * console UART for IXDPG425, and register the platform devices.
 */
static void __init coyote_init(void)
{
	ixp4xx_sys_init();

	/* 32MB flash window on expansion-bus chip select 0 */
	coyote_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
	coyote_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_32M - 1;

	/* enable writes on CS0 and mirror its timing config onto CS1 */
	*IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE;
	*IXP4XX_EXP_CS1 = *IXP4XX_EXP_CS0;

	/* IXDPG425 has the console on UART1 instead of UART2 */
	if (machine_is_ixdpg425()) {
		coyote_uart_data[0].membase =
			(char*)(IXP4XX_UART1_BASE_VIRT + REG_OFFSET);
		coyote_uart_data[0].mapbase = IXP4XX_UART1_BASE_PHYS;
		coyote_uart_data[0].irq = IRQ_IXP4XX_UART1;
	}

	platform_add_devices(coyote_devices, ARRAY_SIZE(coyote_devices));
}

#ifdef CONFIG_ARCH_ADI_COYOTE
MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote")
	/* Maintainer: MontaVista Software, Inc. */
	.map_io		= ixp4xx_map_io,
	.init_irq	= ixp4xx_init_irq,
	.timer		= &ixp4xx_timer,
	.boot_params	= 0x0100,
	.init_machine	= coyote_init,
MACHINE_END
#endif

/*
 * IXDPG425 is identical to Coyote except for which serial port
 * is connected.
 */
#ifdef CONFIG_MACH_IXDPG425
MACHINE_START(IXDPG425, "Intel IXDPG425")
	/* Maintainer: MontaVista Software, Inc. */
	.map_io		= ixp4xx_map_io,
	.init_irq	= ixp4xx_init_irq,
	.timer		= &ixp4xx_timer,
	.boot_params	= 0x0100,
	.init_machine	= coyote_init,
MACHINE_END
#endif
gpl-2.0
dtuchsch/rpi-linux-preempt_rt
drivers/hid/hid-zydacron.c
2474
4233
/*
 * HID driver for zydacron remote control
 *
 * Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk>
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

/* Per-device state attached via hid_set_drvdata(). */
struct zc_device {
	struct input_dev *input_ep81;	/* input device backing endpoint 81 */
	unsigned short last_key[4];	/* currently-pressed key per slot, 0 = none */
};

/*
 * Zydacron remote control has an invalid HID report descriptor,
 * that needs fixing before we can parse it.
 */
static __u8 *zc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
	unsigned int *rsize)
{
	/*
	 * Patch three bogus usage-page entries (0xffbc, vendor-defined)
	 * to the Consumer page (0x000c) at fixed descriptor offsets;
	 * only applied if all six bytes match the broken descriptor.
	 */
	if (*rsize >= 253 &&
		rdesc[0x96] == 0xbc && rdesc[0x97] == 0xff &&
		rdesc[0xca] == 0xbc && rdesc[0xcb] == 0xff &&
		rdesc[0xe1] == 0xbc && rdesc[0xe2] == 0xff) {
			hid_info(hdev,
				"fixing up zydacron remote control report descriptor\n");
			rdesc[0x96] = rdesc[0xca] = rdesc[0xe1] = 0x0c;
			rdesc[0x97] = rdesc[0xcb] = rdesc[0xe2] = 0x00;
	}
	return rdesc;
}

#define zc_map_key_clear(c) \
	hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))

/*
 * Map vendor consumer-page usages to input key codes.
 * Returns 1 when the usage was consumed here, 0 to fall back to
 * the generic HID mapping.
 */
static int zc_input_mapping(struct hid_device *hdev, struct hid_input *hi,
	struct hid_field *field, struct hid_usage *usage,
	unsigned long **bit, int *max)
{
	int i;
	struct zc_device *zc = hid_get_drvdata(hdev);
	zc->input_ep81 = hi->input;

	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
		return 0;

	/* NOTE: "zynacron" typo is in the original debug string */
	dbg_hid("zynacron input mapping event [0x%x]\n",
		usage->hid & HID_USAGE);

	switch (usage->hid & HID_USAGE) {
	/* report 2 */
	case 0x10:
		zc_map_key_clear(KEY_MODE);
		break;
	case 0x30:
		zc_map_key_clear(KEY_SCREEN);
		break;
	case 0x70:
		zc_map_key_clear(KEY_INFO);
		break;
	/* report 3 */
	case 0x04:
		zc_map_key_clear(KEY_RADIO);
		break;
	/* report 4 */
	case 0x0d:
		zc_map_key_clear(KEY_PVR);
		break;
	case 0x25:
		zc_map_key_clear(KEY_TV);
		break;
	case 0x47:
		zc_map_key_clear(KEY_AUDIO);
		break;
	case 0x49:
		zc_map_key_clear(KEY_AUX);
		break;
	case 0x4a:
		zc_map_key_clear(KEY_VIDEO);
		break;
	case 0x48:
		zc_map_key_clear(KEY_DVD);
		break;
	case 0x24:
		zc_map_key_clear(KEY_MENU);
		break;
	case 0x32:
		zc_map_key_clear(KEY_TEXT);
		break;
	default:
		return 0;
	}

	/* forget any keys believed pressed under the old mapping */
	for (i = 0; i < 4; i++)
		zc->last_key[i] = 0;

	return 1;
}

/*
 * Translate raw reports 2/3 into key press/release events.
 * Each incoming report first releases all tracked keys, then
 * presses the key (if any) encoded in data[1].
 * Returns 1 when the report was handled here, 0 otherwise.
 */
static int zc_raw_event(struct hid_device *hdev, struct hid_report *report,
	 u8 *data, int size)
{
	struct zc_device *zc = hid_get_drvdata(hdev);
	int ret = 0;
	unsigned key;
	unsigned short index;

	if (report->id == data[0]) {

		/* break keys */
		for (index = 0; index < 4; index++) {
			key = zc->last_key[index];
			if (key) {
				input_event(zc->input_ep81, EV_KEY, key, 0);
				zc->last_key[index] = 0;
			}
		}

		key = 0;
		switch (report->id) {
		case 0x02:
		case 0x03:
			/* data[1] selects which key slot this report presses */
			switch (data[1]) {
			case 0x10:
				key = KEY_MODE;
				index = 0;
				break;
			case 0x30:
				key = KEY_SCREEN;
				index = 1;
				break;
			case 0x70:
				key = KEY_INFO;
				index = 2;
				break;
			case 0x04:
				key = KEY_RADIO;
				index = 3;
				break;
			}

			if (key) {
				input_event(zc->input_ep81, EV_KEY, key, 1);
				zc->last_key[index] = key;
			}

			ret = 1;
			break;
		}
	}

	return ret;
}

/*
 * Probe: allocate per-device state (devm-managed, freed automatically),
 * then run the standard parse + hw-start sequence.
 */
static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;
	struct zc_device *zc;

	zc = devm_kzalloc(&hdev->dev, sizeof(*zc), GFP_KERNEL);
	if (zc == NULL) {
		hid_err(hdev, "can't alloc descriptor\n");
		return -ENOMEM;
	}

	hid_set_drvdata(hdev, zc);

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		return ret;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		return ret;
	}

	return 0;
}

static const struct hid_device_id zc_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
	{ }
};
MODULE_DEVICE_TABLE(hid, zc_devices);

static struct hid_driver zc_driver = {
	.name = "zydacron",
	.id_table = zc_devices,
	.report_fixup = zc_report_fixup,
	.input_mapping = zc_input_mapping,
	.raw_event = zc_raw_event,
	.probe = zc_probe,
};
module_hid_driver(zc_driver);

MODULE_LICENSE("GPL");
gpl-2.0
gimmeitorilltell/slim_kernel_samsung_msm8660
drivers/gpio/max730x.c
2730
6272
/**
 * drivers/gpio/max7301.c
 *
 * Copyright (C) 2006 Juergen Beisert, Pengutronix
 * Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix
 * Copyright (C) 2009 Wolfram Sang, Pengutronix
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * The Maxim MAX7300/1 device is an I2C/SPI driven GPIO expander. There are
 * 28 GPIOs. 8 of them can trigger an interrupt. See datasheet for more
 * details
 * Note:
 * - DIN must be stable at the rising edge of clock.
 * - when writing:
 *   - always clock in 16 clocks at once
 *   - at DIN: D15 first, D0 last
 *   - D0..D7 = databyte, D8..D14 = commandbyte
 *   - D15 = low -> write command
 * - when reading
 *   - always clock in 16 clocks at once
 *   - at DIN: D15 first, D0 last
 *   - D0..D7 = dummy, D8..D14 = register address
 *   - D15 = high -> read command
 *   - raise CS and assert it again
 *   - always clock in 16 clocks at once
 *   - at DOUT: D15 first, D0 last
 *   - D0..D7 contains the data from the first cycle
 *
 * The driver exports a standard gpiochip interface
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/spi/max7301.h>
#include <linux/gpio.h>
#include <linux/slab.h>

/*
 * Pin configurations, see MAX7301 datasheet page 6.
 * Each port-config register holds four pins, two bits per pin.
 */
#define PIN_CONFIG_MASK 0x03
#define PIN_CONFIG_IN_PULLUP 0x03
#define PIN_CONFIG_IN_WO_PULLUP 0x02
#define PIN_CONFIG_OUT 0x01

#define PIN_NUMBER 28

/*
 * gpio_chip .direction_input callback: configure a pin as input,
 * selecting pullup vs. no-pullup from the cached platform-data mask,
 * and write the updated port-config register (0x08 + register index).
 * Returns the bus-write result (0 on success).
 */
static int max7301_direction_input(struct gpio_chip *chip, unsigned offset)
{
	struct max7301 *ts = container_of(chip, struct max7301, chip);
	u8 *config;
	u8 offset_bits, pin_config;
	int ret;

	/* First 4 pins are unused in the controller */
	offset += 4;
	/* two config bits per pin, four pins per register */
	offset_bits = (offset & 3) << 1;

	config = &ts->port_config[offset >> 2];

	if (ts->input_pullup_active & BIT(offset))
		pin_config = PIN_CONFIG_IN_PULLUP;
	else
		pin_config = PIN_CONFIG_IN_WO_PULLUP;

	mutex_lock(&ts->lock);

	/* update only this pin's two bits in the cached register value */
	*config = (*config & ~(PIN_CONFIG_MASK << offset_bits))
			   | (pin_config << offset_bits);

	ret = ts->write(ts->dev, 0x08 + (offset >> 2), *config);

	mutex_unlock(&ts->lock);

	return ret;
}

/*
 * Set the output level of one pin (register 0x20 + offset) and cache it
 * in ts->out_level.  Callers in this file hold ts->lock around this.
 */
static int __max7301_set(struct max7301 *ts, unsigned offset, int value)
{
	if (value) {
		ts->out_level |= 1 << offset;
		return ts->write(ts->dev, 0x20 + offset, 0x01);
	} else {
		ts->out_level &= ~(1 << offset);
		return ts->write(ts->dev, 0x20 + offset, 0x00);
	}
}

/*
 * gpio_chip .direction_output callback: set the level first, then switch
 * the pin's config bits to output, so the pin comes up at the requested
 * level.  Returns 0 on success or the first failing bus-write result.
 */
static int max7301_direction_output(struct gpio_chip *chip, unsigned offset,
				    int value)
{
	struct max7301 *ts = container_of(chip, struct max7301, chip);
	u8 *config;
	u8 offset_bits;
	int ret;

	/* First 4 pins are unused in the controller */
	offset += 4;
	offset_bits = (offset & 3) << 1;

	config = &ts->port_config[offset >> 2];

	mutex_lock(&ts->lock);

	*config = (*config & ~(PIN_CONFIG_MASK << offset_bits))
			   | (PIN_CONFIG_OUT << offset_bits);

	ret = __max7301_set(ts, offset, value);

	if (!ret)
		ret = ts->write(ts->dev, 0x08 + (offset >> 2), *config);

	mutex_unlock(&ts->lock);

	return ret;
}

/*
 * gpio_chip .get callback: outputs return the cached level, inputs are
 * read from the chip.  Returns 0/1, or -EINVAL if the pin's cached
 * config matches neither input nor output.
 */
static int max7301_get(struct gpio_chip *chip, unsigned offset)
{
	struct max7301 *ts = container_of(chip, struct max7301, chip);
	int config, level = -EINVAL;

	/* First 4 pins are unused in the controller */
	offset += 4;

	mutex_lock(&ts->lock);

	config = (ts->port_config[offset >> 2] >> ((offset & 3) << 1))
			& PIN_CONFIG_MASK;

	switch (config) {
	case PIN_CONFIG_OUT:
		/* Output: return cached level */
		level =  !!(ts->out_level & (1 << offset));
		break;
	case PIN_CONFIG_IN_WO_PULLUP:
	case PIN_CONFIG_IN_PULLUP:
		/* Input: read out */
		level = ts->read(ts->dev, 0x20 + offset) & 0x01;
	}
	mutex_unlock(&ts->lock);

	return level;
}

/* gpio_chip .set callback: locked wrapper around __max7301_set(). */
static void max7301_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct max7301 *ts = container_of(chip, struct max7301, chip);

	/* First 4 pins are unused in the controller */
	offset += 4;

	mutex_lock(&ts->lock);

	__max7301_set(ts, offset, value);

	mutex_unlock(&ts->lock);
}

/*
 * Shared probe helper for the MAX7300 (I2C) and MAX7301 (SPI) front-end
 * drivers.  The caller must have filled in ts->dev, ts->read and
 * ts->write.  Powers up the chip, configures all pins as inputs per
 * platform data, and registers the gpio_chip.  Returns 0 on success or
 * a negative errno.
 */
int __devinit __max730x_probe(struct max7301 *ts)
{
	struct device *dev = ts->dev;
	struct max7301_platform_data *pdata;
	int i, ret;

	pdata = dev->platform_data;
	if (!pdata || !pdata->base) {
		dev_err(dev, "incorrect or missing platform data\n");
		return -EINVAL;
	}

	mutex_init(&ts->lock);
	dev_set_drvdata(dev, ts);

	/* Power up the chip and disable IRQ output */
	ts->write(dev, 0x04, 0x01);

	ts->input_pullup_active = pdata->input_pullup_active;
	ts->chip.label = dev->driver->name;

	ts->chip.direction_input = max7301_direction_input;
	ts->chip.get = max7301_get;
	ts->chip.direction_output = max7301_direction_output;
	ts->chip.set = max7301_set;

	ts->chip.base = pdata->base;
	ts->chip.ngpio = PIN_NUMBER;
	ts->chip.can_sleep = 1;	/* bus accesses may sleep (I2C/SPI) */
	ts->chip.dev = dev;
	ts->chip.owner = THIS_MODULE;

	/*
	 * initialize pullups according to platform data and cache the
	 * register values for later use.
	 */
	for (i = 1; i < 8; i++) {
		int j;
		/*
		 * initialize port_config with "0xAA", which means
		 * input with internal pullup disabled. This is needed
		 * to avoid writing zeros (in the inner for loop),
		 * which is not allowed according to the datasheet.
		 */
		ts->port_config[i] = 0xAA;
		for (j = 0; j < 4; j++) {
			int offset = (i - 1) * 4 + j;
			ret = max7301_direction_input(&ts->chip, offset);
			if (ret)
				goto exit_destroy;
		}
	}

	ret = gpiochip_add(&ts->chip);
	if (ret)
		goto exit_destroy;

	return ret;

exit_destroy:
	dev_set_drvdata(dev, NULL);
	mutex_destroy(&ts->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__max730x_probe);

/*
 * Shared remove helper: power the chip down, unregister the gpio_chip
 * and free the state allocated by the front-end driver.  Returns 0 on
 * success or a negative errno.
 */
int __devexit __max730x_remove(struct device *dev)
{
	struct max7301 *ts = dev_get_drvdata(dev);
	int ret;

	if (ts == NULL)
		return -ENODEV;

	dev_set_drvdata(dev, NULL);

	/* Power down the chip and disable IRQ output */
	ts->write(dev, 0x04, 0x00);

	ret = gpiochip_remove(&ts->chip);
	if (!ret) {
		mutex_destroy(&ts->lock);
		kfree(ts);
	} else
		dev_err(dev, "Failed to remove GPIO controller: %d\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(__max730x_remove);

MODULE_AUTHOR("Juergen Beisert, Wolfram Sang");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MAX730x GPIO-Expanders, generic parts");
gpl-2.0
sachinthomaspj/android_kernel_htc_pico
arch/mn10300/kernel/gdb-stub.c
2730
45632
/* MN10300 GDB stub * * Originally written by Glenn Engel, Lake Stevens Instrument Division * * Contributed by HP Systems * * Modified for SPARC by Stu Grossman, Cygnus Support. * * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de> * * Copyright (C) 1995 Andreas Busse * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Modified for Linux/mn10300 by David Howells <dhowells@redhat.com> */ /* * To enable debugger support, two things need to happen. One, a * call to set_debug_traps() is necessary in order to allow any breakpoints * or error conditions to be properly intercepted and reported to gdb. * Two, a breakpoint needs to be generated to begin communication. This * is most easily accomplished by a call to breakpoint(). Breakpoint() * simulates a breakpoint by executing a BREAK instruction. * * * The following gdb commands are supported: * * command function Return value * * g return the value of the CPU registers hex data or ENN * G set the value of the CPU registers OK or ENN * * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN * * c Resume at current address SNN ( signal NN) * cAA..AA Continue at address AA..AA SNN * * s Step one instruction SNN * sAA..AA Step one instruction from AA..AA SNN * * k kill * * ? What was the last sigval ? SNN (signal NN) * * bBB..BB Set baud rate to BB..BB OK or BNN, then sets * baud rate * * All commands and responses are sent with a packet which includes a * checksum. A packet consists of * * $<packet info>#<checksum>. * * where * <packet info> :: <characters representing the command or response> * <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>> * * When a packet is received, it is first acknowledged with either '+' or '-'. * '+' indicates a successful transfer. '-' indicates a failed transfer. 
* * Example: * * Host: Reply: * $m0,10#2a +$00010203040506070809101112131415#42 * * * ============== * MORE EXAMPLES: * ============== * * For reference -- the following are the steps that one * company took (RidgeRun Inc) to get remote gdb debugging * going. In this scenario the host machine was a PC and the * target platform was a Galileo EVB64120A MIPS evaluation * board. * * Step 1: * First download gdb-5.0.tar.gz from the internet. * and then build/install the package. * * Example: * $ tar zxf gdb-5.0.tar.gz * $ cd gdb-5.0 * $ ./configure --target=am33_2.0-linux-gnu * $ make * $ install * am33_2.0-linux-gnu-gdb * * Step 2: * Configure linux for remote debugging and build it. * * Example: * $ cd ~/linux * $ make menuconfig <go to "Kernel Hacking" and turn on remote debugging> * $ make dep; make vmlinux * * Step 3: * Download the kernel to the remote target and start * the kernel running. It will promptly halt and wait * for the host gdb session to connect. It does this * since the "Kernel Hacking" option has defined * CONFIG_REMOTE_DEBUG which in turn enables your calls * to: * set_debug_traps(); * breakpoint(); * * Step 4: * Start the gdb session on the host. * * Example: * $ am33_2.0-linux-gnu-gdb vmlinux * (gdb) set remotebaud 115200 * (gdb) target remote /dev/ttyS1 * ...at this point you are connected to * the remote target and can use gdb * in the normal fasion. Setting * breakpoints, single stepping, * printing variables, etc. 
* */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/console.h> #include <linux/init.h> #include <linux/bug.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/gdb-stub.h> #include <asm/exceptions.h> #include <asm/debugger.h> #include <asm/serial-regs.h> #include <asm/busctl-regs.h> #include <unit/leds.h> #include <unit/serial.h> /* define to use F7F7 rather than FF which is subverted by JTAG debugger */ #undef GDBSTUB_USE_F7F7_AS_BREAKPOINT /* * BUFMAX defines the maximum number of characters in inbound/outbound buffers * at least NUMREGBYTES*2 are needed for register packets */ #define BUFMAX 2048 static const char gdbstub_banner[] = "Linux/MN10300 GDB Stub (c) RedHat 2007\n"; u8 gdbstub_rx_buffer[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); u32 gdbstub_rx_inp; u32 gdbstub_rx_outp; u8 gdbstub_busy; u8 gdbstub_rx_overflow; u8 gdbstub_rx_unget; static u8 gdbstub_flush_caches; static char input_buffer[BUFMAX]; static char output_buffer[BUFMAX]; static char trans_buffer[BUFMAX]; struct gdbstub_bkpt { u8 *addr; /* address of breakpoint */ u8 len; /* size of breakpoint */ u8 origbytes[7]; /* original bytes */ }; static struct gdbstub_bkpt gdbstub_bkpts[256]; /* * local prototypes */ static void getpacket(char *buffer); static int putpacket(char *buffer); static int computeSignal(enum exception_code excep); static int hex(unsigned char ch); static int hexToInt(char **ptr, int *intValue); static unsigned char *mem2hex(const void *mem, char *buf, int count, int may_fault); static const char *hex2mem(const char *buf, void *_mem, int count, int may_fault); /* * Convert ch from a hex digit to an int */ static int hex(unsigned char ch) { if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10; if (ch >= '0' && ch <= '9') return ch - '0'; if (ch >= 'A' && ch <= 'F') return ch - 'A' + 10; return -1; } #ifdef CONFIG_GDBSTUB_DEBUGGING void debug_to_serial(const char *p, int n) { 
__debug_to_serial(p, n); /* gdbstub_console_write(NULL, p, n); */ } void gdbstub_printk(const char *fmt, ...) { va_list args; int len; /* Emit the output into the temporary buffer */ va_start(args, fmt); len = vsnprintf(trans_buffer, sizeof(trans_buffer), fmt, args); va_end(args); debug_to_serial(trans_buffer, len); } #endif static inline char *gdbstub_strcpy(char *dst, const char *src) { int loop = 0; while ((dst[loop] = src[loop])) loop++; return dst; } /* * scan for the sequence $<data>#<checksum> */ static void getpacket(char *buffer) { unsigned char checksum; unsigned char xmitcsum; unsigned char ch; int count, i, ret, error; for (;;) { /* * wait around for the start character, * ignore all other characters */ do { gdbstub_io_rx_char(&ch, 0); } while (ch != '$'); checksum = 0; xmitcsum = -1; count = 0; error = 0; /* * now, read until a # or end of buffer is found */ while (count < BUFMAX) { ret = gdbstub_io_rx_char(&ch, 0); if (ret < 0) error = ret; if (ch == '#') break; checksum += ch; buffer[count] = ch; count++; } if (error == -EIO) { gdbstub_proto("### GDB Rx Error - Skipping packet" " ###\n"); gdbstub_proto("### GDB Tx NAK\n"); gdbstub_io_tx_char('-'); continue; } if (count >= BUFMAX || error) continue; buffer[count] = 0; /* read the checksum */ ret = gdbstub_io_rx_char(&ch, 0); if (ret < 0) error = ret; xmitcsum = hex(ch) << 4; ret = gdbstub_io_rx_char(&ch, 0); if (ret < 0) error = ret; xmitcsum |= hex(ch); if (error) { if (error == -EIO) gdbstub_io("### GDB Rx Error -" " Skipping packet\n"); gdbstub_io("### GDB Tx NAK\n"); gdbstub_io_tx_char('-'); continue; } /* check the checksum */ if (checksum != xmitcsum) { gdbstub_io("### GDB Tx NAK\n"); gdbstub_io_tx_char('-'); /* failed checksum */ continue; } gdbstub_proto("### GDB Rx '$%s#%02x' ###\n", buffer, checksum); gdbstub_io("### GDB Tx ACK\n"); gdbstub_io_tx_char('+'); /* successful transfer */ /* * if a sequence char is present, * reply the sequence ID */ if (buffer[2] == ':') { 
gdbstub_io_tx_char(buffer[0]); gdbstub_io_tx_char(buffer[1]); /* * remove sequence chars from buffer */ count = 0; while (buffer[count]) count++; for (i = 3; i <= count; i++) buffer[i - 3] = buffer[i]; } break; } } /* * send the packet in buffer. * - return 0 if successfully ACK'd * - return 1 if abandoned due to new incoming packet */ static int putpacket(char *buffer) { unsigned char checksum; unsigned char ch; int count; /* * $<packet info>#<checksum>. */ gdbstub_proto("### GDB Tx $'%s'#?? ###\n", buffer); do { gdbstub_io_tx_char('$'); checksum = 0; count = 0; while ((ch = buffer[count]) != 0) { gdbstub_io_tx_char(ch); checksum += ch; count += 1; } gdbstub_io_tx_char('#'); gdbstub_io_tx_char(hex_asc_hi(checksum)); gdbstub_io_tx_char(hex_asc_lo(checksum)); } while (gdbstub_io_rx_char(&ch, 0), ch == '-' && (gdbstub_io("### GDB Rx NAK\n"), 0), ch != '-' && ch != '+' && (gdbstub_io("### GDB Rx ??? %02x\n", ch), 0), ch != '+' && ch != '$'); if (ch == '+') { gdbstub_io("### GDB Rx ACK\n"); return 0; } gdbstub_io("### GDB Tx Abandoned\n"); gdbstub_rx_unget = ch; return 1; } /* * While we find nice hex chars, build an int. * Return number of chars processed. */ static int hexToInt(char **ptr, int *intValue) { int numChars = 0; int hexValue; *intValue = 0; while (**ptr) { hexValue = hex(**ptr); if (hexValue < 0) break; *intValue = (*intValue << 4) | hexValue; numChars++; (*ptr)++; } return (numChars); } #ifdef CONFIG_GDBSTUB_ALLOW_SINGLE_STEP /* * We single-step by setting breakpoints. When an exception * is handled, we need to restore the instructions hoisted * when the breakpoints were set. * * This is where we save the original instructions. 
*/ static struct gdb_bp_save { u8 *addr; u8 opcode[2]; } step_bp[2]; static const unsigned char gdbstub_insn_sizes[256] = { /* 1 2 3 4 5 6 7 8 9 a b c d e f */ 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, /* 0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 1 */ 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, /* 2 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, /* 3 */ 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, /* 4 */ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, /* 5 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 7 */ 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 8 */ 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 9 */ 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* a */ 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* b */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2, /* c */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */ 0, 2, 2, 2, 2, 2, 2, 4, 0, 3, 0, 4, 0, 6, 7, 1 /* f */ }; static int __gdbstub_mark_bp(u8 *addr, int ix) { /* vmalloc area */ if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END)) goto okay; /* SRAM, SDRAM */ if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL)) goto okay; return 0; okay: if (gdbstub_read_byte(addr + 0, &step_bp[ix].opcode[0]) < 0 || gdbstub_read_byte(addr + 1, &step_bp[ix].opcode[1]) < 0) return 0; step_bp[ix].addr = addr; return 1; } static inline void __gdbstub_restore_bp(void) { #ifdef GDBSTUB_USE_F7F7_AS_BREAKPOINT if (step_bp[0].addr) { gdbstub_write_byte(step_bp[0].opcode[0], step_bp[0].addr + 0); gdbstub_write_byte(step_bp[0].opcode[1], step_bp[0].addr + 1); } if (step_bp[1].addr) { gdbstub_write_byte(step_bp[1].opcode[0], step_bp[1].addr + 0); gdbstub_write_byte(step_bp[1].opcode[1], step_bp[1].addr + 1); } #else if (step_bp[0].addr) gdbstub_write_byte(step_bp[0].opcode[0], step_bp[0].addr + 0); if (step_bp[1].addr) 
gdbstub_write_byte(step_bp[1].opcode[0], step_bp[1].addr + 0); #endif gdbstub_flush_caches = 1; step_bp[0].addr = NULL; step_bp[0].opcode[0] = 0; step_bp[0].opcode[1] = 0; step_bp[1].addr = NULL; step_bp[1].opcode[0] = 0; step_bp[1].opcode[1] = 0; } /* * emulate single stepping by means of breakpoint instructions */ static int gdbstub_single_step(struct pt_regs *regs) { unsigned size; uint32_t x; uint8_t cur, *pc, *sp; step_bp[0].addr = NULL; step_bp[0].opcode[0] = 0; step_bp[0].opcode[1] = 0; step_bp[1].addr = NULL; step_bp[1].opcode[0] = 0; step_bp[1].opcode[1] = 0; x = 0; pc = (u8 *) regs->pc; sp = (u8 *) (regs + 1); if (gdbstub_read_byte(pc, &cur) < 0) return -EFAULT; gdbstub_bkpt("Single Step from %p { %02x }\n", pc, cur); gdbstub_flush_caches = 1; size = gdbstub_insn_sizes[cur]; if (size > 0) { if (!__gdbstub_mark_bp(pc + size, 0)) goto fault; } else { switch (cur) { /* Bxx (d8,PC) */ case 0xc0 ... 0xca: if (gdbstub_read_byte(pc + 1, (u8 *) &x) < 0) goto fault; if (!__gdbstub_mark_bp(pc + 2, 0)) goto fault; if ((x < 0 || x > 2) && !__gdbstub_mark_bp(pc + (s8) x, 1)) goto fault; break; /* LXX (d8,PC) */ case 0xd0 ... 
0xda: if (!__gdbstub_mark_bp(pc + 1, 0)) goto fault; if (regs->pc != regs->lar && !__gdbstub_mark_bp((u8 *) regs->lar, 1)) goto fault; break; /* SETLB - loads the next for bytes into the LIR * register */ case 0xdb: if (!__gdbstub_mark_bp(pc + 1, 0)) goto fault; break; /* JMP (d16,PC) or CALL (d16,PC) */ case 0xcc: case 0xcd: if (gdbstub_read_byte(pc + 1, ((u8 *) &x) + 0) < 0 || gdbstub_read_byte(pc + 2, ((u8 *) &x) + 1) < 0) goto fault; if (!__gdbstub_mark_bp(pc + (s16) x, 0)) goto fault; break; /* JMP (d32,PC) or CALL (d32,PC) */ case 0xdc: case 0xdd: if (gdbstub_read_byte(pc + 1, ((u8 *) &x) + 0) < 0 || gdbstub_read_byte(pc + 2, ((u8 *) &x) + 1) < 0 || gdbstub_read_byte(pc + 3, ((u8 *) &x) + 2) < 0 || gdbstub_read_byte(pc + 4, ((u8 *) &x) + 3) < 0) goto fault; if (!__gdbstub_mark_bp(pc + (s32) x, 0)) goto fault; break; /* RETF */ case 0xde: if (!__gdbstub_mark_bp((u8 *) regs->mdr, 0)) goto fault; break; /* RET */ case 0xdf: if (gdbstub_read_byte(pc + 2, (u8 *) &x) < 0) goto fault; sp += (s8)x; if (gdbstub_read_byte(sp + 0, ((u8 *) &x) + 0) < 0 || gdbstub_read_byte(sp + 1, ((u8 *) &x) + 1) < 0 || gdbstub_read_byte(sp + 2, ((u8 *) &x) + 2) < 0 || gdbstub_read_byte(sp + 3, ((u8 *) &x) + 3) < 0) goto fault; if (!__gdbstub_mark_bp((u8 *) x, 0)) goto fault; break; case 0xf0: if (gdbstub_read_byte(pc + 1, &cur) < 0) goto fault; if (cur >= 0xf0 && cur <= 0xf7) { /* JMP (An) / CALLS (An) */ switch (cur & 3) { case 0: x = regs->a0; break; case 1: x = regs->a1; break; case 2: x = regs->a2; break; case 3: x = regs->a3; break; } if (!__gdbstub_mark_bp((u8 *) x, 0)) goto fault; } else if (cur == 0xfc) { /* RETS */ if (gdbstub_read_byte( sp + 0, ((u8 *) &x) + 0) < 0 || gdbstub_read_byte( sp + 1, ((u8 *) &x) + 1) < 0 || gdbstub_read_byte( sp + 2, ((u8 *) &x) + 2) < 0 || gdbstub_read_byte( sp + 3, ((u8 *) &x) + 3) < 0) goto fault; if (!__gdbstub_mark_bp((u8 *) x, 0)) goto fault; } else if (cur == 0xfd) { /* RTI */ if (gdbstub_read_byte( sp + 4, ((u8 *) &x) + 0) < 0 || 
gdbstub_read_byte( sp + 5, ((u8 *) &x) + 1) < 0 || gdbstub_read_byte( sp + 6, ((u8 *) &x) + 2) < 0 || gdbstub_read_byte( sp + 7, ((u8 *) &x) + 3) < 0) goto fault; if (!__gdbstub_mark_bp((u8 *) x, 0)) goto fault; } else { if (!__gdbstub_mark_bp(pc + 2, 0)) goto fault; } break; /* potential 3-byte conditional branches */ case 0xf8: if (gdbstub_read_byte(pc + 1, &cur) < 0) goto fault; if (!__gdbstub_mark_bp(pc + 3, 0)) goto fault; if (cur >= 0xe8 && cur <= 0xeb) { if (gdbstub_read_byte( pc + 2, ((u8 *) &x) + 0) < 0) goto fault; if ((x < 0 || x > 3) && !__gdbstub_mark_bp(pc + (s8) x, 1)) goto fault; } break; case 0xfa: if (gdbstub_read_byte(pc + 1, &cur) < 0) goto fault; if (cur == 0xff) { /* CALLS (d16,PC) */ if (gdbstub_read_byte( pc + 2, ((u8 *) &x) + 0) < 0 || gdbstub_read_byte( pc + 3, ((u8 *) &x) + 1) < 0) goto fault; if (!__gdbstub_mark_bp(pc + (s16) x, 0)) goto fault; } else { if (!__gdbstub_mark_bp(pc + 4, 0)) goto fault; } break; case 0xfc: if (gdbstub_read_byte(pc + 1, &cur) < 0) goto fault; if (cur == 0xff) { /* CALLS (d32,PC) */ if (gdbstub_read_byte( pc + 2, ((u8 *) &x) + 0) < 0 || gdbstub_read_byte( pc + 3, ((u8 *) &x) + 1) < 0 || gdbstub_read_byte( pc + 4, ((u8 *) &x) + 2) < 0 || gdbstub_read_byte( pc + 5, ((u8 *) &x) + 3) < 0) goto fault; if (!__gdbstub_mark_bp( pc + (s32) x, 0)) goto fault; } else { if (!__gdbstub_mark_bp( pc + 6, 0)) goto fault; } break; } } gdbstub_bkpt("Step: %02x at %p; %02x at %p\n", step_bp[0].opcode[0], step_bp[0].addr, step_bp[1].opcode[0], step_bp[1].addr); if (step_bp[0].addr) { #ifdef GDBSTUB_USE_F7F7_AS_BREAKPOINT if (gdbstub_write_byte(0xF7, step_bp[0].addr + 0) < 0 || gdbstub_write_byte(0xF7, step_bp[0].addr + 1) < 0) goto fault; #else if (gdbstub_write_byte(0xFF, step_bp[0].addr + 0) < 0) goto fault; #endif } if (step_bp[1].addr) { #ifdef GDBSTUB_USE_F7F7_AS_BREAKPOINT if (gdbstub_write_byte(0xF7, step_bp[1].addr + 0) < 0 || gdbstub_write_byte(0xF7, step_bp[1].addr + 1) < 0) goto fault; #else if 
(gdbstub_write_byte(0xFF, step_bp[1].addr + 0) < 0) goto fault; #endif } return 0; fault: /* uh-oh - silly address alert, try and restore things */ __gdbstub_restore_bp(); return -EFAULT; } #endif /* CONFIG_GDBSTUB_ALLOW_SINGLE_STEP */ #ifdef CONFIG_GDBSTUB_CONSOLE void gdbstub_console_write(struct console *con, const char *p, unsigned n) { static const char gdbstub_cr[] = { 0x0d }; char outbuf[26]; int qty; u8 busy; busy = gdbstub_busy; gdbstub_busy = 1; outbuf[0] = 'O'; while (n > 0) { qty = 1; while (n > 0 && qty < 20) { mem2hex(p, outbuf + qty, 2, 0); qty += 2; if (*p == 0x0a) { mem2hex(gdbstub_cr, outbuf + qty, 2, 0); qty += 2; } p++; n--; } outbuf[qty] = 0; putpacket(outbuf); } gdbstub_busy = busy; } static kdev_t gdbstub_console_dev(struct console *con) { return MKDEV(1, 3); /* /dev/null */ } static struct console gdbstub_console = { .name = "gdb", .write = gdbstub_console_write, .device = gdbstub_console_dev, .flags = CON_PRINTBUFFER, .index = -1, }; #endif /* * Convert the memory pointed to by mem into hex, placing result in buf. * - if successful, return a pointer to the last char put in buf (NUL) * - in case of mem fault, return NULL * may_fault is non-zero if we are reading from arbitrary memory, but is * currently not used. 
 */
static unsigned char *mem2hex(const void *_mem, char *buf, int count,
			      int may_fault)
{
	const u8 *mem = _mem;
	u8 ch[4];

	/* leading byte to reach 16-bit alignment */
	if ((u32) mem & 1 && count >= 1) {
		if (gdbstub_read_byte(mem, ch) != 0)
			return 0;
		buf = pack_hex_byte(buf, ch[0]);
		mem++;
		count--;
	}

	/* leading word to reach 32-bit alignment */
	if ((u32) mem & 3 && count >= 2) {
		if (gdbstub_read_word(mem, ch) != 0)
			return 0;
		buf = pack_hex_byte(buf, ch[0]);
		buf = pack_hex_byte(buf, ch[1]);
		mem += 2;
		count -= 2;
	}

	/* bulk of the transfer as aligned 32-bit reads */
	while (count >= 4) {
		if (gdbstub_read_dword(mem, ch) != 0)
			return 0;
		buf = pack_hex_byte(buf, ch[0]);
		buf = pack_hex_byte(buf, ch[1]);
		buf = pack_hex_byte(buf, ch[2]);
		buf = pack_hex_byte(buf, ch[3]);
		mem += 4;
		count -= 4;
	}

	/* trailing word */
	if (count >= 2) {
		if (gdbstub_read_word(mem, ch) != 0)
			return 0;
		buf = pack_hex_byte(buf, ch[0]);
		buf = pack_hex_byte(buf, ch[1]);
		mem += 2;
		count -= 2;
	}

	/* trailing byte */
	if (count >= 1) {
		if (gdbstub_read_byte(mem, ch) != 0)
			return 0;
		buf = pack_hex_byte(buf, ch[0]);
	}

	*buf = 0;	/* NUL-terminate so the result is a C string */
	return buf;
}

/*
 * convert the hex array pointed to by buf into binary to be placed in mem
 * return a pointer to the character AFTER the last byte written
 * may_fault is non-zero if we are reading from arbitrary memory, but is
 * currently not used.
 * Writes are widened to word/dword accesses where alignment allows,
 * mirroring mem2hex() above.
 */
static const char *hex2mem(const char *buf, void *_mem, int count,
			   int may_fault)
{
	u8 *mem = _mem;
	union {
		u32 val;
		u8 b[4];
	} ch;

	/* leading byte to reach 16-bit alignment */
	if ((u32) mem & 1 && count >= 1) {
		ch.b[0]  = hex(*buf++) << 4;
		ch.b[0] |= hex(*buf++);
		if (gdbstub_write_byte(ch.val, mem) != 0)
			return 0;
		mem++;
		count--;
	}

	/* leading word to reach 32-bit alignment */
	if ((u32) mem & 3 && count >= 2) {
		ch.b[0]  = hex(*buf++) << 4;
		ch.b[0] |= hex(*buf++);
		ch.b[1]  = hex(*buf++) << 4;
		ch.b[1] |= hex(*buf++);
		if (gdbstub_write_word(ch.val, mem) != 0)
			return 0;
		mem += 2;
		count -= 2;
	}

	/* bulk of the transfer as aligned 32-bit writes */
	while (count >= 4) {
		ch.b[0]  = hex(*buf++) << 4;
		ch.b[0] |= hex(*buf++);
		ch.b[1]  = hex(*buf++) << 4;
		ch.b[1] |= hex(*buf++);
		ch.b[2]  = hex(*buf++) << 4;
		ch.b[2] |= hex(*buf++);
		ch.b[3]  = hex(*buf++) << 4;
		ch.b[3] |= hex(*buf++);
		if (gdbstub_write_dword(ch.val, mem) != 0)
			return 0;
		mem += 4;
		count -= 4;
	}

	/* trailing word */
	if (count >= 2) {
		ch.b[0]  = hex(*buf++) << 4;
		ch.b[0] |= hex(*buf++);
		ch.b[1]  = hex(*buf++) << 4;
		ch.b[1] |= hex(*buf++);
		if (gdbstub_write_word(ch.val, mem) != 0)
			return 0;
		mem += 2;
		count -= 2;
	}

	/* trailing byte */
	if (count >= 1) {
		ch.b[0]  = hex(*buf++) << 4;
		ch.b[0] |= hex(*buf++);
		if (gdbstub_write_byte(ch.val, mem) != 0)
			return 0;
	}

	return buf;
}

/*
 * This table contains the mapping between MN10300 exception codes, and
 * signals, which are primarily what GDB understands.  It also indicates
 * which hardware traps we need to commandeer when initializing the stub.
 */
static const struct excep_to_sig_map {
	enum exception_code	excep;	/* MN10300 exception code */
	unsigned char		signo;	/* Signal that we map this into */
} excep_to_sig_map[] = {
	{ EXCEP_ITLBMISS,	SIGSEGV		},
	{ EXCEP_DTLBMISS,	SIGSEGV		},
	{ EXCEP_TRAP,		SIGTRAP		},
	{ EXCEP_ISTEP,		SIGTRAP		},
	{ EXCEP_IBREAK,		SIGTRAP		},
	{ EXCEP_OBREAK,		SIGTRAP		},
	{ EXCEP_UNIMPINS,	SIGILL		},
	{ EXCEP_UNIMPEXINS,	SIGILL		},
	{ EXCEP_MEMERR,		SIGSEGV		},
	{ EXCEP_MISALIGN,	SIGSEGV		},
	{ EXCEP_BUSERROR,	SIGBUS		},
	{ EXCEP_ILLINSACC,	SIGSEGV		},
	{ EXCEP_ILLDATACC,	SIGSEGV		},
	{ EXCEP_IOINSACC,	SIGSEGV		},
	{ EXCEP_PRIVINSACC,	SIGSEGV		},
	{ EXCEP_PRIVDATACC,	SIGSEGV		},
	{ EXCEP_FPU_DISABLED,	SIGFPE		},
	{ EXCEP_FPU_UNIMPINS,	SIGFPE		},
	{ EXCEP_FPU_OPERATION,	SIGFPE		},
	{ EXCEP_WDT,		SIGALRM		},
	{ EXCEP_NMI,		SIGQUIT		},
	{ EXCEP_IRQ_LEVEL0,	SIGINT		},
	{ EXCEP_IRQ_LEVEL1,	SIGINT		},
	{ EXCEP_IRQ_LEVEL2,	SIGINT		},
	{ EXCEP_IRQ_LEVEL3,	SIGINT		},
	{ EXCEP_IRQ_LEVEL4,	SIGINT		},
	{ EXCEP_IRQ_LEVEL5,	SIGINT		},
	{ EXCEP_IRQ_LEVEL6,	SIGINT		},
	{ 0, 0}				/* table terminator */
};

/*
 * convert the MN10300 exception code into a UNIX signal number
 */
static int computeSignal(enum exception_code excep)
{
	const struct excep_to_sig_map *map;

	for (map = excep_to_sig_map; map->signo; map++)
		if (map->excep == excep)
			return map->signo;

	return SIGHUP; /* default for things we don't know about */
}

/* FPU state saved across a stub session (fpcr plus fs0-fs31) */
static u32 gdbstub_fpcr, gdbstub_fpufs_array[32];

/*
 * Save the FPU registers into gdbstub_fpufs_array/gdbstub_fpcr.
 * No-op unless CONFIG_FPU.
 */
static void gdbstub_store_fpu(void)
{
#ifdef CONFIG_FPU

	asm volatile(
		"or %2,epsw\n"			/* enable FPU access (EPSW_FE) */
#ifdef CONFIG_MN10300_PROC_MN103E010
		"nop\n"
		"nop\n"
#endif
		"mov %1, a1\n"
		"fmov fs0,  (a1+)\n"
		"fmov fs1,  (a1+)\n"
		"fmov fs2,  (a1+)\n"
		"fmov fs3,  (a1+)\n"
		"fmov fs4,  (a1+)\n"
		"fmov fs5,  (a1+)\n"
		"fmov fs6,  (a1+)\n"
		"fmov fs7,  (a1+)\n"
		"fmov fs8,  (a1+)\n"
		"fmov fs9,  (a1+)\n"
		"fmov fs10, (a1+)\n"
		"fmov fs11, (a1+)\n"
		"fmov fs12, (a1+)\n"
		"fmov fs13, (a1+)\n"
		"fmov fs14, (a1+)\n"
		"fmov fs15, (a1+)\n"
		"fmov fs16, (a1+)\n"
		"fmov fs17, (a1+)\n"
		"fmov fs18, (a1+)\n"
		"fmov fs19, (a1+)\n"
		"fmov fs20, (a1+)\n"
		"fmov fs21, (a1+)\n"
		"fmov fs22, (a1+)\n"
		"fmov fs23, (a1+)\n"
		"fmov fs24, (a1+)\n"
		"fmov fs25, (a1+)\n"
		"fmov fs26, (a1+)\n"
		"fmov fs27, (a1+)\n"
		"fmov fs28, (a1+)\n"
		"fmov fs29, (a1+)\n"
		"fmov fs30, (a1+)\n"
		"fmov fs31, (a1+)\n"
		"fmov fpcr, %0\n"
		: "=d"(gdbstub_fpcr)
		: "g" (&gdbstub_fpufs_array), "i"(EPSW_FE)
		: "a1"
		);
#endif
}

/*
 * Restore the FPU registers from gdbstub_fpufs_array/gdbstub_fpcr.
 * No-op unless CONFIG_FPU.
 */
static void gdbstub_load_fpu(void)
{
#ifdef CONFIG_FPU

	asm volatile(
		"or %1,epsw\n"			/* enable FPU access (EPSW_FE) */
#ifdef CONFIG_MN10300_PROC_MN103E010
		"nop\n"
		"nop\n"
#endif
		"mov %0, a1\n"
		"fmov (a1+), fs0\n"
		"fmov (a1+), fs1\n"
		"fmov (a1+), fs2\n"
		"fmov (a1+), fs3\n"
		"fmov (a1+), fs4\n"
		"fmov (a1+), fs5\n"
		"fmov (a1+), fs6\n"
		"fmov (a1+), fs7\n"
		"fmov (a1+), fs8\n"
		"fmov (a1+), fs9\n"
		"fmov (a1+), fs10\n"
		"fmov (a1+), fs11\n"
		"fmov (a1+), fs12\n"
		"fmov (a1+), fs13\n"
		"fmov (a1+), fs14\n"
		"fmov (a1+), fs15\n"
		"fmov (a1+), fs16\n"
		"fmov (a1+), fs17\n"
		"fmov (a1+), fs18\n"
		"fmov (a1+), fs19\n"
		"fmov (a1+), fs20\n"
		"fmov (a1+), fs21\n"
		"fmov (a1+), fs22\n"
		"fmov (a1+), fs23\n"
		"fmov (a1+), fs24\n"
		"fmov (a1+), fs25\n"
		"fmov (a1+), fs26\n"
		"fmov (a1+), fs27\n"
		"fmov (a1+), fs28\n"
		"fmov (a1+), fs29\n"
		"fmov (a1+), fs30\n"
		"fmov (a1+), fs31\n"
		"fmov %2, fpcr\n"
		:
		: "g" (&gdbstub_fpufs_array), "i"(EPSW_FE), "d"(gdbstub_fpcr)
		: "a1"
		);
#endif
}

/*
 * set a software breakpoint
 * - allocates a free slot in gdbstub_bkpts[], saves the original bytes
 *   and overwrites them with the breakpoint opcode (0xFF, or a 0xF7F7
 *   pair when GDBSTUB_USE_F7F7_AS_BREAKPOINT is defined)
 * - on a partial write failure, restores the bytes already patched
 * - returns 0, -ENOSPC (no free slot) or -EFAULT (bad address)
 */
int gdbstub_set_breakpoint(u8 *addr, int len)
{
	int bkpt, loop, xloop;

#ifdef GDBSTUB_USE_F7F7_AS_BREAKPOINT
	len = (len + 1) & ~1;	/* the 0xF7F7 breakpoint is two bytes */
#endif

	gdbstub_bkpt("setbkpt(%p,%d)\n", addr, len);

	for (bkpt = 255; bkpt >= 0; bkpt--)
		if (!gdbstub_bkpts[bkpt].addr)
			break;
	if (bkpt < 0)
		return -ENOSPC;

	/* save the bytes we are about to overwrite */
	for (loop = 0; loop < len; loop++)
		if (gdbstub_read_byte(&addr[loop],
				      &gdbstub_bkpts[bkpt].origbytes[loop]
				      ) < 0)
			return -EFAULT;

	gdbstub_flush_caches = 1;

#ifdef GDBSTUB_USE_F7F7_AS_BREAKPOINT
	for (loop = 0; loop < len; loop++)
		if (gdbstub_write_byte(0xF7, &addr[loop]) < 0)
			goto restore;
#else
	for (loop = 0; loop < len; loop++)
		if (gdbstub_write_byte(0xFF, &addr[loop]) < 0)
			goto restore;
#endif

	gdbstub_bkpts[bkpt].addr = addr;
	gdbstub_bkpts[bkpt].len = len;

	gdbstub_bkpt("Set BKPT[%02x]: %p-%p {%02x%02x%02x%02x%02x%02x%02x}\n",
		     bkpt,
		     gdbstub_bkpts[bkpt].addr,
		     gdbstub_bkpts[bkpt].addr + gdbstub_bkpts[bkpt].len - 1,
		     gdbstub_bkpts[bkpt].origbytes[0],
		     gdbstub_bkpts[bkpt].origbytes[1],
		     gdbstub_bkpts[bkpt].origbytes[2],
		     gdbstub_bkpts[bkpt].origbytes[3],
		     gdbstub_bkpts[bkpt].origbytes[4],
		     gdbstub_bkpts[bkpt].origbytes[5],
		     gdbstub_bkpts[bkpt].origbytes[6]
		     );

	return 0;

restore:
	/* undo the bytes patched before the failure */
	for (xloop = 0; xloop < loop; xloop++)
		gdbstub_write_byte(gdbstub_bkpts[bkpt].origbytes[xloop],
				   addr + xloop);
	return -EFAULT;
}

/*
 * clear a software breakpoint
 * - finds the matching (addr, len) slot, frees it and writes the saved
 *   original bytes back
 * - returns 0, -ENOENT (no such breakpoint) or -EFAULT (bad address)
 */
int gdbstub_clear_breakpoint(u8 *addr, int len)
{
	int bkpt, loop;

#ifdef GDBSTUB_USE_F7F7_AS_BREAKPOINT
	len = (len + 1) & ~1;	/* the 0xF7F7 breakpoint is two bytes */
#endif

	gdbstub_bkpt("clearbkpt(%p,%d)\n", addr, len);

	for (bkpt = 255; bkpt >= 0; bkpt--)
		if (gdbstub_bkpts[bkpt].addr == addr &&
		    gdbstub_bkpts[bkpt].len == len)
			break;
	if (bkpt < 0)
		return -ENOENT;

	gdbstub_bkpts[bkpt].addr = NULL;

	gdbstub_flush_caches = 1;

	for (loop = 0; loop < len; loop++)
		if (gdbstub_write_byte(gdbstub_bkpts[bkpt].origbytes[loop],
				       addr + loop) < 0)
			return -EFAULT;

	return 0;
}

/*
 * This function does all command processing for interfacing to gdb
 * - returns 0 if the exception should be skipped, -ERROR otherwise.
*/ static int gdbstub(struct pt_regs *regs, enum exception_code excep) { unsigned long *stack; unsigned long epsw, mdr; uint32_t zero, ssp; uint8_t broke; char *ptr; int sigval; int addr; int length; int loop; if (excep == EXCEP_FPU_DISABLED) return -ENOTSUPP; gdbstub_flush_caches = 0; mn10300_set_gdbleds(1); asm volatile("mov mdr,%0" : "=d"(mdr)); local_save_flags(epsw); arch_local_change_intr_mask_level( NUM2EPSW_IM(CONFIG_DEBUGGER_IRQ_LEVEL + 1)); gdbstub_store_fpu(); #ifdef CONFIG_GDBSTUB_IMMEDIATE /* skip the initial pause loop */ if (regs->pc == (unsigned long) __gdbstub_pause) regs->pc = (unsigned long) start_kernel; #endif /* if we were single stepping, restore the opcodes hoisted for the * breakpoint[s] */ broke = 0; #ifdef CONFIG_GDBSTUB_ALLOW_SINGLE_STEP if ((step_bp[0].addr && step_bp[0].addr == (u8 *) regs->pc) || (step_bp[1].addr && step_bp[1].addr == (u8 *) regs->pc)) broke = 1; __gdbstub_restore_bp(); #endif if (gdbstub_rx_unget) { sigval = SIGINT; if (gdbstub_rx_unget != 3) goto packet_waiting; gdbstub_rx_unget = 0; } stack = (unsigned long *) regs->sp; sigval = broke ? 
SIGTRAP : computeSignal(excep); /* send information about a BUG() */ if (!user_mode(regs) && excep == EXCEP_SYSCALL15) { const struct bug_entry *bug; bug = find_bug(regs->pc); if (bug) goto found_bug; length = snprintf(trans_buffer, sizeof(trans_buffer), "BUG() at address %lx\n", regs->pc); goto send_bug_pkt; found_bug: length = snprintf(trans_buffer, sizeof(trans_buffer), "BUG() at address %lx (%s:%d)\n", regs->pc, bug->file, bug->line); send_bug_pkt: ptr = output_buffer; *ptr++ = 'O'; ptr = mem2hex(trans_buffer, ptr, length, 0); *ptr = 0; putpacket(output_buffer); regs->pc -= 2; sigval = SIGABRT; } else if (regs->pc == (unsigned long) __gdbstub_bug_trap) { regs->pc = regs->mdr; sigval = SIGABRT; } /* * send a message to the debugger's user saying what happened if it may * not be clear cut (we can't map exceptions onto signals properly) */ if (sigval != SIGINT && sigval != SIGTRAP && sigval != SIGILL) { static const char title[] = "Excep ", tbcberr[] = "BCBERR "; static const char crlf[] = "\r\n"; char hx; u32 bcberr = BCBERR; ptr = output_buffer; *ptr++ = 'O'; ptr = mem2hex(title, ptr, sizeof(title) - 1, 0); hx = hex_asc_hi(excep >> 8); ptr = pack_hex_byte(ptr, hx); hx = hex_asc_lo(excep >> 8); ptr = pack_hex_byte(ptr, hx); hx = hex_asc_hi(excep); ptr = pack_hex_byte(ptr, hx); hx = hex_asc_lo(excep); ptr = pack_hex_byte(ptr, hx); ptr = mem2hex(crlf, ptr, sizeof(crlf) - 1, 0); *ptr = 0; putpacket(output_buffer); /* send it off... 
*/ /* BCBERR */ ptr = output_buffer; *ptr++ = 'O'; ptr = mem2hex(tbcberr, ptr, sizeof(tbcberr) - 1, 0); hx = hex_asc_hi(bcberr >> 24); ptr = pack_hex_byte(ptr, hx); hx = hex_asc_lo(bcberr >> 24); ptr = pack_hex_byte(ptr, hx); hx = hex_asc_hi(bcberr >> 16); ptr = pack_hex_byte(ptr, hx); hx = hex_asc_lo(bcberr >> 16); ptr = pack_hex_byte(ptr, hx); hx = hex_asc_hi(bcberr >> 8); ptr = pack_hex_byte(ptr, hx); hx = hex_asc_lo(bcberr >> 8); ptr = pack_hex_byte(ptr, hx); hx = hex_asc_hi(bcberr); ptr = pack_hex_byte(ptr, hx); hx = hex_asc_lo(bcberr); ptr = pack_hex_byte(ptr, hx); ptr = mem2hex(crlf, ptr, sizeof(crlf) - 1, 0); *ptr = 0; putpacket(output_buffer); /* send it off... */ } /* * tell the debugger that an exception has occurred */ ptr = output_buffer; /* * Send trap type (converted to signal) */ *ptr++ = 'T'; ptr = pack_hex_byte(ptr, sigval); /* * Send Error PC */ ptr = pack_hex_byte(ptr, GDB_REGID_PC); *ptr++ = ':'; ptr = mem2hex(&regs->pc, ptr, 4, 0); *ptr++ = ';'; /* * Send frame pointer */ ptr = pack_hex_byte(ptr, GDB_REGID_FP); *ptr++ = ':'; ptr = mem2hex(&regs->a3, ptr, 4, 0); *ptr++ = ';'; /* * Send stack pointer */ ssp = (unsigned long) (regs + 1); ptr = pack_hex_byte(ptr, GDB_REGID_SP); *ptr++ = ':'; ptr = mem2hex(&ssp, ptr, 4, 0); *ptr++ = ';'; *ptr++ = 0; putpacket(output_buffer); /* send it off... 
*/ packet_waiting: /* * Wait for input from remote GDB */ while (1) { output_buffer[0] = 0; getpacket(input_buffer); switch (input_buffer[0]) { /* request repeat of last signal number */ case '?': output_buffer[0] = 'S'; output_buffer[1] = hex_asc_hi(sigval); output_buffer[2] = hex_asc_lo(sigval); output_buffer[3] = 0; break; case 'd': /* toggle debug flag */ break; /* * Return the value of the CPU registers */ case 'g': zero = 0; ssp = (u32) (regs + 1); ptr = output_buffer; ptr = mem2hex(&regs->d0, ptr, 4, 0); ptr = mem2hex(&regs->d1, ptr, 4, 0); ptr = mem2hex(&regs->d2, ptr, 4, 0); ptr = mem2hex(&regs->d3, ptr, 4, 0); ptr = mem2hex(&regs->a0, ptr, 4, 0); ptr = mem2hex(&regs->a1, ptr, 4, 0); ptr = mem2hex(&regs->a2, ptr, 4, 0); ptr = mem2hex(&regs->a3, ptr, 4, 0); ptr = mem2hex(&ssp, ptr, 4, 0); /* 8 */ ptr = mem2hex(&regs->pc, ptr, 4, 0); ptr = mem2hex(&regs->mdr, ptr, 4, 0); ptr = mem2hex(&regs->epsw, ptr, 4, 0); ptr = mem2hex(&regs->lir, ptr, 4, 0); ptr = mem2hex(&regs->lar, ptr, 4, 0); ptr = mem2hex(&regs->mdrq, ptr, 4, 0); ptr = mem2hex(&regs->e0, ptr, 4, 0); /* 15 */ ptr = mem2hex(&regs->e1, ptr, 4, 0); ptr = mem2hex(&regs->e2, ptr, 4, 0); ptr = mem2hex(&regs->e3, ptr, 4, 0); ptr = mem2hex(&regs->e4, ptr, 4, 0); ptr = mem2hex(&regs->e5, ptr, 4, 0); ptr = mem2hex(&regs->e6, ptr, 4, 0); ptr = mem2hex(&regs->e7, ptr, 4, 0); ptr = mem2hex(&ssp, ptr, 4, 0); ptr = mem2hex(&regs, ptr, 4, 0); ptr = mem2hex(&regs->sp, ptr, 4, 0); ptr = mem2hex(&regs->mcrh, ptr, 4, 0); /* 26 */ ptr = mem2hex(&regs->mcrl, ptr, 4, 0); ptr = mem2hex(&regs->mcvf, ptr, 4, 0); ptr = mem2hex(&gdbstub_fpcr, ptr, 4, 0); /* 29 - FPCR */ ptr = mem2hex(&zero, ptr, 4, 0); ptr = mem2hex(&zero, ptr, 4, 0); for (loop = 0; loop < 32; loop++) ptr = mem2hex(&gdbstub_fpufs_array[loop], ptr, 4, 0); /* 32 - FS0-31 */ break; /* * set the value of the CPU registers - return OK */ case 'G': { const char *ptr; ptr = &input_buffer[1]; ptr = hex2mem(ptr, &regs->d0, 4, 0); ptr = hex2mem(ptr, &regs->d1, 4, 0); ptr 
= hex2mem(ptr, &regs->d2, 4, 0); ptr = hex2mem(ptr, &regs->d3, 4, 0); ptr = hex2mem(ptr, &regs->a0, 4, 0); ptr = hex2mem(ptr, &regs->a1, 4, 0); ptr = hex2mem(ptr, &regs->a2, 4, 0); ptr = hex2mem(ptr, &regs->a3, 4, 0); ptr = hex2mem(ptr, &ssp, 4, 0); /* 8 */ ptr = hex2mem(ptr, &regs->pc, 4, 0); ptr = hex2mem(ptr, &regs->mdr, 4, 0); ptr = hex2mem(ptr, &regs->epsw, 4, 0); ptr = hex2mem(ptr, &regs->lir, 4, 0); ptr = hex2mem(ptr, &regs->lar, 4, 0); ptr = hex2mem(ptr, &regs->mdrq, 4, 0); ptr = hex2mem(ptr, &regs->e0, 4, 0); /* 15 */ ptr = hex2mem(ptr, &regs->e1, 4, 0); ptr = hex2mem(ptr, &regs->e2, 4, 0); ptr = hex2mem(ptr, &regs->e3, 4, 0); ptr = hex2mem(ptr, &regs->e4, 4, 0); ptr = hex2mem(ptr, &regs->e5, 4, 0); ptr = hex2mem(ptr, &regs->e6, 4, 0); ptr = hex2mem(ptr, &regs->e7, 4, 0); ptr = hex2mem(ptr, &ssp, 4, 0); ptr = hex2mem(ptr, &zero, 4, 0); ptr = hex2mem(ptr, &regs->sp, 4, 0); ptr = hex2mem(ptr, &regs->mcrh, 4, 0); /* 26 */ ptr = hex2mem(ptr, &regs->mcrl, 4, 0); ptr = hex2mem(ptr, &regs->mcvf, 4, 0); ptr = hex2mem(ptr, &zero, 4, 0); /* 29 - FPCR */ ptr = hex2mem(ptr, &zero, 4, 0); ptr = hex2mem(ptr, &zero, 4, 0); for (loop = 0; loop < 32; loop++) /* 32 - FS0-31 */ ptr = hex2mem(ptr, &zero, 4, 0); #if 0 /* * See if the stack pointer has moved. If so, then copy * the saved locals and ins to the new location. 
*/ unsigned long *newsp = (unsigned long *) registers[SP]; if (sp != newsp) sp = memcpy(newsp, sp, 16 * 4); #endif gdbstub_strcpy(output_buffer, "OK"); } break; /* * mAA..AA,LLLL Read LLLL bytes at address AA..AA */ case 'm': ptr = &input_buffer[1]; if (hexToInt(&ptr, &addr) && *ptr++ == ',' && hexToInt(&ptr, &length) ) { if (mem2hex((char *) addr, output_buffer, length, 1)) break; gdbstub_strcpy(output_buffer, "E03"); } else { gdbstub_strcpy(output_buffer, "E01"); } break; /* * MAA..AA,LLLL: Write LLLL bytes at address AA.AA * return OK */ case 'M': ptr = &input_buffer[1]; if (hexToInt(&ptr, &addr) && *ptr++ == ',' && hexToInt(&ptr, &length) && *ptr++ == ':' ) { if (hex2mem(ptr, (char *) addr, length, 1)) gdbstub_strcpy(output_buffer, "OK"); else gdbstub_strcpy(output_buffer, "E03"); gdbstub_flush_caches = 1; } else { gdbstub_strcpy(output_buffer, "E02"); } break; /* * cAA..AA Continue at address AA..AA(optional) */ case 'c': /* try to read optional parameter, pc unchanged if no * parm */ ptr = &input_buffer[1]; if (hexToInt(&ptr, &addr)) regs->pc = addr; goto done; /* * kill the program */ case 'k' : goto done; /* just continue */ /* * Reset the whole machine (FIXME: system dependent) */ case 'r': break; /* * Step to next instruction */ case 's': /* Using the T flag doesn't seem to perform single * stepping (it seems to wind up being caught by the * JTAG unit), so we have to use breakpoints and * continue instead. 
*/ #ifdef CONFIG_GDBSTUB_ALLOW_SINGLE_STEP if (gdbstub_single_step(regs) < 0) /* ignore any fault error for now */ gdbstub_printk("unable to set single-step" " bp\n"); goto done; #else gdbstub_strcpy(output_buffer, "E01"); break; #endif /* * Set baud rate (bBB) */ case 'b': do { int baudrate; ptr = &input_buffer[1]; if (!hexToInt(&ptr, &baudrate)) { gdbstub_strcpy(output_buffer, "B01"); break; } if (baudrate) { /* ACK before changing speed */ putpacket("OK"); gdbstub_io_set_baud(baudrate); } } while (0); break; /* * Set breakpoint */ case 'Z': ptr = &input_buffer[1]; if (!hexToInt(&ptr, &loop) || *ptr++ != ',' || !hexToInt(&ptr, &addr) || *ptr++ != ',' || !hexToInt(&ptr, &length) ) { gdbstub_strcpy(output_buffer, "E01"); break; } /* only support software breakpoints */ gdbstub_strcpy(output_buffer, "E03"); if (loop != 0 || length < 1 || length > 7 || (unsigned long) addr < 4096) break; if (gdbstub_set_breakpoint((u8 *) addr, length) < 0) break; gdbstub_strcpy(output_buffer, "OK"); break; /* * Clear breakpoint */ case 'z': ptr = &input_buffer[1]; if (!hexToInt(&ptr, &loop) || *ptr++ != ',' || !hexToInt(&ptr, &addr) || *ptr++ != ',' || !hexToInt(&ptr, &length) ) { gdbstub_strcpy(output_buffer, "E01"); break; } /* only support software breakpoints */ gdbstub_strcpy(output_buffer, "E03"); if (loop != 0 || length < 1 || length > 7 || (unsigned long) addr < 4096) break; if (gdbstub_clear_breakpoint((u8 *) addr, length) < 0) break; gdbstub_strcpy(output_buffer, "OK"); break; default: gdbstub_proto("### GDB Unsupported Cmd '%s'\n", input_buffer); break; } /* reply to the request */ putpacket(output_buffer); } done: /* * Need to flush the instruction cache here, as we may * have deposited a breakpoint, and the icache probably * has no way of knowing that a data ref to some location * may have changed something that is in the instruction * cache. * NB: We flush both caches, just to be sure... 
*/ if (gdbstub_flush_caches) debugger_local_cache_flushinv(); gdbstub_load_fpu(); mn10300_set_gdbleds(0); if (excep == EXCEP_NMI) NMICR = NMICR_NMIF; touch_softlockup_watchdog(); local_irq_restore(epsw); return 0; } /* * Determine if we hit a debugger special breakpoint that needs skipping over * automatically. */ int at_debugger_breakpoint(struct pt_regs *regs) { return 0; } /* * handle event interception */ asmlinkage int debugger_intercept(enum exception_code excep, int signo, int si_code, struct pt_regs *regs) { static u8 notfirst = 1; int ret; if (gdbstub_busy) gdbstub_printk("--> gdbstub reentered itself\n"); gdbstub_busy = 1; if (notfirst) { unsigned long mdr; asm("mov mdr,%0" : "=d"(mdr)); gdbstub_entry( "--> debugger_intercept(%p,%04x) [MDR=%lx PC=%lx]\n", regs, excep, mdr, regs->pc); gdbstub_entry( "PC: %08lx EPSW: %08lx SSP: %08lx mode: %s\n", regs->pc, regs->epsw, (unsigned long) &ret, user_mode(regs) ? "User" : "Super"); gdbstub_entry( "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", regs->d0, regs->d1, regs->d2, regs->d3); gdbstub_entry( "a0: %08lx a1: %08lx a2: %08lx a3: %08lx\n", regs->a0, regs->a1, regs->a2, regs->a3); gdbstub_entry( "e0: %08lx e1: %08lx e2: %08lx e3: %08lx\n", regs->e0, regs->e1, regs->e2, regs->e3); gdbstub_entry( "e4: %08lx e5: %08lx e6: %08lx e7: %08lx\n", regs->e4, regs->e5, regs->e6, regs->e7); gdbstub_entry( "lar: %08lx lir: %08lx mdr: %08lx usp: %08lx\n", regs->lar, regs->lir, regs->mdr, regs->sp); gdbstub_entry( "cvf: %08lx crl: %08lx crh: %08lx drq: %08lx\n", regs->mcvf, regs->mcrl, regs->mcrh, regs->mdrq); gdbstub_entry( "threadinfo=%p task=%p)\n", current_thread_info(), current); } else { notfirst = 1; } ret = gdbstub(regs, excep); gdbstub_entry("<-- debugger_intercept()\n"); gdbstub_busy = 0; return ret; } /* * handle the GDB stub itself causing an exception */ asmlinkage void gdbstub_exception(struct pt_regs *regs, enum exception_code excep) { unsigned long mdr; asm("mov mdr,%0" : "=d"(mdr)); gdbstub_entry("--> gdbstub 
exception({%p},%04x) [MDR=%lx]\n", regs, excep, mdr); while ((unsigned long) regs == 0xffffffff) {} /* handle guarded memory accesses where we know it might fault */ if (regs->pc == (unsigned) gdbstub_read_byte_guard) { regs->pc = (unsigned) gdbstub_read_byte_cont; goto fault; } if (regs->pc == (unsigned) gdbstub_read_word_guard) { regs->pc = (unsigned) gdbstub_read_word_cont; goto fault; } if (regs->pc == (unsigned) gdbstub_read_dword_guard) { regs->pc = (unsigned) gdbstub_read_dword_cont; goto fault; } if (regs->pc == (unsigned) gdbstub_write_byte_guard) { regs->pc = (unsigned) gdbstub_write_byte_cont; goto fault; } if (regs->pc == (unsigned) gdbstub_write_word_guard) { regs->pc = (unsigned) gdbstub_write_word_cont; goto fault; } if (regs->pc == (unsigned) gdbstub_write_dword_guard) { regs->pc = (unsigned) gdbstub_write_dword_cont; goto fault; } gdbstub_printk("\n### GDB stub caused an exception ###\n"); /* something went horribly wrong */ console_verbose(); show_registers(regs); panic("GDB Stub caused an unexpected exception - can't continue\n"); /* we caught an attempt by the stub to access silly memory */ fault: gdbstub_entry("<-- gdbstub exception() = EFAULT\n"); regs->d0 = -EFAULT; return; } /* * send an exit message to GDB */ void gdbstub_exit(int status) { unsigned char checksum; unsigned char ch; int count; gdbstub_busy = 1; output_buffer[0] = 'W'; output_buffer[1] = hex_asc_hi(status); output_buffer[2] = hex_asc_lo(status); output_buffer[3] = 0; gdbstub_io_tx_char('$'); checksum = 0; count = 0; while ((ch = output_buffer[count]) != 0) { gdbstub_io_tx_char(ch); checksum += ch; count += 1; } gdbstub_io_tx_char('#'); gdbstub_io_tx_char(hex_asc_hi(checksum)); gdbstub_io_tx_char(hex_asc_lo(checksum)); /* make sure the output is flushed, or else RedBoot might clobber it */ gdbstub_io_tx_flush(); gdbstub_busy = 0; } /* * initialise the GDB stub */ asmlinkage void __init gdbstub_init(void) { #ifdef CONFIG_GDBSTUB_IMMEDIATE unsigned char ch; int ret; #endif 
gdbstub_busy = 1; printk(KERN_INFO "%s", gdbstub_banner); gdbstub_io_init(); gdbstub_entry("--> gdbstub_init\n"); /* try to talk to GDB (or anyone insane enough to want to type GDB * protocol by hand) */ gdbstub_io("### GDB Tx ACK\n"); gdbstub_io_tx_char('+'); /* 'hello world' */ #ifdef CONFIG_GDBSTUB_IMMEDIATE gdbstub_printk("GDB Stub waiting for packet\n"); /* in case GDB is started before us, ACK any packets that are already * sitting there (presumably "$?#xx") */ do { gdbstub_io_rx_char(&ch, 0); } while (ch != '$'); do { gdbstub_io_rx_char(&ch, 0); } while (ch != '#'); /* eat first csum byte */ do { ret = gdbstub_io_rx_char(&ch, 0); } while (ret != 0); /* eat second csum byte */ do { ret = gdbstub_io_rx_char(&ch, 0); } while (ret != 0); gdbstub_io("### GDB Tx NAK\n"); gdbstub_io_tx_char('-'); /* NAK it */ #else printk("GDB Stub ready\n"); #endif gdbstub_busy = 0; gdbstub_entry("<-- gdbstub_init\n"); } /* * register the console at a more appropriate time */ #ifdef CONFIG_GDBSTUB_CONSOLE static int __init gdbstub_postinit(void) { printk(KERN_NOTICE "registering console\n"); register_console(&gdbstub_console); return 0; } __initcall(gdbstub_postinit); #endif /* * handle character reception on GDB serial port * - jump into the GDB stub if BREAK is detected on the serial line */ asmlinkage void gdbstub_rx_irq(struct pt_regs *regs, enum exception_code excep) { char ch; int ret; gdbstub_entry("--> gdbstub_rx_irq\n"); do { ret = gdbstub_io_rx_char(&ch, 1); if (ret != -EIO && ret != -EAGAIN) { if (ret != -EINTR) gdbstub_rx_unget = ch; gdbstub(regs, excep); } } while (ret != -EAGAIN); gdbstub_entry("<-- gdbstub_rx_irq\n"); }
gpl-2.0
gingerboy92/android_kernel_motorola_msm8916
drivers/staging/vme/devices/vme_pio2_core.c
2730
11694
/* * GE PIO2 6U VME I/O Driver * * Author: Martyn Welch <martyn.welch@ge.com> * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/ctype.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/vme.h> #include "vme_pio2.h" static const char driver_name[] = "pio2"; static int bus[PIO2_CARDS_MAX]; static int bus_num; static long base[PIO2_CARDS_MAX]; static int base_num; static int vector[PIO2_CARDS_MAX]; static int vector_num; static int level[PIO2_CARDS_MAX]; static int level_num; static char *variant[PIO2_CARDS_MAX]; static int variant_num; static bool loopback; static int pio2_match(struct vme_dev *); static int pio2_probe(struct vme_dev *); static int pio2_remove(struct vme_dev *); static int pio2_get_led(struct pio2_card *card) { /* Can't read hardware, state saved in structure */ return card->led; } static int pio2_set_led(struct pio2_card *card, int state) { u8 reg; int retval; reg = card->irq_level; /* Register state inverse of led state */ if (!state) reg |= PIO2_LED; if (loopback) reg |= PIO2_LOOP; retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL); if (retval < 0) return retval; card->led = state ? 
1 : 0; return 0; } static void pio2_int(int level, int vector, void *ptr) { int vec, i, channel, retval; u8 reg; struct pio2_card *card = ptr; vec = vector & ~PIO2_VME_VECTOR_MASK; switch (vec) { case 0: dev_warn(&card->vdev->dev, "Spurious Interrupt\n"); break; case 1: case 2: case 3: case 4: /* Channels 0 to 7 */ retval = vme_master_read(card->window, &reg, 1, PIO2_REGS_INT_STAT[vec - 1]); if (retval < 0) { dev_err(&card->vdev->dev, "Unable to read IRQ status register\n"); return; } for (i = 0; i < 8; i++) { channel = ((vec - 1) * 8) + i; if (reg & PIO2_CHANNEL_BIT[channel]) dev_info(&card->vdev->dev, "Interrupt on I/O channel %d\n", channel); } break; case 5: case 6: case 7: case 8: case 9: case 10: /* Counters are dealt with by their own handler */ dev_err(&card->vdev->dev, "Counter interrupt\n"); break; } } /* * We return whether this has been successful - this is used in the probe to * ensure we have a valid card. */ static int pio2_reset_card(struct pio2_card *card) { int retval = 0; u8 data = 0; /* Clear main register*/ retval = vme_master_write(card->window, &data, 1, PIO2_REGS_CTRL); if (retval < 0) return retval; /* Clear VME vector */ retval = vme_master_write(card->window, &data, 1, PIO2_REGS_VME_VECTOR); if (retval < 0) return retval; /* Reset GPIO */ retval = pio2_gpio_reset(card); if (retval < 0) return retval; /* Reset counters */ retval = pio2_cntr_reset(card); if (retval < 0) return retval; return 0; } static struct vme_driver pio2_driver = { .name = driver_name, .match = pio2_match, .probe = pio2_probe, .remove = pio2_remove, }; static int __init pio2_init(void) { if (bus_num == 0) { pr_err("No cards, skipping registration\n"); return -ENODEV; } if (bus_num > PIO2_CARDS_MAX) { pr_err("Driver only able to handle %d PIO2 Cards\n", PIO2_CARDS_MAX); bus_num = PIO2_CARDS_MAX; } /* Register the PIO2 driver */ return vme_register_driver(&pio2_driver, bus_num); } static int pio2_match(struct vme_dev *vdev) { if (vdev->num >= bus_num) { 
dev_err(&vdev->dev, "The enumeration of the VMEbus to which the board is connected must be specified"); return 0; } if (vdev->num >= base_num) { dev_err(&vdev->dev, "The VME address for the cards registers must be specified"); return 0; } if (vdev->num >= vector_num) { dev_err(&vdev->dev, "The IRQ vector used by the card must be specified"); return 0; } if (vdev->num >= level_num) { dev_err(&vdev->dev, "The IRQ level used by the card must be specified"); return 0; } if (vdev->num >= variant_num) { dev_err(&vdev->dev, "The variant of the card must be specified"); return 0; } return 1; } static int pio2_probe(struct vme_dev *vdev) { struct pio2_card *card; int retval; int i; u8 reg; int vec; card = kzalloc(sizeof(struct pio2_card), GFP_KERNEL); if (card == NULL) { retval = -ENOMEM; goto err_struct; } card->id = vdev->num; card->bus = bus[card->id]; card->base = base[card->id]; card->irq_vector = vector[card->id]; card->irq_level = level[card->id] & PIO2_VME_INT_MASK; strncpy(card->variant, variant[card->id], PIO2_VARIANT_LENGTH); card->vdev = vdev; for (i = 0; i < PIO2_VARIANT_LENGTH; i++) { if (isdigit(card->variant[i]) == 0) { dev_err(&card->vdev->dev, "Variant invalid\n"); retval = -EINVAL; goto err_variant; } } /* * Bottom 4 bits of VME interrupt vector used to determine source, * provided vector should only use upper 4 bits. */ if (card->irq_vector & ~PIO2_VME_VECTOR_MASK) { dev_err(&card->vdev->dev, "Invalid VME IRQ Vector, vector must not use lower 4 bits\n"); retval = -EINVAL; goto err_vector; } /* * There is no way to determine the build variant or whether each bank * is input, output or both at run time. The inputs are also inverted * if configured as both. * * We pass in the board variant and use that to determine the * configuration of the banks. 
*/ for (i = 1; i < PIO2_VARIANT_LENGTH; i++) { switch (card->variant[i]) { case '0': card->bank[i-1].config = NOFIT; break; case '1': case '2': case '3': case '4': card->bank[i-1].config = INPUT; break; case '5': card->bank[i-1].config = OUTPUT; break; case '6': case '7': case '8': case '9': card->bank[i-1].config = BOTH; break; } } /* Get a master window and position over regs */ card->window = vme_master_request(vdev, VME_A24, VME_SCT, VME_D16); if (card->window == NULL) { dev_err(&card->vdev->dev, "Unable to assign VME master resource\n"); retval = -EIO; goto err_window; } retval = vme_master_set(card->window, 1, card->base, 0x10000, VME_A24, (VME_SCT | VME_USER | VME_DATA), VME_D16); if (retval) { dev_err(&card->vdev->dev, "Unable to configure VME master resource\n"); goto err_set; } /* * There is also no obvious register which we can probe to determine * whether the provided base is valid. If we can read the "ID Register" * offset and the reset function doesn't error, assume we have a valid * location. */ retval = vme_master_read(card->window, &reg, 1, PIO2_REGS_ID); if (retval < 0) { dev_err(&card->vdev->dev, "Unable to read from device\n"); goto err_read; } dev_dbg(&card->vdev->dev, "ID Register:%x\n", reg); /* * Ensure all the I/O is cleared. We can't read back the states, so * this is the only method we have to ensure that the I/O is in a known * state. */ retval = pio2_reset_card(card); if (retval) { dev_err(&card->vdev->dev, "Failed to reset card, is location valid?"); retval = -ENODEV; goto err_reset; } /* Configure VME Interrupts */ reg = card->irq_level; if (pio2_get_led(card)) reg |= PIO2_LED; if (loopback) reg |= PIO2_LOOP; retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL); if (retval < 0) return retval; /* Set VME vector */ retval = vme_master_write(card->window, &card->irq_vector, 1, PIO2_REGS_VME_VECTOR); if (retval < 0) return retval; /* Attach spurious interrupt handler. 
*/ vec = card->irq_vector | PIO2_VME_VECTOR_SPUR; retval = vme_irq_request(vdev, card->irq_level, vec, &pio2_int, (void *)card); if (retval < 0) { dev_err(&card->vdev->dev, "Unable to attach VME interrupt vector0x%x, level 0x%x\n", vec, card->irq_level); goto err_irq; } /* Attach GPIO interrupt handlers. */ for (i = 0; i < 4; i++) { vec = card->irq_vector | PIO2_VECTOR_BANK[i]; retval = vme_irq_request(vdev, card->irq_level, vec, &pio2_int, (void *)card); if (retval < 0) { dev_err(&card->vdev->dev, "Unable to attach VME interrupt vector0x%x, level 0x%x\n", vec, card->irq_level); goto err_gpio_irq; } } /* Attach counter interrupt handlers. */ for (i = 0; i < 6; i++) { vec = card->irq_vector | PIO2_VECTOR_CNTR[i]; retval = vme_irq_request(vdev, card->irq_level, vec, &pio2_int, (void *)card); if (retval < 0) { dev_err(&card->vdev->dev, "Unable to attach VME interrupt vector0x%x, level 0x%x\n", vec, card->irq_level); goto err_cntr_irq; } } /* Register IO */ retval = pio2_gpio_init(card); if (retval < 0) { dev_err(&card->vdev->dev, "Unable to register with GPIO framework\n"); goto err_gpio; } /* Set LED - This also sets interrupt level */ retval = pio2_set_led(card, 0); if (retval < 0) { dev_err(&card->vdev->dev, "Unable to set LED\n"); goto err_led; } dev_set_drvdata(&card->vdev->dev, card); dev_info(&card->vdev->dev, "PIO2 (variant %s) configured at 0x%lx\n", card->variant, card->base); return 0; err_led: pio2_gpio_exit(card); err_gpio: i = 6; err_cntr_irq: while (i > 0) { i--; vec = card->irq_vector | PIO2_VECTOR_CNTR[i]; vme_irq_free(vdev, card->irq_level, vec); } i = 4; err_gpio_irq: while (i > 0) { i--; vec = card->irq_vector | PIO2_VECTOR_BANK[i]; vme_irq_free(vdev, card->irq_level, vec); } vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR; vme_irq_free(vdev, card->irq_level, vec); err_irq: pio2_reset_card(card); err_reset: err_read: vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16); err_set: vme_master_free(card->window); 
err_window: err_vector: err_variant: kfree(card); err_struct: return retval; } static int pio2_remove(struct vme_dev *vdev) { int vec; int i; struct pio2_card *card = dev_get_drvdata(&vdev->dev); pio2_gpio_exit(card); for (i = 0; i < 6; i++) { vec = card->irq_vector | PIO2_VECTOR_CNTR[i]; vme_irq_free(vdev, card->irq_level, vec); } for (i = 0; i < 4; i++) { vec = card->irq_vector | PIO2_VECTOR_BANK[i]; vme_irq_free(vdev, card->irq_level, vec); } vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR; vme_irq_free(vdev, card->irq_level, vec); pio2_reset_card(card); vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16); vme_master_free(card->window); kfree(card); return 0; } static void __exit pio2_exit(void) { vme_unregister_driver(&pio2_driver); } /* These are required for each board */ MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the board is connected"); module_param_array(bus, int, &bus_num, S_IRUGO); MODULE_PARM_DESC(base, "Base VME address for PIO2 Registers"); module_param_array(base, long, &base_num, S_IRUGO); MODULE_PARM_DESC(vector, "VME IRQ Vector (Lower 4 bits masked)"); module_param_array(vector, int, &vector_num, S_IRUGO); MODULE_PARM_DESC(level, "VME IRQ Level"); module_param_array(level, int, &level_num, S_IRUGO); MODULE_PARM_DESC(variant, "Last 4 characters of PIO2 board variant"); module_param_array(variant, charp, &variant_num, S_IRUGO); /* This is for debugging */ MODULE_PARM_DESC(loopback, "Enable loopback mode on all cards"); module_param(loopback, bool, S_IRUGO); MODULE_DESCRIPTION("GE PIO2 6U VME I/O Driver"); MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com"); MODULE_LICENSE("GPL"); module_init(pio2_init); module_exit(pio2_exit);
gpl-2.0
jdkoreclipse/cloaked-cyril
arch/arm/mach-pxa/am300epd.c
2986
6572
/* * am300epd.c -- Platform device for AM300 EPD kit * * Copyright (C) 2008, Jaya Kumar * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * This work was made possible by help and equipment support from E-Ink * Corporation. http://support.eink.com/community * * This driver is written to be used with the Broadsheet display controller. * on the AM300 EPD prototype kit/development kit with an E-Ink 800x600 * Vizplex EPD on a Gumstix board using the Broadsheet interface board. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/gpio.h> #include <mach/gumstix.h> #include <mach/mfp-pxa25x.h> #include <mach/pxafb.h> #include "generic.h" #include <video/broadsheetfb.h> static unsigned int panel_type = 6; static struct platform_device *am300_device; static struct broadsheet_board am300_board; static unsigned long am300_pin_config[] __initdata = { GPIO16_GPIO, GPIO17_GPIO, GPIO32_GPIO, GPIO48_GPIO, GPIO49_GPIO, GPIO51_GPIO, GPIO74_GPIO, GPIO75_GPIO, GPIO76_GPIO, GPIO77_GPIO, /* this is the 16-bit hdb bus 58-73 */ GPIO58_GPIO, GPIO59_GPIO, GPIO60_GPIO, GPIO61_GPIO, GPIO62_GPIO, GPIO63_GPIO, GPIO64_GPIO, GPIO65_GPIO, GPIO66_GPIO, GPIO67_GPIO, GPIO68_GPIO, GPIO69_GPIO, GPIO70_GPIO, GPIO71_GPIO, GPIO72_GPIO, GPIO73_GPIO, }; /* register offsets for gpio control */ #define PWR_GPIO_PIN 16 #define CFG_GPIO_PIN 17 #define RDY_GPIO_PIN 32 #define DC_GPIO_PIN 48 #define RST_GPIO_PIN 49 #define LED_GPIO_PIN 51 #define RD_GPIO_PIN 74 #define WR_GPIO_PIN 75 #define CS_GPIO_PIN 76 #define IRQ_GPIO_PIN 77 /* hdb bus */ #define DB0_GPIO_PIN 58 #define DB15_GPIO_PIN 73 static int gpios[] = { PWR_GPIO_PIN, CFG_GPIO_PIN, RDY_GPIO_PIN, DC_GPIO_PIN, 
RST_GPIO_PIN, RD_GPIO_PIN, WR_GPIO_PIN, CS_GPIO_PIN, IRQ_GPIO_PIN, LED_GPIO_PIN }; static char *gpio_names[] = { "PWR", "CFG", "RDY", "DC", "RST", "RD", "WR", "CS", "IRQ", "LED" }; static int am300_wait_event(struct broadsheetfb_par *par) { /* todo: improve err recovery */ wait_event(par->waitq, gpio_get_value(RDY_GPIO_PIN)); return 0; } static int am300_init_gpio_regs(struct broadsheetfb_par *par) { int i; int err; char dbname[8]; for (i = 0; i < ARRAY_SIZE(gpios); i++) { err = gpio_request(gpios[i], gpio_names[i]); if (err) { dev_err(&am300_device->dev, "failed requesting " "gpio %s, err=%d\n", gpio_names[i], err); goto err_req_gpio; } } /* we also need to take care of the hdb bus */ for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) { sprintf(dbname, "DB%d", i); err = gpio_request(i, dbname); if (err) { dev_err(&am300_device->dev, "failed requesting " "gpio %d, err=%d\n", i, err); goto err_req_gpio2; } } /* setup the outputs and init values */ gpio_direction_output(PWR_GPIO_PIN, 0); gpio_direction_output(CFG_GPIO_PIN, 1); gpio_direction_output(DC_GPIO_PIN, 0); gpio_direction_output(RD_GPIO_PIN, 1); gpio_direction_output(WR_GPIO_PIN, 1); gpio_direction_output(CS_GPIO_PIN, 1); gpio_direction_output(RST_GPIO_PIN, 0); /* setup the inputs */ gpio_direction_input(RDY_GPIO_PIN); gpio_direction_input(IRQ_GPIO_PIN); /* start the hdb bus as an input */ for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) gpio_direction_output(i, 0); /* go into command mode */ gpio_set_value(CFG_GPIO_PIN, 1); gpio_set_value(RST_GPIO_PIN, 0); msleep(10); gpio_set_value(RST_GPIO_PIN, 1); msleep(10); am300_wait_event(par); return 0; err_req_gpio2: while (--i >= DB0_GPIO_PIN) gpio_free(i); i = ARRAY_SIZE(gpios); err_req_gpio: while (--i >= 0) gpio_free(gpios[i]); return err; } static int am300_init_board(struct broadsheetfb_par *par) { return am300_init_gpio_regs(par); } static void am300_cleanup(struct broadsheetfb_par *par) { int i; free_irq(IRQ_GPIO(RDY_GPIO_PIN), par); for (i = 0; i < 
ARRAY_SIZE(gpios); i++) gpio_free(gpios[i]); for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) gpio_free(i); } static u16 am300_get_hdb(struct broadsheetfb_par *par) { u16 res = 0; int i; for (i = 0; i <= (DB15_GPIO_PIN - DB0_GPIO_PIN) ; i++) res |= (gpio_get_value(DB0_GPIO_PIN + i)) ? (1 << i) : 0; return res; } static void am300_set_hdb(struct broadsheetfb_par *par, u16 data) { int i; for (i = 0; i <= (DB15_GPIO_PIN - DB0_GPIO_PIN) ; i++) gpio_set_value(DB0_GPIO_PIN + i, (data >> i) & 0x01); } static void am300_set_ctl(struct broadsheetfb_par *par, unsigned char bit, u8 state) { switch (bit) { case BS_CS: gpio_set_value(CS_GPIO_PIN, state); break; case BS_DC: gpio_set_value(DC_GPIO_PIN, state); break; case BS_WR: gpio_set_value(WR_GPIO_PIN, state); break; } } static int am300_get_panel_type(void) { return panel_type; } static irqreturn_t am300_handle_irq(int irq, void *dev_id) { struct broadsheetfb_par *par = dev_id; wake_up(&par->waitq); return IRQ_HANDLED; } static int am300_setup_irq(struct fb_info *info) { int ret; struct broadsheetfb_par *par = info->par; ret = request_irq(IRQ_GPIO(RDY_GPIO_PIN), am300_handle_irq, IRQF_DISABLED|IRQF_TRIGGER_RISING, "AM300", par); if (ret) dev_err(&am300_device->dev, "request_irq failed: %d\n", ret); return ret; } static struct broadsheet_board am300_board = { .owner = THIS_MODULE, .init = am300_init_board, .cleanup = am300_cleanup, .set_hdb = am300_set_hdb, .get_hdb = am300_get_hdb, .set_ctl = am300_set_ctl, .wait_for_rdy = am300_wait_event, .get_panel_type = am300_get_panel_type, .setup_irq = am300_setup_irq, }; int __init am300_init(void) { int ret; pxa2xx_mfp_config(ARRAY_AND_SIZE(am300_pin_config)); /* request our platform independent driver */ request_module("broadsheetfb"); am300_device = platform_device_alloc("broadsheetfb", -1); if (!am300_device) return -ENOMEM; /* the am300_board that will be seen by broadsheetfb is a copy */ platform_device_add_data(am300_device, &am300_board, sizeof(am300_board)); ret = 
platform_device_add(am300_device); if (ret) { platform_device_put(am300_device); return ret; } return 0; } module_param(panel_type, uint, 0); MODULE_PARM_DESC(panel_type, "Select the panel type: 37, 6, 97"); MODULE_DESCRIPTION("board driver for am300 epd kit"); MODULE_AUTHOR("Jaya Kumar"); MODULE_LICENSE("GPL");
gpl-2.0
cozybit/aosp-omap-kernel
arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
3242
1217
/* linux/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c * * Copyright (c) 2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * S3C24XX SPI - gpio configuration for bus 0 on gpe11,12,13 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. */ #include <linux/kernel.h> #include <linux/gpio.h> #include <mach/spi.h> #include <mach/regs-gpio.h> void s3c24xx_spi_gpiocfg_bus0_gpe11_12_13(struct s3c2410_spi_info *spi, int enable) { if (enable) { s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPE13_SPICLK0); s3c_gpio_cfgpin(S3C2410_GPE(12), S3C2410_GPE12_SPIMOSI0); s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPE11_SPIMISO0); s3c2410_gpio_pullup(S3C2410_GPE(11), 0); s3c2410_gpio_pullup(S3C2410_GPE(13), 0); } else { s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT); s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT); s3c_gpio_setpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE); s3c_gpio_setpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE); s3c_gpio_setpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE); } }
gpl-2.0
MikeC84/mac_kernel_lge_hammerhead
drivers/clocksource/i8253.c
5034
4934
/*
 * i8253 PIT clocksource
 */
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/timex.h>
#include <linux/module.h>
#include <linux/i8253.h>
#include <linux/smp.h>

/*
 * Protects access to I/O ports
 *
 * 0040-0043 : timer0, i8253 / i8254
 * 0061-0061 : NMI Control Register which contains two speaker control bits.
 */
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);

#ifdef CONFIG_CLKSRC_I8253
/*
 * Since the PIT overflows every tick, its not very useful
 * to just read by itself. So use jiffies to emulate a free
 * running counter:
 */
static cycle_t i8253_read(struct clocksource *cs)
{
	/* Last latched count and the jiffies value it was taken under;
	 * both are only touched with i8253_lock held. */
	static int old_count;
	static u32 old_jifs;
	unsigned long flags;
	int count;
	u32 jifs;

	raw_spin_lock_irqsave(&i8253_lock, flags);
	/*
	 * Although our caller may have the read side of xtime_lock,
	 * this is now a seqlock, and we are cheating in this routine
	 * by having side effects on state that we cannot undo if
	 * there is a collision on the seqlock and our caller has to
	 * retry.  (Namely, old_jifs and old_count.)  So we must treat
	 * jiffies as volatile despite the lock.  We read jiffies
	 * before latching the timer count to guarantee that although
	 * the jiffies value might be older than the count (that is,
	 * the counter may underflow between the last point where
	 * jiffies was incremented and the point where we latch the
	 * count), it cannot be newer.
	 */
	jifs = jiffies;
	outb_p(0x00, PIT_MODE);		/* latch the count ASAP */
	count = inb_p(PIT_CH0);		/* read the latched count */
	count |= inb_p(PIT_CH0) << 8;	/* high byte of the 16-bit count */

	/* VIA686a test code... reset the latch if count > max + 1 */
	if (count > PIT_LATCH) {
		outb_p(0x34, PIT_MODE);
		outb_p(PIT_LATCH & 0xff, PIT_CH0);
		outb_p(PIT_LATCH >> 8, PIT_CH0);
		count = PIT_LATCH - 1;
	}

	/*
	 * It's possible for count to appear to go the wrong way for a
	 * couple of reasons:
	 *
	 *  1. The timer counter underflows, but we haven't handled the
	 *     resulting interrupt and incremented jiffies yet.
	 *  2. Hardware problem with the timer, not giving us continuous time,
	 *     the counter does small "jumps" upwards on some Pentium systems,
	 *     (see c't 95/10 page 335 for Neptun bug.)
	 *
	 * Previous attempts to handle these cases intelligently were
	 * buggy, so we just do the simple thing now.
	 */
	if (count > old_count && jifs == old_jifs)
		count = old_count;

	old_count = count;
	old_jifs = jifs;

	raw_spin_unlock_irqrestore(&i8253_lock, flags);

	/* The PIT counts down; invert so the emulated counter counts up. */
	count = (PIT_LATCH - 1) - count;

	return (cycle_t)(jifs * PIT_LATCH) + count;
}

static struct clocksource i8253_cs = {
	.name		= "pit",
	.rating		= 110,
	.read		= i8253_read,
	.mask		= CLOCKSOURCE_MASK(32),
};

/* Register the PIT as a clocksource running at PIT_TICK_RATE Hz. */
int __init clocksource_i8253_init(void)
{
	return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE);
}
#endif

#ifdef CONFIG_CLKEVT_I8253
/*
 * Initialize the PIT timer.
 *
 * This is also called after resume to bring the PIT into operation again.
 */
static void init_pit_timer(enum clock_event_mode mode,
			   struct clock_event_device *evt)
{
	raw_spin_lock(&i8253_lock);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* binary, mode 2, LSB/MSB, ch 0 */
		outb_p(0x34, PIT_MODE);
		outb_p(PIT_LATCH & 0xff , PIT_CH0);	/* LSB */
		outb_p(PIT_LATCH >> 8 , PIT_CH0);	/* MSB */
		break;

	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		/* Only stop the channel if it was actually running. */
		if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
		    evt->mode == CLOCK_EVT_MODE_ONESHOT) {
			outb_p(0x30, PIT_MODE);
			outb_p(0, PIT_CH0);
			outb_p(0, PIT_CH0);
		}
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		/* One shot setup */
		outb_p(0x38, PIT_MODE);
		break;

	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here */
		break;
	}
	raw_spin_unlock(&i8253_lock);
}

/*
 * Program the next event in oneshot mode
 *
 * Delta is given in PIT ticks
 */
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
	raw_spin_lock(&i8253_lock);
	outb_p(delta & 0xff , PIT_CH0);	/* LSB */
	outb_p(delta >> 8 , PIT_CH0);	/* MSB */
	raw_spin_unlock(&i8253_lock);

	return 0;
}

/*
 * On UP the PIT can serve all of the possible timer functions. On SMP systems
 * it can be solely used for the global tick.
 */
struct clock_event_device i8253_clockevent = {
	.name		= "pit",
	.features	= CLOCK_EVT_FEAT_PERIODIC,
	.set_mode	= init_pit_timer,
	.set_next_event = pit_next_event,
};

/*
 * Initialize the conversion factor and the min/max deltas of the clock event
 * structure and register the clock event source with the framework.
 */
void __init clockevent_i8253_init(bool oneshot)
{
	if (oneshot)
		i8253_clockevent.features |= CLOCK_EVT_FEAT_ONESHOT;
	/*
	 * Start pit with the boot cpu mask. x86 might make it global
	 * when it is used as broadcast device later.
	 */
	i8253_clockevent.cpumask = cpumask_of(smp_processor_id());

	clockevents_config_and_register(&i8253_clockevent, PIT_TICK_RATE,
					0xF, 0x7FFF);
}
#endif
gpl-2.0
MoKee/android_kernel_asus_grouper
tools/power/cpupower/utils/helpers/sysfs.c
5290
8945
/* * (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de> * (C) 2011 Thomas Renninger <trenn@novell.com> Novell Inc. * * Licensed under the terms of the GNU GPL License version 2. */ #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include "helpers/sysfs.h" unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) { int fd; ssize_t numread; fd = open(path, O_RDONLY); if (fd == -1) return 0; numread = read(fd, buf, buflen - 1); if (numread < 1) { close(fd); return 0; } buf[numread] = '\0'; close(fd); return (unsigned int) numread; } static unsigned int sysfs_write_file(const char *path, const char *value, size_t len) { int fd; ssize_t numwrite; fd = open(path, O_WRONLY); if (fd == -1) return 0; numwrite = write(fd, value, len); if (numwrite < 1) { close(fd); return 0; } close(fd); return (unsigned int) numwrite; } /* * Detect whether a CPU is online * * Returns: * 1 -> if CPU is online * 0 -> if CPU is offline * negative errno values in error case */ int sysfs_is_cpu_online(unsigned int cpu) { char path[SYSFS_PATH_MAX]; int fd; ssize_t numread; unsigned long long value; char linebuf[MAX_LINE_LEN]; char *endp; struct stat statbuf; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu); if (stat(path, &statbuf) != 0) return 0; /* * kernel without CONFIG_HOTPLUG_CPU * -> cpuX directory exists, but not cpuX/online file */ snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu); if (stat(path, &statbuf) != 0) return 1; fd = open(path, O_RDONLY); if (fd == -1) return -errno; numread = read(fd, linebuf, MAX_LINE_LEN - 1); if (numread < 1) { close(fd); return -EIO; } linebuf[numread] = '\0'; close(fd); value = strtoull(linebuf, &endp, 0); if (value > 1 || value < 0) return -EINVAL; return value; } /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ /* * helper function to read file from /sys into given buffer 
* fname is a relative path under "cpuX/cpuidle/stateX/" dir * cstates starting with 0, C0 is not counted as cstate. * This means if you want C1 info, pass 0 as idlestate param */ unsigned int sysfs_idlestate_read_file(unsigned int cpu, unsigned int idlestate, const char *fname, char *buf, size_t buflen) { char path[SYSFS_PATH_MAX]; int fd; ssize_t numread; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", cpu, idlestate, fname); fd = open(path, O_RDONLY); if (fd == -1) return 0; numread = read(fd, buf, buflen - 1); if (numread < 1) { close(fd); return 0; } buf[numread] = '\0'; close(fd); return (unsigned int) numread; } /* read access to files which contain one numeric value */ enum idlestate_value { IDLESTATE_USAGE, IDLESTATE_POWER, IDLESTATE_LATENCY, IDLESTATE_TIME, MAX_IDLESTATE_VALUE_FILES }; static const char *idlestate_value_files[MAX_IDLESTATE_VALUE_FILES] = { [IDLESTATE_USAGE] = "usage", [IDLESTATE_POWER] = "power", [IDLESTATE_LATENCY] = "latency", [IDLESTATE_TIME] = "time", }; static unsigned long long sysfs_idlestate_get_one_value(unsigned int cpu, unsigned int idlestate, enum idlestate_value which) { unsigned long long value; unsigned int len; char linebuf[MAX_LINE_LEN]; char *endp; if (which >= MAX_IDLESTATE_VALUE_FILES) return 0; len = sysfs_idlestate_read_file(cpu, idlestate, idlestate_value_files[which], linebuf, sizeof(linebuf)); if (len == 0) return 0; value = strtoull(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return 0; return value; } /* read access to files which contain one string */ enum idlestate_string { IDLESTATE_DESC, IDLESTATE_NAME, MAX_IDLESTATE_STRING_FILES }; static const char *idlestate_string_files[MAX_IDLESTATE_STRING_FILES] = { [IDLESTATE_DESC] = "desc", [IDLESTATE_NAME] = "name", }; static char *sysfs_idlestate_get_one_string(unsigned int cpu, unsigned int idlestate, enum idlestate_string which) { char linebuf[MAX_LINE_LEN]; char *result; unsigned int len; if (which >= 
MAX_IDLESTATE_STRING_FILES) return NULL; len = sysfs_idlestate_read_file(cpu, idlestate, idlestate_string_files[which], linebuf, sizeof(linebuf)); if (len == 0) return NULL; result = strdup(linebuf); if (result == NULL) return NULL; if (result[strlen(result) - 1] == '\n') result[strlen(result) - 1] = '\0'; return result; } unsigned long sysfs_get_idlestate_latency(unsigned int cpu, unsigned int idlestate) { return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_LATENCY); } unsigned long sysfs_get_idlestate_usage(unsigned int cpu, unsigned int idlestate) { return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_USAGE); } unsigned long long sysfs_get_idlestate_time(unsigned int cpu, unsigned int idlestate) { return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_TIME); } char *sysfs_get_idlestate_name(unsigned int cpu, unsigned int idlestate) { return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_NAME); } char *sysfs_get_idlestate_desc(unsigned int cpu, unsigned int idlestate) { return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_DESC); } /* * Returns number of supported C-states of CPU core cpu * Negativ in error case * Zero if cpuidle does not export any C-states */ int sysfs_get_idlestate_count(unsigned int cpu) { char file[SYSFS_PATH_MAX]; struct stat statbuf; int idlestates = 1; snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle"); if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode)) return -ENODEV; snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu); if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode)) return 0; while (stat(file, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) { snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state%d", cpu, idlestates); idlestates++; } idlestates--; return idlestates; } /* CPUidle general /sys/devices/system/cpu/cpuidle/ sysfs access ********/ /* * helper function to read file from /sys into given buffer * fname is a relative path under 
"cpu/cpuidle/" dir */ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf, size_t buflen) { char path[SYSFS_PATH_MAX]; snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname); return sysfs_read_file(path, buf, buflen); } /* read access to files which contain one string */ enum cpuidle_string { CPUIDLE_GOVERNOR, CPUIDLE_GOVERNOR_RO, CPUIDLE_DRIVER, MAX_CPUIDLE_STRING_FILES }; static const char *cpuidle_string_files[MAX_CPUIDLE_STRING_FILES] = { [CPUIDLE_GOVERNOR] = "current_governor", [CPUIDLE_GOVERNOR_RO] = "current_governor_ro", [CPUIDLE_DRIVER] = "current_driver", }; static char *sysfs_cpuidle_get_one_string(enum cpuidle_string which) { char linebuf[MAX_LINE_LEN]; char *result; unsigned int len; if (which >= MAX_CPUIDLE_STRING_FILES) return NULL; len = sysfs_cpuidle_read_file(cpuidle_string_files[which], linebuf, sizeof(linebuf)); if (len == 0) return NULL; result = strdup(linebuf); if (result == NULL) return NULL; if (result[strlen(result) - 1] == '\n') result[strlen(result) - 1] = '\0'; return result; } char *sysfs_get_cpuidle_governor(void) { char *tmp = sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR_RO); if (!tmp) return sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR); else return tmp; } char *sysfs_get_cpuidle_driver(void) { return sysfs_cpuidle_get_one_string(CPUIDLE_DRIVER); } /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ /* * Get sched_mc or sched_smt settings * Pass "mc" or "smt" as argument * * Returns negative value on failure */ int sysfs_get_sched(const char *smt_mc) { unsigned long value; char linebuf[MAX_LINE_LEN]; char *endp; char path[SYSFS_PATH_MAX]; if (strcmp("mc", smt_mc) && strcmp("smt", smt_mc)) return -EINVAL; snprintf(path, sizeof(path), PATH_TO_CPU "sched_%s_power_savings", smt_mc); if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) return -1; value = strtoul(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return -1; return value; } /* * Get sched_mc or sched_smt 
settings * Pass "mc" or "smt" as argument * * Returns negative value on failure */ int sysfs_set_sched(const char *smt_mc, int val) { char linebuf[MAX_LINE_LEN]; char path[SYSFS_PATH_MAX]; struct stat statbuf; if (strcmp("mc", smt_mc) && strcmp("smt", smt_mc)) return -EINVAL; snprintf(path, sizeof(path), PATH_TO_CPU "sched_%s_power_savings", smt_mc); sprintf(linebuf, "%d", val); if (stat(path, &statbuf) != 0) return -ENODEV; if (sysfs_write_file(path, linebuf, MAX_LINE_LEN) == 0) return -1; return 0; }
gpl-2.0
koradiavatsal/Viper-kernel
drivers/rtc/rtc-at32ap700x.c
5802
7505
/*
 * An RTC driver for the AVR32 AT32AP700x processor series.
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/io.h>

/*
 * This is a bare-bones RTC. It runs during most system sleep states, but has
 * no battery backup and gets reset during system restart.  It must be
 * initialized from an external clock (network, I2C, etc) before it can be of
 * much use.
 *
 * The alarm functionality is limited by the hardware, not supporting
 * periodic interrupts.
 */

/* Register offsets and bit positions of the AT32AP700x RTC block. */
#define RTC_CTRL		0x00
#define RTC_CTRL_EN		   0
#define RTC_CTRL_PCLR		   1
#define RTC_CTRL_TOPEN		   2
#define RTC_CTRL_PSEL		   8

#define RTC_VAL			0x04

#define RTC_TOP			0x08

#define RTC_IER			0x10
#define RTC_IER_TOPI		   0

#define RTC_IDR			0x14
#define RTC_IDR_TOPI		   0

#define RTC_IMR			0x18
#define RTC_IMR_TOPI		   0

#define RTC_ISR			0x1c
#define RTC_ISR_TOPI		   0

#define RTC_ICR			0x20
#define RTC_ICR_TOPI		   0

/* Build a single-bit mask / a shifted bitfield from the names above. */
#define RTC_BIT(name)		(1 << RTC_##name)
#define RTC_BF(name, value)	((value) << RTC_##name)

#define rtc_readl(dev, reg)				\
	__raw_readl((dev)->regs + RTC_##reg)
#define rtc_writel(dev, reg, value)			\
	__raw_writel((value), (dev)->regs + RTC_##reg)

/* Per-device state for one AT32AP700x RTC instance. */
struct rtc_at32ap700x {
	struct rtc_device	*rtc;		/* registered RTC class device */
	void __iomem		*regs;		/* mapped register base */
	unsigned long		alarm_time;	/* alarm, in UNIX seconds */
	unsigned long		irq;
	/* Protect against concurrent register access. */
	spinlock_t		lock;
};

/* Read the current time: the VAL register counts UNIX seconds directly. */
static int at32_rtc_readtime(struct device *dev, struct rtc_time *tm)
{
	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
	unsigned long now;

	now = rtc_readl(rtc, VAL);
	rtc_time_to_tm(now, tm);

	return 0;
}

/* Set the current time by writing UNIX seconds into VAL. */
static int at32_rtc_settime(struct device *dev, struct rtc_time *tm)
{
	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
	unsigned long now;
	int ret;

	ret = rtc_tm_to_time(tm, &now);
	if (ret == 0)
		rtc_writel(rtc, VAL, now);

	return ret;
}

/*
 * Report the programmed alarm time; "enabled" mirrors the TOPI interrupt
 * mask, "pending" the raw TOPI status bit.
 */
static int at32_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);

	spin_lock_irq(&rtc->lock);
	rtc_time_to_tm(rtc->alarm_time, &alrm->time);
	alrm->enabled = rtc_readl(rtc, IMR) & RTC_BIT(IMR_TOPI) ? 1 : 0;
	alrm->pending = rtc_readl(rtc, ISR) & RTC_BIT(ISR_TOPI) ? 1 : 0;
	spin_unlock_irq(&rtc->lock);

	return 0;
}

/*
 * Program the alarm (TOP register).  Alarms in the past are rejected with
 * -EINVAL; CTRL_TOPEN is toggled according to alrm->enabled.
 */
static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
	unsigned long rtc_unix_time;
	unsigned long alarm_unix_time;
	int ret;

	rtc_unix_time = rtc_readl(rtc, VAL);

	ret = rtc_tm_to_time(&alrm->time, &alarm_unix_time);
	if (ret)
		return ret;

	if (alarm_unix_time < rtc_unix_time)
		return -EINVAL;

	spin_lock_irq(&rtc->lock);
	rtc->alarm_time = alarm_unix_time;
	rtc_writel(rtc, TOP, rtc->alarm_time);
	if (alrm->enabled)
		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
				| RTC_BIT(CTRL_TOPEN));
	else
		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
				& ~RTC_BIT(CTRL_TOPEN));
	spin_unlock_irq(&rtc->lock);

	return ret;
}

/*
 * Enable/disable the alarm interrupt.  Enabling fails with -EINVAL when
 * the alarm time already lies in the past.
 */
static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
	int ret = 0;

	spin_lock_irq(&rtc->lock);

	if(enabled) {
		if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
			ret = -EINVAL;
			goto out;
		}
		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
				| RTC_BIT(CTRL_TOPEN));
		/* Clear stale TOPI status before unmasking the irq. */
		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
		rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
	} else {
		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
				& ~RTC_BIT(CTRL_TOPEN));
		rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
	}
out:
	spin_unlock_irq(&rtc->lock);

	return ret;
}

/*
 * Alarm (TOP) interrupt handler: acknowledge and mask TOPI, disable the
 * alarm, rewind VAL to the alarm time, and notify the RTC core.
 */
static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_at32ap700x *rtc = (struct rtc_at32ap700x *)dev_id;
	unsigned long isr = rtc_readl(rtc, ISR);
	unsigned long events = 0;
	int ret = IRQ_NONE;

	spin_lock(&rtc->lock);

	if (isr & RTC_BIT(ISR_TOPI)) {
		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
		rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
				& ~RTC_BIT(CTRL_TOPEN));
		rtc_writel(rtc, VAL, rtc->alarm_time);
		events = RTC_AF | RTC_IRQF;
		rtc_update_irq(rtc->rtc, 1, events);
		ret = IRQ_HANDLED;
	}

	spin_unlock(&rtc->lock);

	return ret;
}

static struct rtc_class_ops at32_rtc_ops = {
	.read_time	= at32_rtc_readtime,
	.set_time	= at32_rtc_settime,
	.read_alarm	= at32_rtc_readalarm,
	.set_alarm	= at32_rtc_setalarm,
	.alarm_irq_enable = at32_rtc_alarm_irq_enable,
};

/*
 * Probe: map registers, optionally start the counter at 1 Hz, hook the
 * shared irq and register with the RTC class.
 */
static int __init at32_rtc_probe(struct platform_device *pdev)
{
	struct resource *regs;
	struct rtc_at32ap700x *rtc;
	int irq;
	int ret;

	rtc = kzalloc(sizeof(struct rtc_at32ap700x), GFP_KERNEL);
	if (!rtc) {
		dev_dbg(&pdev->dev, "out of memory\n");
		return -ENOMEM;
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_dbg(&pdev->dev, "no mmio resource defined\n");
		ret = -ENXIO;
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_dbg(&pdev->dev, "could not get irq\n");
		ret = -ENXIO;
		goto out;
	}

	rtc->irq = irq;
	rtc->regs = ioremap(regs->start, resource_size(regs));
	if (!rtc->regs) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "could not map I/O memory\n");
		goto out;
	}
	spin_lock_init(&rtc->lock);

	/*
	 * Maybe init RTC: count from zero at 1 Hz, disable wrap irq.
	 *
	 * Do not reset VAL register, as it can hold an old time
	 * from last JTAG reset.
	 */
	if (!(rtc_readl(rtc, CTRL) & RTC_BIT(CTRL_EN))) {
		rtc_writel(rtc, CTRL, RTC_BIT(CTRL_PCLR));
		rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
		rtc_writel(rtc, CTRL, RTC_BF(CTRL_PSEL, 0xe)
				| RTC_BIT(CTRL_EN));
	}

	ret = request_irq(irq, at32_rtc_interrupt, IRQF_SHARED, "rtc", rtc);
	if (ret) {
		dev_dbg(&pdev->dev, "could not request irq %d\n", irq);
		goto out_iounmap;
	}

	platform_set_drvdata(pdev, rtc);

	rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
				&at32_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc->rtc)) {
		dev_dbg(&pdev->dev, "could not register rtc device\n");
		ret = PTR_ERR(rtc->rtc);
		goto out_free_irq;
	}

	device_init_wakeup(&pdev->dev, 1);

	dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
			(unsigned long)rtc->regs, rtc->irq);

	return 0;

out_free_irq:
	platform_set_drvdata(pdev, NULL);
	free_irq(irq, rtc);
out_iounmap:
	iounmap(rtc->regs);
out:
	kfree(rtc);
	return ret;
}

/* Remove: tear down in reverse probe order. */
static int __exit at32_rtc_remove(struct platform_device *pdev)
{
	struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev);

	device_init_wakeup(&pdev->dev, 0);

	free_irq(rtc->irq, rtc);
	iounmap(rtc->regs);
	rtc_device_unregister(rtc->rtc);
	kfree(rtc);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

MODULE_ALIAS("platform:at32ap700x_rtc");

static struct platform_driver at32_rtc_driver = {
	.remove		= __exit_p(at32_rtc_remove),
	.driver		= {
		.name	= "at32ap700x_rtc",
		.owner	= THIS_MODULE,
	},
};

static int __init at32_rtc_init(void)
{
	return platform_driver_probe(&at32_rtc_driver, at32_rtc_probe);
}
module_init(at32_rtc_init);

static void __exit at32_rtc_exit(void)
{
	platform_driver_unregister(&at32_rtc_driver);
}
module_exit(at32_rtc_exit);

MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
MODULE_DESCRIPTION("Real time clock for AVR32 AT32AP700x");
MODULE_LICENSE("GPL");
gpl-2.0
davidmueller13/kcal
arch/m32r/kernel/sys_m32r.c
7850
2567
/* * linux/arch/m32r/kernel/sys_m32r.c * * This file contains various random system calls that * have a non-standard calling sequence on the Linux/M32R platform. * * Taken from i386 version. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/utsname.h> #include <linux/ipc.h> #include <asm/uaccess.h> #include <asm/cachectl.h> #include <asm/cacheflush.h> #include <asm/syscall.h> #include <asm/unistd.h> /* * sys_tas() - test-and-set */ asmlinkage int sys_tas(int __user *addr) { int oldval; if (!access_ok(VERIFY_WRITE, addr, sizeof (int))) return -EFAULT; /* atomic operation: * oldval = *addr; *addr = 1; */ __asm__ __volatile__ ( DCACHE_CLEAR("%0", "r4", "%1") " .fillinsn\n" "1:\n" " lock %0, @%1 -> unlock %2, @%1\n" "2:\n" /* NOTE: * The m32r processor can accept interrupts only * at the 32-bit instruction boundary. * So, in the above code, the "unlock" instruction * can be executed continuously after the "lock" * instruction execution without any interruptions. */ ".section .fixup,\"ax\"\n" " .balign 4\n" "3: ldi %0, #%3\n" " seth r14, #high(2b)\n" " or3 r14, r14, #low(2b)\n" " jmp r14\n" ".previous\n" ".section __ex_table,\"a\"\n" " .balign 4\n" " .long 1b,3b\n" ".previous\n" : "=&r" (oldval) : "r" (addr), "r" (1), "i"(-EFAULT) : "r14", "memory" #ifdef CONFIG_CHIP_M32700_TS1 , "r4" #endif /* CONFIG_CHIP_M32700_TS1 */ ); return oldval; } asmlinkage int sys_cacheflush(void *addr, int bytes, int cache) { /* This should flush more selectively ... */ _flush_cache_all(); return 0; } asmlinkage int sys_cachectl(char *addr, int nbytes, int op) { /* Not implemented yet. */ return -ENOSYS; } /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. 
*/ int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]) { register long __scno __asm__ ("r7") = __NR_execve; register long __arg3 __asm__ ("r2") = (long)(envp); register long __arg2 __asm__ ("r1") = (long)(argv); register long __res __asm__ ("r0") = (long)(filename); __asm__ __volatile__ ( "trap #" SYSCALL_VECTOR "|| nop" : "=r" (__res) : "r" (__scno), "0" (__res), "r" (__arg2), "r" (__arg3) : "memory"); return __res; }
gpl-2.0
cmetcalf-tilera/linux-tile
arch/m32r/kernel/sys_m32r.c
7850
2567
/* * linux/arch/m32r/kernel/sys_m32r.c * * This file contains various random system calls that * have a non-standard calling sequence on the Linux/M32R platform. * * Taken from i386 version. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/utsname.h> #include <linux/ipc.h> #include <asm/uaccess.h> #include <asm/cachectl.h> #include <asm/cacheflush.h> #include <asm/syscall.h> #include <asm/unistd.h> /* * sys_tas() - test-and-set */ asmlinkage int sys_tas(int __user *addr) { int oldval; if (!access_ok(VERIFY_WRITE, addr, sizeof (int))) return -EFAULT; /* atomic operation: * oldval = *addr; *addr = 1; */ __asm__ __volatile__ ( DCACHE_CLEAR("%0", "r4", "%1") " .fillinsn\n" "1:\n" " lock %0, @%1 -> unlock %2, @%1\n" "2:\n" /* NOTE: * The m32r processor can accept interrupts only * at the 32-bit instruction boundary. * So, in the above code, the "unlock" instruction * can be executed continuously after the "lock" * instruction execution without any interruptions. */ ".section .fixup,\"ax\"\n" " .balign 4\n" "3: ldi %0, #%3\n" " seth r14, #high(2b)\n" " or3 r14, r14, #low(2b)\n" " jmp r14\n" ".previous\n" ".section __ex_table,\"a\"\n" " .balign 4\n" " .long 1b,3b\n" ".previous\n" : "=&r" (oldval) : "r" (addr), "r" (1), "i"(-EFAULT) : "r14", "memory" #ifdef CONFIG_CHIP_M32700_TS1 , "r4" #endif /* CONFIG_CHIP_M32700_TS1 */ ); return oldval; } asmlinkage int sys_cacheflush(void *addr, int bytes, int cache) { /* This should flush more selectively ... */ _flush_cache_all(); return 0; } asmlinkage int sys_cachectl(char *addr, int nbytes, int op) { /* Not implemented yet. */ return -ENOSYS; } /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. 
*/ int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]) { register long __scno __asm__ ("r7") = __NR_execve; register long __arg3 __asm__ ("r2") = (long)(envp); register long __arg2 __asm__ ("r1") = (long)(argv); register long __res __asm__ ("r0") = (long)(filename); __asm__ __volatile__ ( "trap #" SYSCALL_VECTOR "|| nop" : "=r" (__res) : "r" (__scno), "0" (__res), "r" (__arg2), "r" (__arg3) : "memory"); return __res; }
gpl-2.0
netico-solutions/linux-am335x-xeno
drivers/video/i810/i810_gtf.c
15530
9148
/*-*- linux-c -*- * linux/drivers/video/i810_main.h -- Intel 810 Non-discrete Video Timings * (VESA GTF) * * Copyright (C) 2001 Antonino Daplas<adaplas@pol.net> * All Rights Reserved * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/kernel.h> #include "i810_regs.h" #include "i810.h" #include "i810_main.h" /* * FIFO and Watermark tables - based almost wholly on i810_wmark.c in * XFree86 v4.03 by Precision Insight. Slightly modified for integer * operation, instead of float */ struct wm_info { u32 freq; u32 wm; }; static struct wm_info i810_wm_8_100[] = { { 15, 0x0070c000 }, { 19, 0x0070c000 }, { 25, 0x22003000 }, { 28, 0x22003000 }, { 31, 0x22003000 }, { 36, 0x22007000 }, { 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22008000 }, { 50, 0x22008000 }, { 56, 0x22008000 }, { 65, 0x22008000 }, { 75, 0x22008000 }, { 78, 0x22008000 }, { 80, 0x22008000 }, { 94, 0x22008000 }, { 96, 0x22107000 }, { 99, 0x22107000 }, { 108, 0x22107000 }, { 121, 0x22107000 }, { 128, 0x22107000 }, { 132, 0x22109000 }, { 135, 0x22109000 }, { 157, 0x2210b000 }, { 162, 0x2210b000 }, { 175, 0x2210b000 }, { 189, 0x2220e000 }, { 195, 0x2220e000 }, { 202, 0x2220e000 }, { 204, 0x2220e000 }, { 218, 0x2220f000 }, { 229, 0x22210000 }, { 234, 0x22210000 }, }; static struct wm_info i810_wm_16_100[] = { { 15, 0x0070c000 }, { 19, 0x0020c000 }, { 25, 0x22006000 }, { 28, 0x22006000 }, { 31, 0x22007000 }, { 36, 0x22007000 }, { 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22009000 }, { 50, 0x22009000 }, { 56, 0x22108000 }, { 65, 0x2210e000 }, { 75, 0x2210e000 }, { 78, 0x2210e000 }, { 80, 0x22210000 }, { 94, 0x22210000 }, { 96, 0x22210000 }, { 99, 0x22210000 }, { 108, 0x22210000 }, { 121, 0x22210000 }, { 128, 0x22210000 }, { 132, 0x22314000 }, { 135, 0x22314000 }, { 157, 0x22415000 }, { 162, 0x22416000 }, { 175, 0x22416000 }, { 189, 0x22416000 }, { 195, 0x22416000 }, { 202, 
0x22416000 }, { 204, 0x22416000 }, { 218, 0x22416000 }, { 229, 0x22416000 }, }; static struct wm_info i810_wm_24_100[] = { { 15, 0x0020c000 }, { 19, 0x0040c000 }, { 25, 0x22009000 }, { 28, 0x22009000 }, { 31, 0x2200a000 }, { 36, 0x2210c000 }, { 40, 0x2210c000 }, { 45, 0x2210c000 }, { 49, 0x22111000 }, { 50, 0x22111000 }, { 56, 0x22111000 }, { 65, 0x22214000 }, { 75, 0x22214000 }, { 78, 0x22215000 }, { 80, 0x22216000 }, { 94, 0x22218000 }, { 96, 0x22418000 }, { 99, 0x22418000 }, { 108, 0x22418000 }, { 121, 0x22418000 }, { 128, 0x22419000 }, { 132, 0x22519000 }, { 135, 0x4441d000 }, { 157, 0x44419000 }, { 162, 0x44419000 }, { 175, 0x44419000 }, { 189, 0x44419000 }, { 195, 0x44419000 }, { 202, 0x44419000 }, { 204, 0x44419000 }, }; static struct wm_info i810_wm_8_133[] = { { 15, 0x0070c000 }, { 19, 0x0070c000 }, { 25, 0x22003000 }, { 28, 0x22003000 }, { 31, 0x22003000 }, { 36, 0x22007000 }, { 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22008000 }, { 50, 0x22008000 }, { 56, 0x22008000 }, { 65, 0x22008000 }, { 75, 0x22008000 }, { 78, 0x22008000 }, { 80, 0x22008000 }, { 94, 0x22008000 }, { 96, 0x22107000 }, { 99, 0x22107000 }, { 108, 0x22107000 }, { 121, 0x22107000 }, { 128, 0x22107000 }, { 132, 0x22109000 }, { 135, 0x22109000 }, { 157, 0x2210b000 }, { 162, 0x2210b000 }, { 175, 0x2210b000 }, { 189, 0x2220e000 }, { 195, 0x2220e000 }, { 202, 0x2220e000 }, { 204, 0x2220e000 }, { 218, 0x2220f000 }, { 229, 0x22210000 }, { 234, 0x22210000 }, }; static struct wm_info i810_wm_16_133[] = { { 15, 0x0020c000 }, { 19, 0x0020c000 }, { 25, 0x22006000 }, { 28, 0x22006000 }, { 31, 0x22007000 }, { 36, 0x22007000 }, { 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22009000 }, { 50, 0x22009000 }, { 56, 0x22108000 }, { 65, 0x2210e000 }, { 75, 0x2210e000 }, { 78, 0x2210e000 }, { 80, 0x22210000 }, { 94, 0x22210000 }, { 96, 0x22210000 }, { 99, 0x22210000 }, { 108, 0x22210000 }, { 121, 0x22210000 }, { 128, 0x22210000 }, { 132, 0x22314000 }, { 135, 0x22314000 }, { 157, 0x22415000 }, { 162, 
0x22416000 }, { 175, 0x22416000 }, { 189, 0x22416000 }, { 195, 0x22416000 }, { 202, 0x22416000 }, { 204, 0x22416000 }, { 218, 0x22416000 }, { 229, 0x22416000 }, }; static struct wm_info i810_wm_24_133[] = { { 15, 0x0020c000 }, { 19, 0x00408000 }, { 25, 0x22009000 }, { 28, 0x22009000 }, { 31, 0x2200a000 }, { 36, 0x2210c000 }, { 40, 0x2210c000 }, { 45, 0x2210c000 }, { 49, 0x22111000 }, { 50, 0x22111000 }, { 56, 0x22111000 }, { 65, 0x22214000 }, { 75, 0x22214000 }, { 78, 0x22215000 }, { 80, 0x22216000 }, { 94, 0x22218000 }, { 96, 0x22418000 }, { 99, 0x22418000 }, { 108, 0x22418000 }, { 121, 0x22418000 }, { 128, 0x22419000 }, { 132, 0x22519000 }, { 135, 0x4441d000 }, { 157, 0x44419000 }, { 162, 0x44419000 }, { 175, 0x44419000 }, { 189, 0x44419000 }, { 195, 0x44419000 }, { 202, 0x44419000 }, { 204, 0x44419000 }, }; void round_off_xres(u32 *xres) { } void round_off_yres(u32 *xres, u32 *yres) { } /** * i810fb_encode_registers - encode @var to hardware register values * @var: pointer to var structure * @par: pointer to hardware par structure * * DESCRIPTION: * Timing values in @var will be converted to appropriate * register values of @par. 
 */
void i810fb_encode_registers(const struct fb_var_screeninfo *var,
			     struct i810fb_par *par, u32 xres, u32 yres)
{
	int n, blank_s, blank_e;
	u8 __iomem *mmio = par->mmio_start_virtual;
	u8 msr = 0;

	/* Horizontal */
	/* htotal: total character clocks per line, in 8-pixel units, minus 5
	 * (CRTC convention); bit 8 lives in CR35. */
	n = ((xres + var->right_margin + var->hsync_len +
	      var->left_margin) >> 3) - 5;
	par->regs.cr00 = (u8) n;
	par->regs.cr35 = (u8) ((n >> 8) & 1);

	/* xres: horizontal display end, in 8-pixel units, minus 1 */
	par->regs.cr01 = (u8) ((xres >> 3) - 1);

	/* hblank: blank end is the last character clock of the line; blank
	 * start is clamped so the blank window is at most 127 character
	 * clocks and never starts before the display ends. */
	blank_e = (xres + var->right_margin + var->hsync_len +
		   var->left_margin) >> 3;
	blank_e--;
	blank_s = blank_e - 127;
	if (blank_s < (xres >> 3))
		blank_s = xres >> 3;
	par->regs.cr02 = (u8) blank_s;
	par->regs.cr03 = (u8) (blank_e & 0x1F);
	/* bit 5 of blank_e goes to bit 7 of CR05 */
	par->regs.cr05 = (u8) ((blank_e & (1 << 5)) << 2);
	par->regs.cr39 = (u8) ((blank_e >> 6) & 1);

	/* hsync: start in CR04, end (low 5 bits) merged into CR05 */
	par->regs.cr04 = (u8) ((xres + var->right_margin) >> 3);
	par->regs.cr05 |= (u8) (((xres + var->right_margin +
				  var->hsync_len) >> 3) & 0x1F);

	/* Vertical */
	/* vtotal: total scanlines minus 2 (CRTC convention); high bits
	 * in CR30 */
	n = yres + var->lower_margin + var->vsync_len + var->upper_margin - 2;
	par->regs.cr06 = (u8) (n & 0xFF);
	par->regs.cr30 = (u8) ((n >> 8) & 0x0F);

	/* vsync: start in CR10/CR32; end (low nibble) read-modify-write
	 * into CR11 to preserve the register's upper control bits */
	n = yres + var->lower_margin;
	par->regs.cr10 = (u8) (n & 0xFF);
	par->regs.cr32 = (u8) ((n >> 8) & 0x0F);
	par->regs.cr11 = i810_readb(CR11, mmio) & ~0x0F;
	par->regs.cr11 |= (u8) ((yres + var->lower_margin +
				 var->vsync_len) & 0x0F);

	/* yres: vertical display end, minus 1 */
	n = yres - 1;
	par->regs.cr12 = (u8) (n & 0xFF);
	par->regs.cr31 = (u8) ((n >> 8) & 0x0F);

	/* vblank: same clamping scheme as hblank, in scanlines */
	blank_e = yres + var->lower_margin + var->vsync_len +
		var->upper_margin;
	blank_e--;
	blank_s = blank_e - 127;
	if (blank_s < yres)
		blank_s = yres;
	par->regs.cr15 = (u8) (blank_s & 0xFF);
	par->regs.cr33 = (u8) ((blank_s >> 8) & 0x0F);
	par->regs.cr16 = (u8) (blank_e & 0xFF);
	par->regs.cr09 = 0;

	/* sync polarity: FB_SYNC_*_HIGH_ACT clear means negative polarity,
	 * which sets the corresponding MSR bit */
	if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
		msr |= 1 << 6;
	if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
		msr |= 1 << 7;
	par->regs.msr = msr;

	/* interlace: bit 7 enables, low bits hold yres>>4 (presumably the
	 * interlace offset the hardware expects — TODO confirm against the
	 * i810 programming docs) */
	if (var->vmode & FB_VMODE_INTERLACED)
		par->interlace = (1 << 7) | ((u8) (var->yres >> 4));
	else
		par->interlace = 0;

	if (var->vmode & FB_VMODE_DOUBLE)
		par->regs.cr09 |= 1 << 7;

	/* overlay: packed horizontal-total / active-width word used by the
	 * overlay engine (both fields offset by 32) */
	par->ovract = ((var->xres + var->right_margin + var->hsync_len +
			var->left_margin - 32) | ((var->xres - 32) << 16));
}

/* Intentionally a no-op for this chip: no extra timing fields need
 * filling in beyond what i810fb_encode_registers() consumes. */
void i810fb_fill_var_timings(struct fb_var_screeninfo *var)
{
}

/**
 * i810_get_watermark - gets watermark
 * @var: pointer to fb_var_screeninfo
 * @par: pointer to i810fb_par structure
 *
 * DESCRIPTION:
 * Gets the required watermark based on
 * pixelclock and RAMBUS frequency.
 *
 * Picks the table matching the memory frequency (100 vs 133 MHz) and
 * bits-per-pixel, then returns the watermark of the table entry whose
 * frequency is closest to the requested pixel clock.
 *
 * RETURNS:
 * watermark (0 if bits_per_pixel matches no table entry)
 */
u32 i810_get_watermark(const struct fb_var_screeninfo *var,
		       struct i810fb_par *par)
{
	struct wm_info *wmark = NULL;
	u32 i, size = 0, pixclock, wm_best = 0, min, diff;

	if (par->mem_freq == 100) {
		switch (var->bits_per_pixel) {
		case 8:
			wmark = i810_wm_8_100;
			size = ARRAY_SIZE(i810_wm_8_100);
			break;
		case 16:
			wmark = i810_wm_16_100;
			size = ARRAY_SIZE(i810_wm_16_100);
			break;
		case 24:
		case 32:
			wmark = i810_wm_24_100;
			size = ARRAY_SIZE(i810_wm_24_100);
		}
	} else {
		switch (var->bits_per_pixel) {
		case 8:
			wmark = i810_wm_8_133;
			size = ARRAY_SIZE(i810_wm_8_133);
			break;
		case 16:
			wmark = i810_wm_16_133;
			size = ARRAY_SIZE(i810_wm_16_133);
			break;
		case 24:
		case 32:
			wmark = i810_wm_24_133;
			size = ARRAY_SIZE(i810_wm_24_133);
		}
	}

	/* var->pixclock is in picoseconds per pixel; this converts to MHz */
	pixclock = 1000000/var->pixclock;
	min = ~0;
	/* linear nearest-neighbour search over the selected table */
	for (i = 0; i < size; i++) {
		if (pixclock <= wmark[i].freq)
			diff = wmark[i].freq - pixclock;
		else
			diff = pixclock - wmark[i].freq;
		if (diff < min) {
			wm_best = wmark[i].wm;
			min = diff;
		}
	}
	return wm_best;
}
gpl-2.0
AaronNGray/Freescale-kernel
arch/x86/kvm/mmu_audit.c
171
6501
/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/ratelimit.h>

/* All audit complaints are tagged with the current audit point so the
 * log shows which stage of MMU processing detected the inconsistency. */
#define audit_printk(kvm, fmt, args...)		\
	printk(KERN_ERR "audit: (%s) error: "	\
		fmt, audit_point_name[kvm->arch.audit_point], ##args)

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);

/*
 * Recursively visit every spte reachable from shadow page @sp, invoking
 * @fn on each entry before descending into present non-leaf children.
 */
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn, int level)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 *ent = sp->spt;

		fn(vcpu, ent + i, level);

		if (is_shadow_present_pte(ent[i]) &&
		      !is_last_spte(ent[i], level)) {
			struct kvm_mmu_page *child;

			child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
			__mmu_spte_walk(vcpu, child, fn, level - 1);
		}
	}
}

/*
 * Walk the whole shadow page table of @vcpu, handling both the 64-bit
 * single-root layout and the 4-entry PAE root layout.
 */
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		__mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
		return;
	}

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			/* PAE roots point at level-2 shadow pages */
			__mmu_spte_walk(vcpu, sp, fn, 2);
		}
	}

	return;
}

typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

/* Apply @fn to every shadow page currently active in @kvm. */
static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
		fn(kvm, sp);
}

/*
 * Verify that a leaf spte maps the host physical address that currently
 * backs its gfn; complain when the translation is stale.
 */
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	pfn_t pfn;
	hpa_t hpa;

	sp = page_header(__pa(sptep));

	/* Unsync shadow pages are only legal at the last level. */
	if (sp->unsync) {
		if (level != PT_PAGE_TABLE_LEVEL) {
			audit_printk(vcpu->kvm, "unsync sp: %p "
				     "level = %d\n", sp, level);
			return;
		}
	}

	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
		return;

	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}

	hpa =  pfn << PAGE_SHIFT;
	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
		/* was "ent %llxn": the format string lacked the backslash,
		 * printing a literal 'n' and no newline */
		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
			     "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
			     hpa, *sptep);
}

/*
 * Check that a present spte is reachable through the rmap of its gfn.
 * Rate-limited because a corrupt table can spam identical reports.
 */
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	if (!gfn_to_memslot(kvm, gfn)) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
		       (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}

/* Per-spte hook: only leaf present sptes are expected to have rmaps. */
static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
		inspect_spte_has_rmap(vcpu->kvm, sptep);
}

/* After a root sync, no shadow page on the walk should still be unsync. */
static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));

	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
			     "root.\n", sp);
}

/* Verify rmap coverage for every rmap-eligible spte of a last-level sp. */
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int i;

	if (sp->role.level != PT_PAGE_TABLE_LEVEL)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (!is_rmap_spte(sp->spt[i]))
			continue;

		inspect_spte_has_rmap(kvm, sp->spt + i);
	}
}

/*
 * A synced, non-direct, valid shadow page must be write-protected: walk
 * the rmap of its gfn and report any spte that is still writable.
 */
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	u64 *spte;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	slot = gfn_to_memslot(kvm, sp->gfn);
	rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		if (is_writable_pte(*spte))
			audit_printk(kvm, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
		spte = rmap_next(kvm, rmapp, spte);
	}
}

/* Run the per-shadow-page checks. */
static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	check_mappings_rmap(kvm, sp);
	audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
	walk_all_active_sps(kvm, audit_sp);
}

/* Combined per-spte audit hook used by the vcpu walk. */
static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	audit_sptes_have_rmaps(vcpu, sptep, level);
	audit_mappings(vcpu, sptep, level);
	audit_spte_after_sync(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, audit_spte);
}

/*
 * Tracepoint callback: audit the whole MMU state at audit point @point.
 * Rate-limited so that frequent audit points do not flood the log.
 */
static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!__ratelimit(&ratelimit_state))
		return;

	vcpu->kvm->arch.audit_point = point;
	audit_all_active_sps(vcpu->kvm);
	audit_vcpu_spte(vcpu);
}

static bool mmu_audit;

/* Attach the audit callback to the kvm_mmu_audit tracepoint (idempotent). */
static void mmu_audit_enable(void)
{
	int ret;

	if (mmu_audit)
		return;

	ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
	WARN_ON(ret);

	mmu_audit = true;
}

/* Detach the audit callback and wait for in-flight probes to finish. */
static void mmu_audit_disable(void)
{
	if (!mmu_audit)
		return;

	unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
	tracepoint_synchronize_unregister();
	mmu_audit = false;
}

/*
 * Module-parameter setter: "0" disables auditing, "1" enables it,
 * anything else is rejected with -EINVAL.
 */
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	unsigned long enable;

	ret = strict_strtoul(val, 10, &enable);
	if (ret < 0)
		return -EINVAL;

	switch (enable) {
	case 0:
		mmu_audit_disable();
		break;
	case 1:
		mmu_audit_enable();
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct kernel_param_ops audit_param_ops = {
	.set = mmu_audit_set,
	.get = param_get_bool,
};

module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
gpl-2.0
dalinaum/studyak
arch/s390/lib/uaccess_std.c
1451
8120
/*
 *  arch/s390/lib/uaccess_std.c
 *
 *  Standard user space access functions based on mvcp/mvcs and doing
 *  interesting things in the secondary space mode.
 *
 *  Copyright (C) IBM Corp. 2006
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	       Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

/* Select 31-bit or 64-bit mnemonics so the same asm templates work
 * for both ESA and z/Architecture builds. */
#ifndef __s390x__
#define AHI	"ahi"
#define ALR	"alr"
#define CLR	"clr"
#define LHI	"lhi"
#define SLR	"slr"
#else
#define AHI	"aghi"
#define ALR	"algr"
#define CLR	"clgr"
#define LHI	"lghi"
#define SLR	"slgr"
#endif

/*
 * Copy @size bytes from user space @ptr to kernel buffer @x using MVCP
 * in 256-byte chunks.  On a fault the copy restarts byte-wise up to the
 * faulting page boundary, and the remaining destination bytes are
 * zeroed with an XC loop.  Returns the number of bytes NOT copied
 * (0 on complete success — label 8 clears %0).
 */
size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
{
	unsigned long tmp1, tmp2;

	tmp1 = -256UL;
	asm volatile(
		"0: mvcp 0(%0,%2),0(%1),%3\n"
		"10:jz 8f\n"
		"1:"ALR" %0,%3\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"2: mvcp 0(%0,%2),0(%1),%3\n"
		"11:jnz 1b\n"
		" j 8f\n"
		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
		" "LHI" %3,-4096\n"
		" nr %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		" "SLR" %4,%1\n"
		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
		" jnh 5f\n"
		"4: mvcp 0(%4,%2),0(%1),%3\n"
		"12:"SLR" %0,%4\n"
		" "ALR" %2,%4\n"
		"5:"LHI" %4,-1\n"
		" "ALR" %4,%0\n"	/* copy remaining size, subtract 1 */
		" bras %3,7f\n"	/* memset loop */
		" xc 0(1,%2),0(%2)\n"
		"6: xc 0(256,%2),0(%2)\n"
		" la %2,256(%2)\n"
		"7:"AHI" %4,-256\n"
		" jnm 6b\n"
		" ex %4,0(%3)\n"
		" j 9f\n"
		"8:"SLR" %0,%0\n"
		"9: \n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}

/* Dispatch small copies to the MVCP fast path, larger ones to the
 * page-table walker variant. */
static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
				       void *x)
{
	if (size <= 1024)
		return copy_from_user_std(size, ptr, x);
	return copy_from_user_pt(size, ptr, x);
}

/*
 * Copy @size bytes from kernel buffer @x to user space @ptr using MVCS,
 * 256 bytes at a time, with a byte-wise retry up to the faulting page
 * boundary on exception.  Returns the number of bytes NOT copied.
 */
size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
{
	unsigned long tmp1, tmp2;

	tmp1 = -256UL;
	asm volatile(
		"0: mvcs 0(%0,%1),0(%2),%3\n"
		"7: jz 5f\n"
		"1:"ALR" %0,%3\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"2: mvcs 0(%0,%1),0(%2),%3\n"
		"8: jnz 1b\n"
		" j 5f\n"
		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
		" "LHI" %3,-4096\n"
		" nr %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		" "SLR" %4,%1\n"
		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
		" jnh 6f\n"
		"4: mvcs 0(%4,%1),0(%2),%3\n"
		"9:"SLR" %0,%4\n"
		" j 6f\n"
		"5:"SLR" %0,%0\n"
		"6: \n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}

/* Same small/large split as the copy-from direction. */
static size_t copy_to_user_std_check(size_t size, void __user *ptr,
				     const void *x)
{
	if (size <= 1024)
		return copy_to_user_std(size, ptr, x);
	return copy_to_user_pt(size, ptr, x);
}

/*
 * Copy user-to-user with MVC while running in secondary space mode
 * (SACF 256).  The EX at label 4 executes the one-byte MVC with the
 * residual length.  Returns the number of bytes NOT copied.
 */
static size_t copy_in_user_std(size_t size, void __user *to,
			       const void __user *from)
{
	unsigned long tmp1;

	asm volatile(
		"   "AHI" %0,-1\n"
		"   jo	  5f\n"
		"   sacf  256\n"
		"   bras  %3,3f\n"
		"0:"AHI"  %0,257\n"
		"1: mvc	  0(1,%1),0(%2)\n"
		"   la	  %1,1(%1)\n"
		"   la	  %2,1(%2)\n"
		"   "AHI" %0,-1\n"
		"   jnz	  1b\n"
		"   j	  5f\n"
		"2: mvc	  0(256,%1),0(%2)\n"
		"   la	  %1,256(%1)\n"
		"   la	  %2,256(%2)\n"
		"3:"AHI"  %0,-256\n"
		"   jnm	  2b\n"
		"4: ex	  %0,1b-0b(%3)\n"
		"   sacf  0\n"
		"5: "SLR"  %0,%0\n"
		"6:\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return size;
}

/*
 * Zero @size user-space bytes at @to with an XC loop in secondary space
 * mode, taking care not to cross the next page boundary in one go.
 * Returns the number of bytes NOT cleared.
 */
static size_t clear_user_std(size_t size, void __user *to)
{
	unsigned long tmp1, tmp2;

	asm volatile(
		"   "AHI" %0,-1\n"
		"   jo	  5f\n"
		"   sacf  256\n"
		"   bras  %3,3f\n"
		"   xc	  0(1,%1),0(%1)\n"
		"0:"AHI"  %0,257\n"
		"   la	  %2,255(%1)\n" /* %2 = ptr + 255 */
		"   srl	  %2,12\n"
		"   sll	  %2,12\n"	/* %2 = (ptr + 255) & -4096 */
		"   "SLR" %2,%1\n"
		"   "CLR" %0,%2\n"	/* clear crosses next page boundary? */
		"   jnh	  5f\n"
		"   "AHI" %2,-1\n"
		"1: ex	  %2,0(%3)\n"
		"   "AHI" %2,1\n"
		"   "SLR" %0,%2\n"
		"   j	  5f\n"
		"2: xc	  0(256,%1),0(%1)\n"
		"   la	  %1,256(%1)\n"
		"3:"AHI"  %0,-256\n"
		"   jnm	  2b\n"
		"4: ex	  %0,0(%3)\n"
		"   sacf  0\n"
		"5: "SLR"  %0,%0\n"
		"6:\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}

/*
 * Bounded user-space strlen via SRST (search string) in secondary space
 * mode.  Returns length INCLUDING the terminating '\0'; @size unchanged
 * means no terminator was found within the bound.
 */
size_t strnlen_user_std(size_t size, const char __user *src)
{
	register unsigned long reg0 asm("0") = 0UL;
	unsigned long tmp1, tmp2;

	asm volatile(
		"   la	  %2,0(%1)\n"
		"   la	  %3,0(%0,%1)\n"
		"   "SLR" %0,%0\n"
		"   sacf  256\n"
		"0: srst  %3,%2\n"
		"   jo	  0b\n"
		"   la	  %0,1(%3)\n"	/* strnlen_user results includes \0 */
		"   "SLR" %0,%1\n"
		"1: sacf  0\n"
		EX_TABLE(0b,1b)
		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

/*
 * Bounded strncpy from user space: first locate the terminator with
 * SRST, then copy (terminator included) with MVCP.  Returns the string
 * length without the '\0', or -EFAULT on an access fault (label 8).
 */
size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
{
	register unsigned long reg0 asm("0") = 0UL;
	unsigned long tmp1, tmp2;

	asm volatile(
		"   la	  %3,0(%1)\n"
		"   la	  %4,0(%0,%1)\n"
		"   sacf  256\n"
		"0: srst  %4,%3\n"
		"   jo	  0b\n"
		"   sacf  0\n"
		"   la	  %0,0(%4)\n"
		"   jh	  1f\n"		/* found \0 in string ? */
		"   "AHI" %4,1\n"		/* include \0 in copy */
		"1:"SLR"  %0,%1\n"		/* %0 = return length (without \0) */
		"   "SLR" %4,%1\n"		/* %4 = copy length (including \0) */
		"2: mvcp  0(%4,%2),0(%1),%5\n"
		"   jz	  9f\n"
		"3:"AHI"  %4,-256\n"
		"   la	  %1,256(%1)\n"
		"   la	  %2,256(%2)\n"
		"4: mvcp  0(%4,%2),0(%1),%5\n"
		"   jnz	  3b\n"
		"   j	  9f\n"
		"7: sacf  0\n"
		"8:"LHI"  %0,%6\n"
		"9:\n"
		EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
		: "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0), "K" (-EFAULT) : "cc", "memory");
	return size;
}

/* Load-op-compare-and-swap loop on a user word in secondary space mode;
 * @insn computes the new value from old (%1) / oparg (%5) into %2. */
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile(							\
		"   sacf  256\n"					\
		"0: l	  %1,0(%6)\n"					\
		"1:"insn						\
		"2: cs	  %1,%2,0(%6)\n"				\
		"3: jl	  1b\n"						\
		"   lhi	  %0,0\n"					\
		"4: sacf  0\n"						\
		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)		\
		: "=d" (ret), "=&d" (oldval), "=&d" (newval),		\
		  "=m" (*uaddr)						\
		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		  "m" (*uaddr) : "cc");

/*
 * Atomic futex operation on a user-space int.  Stores the value read
 * before the operation in *@old; returns 0 on success, -EFAULT on an
 * access fault, or -ENOSYS for unknown @op.
 */
int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	*old = oldval;
	return ret;
}

/*
 * Compare-and-swap on a user-space int via CS in secondary space mode.
 * Returns the value read from *@uaddr, or -EFAULT on an access fault.
 */
int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
{
	int ret;

	asm volatile(
		"   sacf 256\n"
		"0: cs   %1,%4,0(%5)\n"
		"1: lr   %0,%1\n"
		"2: sacf 0\n"
		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		: "cc", "memory" );
	return ret;
}

/* Ops table exported to the s390 uaccess framework. */
struct uaccess_ops uaccess_std = {
	.copy_from_user = copy_from_user_std_check,
	.copy_from_user_small = copy_from_user_std,
	.copy_to_user = copy_to_user_std_check,
	.copy_to_user_small = copy_to_user_std,
	.copy_in_user = copy_in_user_std,
	.clear_user = clear_user_std,
	.strnlen_user = strnlen_user_std,
	.strncpy_from_user = strncpy_from_user_std,
	.futex_atomic_op = futex_atomic_op_std,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
};
gpl-2.0
placiano/NBKernel_Lollipop
drivers/mtd/nand/s3c2410.c
2219
28451
/* linux/drivers/mtd/nand/s3c2410.c * * Copyright © 2004-2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * Samsung S3C2410/S3C2440/S3C2412 NAND driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) "nand-s3c2410: " fmt #ifdef CONFIG_MTD_NAND_S3C2410_DEBUG #define DEBUG #endif #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <plat/regs-nand.h> #include <linux/platform_data/mtd-nand-s3c2410.h> /* new oob placement block for use with hardware ecc generation */ static struct nand_ecclayout nand_hw_eccoob = { .eccbytes = 3, .eccpos = {0, 1, 2}, .oobfree = {{8, 8}} }; /* controller and mtd information */ struct s3c2410_nand_info; /** * struct s3c2410_nand_mtd - driver MTD structure * @mtd: The MTD instance to pass to the MTD layer. * @chip: The NAND chip information. * @set: The platform information supplied for this set of NAND chips. 
* @info: Link back to the hardware information. * @scan_res: The result from calling nand_scan_ident(). */ struct s3c2410_nand_mtd { struct mtd_info mtd; struct nand_chip chip; struct s3c2410_nand_set *set; struct s3c2410_nand_info *info; int scan_res; }; enum s3c_cpu_type { TYPE_S3C2410, TYPE_S3C2412, TYPE_S3C2440, }; enum s3c_nand_clk_state { CLOCK_DISABLE = 0, CLOCK_ENABLE, CLOCK_SUSPEND, }; /* overview of the s3c2410 nand state */ /** * struct s3c2410_nand_info - NAND controller state. * @mtds: An array of MTD instances on this controoler. * @platform: The platform data for this board. * @device: The platform device we bound to. * @clk: The clock resource for this controller. * @regs: The area mapped for the hardware registers. * @sel_reg: Pointer to the register controlling the NAND selection. * @sel_bit: The bit in @sel_reg to select the NAND chip. * @mtd_count: The number of MTDs created from this controller. * @save_sel: The contents of @sel_reg to be saved over suspend. * @clk_rate: The clock rate from @clk. * @clk_state: The current clock state. * @cpu_type: The exact type of this controller. 
*/ struct s3c2410_nand_info { /* mtd info */ struct nand_hw_control controller; struct s3c2410_nand_mtd *mtds; struct s3c2410_platform_nand *platform; /* device info */ struct device *device; struct clk *clk; void __iomem *regs; void __iomem *sel_reg; int sel_bit; int mtd_count; unsigned long save_sel; unsigned long clk_rate; enum s3c_nand_clk_state clk_state; enum s3c_cpu_type cpu_type; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; #endif }; /* conversion functions */ static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd) { return container_of(mtd, struct s3c2410_nand_mtd, mtd); } static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd) { return s3c2410_nand_mtd_toours(mtd)->info; } static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev) { return platform_get_drvdata(dev); } static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev) { return dev->dev.platform_data; } static inline int allow_clk_suspend(struct s3c2410_nand_info *info) { #ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP return 1; #else return 0; #endif } /** * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock. * @info: The controller instance. * @new_state: State to which clock should be set. */ static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info, enum s3c_nand_clk_state new_state) { if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND) return; if (info->clk_state == CLOCK_ENABLE) { if (new_state != CLOCK_ENABLE) clk_disable(info->clk); } else { if (new_state == CLOCK_ENABLE) clk_enable(info->clk); } info->clk_state = new_state; } /* timing calculations */ #define NS_IN_KHZ 1000000 /** * s3c_nand_calc_rate - calculate timing data. * @wanted: The cycle time in nanoseconds. * @clk: The clock rate in kHz. * @max: The maximum divider value. * * Calculate the timing value from the given parameters. 
*/ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max) { int result; result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ); pr_debug("result %d from %ld, %d\n", result, clk, wanted); if (result > max) { pr_err("%d ns is too big for current clock rate %ld\n", wanted, clk); return -1; } if (result < 1) result = 1; return result; } #define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk)) /* controller setup */ /** * s3c2410_nand_setrate - setup controller timing information. * @info: The controller instance. * * Given the information supplied by the platform, calculate and set * the necessary timing registers in the hardware to generate the * necessary timing cycles to the hardware. */ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info) { struct s3c2410_platform_nand *plat = info->platform; int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4; int tacls, twrph0, twrph1; unsigned long clkrate = clk_get_rate(info->clk); unsigned long uninitialized_var(set), cfg, uninitialized_var(mask); unsigned long flags; /* calculate the timing information for the controller */ info->clk_rate = clkrate; clkrate /= 1000; /* turn clock into kHz for ease of use */ if (plat != NULL) { tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max); twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8); twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8); } else { /* default timings */ tacls = tacls_max; twrph0 = 8; twrph1 = 8; } if (tacls < 0 || twrph0 < 0 || twrph1 < 0) { dev_err(info->device, "cannot get suitable timings\n"); return -EINVAL; } dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate)); switch (info->cpu_type) { case TYPE_S3C2410: mask = (S3C2410_NFCONF_TACLS(3) | S3C2410_NFCONF_TWRPH0(7) | S3C2410_NFCONF_TWRPH1(7)); set = S3C2410_NFCONF_EN; set |= S3C2410_NFCONF_TACLS(tacls - 1); set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1); 
set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1); break; case TYPE_S3C2440: case TYPE_S3C2412: mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) | S3C2440_NFCONF_TWRPH0(7) | S3C2440_NFCONF_TWRPH1(7)); set = S3C2440_NFCONF_TACLS(tacls - 1); set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1); set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1); break; default: BUG(); } local_irq_save(flags); cfg = readl(info->regs + S3C2410_NFCONF); cfg &= ~mask; cfg |= set; writel(cfg, info->regs + S3C2410_NFCONF); local_irq_restore(flags); dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg); return 0; } /** * s3c2410_nand_inithw - basic hardware initialisation * @info: The hardware state. * * Do the basic initialisation of the hardware, using s3c2410_nand_setrate() * to setup the hardware access speeds and set the controller to be enabled. */ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info) { int ret; ret = s3c2410_nand_setrate(info); if (ret < 0) return ret; switch (info->cpu_type) { case TYPE_S3C2410: default: break; case TYPE_S3C2440: case TYPE_S3C2412: /* enable the controller and de-assert nFCE */ writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); } return 0; } /** * s3c2410_nand_select_chip - select the given nand chip * @mtd: The MTD instance for this chip. * @chip: The chip number. * * This is called by the MTD layer to either select a given chip for the * @mtd instance, or to indicate that the access has finished and the * chip can be de-selected. * * The routine ensures that the nFCE line is correctly setup, and any * platform specific selection code is called to route nFCE to the specific * chip. 
*/ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip) { struct s3c2410_nand_info *info; struct s3c2410_nand_mtd *nmtd; struct nand_chip *this = mtd->priv; unsigned long cur; nmtd = this->priv; info = nmtd->info; if (chip != -1) s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); cur = readl(info->sel_reg); if (chip == -1) { cur |= info->sel_bit; } else { if (nmtd->set != NULL && chip > nmtd->set->nr_chips) { dev_err(info->device, "invalid chip %d\n", chip); return; } if (info->platform != NULL) { if (info->platform->select_chip != NULL) (info->platform->select_chip) (nmtd->set, chip); } cur &= ~info->sel_bit; } writel(cur, info->sel_reg); if (chip == -1) s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); } /* s3c2410_nand_hwcontrol * * Issue command and address cycles to the chip */ static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, info->regs + S3C2410_NFCMD); else writeb(cmd, info->regs + S3C2410_NFADDR); } /* command and control functions */ static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, info->regs + S3C2440_NFCMD); else writeb(cmd, info->regs + S3C2440_NFADDR); } /* s3c2410_nand_devready() * * returns 0 if the nand is busy, 1 if it is ready */ static int s3c2410_nand_devready(struct mtd_info *mtd) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY; } static int s3c2440_nand_devready(struct mtd_info *mtd) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY; } static int s3c2412_nand_devready(struct mtd_info *mtd) { struct s3c2410_nand_info *info = 
s3c2410_nand_mtd_toinfo(mtd); return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY; } /* ECC handling functions */ #ifdef CONFIG_MTD_NAND_S3C2410_HWECC static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned int diff0, diff1, diff2; unsigned int bit, byte; pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc); diff0 = read_ecc[0] ^ calc_ecc[0]; diff1 = read_ecc[1] ^ calc_ecc[1]; diff2 = read_ecc[2] ^ calc_ecc[2]; pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n", __func__, 3, read_ecc, 3, calc_ecc, diff0, diff1, diff2); if (diff0 == 0 && diff1 == 0 && diff2 == 0) return 0; /* ECC is ok */ /* sometimes people do not think about using the ECC, so check * to see if we have an 0xff,0xff,0xff read ECC and then ignore * the error, on the assumption that this is an un-eccd page. */ if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff && info->platform->ignore_unset_ecc) return 0; /* Can we correct this ECC (ie, one row and column change). 
* Note, this is similar to the 256 error code on smartmedia */ if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 && ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 && ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) { /* calculate the bit position of the error */ bit = ((diff2 >> 3) & 1) | ((diff2 >> 4) & 2) | ((diff2 >> 5) & 4); /* calculate the byte position of the error */ byte = ((diff2 << 7) & 0x100) | ((diff1 << 0) & 0x80) | ((diff1 << 1) & 0x40) | ((diff1 << 2) & 0x20) | ((diff1 << 3) & 0x10) | ((diff0 >> 4) & 0x08) | ((diff0 >> 3) & 0x04) | ((diff0 >> 2) & 0x02) | ((diff0 >> 1) & 0x01); dev_dbg(info->device, "correcting error bit %d, byte %d\n", bit, byte); dat[byte] ^= (1 << bit); return 1; } /* if there is only one bit difference in the ECC, then * one of only a row or column parity has changed, which * means the error is most probably in the ECC itself */ diff0 |= (diff1 << 8); diff0 |= (diff2 << 16); if ((diff0 & ~(1<<fls(diff0))) == 0) return 1; return -1; } /* ECC functions * * These allow the s3c2410 and s3c2440 to use the controller's ECC * generator block to ECC the data as it passes through] */ static void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ctrl; ctrl = readl(info->regs + S3C2410_NFCONF); ctrl |= S3C2410_NFCONF_INITECC; writel(ctrl, info->regs + S3C2410_NFCONF); } static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ctrl; ctrl = readl(info->regs + S3C2440_NFCONT); writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, info->regs + S3C2440_NFCONT); } static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ctrl; ctrl = readl(info->regs + S3C2440_NFCONT); writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT); } static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char 
*dat, u_char *ecc_code) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0); ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1); ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2); pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code); return 0; } static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ecc = readl(info->regs + S3C2412_NFMECC0); ecc_code[0] = ecc; ecc_code[1] = ecc >> 8; ecc_code[2] = ecc >> 16; pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code); return 0; } static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ecc = readl(info->regs + S3C2440_NFMECC0); ecc_code[0] = ecc; ecc_code[1] = ecc >> 8; ecc_code[2] = ecc >> 16; pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff); return 0; } #endif /* over-ride the standard functions for a little more speed. 
We can * use read/write block to move the data buffers to/from the controller */ static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) { struct nand_chip *this = mtd->priv; readsb(this->IO_ADDR_R, buf, len); } static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); readsl(info->regs + S3C2440_NFDATA, buf, len >> 2); /* cleanup if we've got less than a word to do */ if (len & 3) { buf += len & ~3; for (; len & 3; len--) *buf++ = readb(info->regs + S3C2440_NFDATA); } } static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; writesb(this->IO_ADDR_W, buf, len); } static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); writesl(info->regs + S3C2440_NFDATA, buf, len >> 2); /* cleanup any fractional write */ if (len & 3) { buf += len & ~3; for (; len & 3; len--, buf++) writeb(*buf, info->regs + S3C2440_NFDATA); } } /* cpufreq driver support */ #ifdef CONFIG_CPU_FREQ static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct s3c2410_nand_info *info; unsigned long newclk; info = container_of(nb, struct s3c2410_nand_info, freq_transition); newclk = clk_get_rate(info->clk); if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) || (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) { s3c2410_nand_setrate(info); } return 0; } static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info) { info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition; return cpufreq_register_notifier(&info->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) { cpufreq_unregister_notifier(&info->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } #else static inline int 
s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info) { return 0; } static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) { } #endif /* device management functions */ static int s3c24xx_nand_remove(struct platform_device *pdev) { struct s3c2410_nand_info *info = to_nand_info(pdev); platform_set_drvdata(pdev, NULL); if (info == NULL) return 0; s3c2410_nand_cpufreq_deregister(info); /* Release all our mtds and their partitions, then go through * freeing the resources used */ if (info->mtds != NULL) { struct s3c2410_nand_mtd *ptr = info->mtds; int mtdno; for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) { pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); nand_release(&ptr->mtd); } } /* free the common resources */ if (!IS_ERR(info->clk)) s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); return 0; } static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, struct s3c2410_nand_mtd *mtd, struct s3c2410_nand_set *set) { if (set) { mtd->mtd.name = set->name; return mtd_device_parse_register(&mtd->mtd, NULL, NULL, set->partitions, set->nr_partitions); } return -ENODEV; } /** * s3c2410_nand_init_chip - initialise a single instance of an chip * @info: The base NAND controller the chip is on. * @nmtd: The new controller MTD instance to fill in. * @set: The information passed from the board specific platform data. * * Initialise the given @nmtd from the information in @info and @set. This * readies the structure for use with the MTD layer functions by ensuring * all pointers are setup and the necessary control routines selected. 
*/ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, struct s3c2410_nand_mtd *nmtd, struct s3c2410_nand_set *set) { struct nand_chip *chip = &nmtd->chip; void __iomem *regs = info->regs; chip->write_buf = s3c2410_nand_write_buf; chip->read_buf = s3c2410_nand_read_buf; chip->select_chip = s3c2410_nand_select_chip; chip->chip_delay = 50; chip->priv = nmtd; chip->options = set->options; chip->controller = &info->controller; switch (info->cpu_type) { case TYPE_S3C2410: chip->IO_ADDR_W = regs + S3C2410_NFDATA; info->sel_reg = regs + S3C2410_NFCONF; info->sel_bit = S3C2410_NFCONF_nFCE; chip->cmd_ctrl = s3c2410_nand_hwcontrol; chip->dev_ready = s3c2410_nand_devready; break; case TYPE_S3C2440: chip->IO_ADDR_W = regs + S3C2440_NFDATA; info->sel_reg = regs + S3C2440_NFCONT; info->sel_bit = S3C2440_NFCONT_nFCE; chip->cmd_ctrl = s3c2440_nand_hwcontrol; chip->dev_ready = s3c2440_nand_devready; chip->read_buf = s3c2440_nand_read_buf; chip->write_buf = s3c2440_nand_write_buf; break; case TYPE_S3C2412: chip->IO_ADDR_W = regs + S3C2440_NFDATA; info->sel_reg = regs + S3C2440_NFCONT; info->sel_bit = S3C2412_NFCONT_nFCE0; chip->cmd_ctrl = s3c2440_nand_hwcontrol; chip->dev_ready = s3c2412_nand_devready; if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT) dev_info(info->device, "System booted from NAND\n"); break; } chip->IO_ADDR_R = chip->IO_ADDR_W; nmtd->info = info; nmtd->mtd.priv = chip; nmtd->mtd.owner = THIS_MODULE; nmtd->set = set; #ifdef CONFIG_MTD_NAND_S3C2410_HWECC chip->ecc.calculate = s3c2410_nand_calculate_ecc; chip->ecc.correct = s3c2410_nand_correct_data; chip->ecc.mode = NAND_ECC_HW; chip->ecc.strength = 1; switch (info->cpu_type) { case TYPE_S3C2410: chip->ecc.hwctl = s3c2410_nand_enable_hwecc; chip->ecc.calculate = s3c2410_nand_calculate_ecc; break; case TYPE_S3C2412: chip->ecc.hwctl = s3c2412_nand_enable_hwecc; chip->ecc.calculate = s3c2412_nand_calculate_ecc; break; case TYPE_S3C2440: chip->ecc.hwctl = s3c2440_nand_enable_hwecc; 
chip->ecc.calculate = s3c2440_nand_calculate_ecc; break; } #else chip->ecc.mode = NAND_ECC_SOFT; #endif if (set->ecc_layout != NULL) chip->ecc.layout = set->ecc_layout; if (set->disable_ecc) chip->ecc.mode = NAND_ECC_NONE; switch (chip->ecc.mode) { case NAND_ECC_NONE: dev_info(info->device, "NAND ECC disabled\n"); break; case NAND_ECC_SOFT: dev_info(info->device, "NAND soft ECC\n"); break; case NAND_ECC_HW: dev_info(info->device, "NAND hardware ECC\n"); break; default: dev_info(info->device, "NAND ECC UNKNOWN\n"); break; } /* If you use u-boot BBT creation code, specifying this flag will * let the kernel fish out the BBT from the NAND, and also skip the * full NAND scan that can take 1/2s or so. Little things... */ if (set->flash_bbt) { chip->bbt_options |= NAND_BBT_USE_FLASH; chip->options |= NAND_SKIP_BBTSCAN; } } /** * s3c2410_nand_update_chip - post probe update * @info: The controller instance. * @nmtd: The driver version of the MTD instance. * * This routine is called after the chip probe has successfully completed * and the relevant per-chip information updated. This call ensure that * we update the internal state accordingly. * * The internal state is currently limited to the ECC state information. */ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info, struct s3c2410_nand_mtd *nmtd) { struct nand_chip *chip = &nmtd->chip; dev_dbg(info->device, "chip %p => page shift %d\n", chip, chip->page_shift); if (chip->ecc.mode != NAND_ECC_HW) return; /* change the behaviour depending on whether we are using * the large or small page nand device */ if (chip->page_shift > 10) { chip->ecc.size = 256; chip->ecc.bytes = 3; } else { chip->ecc.size = 512; chip->ecc.bytes = 3; chip->ecc.layout = &nand_hw_eccoob; } } /* s3c24xx_nand_probe * * called by device layer when it finds a device matching * one our driver can handled. 
This code checks to see if * it can allocate all necessary resources then calls the * nand layer to look for devices */ static int s3c24xx_nand_probe(struct platform_device *pdev) { struct s3c2410_platform_nand *plat = to_nand_plat(pdev); enum s3c_cpu_type cpu_type; struct s3c2410_nand_info *info; struct s3c2410_nand_mtd *nmtd; struct s3c2410_nand_set *sets; struct resource *res; int err = 0; int size; int nr_sets; int setno; cpu_type = platform_get_device_id(pdev)->driver_data; pr_debug("s3c2410_nand_probe(%p)\n", pdev); info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (info == NULL) { dev_err(&pdev->dev, "no memory for flash info\n"); err = -ENOMEM; goto exit_error; } platform_set_drvdata(pdev, info); spin_lock_init(&info->controller.lock); init_waitqueue_head(&info->controller.wq); /* get the clock source and enable it */ info->clk = devm_clk_get(&pdev->dev, "nand"); if (IS_ERR(info->clk)) { dev_err(&pdev->dev, "failed to get clock\n"); err = -ENOENT; goto exit_error; } s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); /* allocate and map the resource */ /* currently we assume we have the one resource */ res = pdev->resource; size = resource_size(res); info->device = &pdev->dev; info->platform = plat; info->cpu_type = cpu_type; info->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(info->regs)) { err = PTR_ERR(info->regs); goto exit_error; } dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs); /* initialise the hardware */ err = s3c2410_nand_inithw(info); if (err != 0) goto exit_error; sets = (plat != NULL) ? plat->sets : NULL; nr_sets = (plat != NULL) ? 
plat->nr_sets : 1; info->mtd_count = nr_sets; /* allocate our information */ size = nr_sets * sizeof(*info->mtds); info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); if (info->mtds == NULL) { dev_err(&pdev->dev, "failed to allocate mtd storage\n"); err = -ENOMEM; goto exit_error; } /* initialise all possible chips */ nmtd = info->mtds; for (setno = 0; setno < nr_sets; setno++, nmtd++) { pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info); s3c2410_nand_init_chip(info, nmtd, sets); nmtd->scan_res = nand_scan_ident(&nmtd->mtd, (sets) ? sets->nr_chips : 1, NULL); if (nmtd->scan_res == 0) { s3c2410_nand_update_chip(info, nmtd); nand_scan_tail(&nmtd->mtd); s3c2410_nand_add_partition(info, nmtd, sets); } if (sets != NULL) sets++; } err = s3c2410_nand_cpufreq_register(info); if (err < 0) { dev_err(&pdev->dev, "failed to init cpufreq support\n"); goto exit_error; } if (allow_clk_suspend(info)) { dev_info(&pdev->dev, "clock idle support enabled\n"); s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); } pr_debug("initialised ok\n"); return 0; exit_error: s3c24xx_nand_remove(pdev); if (err == 0) err = -EINVAL; return err; } /* PM Support */ #ifdef CONFIG_PM static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm) { struct s3c2410_nand_info *info = platform_get_drvdata(dev); if (info) { info->save_sel = readl(info->sel_reg); /* For the moment, we must ensure nFCE is high during * the time we are suspended. This really should be * handled by suspending the MTDs we are using, but * that is currently not the case. */ writel(info->save_sel | info->sel_bit, info->sel_reg); s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); } return 0; } static int s3c24xx_nand_resume(struct platform_device *dev) { struct s3c2410_nand_info *info = platform_get_drvdata(dev); unsigned long sel; if (info) { s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); s3c2410_nand_inithw(info); /* Restore the state of the nFCE line. 
*/ sel = readl(info->sel_reg); sel &= ~info->sel_bit; sel |= info->save_sel & info->sel_bit; writel(sel, info->sel_reg); s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); } return 0; } #else #define s3c24xx_nand_suspend NULL #define s3c24xx_nand_resume NULL #endif /* driver device registration */ static struct platform_device_id s3c24xx_driver_ids[] = { { .name = "s3c2410-nand", .driver_data = TYPE_S3C2410, }, { .name = "s3c2440-nand", .driver_data = TYPE_S3C2440, }, { .name = "s3c2412-nand", .driver_data = TYPE_S3C2412, }, { .name = "s3c6400-nand", .driver_data = TYPE_S3C2412, /* compatible with 2412 */ }, { } }; MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids); static struct platform_driver s3c24xx_nand_driver = { .probe = s3c24xx_nand_probe, .remove = s3c24xx_nand_remove, .suspend = s3c24xx_nand_suspend, .resume = s3c24xx_nand_resume, .id_table = s3c24xx_driver_ids, .driver = { .name = "s3c24xx-nand", .owner = THIS_MODULE, }, }; module_platform_driver(s3c24xx_nand_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
gpl-2.0
JonathanMegevand/test-bare-clone
arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
3243
1250
/* linux/arch/arm/plat-s3c24xx/spi-bus0-gpd8_9_10.c * * Copyright (c) 2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * S3C24XX SPI - gpio configuration for bus 1 on gpd8,9,10 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. */ #include <linux/kernel.h> #include <linux/gpio.h> #include <mach/spi.h> #include <mach/regs-gpio.h> void s3c24xx_spi_gpiocfg_bus1_gpd8_9_10(struct s3c2410_spi_info *spi, int enable) { printk(KERN_INFO "%s(%d)\n", __func__, enable); if (enable) { s3c_gpio_cfgpin(S3C2410_GPD(10), S3C2440_GPD10_SPICLK1); s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2440_GPD9_SPIMOSI1); s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2440_GPD8_SPIMISO1); s3c2410_gpio_pullup(S3C2410_GPD(10), 0); s3c2410_gpio_pullup(S3C2410_GPD(9), 0); } else { s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT); s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT); s3c_gpio_setpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE); s3c_gpio_setpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE); s3c_gpio_setpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE); } }
gpl-2.0
NooNameR/k2.6.35.14-ICS-
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
3499
4367
/**************************************************************************
 *
 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

/* One outstanding command-stream marker: the 32-bit fence sequence that
 * was emitted and the (raw monotonic) time at which it was submitted. */
struct vmw_fence {
	struct list_head head;
	uint32_t sequence;
	struct timespec submitted;
};

/* Initialise a fence queue: empty list, zero accumulated lag, and the
 * lag reference timestamp set to "now". */
void vmw_fence_queue_init(struct vmw_fence_queue *queue)
{
	INIT_LIST_HEAD(&queue->head);
	queue->lag = ns_to_timespec(0);
	getrawmonotonic(&queue->lag_time);
	spin_lock_init(&queue->lock);
}

/* Free every fence still on the queue.  The list heads are not unlinked
 * first — presumably the queue is being destroyed and never walked
 * again after this call (NOTE(review): confirm against callers). */
void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
{
	struct vmw_fence *fence, *next;

	spin_lock(&queue->lock);
	list_for_each_entry_safe(fence, next, &queue->head, head) {
		kfree(fence);
	}
	spin_unlock(&queue->lock);
}

/* Record that fence @sequence was just submitted: allocate a marker,
 * timestamp it, and append it to the queue.
 * Returns 0 or -ENOMEM.  May sleep (GFP_KERNEL allocation), so must not
 * be called from atomic context. */
int vmw_fence_push(struct vmw_fence_queue *queue,
		   uint32_t sequence)
{
	struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);

	if (unlikely(!fence))
		return -ENOMEM;

	fence->sequence = sequence;
	getrawmonotonic(&fence->submitted);

	spin_lock(&queue->lock);
	list_add_tail(&fence->head, &queue->head);
	spin_unlock(&queue->lock);

	return 0;
}

/* Retire every queued fence that @signaled_sequence covers, updating the
 * queue's lag to the age of the newest retired fence.
 *
 * The (1 << 30) test is wrap-safe serial-number arithmetic on the
 * unsigned 32-bit sequences: a difference larger than 2^30 means the
 * fence is "ahead of" the signaled sequence (not yet signaled), so it is
 * skipped.  An empty queue resets the lag to zero.
 *
 * Returns 0 if the lag was updated, -EBUSY if nothing could be retired.
 */
int vmw_fence_pull(struct vmw_fence_queue *queue,
		   uint32_t signaled_sequence)
{
	struct vmw_fence *fence, *next;
	struct timespec now;
	bool updated = false;

	spin_lock(&queue->lock);
	getrawmonotonic(&now);

	if (list_empty(&queue->head)) {
		queue->lag = ns_to_timespec(0);
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(fence, next, &queue->head, head) {
		if (signaled_sequence - fence->sequence > (1 << 30))
			continue;

		queue->lag = timespec_sub(now, fence->submitted);
		queue->lag_time = now;
		updated = true;
		list_del(&fence->head);
		kfree(fence);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}

/* Sum two timespecs.  Assumes both inputs are normalized (tv_nsec in
 * [0, 1e9)), so a single carry is sufficient. */
static struct timespec vmw_timespec_add(struct timespec t1,
					struct timespec t2)
{
	t1.tv_sec += t2.tv_sec;
	t1.tv_nsec += t2.tv_nsec;
	if (t1.tv_nsec >= 1000000000L) {
		t1.tv_sec += 1;
		t1.tv_nsec -= 1000000000L;
	}

	return t1;
}

/* Fold the wall-clock time elapsed since the last lag update into the
 * queue's lag and return the result.
 * NOTE(review): the return value is read after the lock is dropped, so
 * a concurrent update could be observed mid-change; presumably callers
 * tolerate an approximate lag. */
static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
{
	struct timespec now;

	spin_lock(&queue->lock);
	getrawmonotonic(&now);
	queue->lag = vmw_timespec_add(queue->lag,
				      timespec_sub(now, queue->lag_time));
	queue->lag_time = now;
	spin_unlock(&queue->lock);
	return queue->lag;
}

/* True if the queue's current lag is at most @us microseconds
 * (timespec_compare < 1 means "less than or equal"). */
static bool vmw_lag_lt(struct vmw_fence_queue *queue,
		       uint32_t us)
{
	struct timespec lag, cond;

	cond = ns_to_timespec((s64) us * 1000);
	lag = vmw_fifo_lag(queue);
	return (timespec_compare(&lag, &cond) < 1);
}

/* Throttle: block until the queue's lag drops to at most @us
 * microseconds.  Each iteration waits (up to 3*HZ per wait) on the
 * oldest queued fence — or on the device's current fence_seq if the
 * queue is empty — then retires what that wait signaled.
 * Returns 0 on success or the error from vmw_wait_fence(). */
int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_fence_queue *queue, uint32_t us)
{
	struct vmw_fence *fence;
	uint32_t sequence;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			sequence = atomic_read(&dev_priv->fence_seq);
		else {
			fence = list_first_entry(&queue->head,
						 struct vmw_fence, head);
			sequence = fence->sequence;
		}
		spin_unlock(&queue->lock);

		ret = vmw_wait_fence(dev_priv, false, sequence, true,
				     3*HZ);

		if (unlikely(ret != 0))
			return ret;

		/* Best-effort retire; -EBUSY here just means the next
		 * loop iteration will wait again. */
		(void) vmw_fence_pull(queue, sequence);
	}
	return 0;
}
gpl-2.0
beats4x/kernel_lge_g3-v10m
drivers/char/hw_random/core.c
3755
7711
/*
	Added support for the AMD Geode LX RNG
	(c) Copyright 2004-2005 Advanced Micro Devices, Inc.

	derived from

	Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
	(c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>

	derived from

	Hardware driver for the AMD 768 Random Number Generator (RNG)
	(c) Copyright 2001 Red Hat Inc <alan@redhat.com>

	derived from

	Hardware driver for Intel i810 Random Number Generator (RNG)
	Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
	Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>

	Added generic RNG API
	Copyright 2006 Michael Buesch <m@bues.ch>
	Copyright 2005 (c) MontaVista Software, Inc.

	Please read Documentation/hw_random.txt for details on use.

	----------------------------------------------------------
	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

 */

#include <linux/device.h>
#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <asm/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"
#define PFX			RNG_MODULE_NAME ": "
#define RNG_MISCDEV_MINOR	183 /* official */

/* The RNG currently backing /dev/hwrng; NULL when none is selected.
 * All of the state below is protected by rng_mutex. */
static struct hwrng *current_rng;
/* All registered RNGs, in registration order. */
static LIST_HEAD(rng_list);
static DEFINE_MUTEX(rng_mutex);
/* Number of not-yet-consumed bytes left at the tail of rng_buffer. */
static int data_avail;
/* Staging buffer for reads: at least 32 bytes, padded to a cache line. */
static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
	__cacheline_aligned;

/* Run the driver's optional init hook; no hook means success. */
static inline int hwrng_init(struct hwrng *rng)
{
	if (!rng->init)
		return 0;
	return rng->init(rng);
}

/* Run the driver's optional cleanup hook, tolerating a NULL rng. */
static inline void hwrng_cleanup(struct hwrng *rng)
{
	if (rng && rng->cleanup)
		rng->cleanup(rng);
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

/* Fetch bytes from @rng into @buffer.  Prefers the driver's bulk ->read;
 * otherwise polls ->data_present (when provided) and pulls one u32 via
 * ->data_read.  Returns the number of bytes obtained, 0 if none were
 * available, or a negative error from the driver. */
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			int wait) {
	int present;

	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

/* read() for /dev/hwrng.  Refills rng_buffer from the current RNG when
 * drained and hands bytes out from its tail (data_avail counts what is
 * left).  The mutex is dropped between iterations so other readers and
 * rng switches can interleave. */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;

	while (size) {
		if (mutex_lock_interruptible(&rng_mutex)) {
			err = -ERESTARTSYS;
			goto out;
		}

		if (!current_rng) {
			err = -ENODEV;
			goto out_unlock;
		}

		if (!data_avail) {
			bytes_read = rng_get_data(current_rng, rng_buffer,
				sizeof(rng_buffer),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			/* Nothing available right now; only an error for
			 * non-blocking readers, otherwise loop and retry. */
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			/* Consume from the tail of the staging buffer. */
			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&rng_mutex);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	/* GNU ?: — return bytes delivered if any, else the error. */
	return ret ? : err;

out_unlock:
	mutex_unlock(&rng_mutex);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static struct miscdevice rng_miscdev = {
	.minor		= RNG_MISCDEV_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
};

/* sysfs rng_current store: select the RNG whose name matches @buf.
 * Initialises the new RNG before tearing down the old one; returns the
 * init error, or @len on success (err ? : len). */
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (strcmp(rng->name, buf) == 0) {
			if (rng == current_rng) {
				err = 0;
				break;
			}
			err = hwrng_init(rng);
			if (err)
				break;
			hwrng_cleanup(current_rng);
			current_rng = rng;
			err = 0;
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}

/* sysfs rng_current show: name of the selected RNG, or "none". */
static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	int err;
	ssize_t ret;
	const char *name = "none";

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	if (current_rng)
		name = current_rng->name;
	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
	mutex_unlock(&rng_mutex);

	return ret;
}

/* sysfs rng_available show: space-separated list of registered RNG
 * names, newline-terminated.  Appends are bounded by PAGE_SIZE. */
static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	ssize_t ret = 0;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strncat(buf, rng->name, PAGE_SIZE - ret - 1);
		ret += strlen(rng->name);
		strncat(buf, " ", PAGE_SIZE - ret - 1);
		ret++;
	}
	strncat(buf, "\n", PAGE_SIZE - ret - 1);
	ret++;
	mutex_unlock(&rng_mutex);

	return ret;
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);

/* Remove the sysfs attributes and the /dev/hwrng misc device. */
static void unregister_miscdev(void)
{
	device_remove_file(rng_miscdev.this_device,
			   &dev_attr_rng_available);
	device_remove_file(rng_miscdev.this_device,
			   &dev_attr_rng_current);
	misc_deregister(&rng_miscdev);
}

/* Create /dev/hwrng plus its two sysfs attributes, unwinding fully on
 * any failure (goto-cleanup pattern). */
static int register_miscdev(void)
{
	int err;

	err = misc_register(&rng_miscdev);
	if (err)
		goto out;
	err = device_create_file(rng_miscdev.this_device,
				 &dev_attr_rng_current);
	if (err)
		goto err_misc_dereg;
	err = device_create_file(rng_miscdev.this_device,
				 &dev_attr_rng_available);
	if (err)
		goto err_remove_current;
out:
	return err;

err_remove_current:
	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
err_misc_dereg:
	misc_deregister(&rng_miscdev);
	goto out;
}

/* Register @rng with the framework.  Requires a name and at least one
 * of ->data_read / ->read.  Rejects duplicate names.  The first RNG to
 * register becomes current_rng and triggers creation of /dev/hwrng; on
 * misc-device failure that initialisation is rolled back.
 * Returns 0, -EINVAL, -EEXIST, or an init/registration error. */
int hwrng_register(struct hwrng *rng)
{
	int must_register_misc;
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (rng->name == NULL ||
	    (rng->data_read == NULL && rng->read == NULL))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	must_register_misc = (current_rng == NULL);
	old_rng = current_rng;
	if (!old_rng) {
		err = hwrng_init(rng);
		if (err)
			goto out_unlock;
		current_rng = rng;
	}
	err = 0;
	if (must_register_misc) {
		err = register_miscdev();
		if (err) {
			/* Undo the current_rng takeover on failure. */
			if (!old_rng) {
				hwrng_cleanup(rng);
				current_rng = NULL;
			}
			goto out_unlock;
		}
	}
	INIT_LIST_HEAD(&rng->list);
	list_add_tail(&rng->list, &rng_list);

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

/* Unregister @rng.  If it was current, fail over to the most recently
 * registered remaining RNG (clearing current_rng if its init fails);
 * tear down /dev/hwrng when the list becomes empty. */
void hwrng_unregister(struct hwrng *rng)
{
	int err;

	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		hwrng_cleanup(rng);
		if (list_empty(&rng_list)) {
			current_rng = NULL;
		} else {
			current_rng = list_entry(rng_list.prev, struct hwrng, list);
			err = hwrng_init(current_rng);
			if (err)
				current_rng = NULL;
		}
	}
	if (list_empty(&rng_list))
		unregister_miscdev();

	mutex_unlock(&rng_mutex);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
gpl-2.0
LegacyHuawei/android_kernel_huawei_msm7x30
drivers/gpu/drm/nouveau/nouveau_acpi.c
4011
11247
#include <linux/pci.h> #include <linux/acpi.h> #include <linux/slab.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_bus.h> #include <acpi/video.h> #include <acpi/acpi.h> #include <linux/mxm-wmi.h> #include "drmP.h" #include "drm.h" #include "drm_sarea.h" #include "drm_crtc_helper.h" #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nv50_display.h" #include "nouveau_connector.h" #include <linux/vga_switcheroo.h> #define NOUVEAU_DSM_LED 0x02 #define NOUVEAU_DSM_LED_STATE 0x00 #define NOUVEAU_DSM_LED_OFF 0x10 #define NOUVEAU_DSM_LED_STAMINA 0x11 #define NOUVEAU_DSM_LED_SPEED 0x12 #define NOUVEAU_DSM_POWER 0x03 #define NOUVEAU_DSM_POWER_STATE 0x00 #define NOUVEAU_DSM_POWER_SPEED 0x01 #define NOUVEAU_DSM_POWER_STAMINA 0x02 #define NOUVEAU_DSM_OPTIMUS_FN 0x1A #define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001 static struct nouveau_dsm_priv { bool dsm_detected; bool optimus_detected; acpi_handle dhandle; acpi_handle rom_handle; } nouveau_dsm_priv; #define NOUVEAU_DSM_HAS_MUX 0x1 #define NOUVEAU_DSM_HAS_OPT 0x2 static const char nouveau_dsm_muid[] = { 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, }; static const char nouveau_op_dsm_muid[] = { 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47, 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0, }; static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result) { struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_object_list input; union acpi_object params[4]; union acpi_object *obj; int i, err; char args_buff[4]; input.count = 4; input.pointer = params; params[0].type = ACPI_TYPE_BUFFER; params[0].buffer.length = sizeof(nouveau_op_dsm_muid); params[0].buffer.pointer = (char *)nouveau_op_dsm_muid; params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = 0x00000100; params[2].type = ACPI_TYPE_INTEGER; params[2].integer.value = func; params[3].type = ACPI_TYPE_BUFFER; params[3].buffer.length = 4; /* ACPI is little endian, 
AABBCCDD becomes {DD,CC,BB,AA} */ for (i = 0; i < 4; i++) args_buff[i] = (arg >> i * 8) & 0xFF; params[3].buffer.pointer = args_buff; err = acpi_evaluate_object(handle, "_DSM", &input, &output); if (err) { printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); return err; } obj = (union acpi_object *)output.pointer; if (obj->type == ACPI_TYPE_INTEGER) if (obj->integer.value == 0x80000002) { return -ENODEV; } if (obj->type == ACPI_TYPE_BUFFER) { if (obj->buffer.length == 4 && result) { *result = 0; *result |= obj->buffer.pointer[0]; *result |= (obj->buffer.pointer[1] << 8); *result |= (obj->buffer.pointer[2] << 16); *result |= (obj->buffer.pointer[3] << 24); } } kfree(output.pointer); return 0; } static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result) { struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_object_list input; union acpi_object params[4]; union acpi_object *obj; int err; input.count = 4; input.pointer = params; params[0].type = ACPI_TYPE_BUFFER; params[0].buffer.length = sizeof(nouveau_dsm_muid); params[0].buffer.pointer = (char *)nouveau_dsm_muid; params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = 0x00000102; params[2].type = ACPI_TYPE_INTEGER; params[2].integer.value = func; params[3].type = ACPI_TYPE_INTEGER; params[3].integer.value = arg; err = acpi_evaluate_object(handle, "_DSM", &input, &output); if (err) { printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); return err; } obj = (union acpi_object *)output.pointer; if (obj->type == ACPI_TYPE_INTEGER) if (obj->integer.value == 0x80000002) return -ENODEV; if (obj->type == ACPI_TYPE_BUFFER) { if (obj->buffer.length == 4 && result) { *result = 0; *result |= obj->buffer.pointer[0]; *result |= (obj->buffer.pointer[1] << 8); *result |= (obj->buffer.pointer[2] << 16); *result |= (obj->buffer.pointer[3] << 24); } } kfree(output.pointer); return 0; } /* Returns 1 if a DSM function is usable and 0 otherwise */ static int nouveau_test_dsm(acpi_handle 
test_handle, int (*dsm_func)(acpi_handle, int, int, uint32_t *), int sfnc) { u32 result = 0; /* Function 0 returns a Buffer containing available functions. The args * parameter is ignored for function 0, so just put 0 in it */ if (dsm_func(test_handle, 0, 0, &result)) return 0; /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If * the n-th bit is enabled, function n is supported */ return result & 1 && result & (1 << sfnc); } static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id) { mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL); } static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state) { int arg; if (state == VGA_SWITCHEROO_ON) arg = NOUVEAU_DSM_POWER_SPEED; else arg = NOUVEAU_DSM_POWER_STAMINA; nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL); return 0; } static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) { /* perhaps the _DSM functions are mutually exclusive, but prepare for * the future */ if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) return 0; if (id == VGA_SWITCHEROO_IGD) return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); else return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); } static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, enum vga_switcheroo_state state) { if (id == VGA_SWITCHEROO_IGD) return 0; /* Optimus laptops have the card already disabled in * nouveau_switcheroo_set_state */ if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) return 0; return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); } static int nouveau_dsm_init(void) { return 0; } static int nouveau_dsm_get_client_id(struct pci_dev *pdev) { /* easy option one - 
intel vendor ID means Integrated */ if (pdev->vendor == PCI_VENDOR_ID_INTEL) return VGA_SWITCHEROO_IGD; /* is this device on Bus 0? - this may need improving */ if (pdev->bus->number == 0) return VGA_SWITCHEROO_IGD; return VGA_SWITCHEROO_DIS; } static struct vga_switcheroo_handler nouveau_dsm_handler = { .switchto = nouveau_dsm_switchto, .power_state = nouveau_dsm_power_state, .init = nouveau_dsm_init, .get_client_id = nouveau_dsm_get_client_id, }; static int nouveau_dsm_pci_probe(struct pci_dev *pdev) { acpi_handle dhandle, nvidia_handle; acpi_status status; int retval = 0; dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); if (ACPI_FAILURE(status)) { return false; } if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) retval |= NOUVEAU_DSM_HAS_MUX; if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm, NOUVEAU_DSM_OPTIMUS_FN)) retval |= NOUVEAU_DSM_HAS_OPT; if (retval) nouveau_dsm_priv.dhandle = dhandle; return retval; } static bool nouveau_dsm_detect(void) { char acpi_method_name[255] = { 0 }; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; int has_dsm = 0; int has_optimus = 0; int vga_count = 0; bool guid_valid; int retval; bool ret = false; /* lookup the MXM GUID */ guid_valid = mxm_wmi_supported(); if (guid_valid) printk("MXM: GUID detected in BIOS\n"); /* now do DSM detection */ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; retval = nouveau_dsm_pci_probe(pdev); if (retval & NOUVEAU_DSM_HAS_MUX) has_dsm |= 1; if (retval & NOUVEAU_DSM_HAS_OPT) has_optimus = 1; } if (vga_count == 2 && has_dsm && guid_valid) { acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", acpi_method_name); nouveau_dsm_priv.dsm_detected = true; ret = true; } if (has_optimus == 1) { 
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", acpi_method_name); nouveau_dsm_priv.optimus_detected = true; ret = true; } return ret; } void nouveau_register_dsm_handler(void) { bool r; r = nouveau_dsm_detect(); if (!r) return; vga_switcheroo_register_handler(&nouveau_dsm_handler); } /* Must be called for Optimus models before the card can be turned off */ void nouveau_switcheroo_optimus_dsm(void) { u32 result = 0; if (!nouveau_dsm_priv.optimus_detected) return; nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN, NOUVEAU_DSM_OPTIMUS_ARGS, &result); } void nouveau_unregister_dsm_handler(void) { vga_switcheroo_unregister_handler(); } /* retrieve the ROM in 4k blocks */ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, int offset, int len) { acpi_status status; union acpi_object rom_arg_elements[2], *obj; struct acpi_object_list rom_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; rom_arg.count = 2; rom_arg.pointer = &rom_arg_elements[0]; rom_arg_elements[0].type = ACPI_TYPE_INTEGER; rom_arg_elements[0].integer.value = offset; rom_arg_elements[1].type = ACPI_TYPE_INTEGER; rom_arg_elements[1].integer.value = len; status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); if (ACPI_FAILURE(status)) { printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); return -ENODEV; } obj = (union acpi_object *)buffer.pointer; memcpy(bios+offset, obj->buffer.pointer, len); kfree(buffer.pointer); return len; } bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { acpi_status status; acpi_handle dhandle, rom_handle; if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) return false; dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; status = acpi_get_handle(dhandle, "_ROM", &rom_handle); if (ACPI_FAILURE(status)) return false; nouveau_dsm_priv.rom_handle = 
rom_handle; return true; } int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); } int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { struct nouveau_connector *nv_connector = nouveau_connector(connector); struct acpi_device *acpidev; acpi_handle handle; int type, ret; void *edid; switch (connector->connector_type) { case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: type = ACPI_VIDEO_DISPLAY_LCD; break; default: return -EINVAL; } handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); if (!handle) return -ENODEV; ret = acpi_bus_get_device(handle, &acpidev); if (ret) return -ENODEV; ret = acpi_video_get_edid(acpidev, type, -1, &edid); if (ret < 0) return ret; nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); return 0; }
gpl-2.0
guard163/linux
arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
4267
6341
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/* Interrupt-enable helpers implemented elsewhere in the executive. */
void __cvmx_interrupt_gmxx_enable(int interface);
void __cvmx_interrupt_spxx_int_msk_enable(int index);
void __cvmx_interrupt_stxx_int_msk_enable(int index);

/*
 * Functions for SPI initialization, configuration,
 * and monitoring.
 */
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-spi.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-pip-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>

/*
 * CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
 * initialization routines wait for SPI training. You can override the
 * value using executive-config.h if necessary.
 */
#ifndef CVMX_HELPER_SPI_TIMEOUT
#define CVMX_HELPER_SPI_TIMEOUT 10
#endif

/*
 * Report the number of ports on a SPI interface without touching
 * hardware state: 10 for a SPI4000 daughter card (when not running on
 * the simulator), 16 for a generic SPI device.
 */
int __cvmx_helper_spi_enumerate(int interface)
{
	if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
	    cvmx_spi4000_is_present(interface)) {
		return 10;
	} else {
		return 16;
	}
}

/**
 * Probe a SPI interface and determine the number of ports
 * connected to it. The SPI interface should still be down after
 * this call.
 *
 * @interface: Interface to probe
 *
 * Returns Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_spi_probe(int interface)
{
	int num_ports = 0;

	if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
	    cvmx_spi4000_is_present(interface)) {
		num_ports = 10;
	} else {
		union cvmx_pko_reg_crc_enable enable;
		num_ports = 16;
		/*
		 * Unlike the SPI4000, most SPI devices don't
		 * automatically put on the L2 CRC. For everything
		 * except for the SPI4000 have PKO append the L2 CRC
		 * to the packet.
		 */
		enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
		/* Each interface owns a 16-bit slice of the enable mask. */
		enable.s.enable |= 0xffff << (interface * 16);
		cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
	}
	__cvmx_helper_setup_gmx(interface, num_ports);
	return num_ports;
}

/**
 * Bringup and enable a SPI interface. After this call packet I/O
 * should be fully functional. This is called with IPD enabled but
 * PKO disabled.
 *
 * @interface: Interface to bring up
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_spi_enable(int interface)
{
	/*
	 * Normally the ethernet L2 CRC is checked and stripped in the
	 * GMX block. When you are using SPI, this isn't the case and
	 * IPD needs to check the L2 CRC.
	 */
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int ipd_port;
	for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
	     ipd_port++) {
		union cvmx_pip_prt_cfgx port_config;
		port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		port_config.s.crc_en = 1;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
	}

	/* Real hardware needs SPI training; the simulator does not. */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
		cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
					 CVMX_HELPER_SPI_TIMEOUT, num_ports);
		if (cvmx_spi4000_is_present(interface))
			cvmx_spi4000_initialize(interface);
	}
	__cvmx_interrupt_spxx_int_msk_enable(interface);
	__cvmx_interrupt_stxx_int_msk_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);
	return 0;
}

/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @ipd_port: IPD/PKO port to query
 *
 * Returns Link state
 */
cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
{
	cvmx_helper_link_info_t result;
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	result.u64 = 0;

	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
		/* The simulator gives you a simulated full duplex link */
		result.s.link_up = 1;
		result.s.full_duplex = 1;
		result.s.speed = 10000;
	} else if (cvmx_spi4000_is_present(interface)) {
		/* SPI4000 reports real state via the in-band channel. */
		union cvmx_gmxx_rxx_rx_inbnd inband =
		    cvmx_spi4000_check_speed(interface, index);
		result.s.link_up = inband.s.status;
		result.s.full_duplex = inband.s.duplex;
		switch (inband.s.speed) {
		case 0: /* 10 Mbps */
			result.s.speed = 10;
			break;
		case 1: /* 100 Mbps */
			result.s.speed = 100;
			break;
		case 2: /* 1 Gbps */
			result.s.speed = 1000;
			break;
		case 3: /* Illegal */
			result.s.speed = 0;
			result.s.link_up = 0;
			break;
		}
	} else {
		/* For generic SPI we can't determine the link, just return
		   some sane results */
		result.s.link_up = 1;
		result.s.full_duplex = 1;
		result.s.speed = 10000;
	}
	return result;
}

/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @ipd_port:  IPD/PKO port to configure
 * @link_info: The new link state
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
	/* Nothing to do. If we have a SPI4000 then the setup was already performed
	   by cvmx_spi4000_check_speed(). If not then there isn't any link
	   info */
	return 0;
}
gpl-2.0
cyaniris/sgs4duos_kernel
fs/sysv/inode.c
4523
9824
/*
 *  linux/fs/sysv/inode.c
 *
 *  minix/inode.c
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  xenix/inode.c
 *  Copyright (C) 1992  Doug Evans
 *
 *  coh/inode.c
 *  Copyright (C) 1993  Pascal Haible, Bruno Haible
 *
 *  sysv/inode.c
 *  Copyright (C) 1993  Paul B. Monday
 *
 *  sysv/inode.c
 *  Copyright (C) 1993  Bruno Haible
 *  Copyright (C) 1997, 1998  Krzysztof G. Baranowski
 *
 *  This file contains code for allocating/freeing inodes and for read/writing
 *  the superblock.
 */

#include <linux/highuid.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
#include <linux/namei.h>
#include <asm/byteorder.h>
#include "sysv.h"

/* Write the superblock time/state stamps back to disk (SYSV4 only). */
static int sysv_sync_fs(struct super_block *sb, int wait)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	unsigned long time = get_seconds(), old_time;

	lock_super(sb);

	/*
	 * If we are going to write out the super block,
	 * then attach current time stamp.
	 * But if the filesystem was marked clean, keep it clean.
	 */
	sb->s_dirt = 0;
	old_time = fs32_to_cpu(sbi, *sbi->s_sb_time);
	if (sbi->s_type == FSTYPE_SYSV4) {
		if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time))
			*sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38 - time);
		*sbi->s_sb_time = cpu_to_fs32(sbi, time);
		mark_buffer_dirty(sbi->s_bh2);
	}

	unlock_super(sb);

	return 0;
}

/* Sync the superblock unless the fs is mounted read-only. */
static void sysv_write_super(struct super_block *sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		sysv_sync_fs(sb, 1);
	else
		sb->s_dirt = 0;
}

/* Remount: enforce forced-RO and flush on a rw->ro transition. */
static int sysv_remount(struct super_block *sb, int *flags, char *data)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	lock_super(sb);
	if (sbi->s_forced_ro)
		*flags |= MS_RDONLY;
	if (*flags & MS_RDONLY)
		sysv_write_super(sb);
	unlock_super(sb);
	return 0;
}

/* Release superblock buffers and the in-core sb info at unmount. */
static void sysv_put_super(struct super_block *sb)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);

	if (sb->s_dirt)
		sysv_write_super(sb);

	if (!(sb->s_flags & MS_RDONLY)) {
		/* XXX ext2 also updates the state here */
		mark_buffer_dirty(sbi->s_bh1);
		if (sbi->s_bh1 != sbi->s_bh2)
			mark_buffer_dirty(sbi->s_bh2);
	}

	brelse(sbi->s_bh1);
	if (sbi->s_bh1 != sbi->s_bh2)
		brelse(sbi->s_bh2);

	kfree(sbi);
}

/* Fill in statfs(2) data from the in-core superblock counters. */
static int sysv_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->s_ndatazones;
	buf->f_bavail = buf->f_bfree = sysv_count_free_blocks(sb);
	buf->f_files = sbi->s_ninodes;
	buf->f_ffree = sysv_count_free_inodes(sb);
	buf->f_namelen = SYSV_NAMELEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	return 0;
}

/*
 * NXI <-> N0XI for PDP, XIN <-> XIN0 for le32, NIX <-> 0NIX for be32
 *
 * Expand a 3-byte on-disk block number into a 4-byte in-core value,
 * inserting the zero byte according to the superblock's byte sex.
 */
static inline void read3byte(struct sysv_sb_info *sbi,
	unsigned char * from, unsigned char * to)
{
	if (sbi->s_bytesex == BYTESEX_PDP) {
		to[0] = from[0];
		to[1] = 0;
		to[2] = from[1];
		to[3] = from[2];
	} else if (sbi->s_bytesex == BYTESEX_LE) {
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
		to[3] = 0;
	} else {
		to[0] = 0;
		to[1] = from[0];
		to[2] = from[1];
		to[3] = from[2];
	}
}

/* Inverse of read3byte: drop the padding byte for the on-disk form. */
static inline void write3byte(struct sysv_sb_info *sbi,
	unsigned char * from, unsigned char * to)
{
	if (sbi->s_bytesex == BYTESEX_PDP) {
		to[0] = from[0];
		to[1] = from[2];
		to[2] = from[3];
	} else if (sbi->s_bytesex == BYTESEX_LE) {
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
	} else {
		to[0] = from[1];
		to[1] = from[2];
		to[2] = from[3];
	}
}

static const struct inode_operations sysv_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= sysv_getattr,
};

/* Wire up i_op/i_fop/a_ops for a freshly read or created inode. */
void sysv_set_inode(struct inode *inode, dev_t rdev)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &sysv_file_inode_operations;
		inode->i_fop = &sysv_file_operations;
		inode->i_mapping->a_ops = &sysv_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &sysv_dir_inode_operations;
		inode->i_fop = &sysv_dir_operations;
		inode->i_mapping->a_ops = &sysv_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		/* Fast symlinks store the target directly in i_data. */
		if (inode->i_blocks) {
			inode->i_op = &sysv_symlink_inode_operations;
			inode->i_mapping->a_ops = &sysv_aops;
		} else {
			inode->i_op = &sysv_fast_symlink_inode_operations;
			nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size,
				sizeof(SYSV_I(inode)->i_data) - 1);
		}
	} else
		init_special_inode(inode, inode->i_mode, rdev);
}

/*
 * Read inode @ino from disk and return a locked, populated VFS inode.
 * Returns ERR_PTR(-EIO) on a bad inode number or read failure,
 * ERR_PTR(-ENOMEM) when the inode cannot be allocated.
 */
struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	struct sysv_inode_info * si;
	struct inode *inode;
	unsigned int block;

	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %d is out of range\n",
		       sb->s_id, ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode) {
		printk("Major problem: unable to read inode from dev %s\n",
		       inode->i_sb->s_id);
		goto bad_inode;
	}
	/* SystemV FS: kludge permissions if ino==SYSV_ROOT_INO ?? */
	inode->i_mode = fs16_to_cpu(sbi, raw_inode->i_mode);
	inode->i_uid = (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid);
	inode->i_gid = (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid);
	set_nlink(inode, fs16_to_cpu(sbi, raw_inode->i_nlink));
	inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime);
	inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_mtime);
	inode->i_ctime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_ctime);
	inode->i_ctime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_mtime.tv_nsec = 0;
	inode->i_blocks = 0;

	si = SYSV_I(inode);
	/* 10 direct + 1 indirect + 1 double + 1 triple block pointers. */
	for (block = 0; block < 10+1+1+1; block++)
		read3byte(sbi, &raw_inode->i_data[3*block],
				(u8 *)&si->i_data[block]);
	brelse(bh);
	si->i_dir_start_lookup = 0;
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		sysv_set_inode(inode,
			       old_decode_dev(fs32_to_cpu(sbi, si->i_data[0])));
	else
		sysv_set_inode(inode, 0);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

/*
 * Write a single inode back to its on-disk slot.  When @wait is set,
 * synchronously flush the buffer and report I/O errors.
 * Returns 0 on success or -EIO.
 */
static int __sysv_write_inode(struct inode *inode, int wait)
{
	struct super_block * sb = inode->i_sb;
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	struct sysv_inode_info * si;
	unsigned int ino, block;
	int err = 0;

	ino = inode->i_ino;
	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %d is out of range\n",
		       inode->i_sb->s_id, ino);
		return -EIO;
	}
	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode) {
		printk("unable to read i-node block\n");
		return -EIO;
	}

	raw_inode->i_mode = cpu_to_fs16(sbi, inode->i_mode);
	raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(inode->i_uid));
	raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(inode->i_gid));
	raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink);
	raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
	raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
	raw_inode->i_mtime = cpu_to_fs32(sbi, inode->i_mtime.tv_sec);
	raw_inode->i_ctime = cpu_to_fs32(sbi, inode->i_ctime.tv_sec);

	si = SYSV_I(inode);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		si->i_data[0] = cpu_to_fs32(sbi, old_encode_dev(inode->i_rdev));
	for (block = 0; block < 10+1+1+1; block++)
		write3byte(sbi, (u8 *)&si->i_data[block],
			&raw_inode->i_data[3*block]);
	mark_buffer_dirty(bh);
	if (wait) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing sysv inode [%s:%08x]\n",
				sb->s_id, ino);
			err = -EIO;
		}
	}
	brelse(bh);
	/* BUG FIX: was "return 0;", which discarded the -EIO recorded
	 * above and made synchronous write errors invisible to callers. */
	return err;
}

int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return __sysv_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int sysv_sync_inode(struct inode *inode)
{
	return __sysv_write_inode(inode, 1);
}

/* Final teardown of an inode: truncate on last unlink, then free it. */
static void sysv_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	if (!inode->i_nlink) {
		inode->i_size = 0;
		sysv_truncate(inode);
	}
	invalidate_inode_buffers(inode);
	end_writeback(inode);
	if (!inode->i_nlink)
		sysv_free_inode(inode);
}

static struct kmem_cache *sysv_inode_cachep;

static struct inode *sysv_alloc_inode(struct super_block *sb)
{
	struct sysv_inode_info *si;

	si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL);
	if (!si)
		return NULL;
	return &si->vfs_inode;
}

static void sysv_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
}

static void sysv_destroy_inode(struct inode *inode)
{
	/* Defer the free past an RCU grace period (lockless pathwalk). */
	call_rcu(&inode->i_rcu, sysv_i_callback);
}

static void init_once(void *p)
{
	struct sysv_inode_info *si = (struct sysv_inode_info *)p;

	inode_init_once(&si->vfs_inode);
}

const struct super_operations sysv_sops = {
	.alloc_inode	= sysv_alloc_inode,
	.destroy_inode	= sysv_destroy_inode,
	.write_inode	= sysv_write_inode,
	.evict_inode	= sysv_evict_inode,
	.put_super	= sysv_put_super,
	.write_super	= sysv_write_super,
	.sync_fs	= sysv_sync_fs,
	.remount_fs	= sysv_remount,
	.statfs		= sysv_statfs,
};

int __init sysv_init_icache(void)
{
	sysv_inode_cachep = kmem_cache_create("sysv_inode_cache",
			sizeof(struct sysv_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			init_once);
	if (!sysv_inode_cachep)
		return -ENOMEM;
	return 0;
}

void sysv_destroy_icache(void)
{
	kmem_cache_destroy(sysv_inode_cachep);
}
gpl-2.0
netmaxt3r/omni_kernel_sony_msm8974ab
arch/mips/ath79/mach-ap81.c
4779
2396
/*
 *  Atheros AP81 board support
 *
 *  Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
 *  Copyright (C) 2009 Imre Kaloz <kaloz@openwrt.org>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 */

#include "machtypes.h"
#include "dev-wmac.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-spi.h"
#include "dev-usb.h"

/* GPIO line assignments for the on-board LEDs. */
#define AP81_GPIO_LED_STATUS	1
#define AP81_GPIO_LED_AOSS	3
#define AP81_GPIO_LED_WLAN	6
#define AP81_GPIO_LED_POWER	14

/* GPIO line assignments for the two push buttons. */
#define AP81_GPIO_BTN_SW4	12
#define AP81_GPIO_BTN_SW1	21

#define AP81_KEYS_POLL_INTERVAL		20	/* msecs */
#define AP81_KEYS_DEBOUNCE_INTERVAL	(3 * AP81_KEYS_POLL_INTERVAL)

/* Flash address of the wireless calibration (EEPROM) data. */
#define AP81_CAL_DATA_ADDR	0x1fff1000

/* LED table; all LEDs on this board are active-low. */
static struct gpio_led ap81_leds_gpio[] __initdata = {
	{
		.name		= "ap81:green:status",
		.gpio		= AP81_GPIO_LED_STATUS,
		.active_low	= 1,
	}, {
		.name		= "ap81:amber:aoss",
		.gpio		= AP81_GPIO_LED_AOSS,
		.active_low	= 1,
	}, {
		.name		= "ap81:green:wlan",
		.gpio		= AP81_GPIO_LED_WLAN,
		.active_low	= 1,
	}, {
		.name		= "ap81:green:power",
		.gpio		= AP81_GPIO_LED_POWER,
		.active_low	= 1,
	}
};

/* Polled GPIO buttons, reported as BTN_0/BTN_1 input events. */
static struct gpio_keys_button ap81_gpio_keys[] __initdata = {
	{
		.desc		= "sw1",
		.type		= EV_KEY,
		.code		= BTN_0,
		.debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL,
		.gpio		= AP81_GPIO_BTN_SW1,
		.active_low	= 1,
	} , {
		.desc		= "sw4",
		.type		= EV_KEY,
		.code		= BTN_1,
		.debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL,
		.gpio		= AP81_GPIO_BTN_SW4,
		.active_low	= 1,
	}
};

/* SPI flash chip (m25p64) on bus 0, chip-select 0. */
static struct spi_board_info ap81_spi_info[] = {
	{
		.bus_num	= 0,
		.chip_select	= 0,
		.max_speed_hz	= 25000000,
		.modalias	= "m25p64",
	}
};

static struct ath79_spi_platform_data ap81_spi_data = {
	.bus_num	= 0,
	.num_chipselect	= 1,
};

/*
 * Board init: register LEDs, polled keys, SPI flash, the on-chip
 * wireless MAC (with calibration data mapped from flash) and USB.
 */
static void __init ap81_setup(void)
{
	u8 *cal_data = (u8 *) KSEG1ADDR(AP81_CAL_DATA_ADDR);

	ath79_register_leds_gpio(-1, ARRAY_SIZE(ap81_leds_gpio),
				 ap81_leds_gpio);
	ath79_register_gpio_keys_polled(-1, AP81_KEYS_POLL_INTERVAL,
					ARRAY_SIZE(ap81_gpio_keys),
					ap81_gpio_keys);
	ath79_register_spi(&ap81_spi_data, ap81_spi_info,
			   ARRAY_SIZE(ap81_spi_info));
	ath79_register_wmac(cal_data);
	ath79_register_usb();
}

MIPS_MACHINE(ATH79_MACH_AP81, "AP81", "Atheros AP81 reference board",
	     ap81_setup);
gpl-2.0
PaoloW8/android_kernel_ZTE_NX505J
drivers/dma/amba-pl08x.c
4779
55144
/* * Copyright (c) 2006 ARM Ltd. * Copyright (c) 2010 ST-Ericsson SA * * Author: Peter Pearse <peter.pearse@arm.com> * Author: Linus Walleij <linus.walleij@stericsson.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * The full GNU General Public License is in this distribution in the file * called COPYING. * * Documentation: ARM DDI 0196G == PL080 * Documentation: ARM DDI 0218E == PL081 * * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any * channel. * * The PL080 has 8 channels available for simultaneous use, and the PL081 * has only two channels. So on these DMA controllers the number of channels * and the number of incoming DMA signals are two totally different things. * It is usually not possible to theoretically handle all physical signals, * so a multiplexing scheme with possible denial of use is necessary. * * The PL080 has a dual bus master, PL081 has a single master. 
* * Memory to peripheral transfer may be visualized as * Get data from memory to DMAC * Until no data left * On burst request from peripheral * Destination burst from DMAC to peripheral * Clear burst request * Raise terminal count interrupt * * For peripherals with a FIFO: * Source burst size == half the depth of the peripheral FIFO * Destination burst size == the depth of the peripheral FIFO * * (Bursts are irrelevant for mem to mem transfers - there are no burst * signals, the DMA controller will simply facilitate its AHB master.) * * ASSUMES default (little) endianness for DMA transfers * * The PL08x has two flow control settings: * - DMAC flow control: the transfer size defines the number of transfers * which occur for the current LLI entry, and the DMAC raises TC at the * end of every LLI entry. Observed behaviour shows the DMAC listening * to both the BREQ and SREQ signals (contrary to documented), * transferring data if either is active. The LBREQ and LSREQ signals * are ignored. * * - Peripheral flow control: the transfer size is ignored (and should be * zero). The data is transferred from the current LLI entry, until * after the final transfer signalled by LBREQ or LSREQ. The DMAC * will then move to the next LLI entry. 
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/hardware/pl080.h>

#include "dmaengine.h"

#define DRIVER_NAME	"pl08xdmac"

static struct amba_driver pl08x_amba_driver;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
	spinlock_t lock;
};

/*
 * PL08X specific defines
 */

/* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8

/* Map a generic dma_chan back onto the driver's per-channel state */
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

/* Map a generic tx descriptor back onto the driver's descriptor */
static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	/* ACTIVE is set while the channel FIFO still holds data */
	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
*/ static void pl08x_start_txd(struct pl08x_dma_chan *plchan, struct pl08x_txd *txd) { struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_phy_chan *phychan = plchan->phychan; struct pl08x_lli *lli = &txd->llis_va[0]; u32 val; plchan->at = txd; /* Wait for channel inactive */ while (pl08x_phy_channel_busy(phychan)) cpu_relax(); dev_vdbg(&pl08x->adev->dev, "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", phychan->id, lli->src, lli->dst, lli->lli, lli->cctl, txd->ccfg); writel(lli->src, phychan->base + PL080_CH_SRC_ADDR); writel(lli->dst, phychan->base + PL080_CH_DST_ADDR); writel(lli->lli, phychan->base + PL080_CH_LLI); writel(lli->cctl, phychan->base + PL080_CH_CONTROL); writel(txd->ccfg, phychan->base + PL080_CH_CONFIG); /* Enable the DMA channel */ /* Do not access config register until channel shows as disabled */ while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id)) cpu_relax(); /* Do not access config register until channel shows as inactive */ val = readl(phychan->base + PL080_CH_CONFIG); while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) val = readl(phychan->base + PL080_CH_CONFIG); writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG); } /* * Pause the channel by setting the HALT bit. * * For M->P transfers, pause the DMAC first and then stop the peripheral - * the FIFO can only drain if the peripheral is still requesting data. * (note: this can still timeout if the DMAC FIFO never drains of data.) * * For P->M transfers, disable the peripheral first to stop it filling * the DMAC FIFO, and then pause the DMAC. 
*/ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) { u32 val; int timeout; /* Set the HALT bit and wait for the FIFO to drain */ val = readl(ch->base + PL080_CH_CONFIG); val |= PL080_CONFIG_HALT; writel(val, ch->base + PL080_CH_CONFIG); /* Wait for channel inactive */ for (timeout = 1000; timeout; timeout--) { if (!pl08x_phy_channel_busy(ch)) break; udelay(1); } if (pl08x_phy_channel_busy(ch)) pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); } static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) { u32 val; /* Clear the HALT bit */ val = readl(ch->base + PL080_CH_CONFIG); val &= ~PL080_CONFIG_HALT; writel(val, ch->base + PL080_CH_CONFIG); } /* * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and * clears any pending interrupt status. This should not be used for * an on-going transfer, but as a method of shutting down a channel * (eg, when it's no longer used) or terminating a transfer. */ static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch) { u32 val = readl(ch->base + PL080_CH_CONFIG); val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | PL080_CONFIG_TC_IRQ_MASK); writel(val, ch->base + PL080_CH_CONFIG); writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); } static inline u32 get_bytes_in_cctl(u32 cctl) { /* The source width defines the number of bytes */ u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { case PL080_WIDTH_8BIT: break; case PL080_WIDTH_16BIT: bytes *= 2; break; case PL080_WIDTH_32BIT: bytes *= 4; break; } return bytes; } /* The channel should be paused when calling this */ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) { struct pl08x_phy_chan *ch; struct pl08x_txd *txd; unsigned long flags; size_t bytes = 0; spin_lock_irqsave(&plchan->lock, flags); ch = plchan->phychan; txd = plchan->at; /* * Follow the LLIs to get the number of remaining * 
bytes in the currently active transaction. */ if (ch && txd) { u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; /* First get the remaining bytes in the active transfer */ bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); if (clli) { struct pl08x_lli *llis_va = txd->llis_va; dma_addr_t llis_bus = txd->llis_bus; int index; BUG_ON(clli < llis_bus || clli >= llis_bus + sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS); /* * Locate the next LLI - as this is an array, * it's simple maths to find. */ index = (clli - llis_bus) / sizeof(struct pl08x_lli); for (; index < MAX_NUM_TSFR_LLIS; index++) { bytes += get_bytes_in_cctl(llis_va[index].cctl); /* * A LLI pointer of 0 terminates the LLI list */ if (!llis_va[index].lli) break; } } } /* Sum up all queued transactions */ if (!list_empty(&plchan->pend_list)) { struct pl08x_txd *txdi; list_for_each_entry(txdi, &plchan->pend_list, node) { struct pl08x_sg *dsg; list_for_each_entry(dsg, &txd->dsg_list, node) bytes += dsg->len; } } spin_unlock_irqrestore(&plchan->lock, flags); return bytes; } /* * Allocate a physical channel for a virtual channel * * Try to locate a physical channel to be used for this transfer. If all * are taken return NULL and the requester will have to cope by using * some fallback PIO mode or retrying later. 
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	/* Linear scan for the first channel nobody is serving */
	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}
	pm_runtime_get_sync(&pl08x->adev->dev);
	return ch;
}

/* Stop a physical channel, mark it free and drop the PM reference */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);

	/* Stop the channel and clear its interrupts */
	pl08x_terminate_phy_chan(pl08x, ch);

	pm_runtime_put(&pl08x->adev->dev);

	/* Mark it as free */
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

/* Decode a PL080_WIDTH_* register encoding to a byte count */
static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

/*
 * Rewrite the width and transfer-size fields of a cctl word from byte
 * counts; all other control bits are passed through unchanged.
 */
static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}

/* Scratch state carried through one pl08x_fill_llis_for_desc() run */
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer. Slave will be the chosen as
 * victim in case src & dest are not similarly aligned. i.e. If after aligning
 * masters address with width requirements of transfer (by sending few byte by
 * byte data), slave is still not aligned, then its width will be reduced to
 * BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}

/*
 * Fills in one LLI for a certain transfer descriptor and advance the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	/* Chain to the next entry in the array; last entry is zeroed later */
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
		sizeof(struct pl08x_lli);
	llis_va[num_llis].lli |= bd->lli_bus;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd, u32
*cctl, u32 len, int num_llis, size_t *total_bytes) { *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl); (*total_bytes) += len; } /* * This fills in the table of LLIs for the transfer descriptor * Note that we assume we never have to change the burst sizes * Return 0 for error */ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, struct pl08x_txd *txd) { struct pl08x_bus_data *mbus, *sbus; struct pl08x_lli_build_data bd; int num_llis = 0; u32 cctl, early_bytes = 0; size_t max_bytes_per_lli, total_bytes; struct pl08x_lli *llis_va; struct pl08x_sg *dsg; txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); if (!txd->llis_va) { dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); return 0; } pl08x->pool_ctr++; bd.txd = txd; bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; cctl = txd->cctl; /* Find maximum width of the source bus */ bd.srcbus.maxwidth = pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> PL080_CONTROL_SWIDTH_SHIFT); /* Find maximum width of the destination bus */ bd.dstbus.maxwidth = pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> PL080_CONTROL_DWIDTH_SHIFT); list_for_each_entry(dsg, &txd->dsg_list, node) { total_bytes = 0; cctl = txd->cctl; bd.srcbus.addr = dsg->src_addr; bd.dstbus.addr = dsg->dst_addr; bd.remainder = dsg->len; bd.srcbus.buswidth = bd.srcbus.maxwidth; bd.dstbus.buswidth = bd.dstbus.maxwidth; pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n", bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", bd.srcbus.buswidth, bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", bd.dstbus.buswidth, bd.remainder); dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", mbus == &bd.srcbus ? "src" : "dst", sbus == &bd.srcbus ? 
"src" : "dst"); /* * Zero length is only allowed if all these requirements are * met: * - flow controller is peripheral. * - src.addr is aligned to src.width * - dst.addr is aligned to dst.width * * sg_len == 1 should be true, as there can be two cases here: * * - Memory addresses are contiguous and are not scattered. * Here, Only one sg will be passed by user driver, with * memory address and zero length. We pass this to controller * and after the transfer it will receive the last burst * request from peripheral and so transfer finishes. * * - Memory addresses are scattered and are not contiguous. * Here, Obviously as DMA controller doesn't know when a lli's * transfer gets over, it can't load next lli. So in this * case, there has to be an assumption that only one lli is * supported. Thus, we can't have scattered addresses. */ if (!bd.remainder) { u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> PL080_CONFIG_FLOW_CONTROL_SHIFT; if (!((fc >= PL080_FLOW_SRC2DST_DST) && (fc <= PL080_FLOW_SRC2DST_SRC))) { dev_err(&pl08x->adev->dev, "%s sg len can't be zero", __func__); return 0; } if ((bd.srcbus.addr % bd.srcbus.buswidth) || (bd.dstbus.addr % bd.dstbus.buswidth)) { dev_err(&pl08x->adev->dev, "%s src & dst address must be aligned to src" " & dst width if peripheral is flow controller", __func__); return 0; } cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, bd.dstbus.buswidth, 0); pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl); break; } /* * Send byte by byte for following cases * - Less than a bus width available * - until master bus is aligned */ if (bd.remainder < mbus->buswidth) early_bytes = bd.remainder; else if ((mbus->addr) % (mbus->buswidth)) { early_bytes = mbus->buswidth - (mbus->addr) % (mbus->buswidth); if ((bd.remainder - early_bytes) < mbus->buswidth) early_bytes = bd.remainder; } if (early_bytes) { dev_vdbg(&pl08x->adev->dev, "%s byte width LLIs (remain 0x%08x)\n", __func__, bd.remainder); prep_byte_width_lli(&bd, &cctl, early_bytes, 
num_llis++, &total_bytes); } if (bd.remainder) { /* * Master now aligned * - if slave is not then we must set its width down */ if (sbus->addr % sbus->buswidth) { dev_dbg(&pl08x->adev->dev, "%s set down bus width to one byte\n", __func__); sbus->buswidth = 1; } /* * Bytes transferred = tsize * src width, not * MIN(buswidths) */ max_bytes_per_lli = bd.srcbus.buswidth * PL080_CONTROL_TRANSFER_SIZE_MASK; dev_vdbg(&pl08x->adev->dev, "%s max bytes per lli = %zu\n", __func__, max_bytes_per_lli); /* * Make largest possible LLIs until less than one bus * width left */ while (bd.remainder > (mbus->buswidth - 1)) { size_t lli_len, tsize, width; /* * If enough left try to send max possible, * otherwise try to send the remainder */ lli_len = min(bd.remainder, max_bytes_per_lli); /* * Check against maximum bus alignment: * Calculate actual transfer size in relation to * bus width an get a maximum remainder of the * highest bus width - 1 */ width = max(mbus->buswidth, sbus->buswidth); lli_len = (lli_len / width) * width; tsize = lli_len / bd.srcbus.buswidth; dev_vdbg(&pl08x->adev->dev, "%s fill lli with single lli chunk of " "size 0x%08zx (remainder 0x%08zx)\n", __func__, lli_len, bd.remainder); cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, bd.dstbus.buswidth, tsize); pl08x_fill_lli_for_desc(&bd, num_llis++, lli_len, cctl); total_bytes += lli_len; } /* * Send any odd bytes */ if (bd.remainder) { dev_vdbg(&pl08x->adev->dev, "%s align with boundary, send odd bytes (remain %zu)\n", __func__, bd.remainder); prep_byte_width_lli(&bd, &cctl, bd.remainder, num_llis++, &total_bytes); } } if (total_bytes != dsg->len) { dev_err(&pl08x->adev->dev, "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", __func__, total_bytes, dsg->len); return 0; } if (num_llis >= MAX_NUM_TSFR_LLIS) { dev_err(&pl08x->adev->dev, "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", __func__, (u32) MAX_NUM_TSFR_LLIS); return 0; } } llis_va = txd->llis_va; /* The final LLI 
terminates the LLI. */ llis_va[num_llis - 1].lli = 0; /* The final LLI element shall also fire an interrupt. */ llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; #ifdef VERBOSE_DEBUG { int i; dev_vdbg(&pl08x->adev->dev, "%-3s %-9s %-10s %-10s %-10s %s\n", "lli", "", "csrc", "cdst", "clli", "cctl"); for (i = 0; i < num_llis; i++) { dev_vdbg(&pl08x->adev->dev, "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, &llis_va[i], llis_va[i].src, llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl ); } } #endif return num_llis; } /* You should call this with the struct pl08x lock held */ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, struct pl08x_txd *txd) { struct pl08x_sg *dsg, *_dsg; /* Free the LLI */ if (txd->llis_va) dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); pl08x->pool_ctr--; list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { list_del(&dsg->node); kfree(dsg); } kfree(txd); } static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, struct pl08x_dma_chan *plchan) { struct pl08x_txd *txdi = NULL; struct pl08x_txd *next; if (!list_empty(&plchan->pend_list)) { list_for_each_entry_safe(txdi, next, &plchan->pend_list, node) { list_del(&txdi->node); pl08x_free_txd(pl08x, txdi); } } } /* * The DMA ENGINE API */ static int pl08x_alloc_chan_resources(struct dma_chan *chan) { return 0; } static void pl08x_free_chan_resources(struct dma_chan *chan) { } /* * This should be called with the channel plchan->lock held */ static int prep_phy_channel(struct pl08x_dma_chan *plchan, struct pl08x_txd *txd) { struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_phy_chan *ch; int ret; /* Check if we already have a channel */ if (plchan->phychan) { ch = plchan->phychan; goto got_channel; } ch = pl08x_get_phy_channel(pl08x, plchan); if (!ch) { /* No physical channel available, cope with it */ dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); return -EBUSY; } /* * OK we have a physical channel: for memcpy() 
	 * this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave && pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;
	}

	plchan->phychan = ch;
	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id,
		 ch->signal,
		 plchan->name);

got_channel:
	/* Assign the flow control signal to this channel */
	if (txd->direction == DMA_MEM_TO_DEV)
		txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else if (txd->direction == DMA_DEV_TO_MEM)
		txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	/* Hold the channel until the descriptor is actually queued */
	plchan->phychan_hold++;

	return 0;
}

/* Return the physical channel (and any muxed signal) to the free pool */
static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}

/* dmaengine tx_submit hook: queue the descriptor on the virtual channel */
static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&plchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		plchan->phychan_hold--;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return cookie;
}

/* Interrupt descriptors are not supported by this driver */
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

/* Ordered largest-first so pl08x_burst() picks the biggest fit */
static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}

/* Strip bus/increment/prot bits from cctl and force privileged access */
static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}

/* Translate a dmaengine bus width to the PL080 encoding; ~0 on failure */
static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}

/* Pick the largest hardware burst size not exceeding @maxburst words */
static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}

/* Handle DMA_SLAVE_CONFIG: precompute the cctl words for both directions */
static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	enum dma_slave_buswidth addr_width;
	u32 width, burst, maxburst;
	u32 cctl = 0;

	if (!plchan->slave)
		return -EINVAL;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_MEM_TO_DEV) {
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_DEV_TO_MEM) {
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return -EINVAL;
	}

	width = pl08x_width(addr_width);
	if (width == ~0) {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return -EINVAL;
	}

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	plchan->device_fc = config->device_fc;

	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
		plchan->src_addr = config->src_addr;
		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
			pl08x_select_bus(plchan->cd->periph_buses,
					 pl08x->mem_buses);
	} else {
		plchan->dst_addr = config->dst_addr;
		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
			pl08x_select_bus(pl08x->mem_buses,
					 plchan->cd->periph_buses);
	}

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		addr_width,
		maxburst,
		cctl);

	return 0;
}

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel...
	 */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

/* Build the LLI chain for @txd and try to secure a physical channel */
static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACK:ed with -EBUSY here. The drivers can retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending(). If there is something running on the
		 * channel already we don't change its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
}

/* Allocate and initialise a fresh descriptor with default interrupts armed */
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
				__func__);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	txd->direction = DMA_NONE;
	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct
		scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	struct scatterlist *sg;
	dma_addr_t slave_addr;
	int ret, tmp;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sgl->length, plchan->name);

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;

	if (direction == DMA_MEM_TO_DEV) {
		txd->cctl = plchan->dst_cctl;
		slave_addr = plchan->dst_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		txd->cctl = plchan->src_cctl;
		slave_addr = plchan->src_addr;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	/* Select flow controller: the device itself or the DMAC */
	if (plchan->device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
		if (!dsg) {
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_phys(sg);
			dsg->dst_addr = slave_addr;
		} else {
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_phys(sg);
		}
	}

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

/* dmaengine device_control hook: config, terminate, pause and resume */
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_terminate_phy_chan(pl08x, plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
			plchan->phychan_hold = 0;
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}

/* dma_request_channel() filter matching a channel by its platform name */
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power. Cut the clock at the same time.
*/ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) { writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } static void pl08x_unmap_buffers(struct pl08x_txd *txd) { struct device *dev = txd->tx.chan->device->dev; struct pl08x_sg *dsg; if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) list_for_each_entry(dsg, &txd->dsg_list, node) dma_unmap_single(dev, dsg->src_addr, dsg->len, DMA_TO_DEVICE); else { list_for_each_entry(dsg, &txd->dsg_list, node) dma_unmap_page(dev, dsg->src_addr, dsg->len, DMA_TO_DEVICE); } } if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) list_for_each_entry(dsg, &txd->dsg_list, node) dma_unmap_single(dev, dsg->dst_addr, dsg->len, DMA_FROM_DEVICE); else list_for_each_entry(dsg, &txd->dsg_list, node) dma_unmap_page(dev, dsg->dst_addr, dsg->len, DMA_FROM_DEVICE); } } static void pl08x_tasklet(unsigned long data) { struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_txd *txd; unsigned long flags; spin_lock_irqsave(&plchan->lock, flags); txd = plchan->at; plchan->at = NULL; if (txd) { /* Update last completed */ dma_cookie_complete(&txd->tx); } /* If a new descriptor is queued, set it up plchan->at is NULL here */ if (!list_empty(&plchan->pend_list)) { struct pl08x_txd *next; next = list_first_entry(&plchan->pend_list, struct pl08x_txd, node); list_del(&next->node); pl08x_start_txd(plchan, next); } else if (plchan->phychan_hold) { /* * This channel is still in use - we have a new txd being * prepared and will soon be queued. Don't give up the * physical channel. 
*/ } else { struct pl08x_dma_chan *waiting = NULL; /* * No more jobs, so free up the physical channel * Free any allocated signal on slave transfers too */ release_phy_channel(plchan); plchan->state = PL08X_CHAN_IDLE; /* * And NOW before anyone else can grab that free:d up * physical channel, see if there is some memcpy pending * that seriously needs to start because of being stacked * up while we were choking the physical channels with data. */ list_for_each_entry(waiting, &pl08x->memcpy.channels, chan.device_node) { if (waiting->state == PL08X_CHAN_WAITING && waiting->waiting != NULL) { int ret; /* This should REALLY not fail now */ ret = prep_phy_channel(waiting, waiting->waiting); BUG_ON(ret); waiting->phychan_hold--; waiting->state = PL08X_CHAN_RUNNING; waiting->waiting = NULL; pl08x_issue_pending(&waiting->chan); break; } } } spin_unlock_irqrestore(&plchan->lock, flags); if (txd) { dma_async_tx_callback callback = txd->tx.callback; void *callback_param = txd->tx.callback_param; /* Don't try to unmap buffers on slave channels */ if (!plchan->slave) pl08x_unmap_buffers(txd); /* Free the descriptor */ spin_lock_irqsave(&plchan->lock, flags); pl08x_free_txd(pl08x, txd); spin_unlock_irqrestore(&plchan->lock, flags); /* Callback to signal completion */ if (callback) callback(callback_param); } } static irqreturn_t pl08x_irq(int irq, void *dev) { struct pl08x_driver_data *pl08x = dev; u32 mask = 0, err, tc, i; /* check & clear - ERR & TC interrupts */ err = readl(pl08x->base + PL080_ERR_STATUS); if (err) { dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", __func__, err); writel(err, pl08x->base + PL080_ERR_CLEAR); } tc = readl(pl08x->base + PL080_INT_STATUS); if (tc) writel(tc, pl08x->base + PL080_TC_CLEAR); if (!err && !tc) return IRQ_NONE; for (i = 0; i < pl08x->vd->channels; i++) { if (((1 << i) & err) || ((1 << i) & tc)) { /* Locate physical channel */ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = 
phychan->serving; if (!plchan) { dev_err(&pl08x->adev->dev, "%s Error TC interrupt on unused channel: 0x%08x\n", __func__, i); continue; } /* Schedule tasklet on this channel */ tasklet_schedule(&plchan->tasklet); mask |= (1 << i); } } return mask ? IRQ_HANDLED : IRQ_NONE; } static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) { u32 cctl = pl08x_cctl(chan->cd->cctl); chan->slave = true; chan->name = chan->cd->bus_id; chan->src_addr = chan->cd->addr; chan->dst_addr = chan->cd->addr; chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); } /* * Initialise the DMAC memcpy/slave channels. * Make a local wrapper to hold required data */ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, struct dma_device *dmadev, unsigned int channels, bool slave) { struct pl08x_dma_chan *chan; int i; INIT_LIST_HEAD(&dmadev->channels); /* * Register as many many memcpy as we have physical channels, * we won't always be able to use all but the code will have * to cope with that situation. 
*/ for (i = 0; i < channels; i++) { chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) { dev_err(&pl08x->adev->dev, "%s no memory for channel\n", __func__); return -ENOMEM; } chan->host = pl08x; chan->state = PL08X_CHAN_IDLE; if (slave) { chan->cd = &pl08x->pd->slave_channels[i]; pl08x_dma_slave_init(chan); } else { chan->cd = &pl08x->pd->memcpy_channel; chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); if (!chan->name) { kfree(chan); return -ENOMEM; } } if (chan->cd->circular_buffer) { dev_err(&pl08x->adev->dev, "channel %s: circular buffers not supported\n", chan->name); kfree(chan); continue; } dev_dbg(&pl08x->adev->dev, "initialize virtual channel \"%s\"\n", chan->name); chan->chan.device = dmadev; dma_cookie_init(&chan->chan); spin_lock_init(&chan->lock); INIT_LIST_HEAD(&chan->pend_list); tasklet_init(&chan->tasklet, pl08x_tasklet, (unsigned long) chan); list_add_tail(&chan->chan.device_node, &dmadev->channels); } dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", i, slave ? 
"slave" : "memcpy"); return i; } static void pl08x_free_virtual_channels(struct dma_device *dmadev) { struct pl08x_dma_chan *chan = NULL; struct pl08x_dma_chan *next; list_for_each_entry_safe(chan, next, &dmadev->channels, chan.device_node) { list_del(&chan->chan.device_node); kfree(chan); } } #ifdef CONFIG_DEBUG_FS static const char *pl08x_state_str(enum pl08x_dma_chan_state state) { switch (state) { case PL08X_CHAN_IDLE: return "idle"; case PL08X_CHAN_RUNNING: return "running"; case PL08X_CHAN_PAUSED: return "paused"; case PL08X_CHAN_WAITING: return "waiting"; default: break; } return "UNKNOWN STATE"; } static int pl08x_debugfs_show(struct seq_file *s, void *data) { struct pl08x_driver_data *pl08x = s->private; struct pl08x_dma_chan *chan; struct pl08x_phy_chan *ch; unsigned long flags; int i; seq_printf(s, "PL08x physical channels:\n"); seq_printf(s, "CHANNEL:\tUSER:\n"); seq_printf(s, "--------\t-----\n"); for (i = 0; i < pl08x->vd->channels; i++) { struct pl08x_dma_chan *virt_chan; ch = &pl08x->phy_chans[i]; spin_lock_irqsave(&ch->lock, flags); virt_chan = ch->serving; seq_printf(s, "%d\t\t%s\n", ch->id, virt_chan ? 
virt_chan->name : "(none)"); spin_unlock_irqrestore(&ch->lock, flags); } seq_printf(s, "\nPL08x virtual memcpy channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } seq_printf(s, "\nPL08x virtual slave channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } return 0; } static int pl08x_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, pl08x_debugfs_show, inode->i_private); } static const struct file_operations pl08x_debugfs_operations = { .open = pl08x_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) { /* Expose a simple debugfs interface to view all clocks */ (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, NULL, pl08x, &pl08x_debugfs_operations); } #else static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) { } #endif static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) { struct pl08x_driver_data *pl08x; const struct vendor_data *vd = id->data; int ret = 0; int i; ret = amba_request_regions(adev, NULL); if (ret) return ret; /* Create the driver state holder */ pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); if (!pl08x) { ret = -ENOMEM; goto out_no_pl08x; } pm_runtime_set_active(&adev->dev); pm_runtime_enable(&adev->dev); /* Initialize memcpy engine */ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); pl08x->memcpy.dev = &adev->dev; pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources; pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; pl08x->memcpy.device_prep_dma_memcpy = 
pl08x_prep_dma_memcpy; pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; pl08x->memcpy.device_issue_pending = pl08x_issue_pending; pl08x->memcpy.device_control = pl08x_control; /* Initialize slave engine */ dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); pl08x->slave.dev = &adev->dev; pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; pl08x->slave.device_tx_status = pl08x_dma_tx_status; pl08x->slave.device_issue_pending = pl08x_issue_pending; pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; pl08x->slave.device_control = pl08x_control; /* Get the platform data */ pl08x->pd = dev_get_platdata(&adev->dev); if (!pl08x->pd) { dev_err(&adev->dev, "no platform data supplied\n"); goto out_no_platdata; } /* Assign useful pointers to the driver state */ pl08x->adev = adev; pl08x->vd = vd; /* By default, AHB1 only. 
If dualmaster, from platform */ pl08x->lli_buses = PL08X_AHB1; pl08x->mem_buses = PL08X_AHB1; if (pl08x->vd->dualmaster) { pl08x->lli_buses = pl08x->pd->lli_buses; pl08x->mem_buses = pl08x->pd->mem_buses; } /* A DMA memory pool for LLIs, align on 1-byte boundary */ pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); if (!pl08x->pool) { ret = -ENOMEM; goto out_no_lli_pool; } spin_lock_init(&pl08x->lock); pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); if (!pl08x->base) { ret = -ENOMEM; goto out_no_ioremap; } /* Turn on the PL08x */ pl08x_ensure_on(pl08x); /* Attach the interrupt handler */ writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, DRIVER_NAME, pl08x); if (ret) { dev_err(&adev->dev, "%s failed to request interrupt %d\n", __func__, adev->irq[0]); goto out_no_irq; } /* Initialize physical channels */ pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)), GFP_KERNEL); if (!pl08x->phy_chans) { dev_err(&adev->dev, "%s failed to allocate " "physical channel holders\n", __func__); goto out_no_phychans; } for (i = 0; i < vd->channels; i++) { struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; ch->id = i; ch->base = pl08x->base + PL080_Cx_BASE(i); spin_lock_init(&ch->lock); ch->serving = NULL; ch->signal = -1; dev_dbg(&adev->dev, "physical channel %d is %s\n", i, pl08x_phy_channel_busy(ch) ? 
"BUSY" : "FREE"); } /* Register as many memcpy channels as there are physical channels */ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, pl08x->vd->channels, false); if (ret <= 0) { dev_warn(&pl08x->adev->dev, "%s failed to enumerate memcpy channels - %d\n", __func__, ret); goto out_no_memcpy; } pl08x->memcpy.chancnt = ret; /* Register slave channels */ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, pl08x->pd->num_slave_channels, true); if (ret <= 0) { dev_warn(&pl08x->adev->dev, "%s failed to enumerate slave channels - %d\n", __func__, ret); goto out_no_slave; } pl08x->slave.chancnt = ret; ret = dma_async_device_register(&pl08x->memcpy); if (ret) { dev_warn(&pl08x->adev->dev, "%s failed to register memcpy as an async device - %d\n", __func__, ret); goto out_no_memcpy_reg; } ret = dma_async_device_register(&pl08x->slave); if (ret) { dev_warn(&pl08x->adev->dev, "%s failed to register slave as an async device - %d\n", __func__, ret); goto out_no_slave_reg; } amba_set_drvdata(adev, pl08x); init_pl08x_debugfs(pl08x); dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", amba_part(adev), amba_rev(adev), (unsigned long long)adev->res.start, adev->irq[0]); pm_runtime_put(&adev->dev); return 0; out_no_slave_reg: dma_async_device_unregister(&pl08x->memcpy); out_no_memcpy_reg: pl08x_free_virtual_channels(&pl08x->slave); out_no_slave: pl08x_free_virtual_channels(&pl08x->memcpy); out_no_memcpy: kfree(pl08x->phy_chans); out_no_phychans: free_irq(adev->irq[0], pl08x); out_no_irq: iounmap(pl08x->base); out_no_ioremap: dma_pool_destroy(pl08x->pool); out_no_lli_pool: out_no_platdata: pm_runtime_put(&adev->dev); pm_runtime_disable(&adev->dev); kfree(pl08x); out_no_pl08x: amba_release_regions(adev); return ret; } /* PL080 has 8 channels and the PL080 have just 2 */ static struct vendor_data vendor_pl080 = { .channels = 8, .dualmaster = true, }; static struct vendor_data vendor_pl081 = { .channels = 2, .dualmaster = false, }; static struct 
amba_id pl08x_ids[] = { /* PL080 */ { .id = 0x00041080, .mask = 0x000fffff, .data = &vendor_pl080, }, /* PL081 */ { .id = 0x00041081, .mask = 0x000fffff, .data = &vendor_pl081, }, /* Nomadik 8815 PL080 variant */ { .id = 0x00280880, .mask = 0x00ffffff, .data = &vendor_pl080, }, { 0, 0 }, }; MODULE_DEVICE_TABLE(amba, pl08x_ids); static struct amba_driver pl08x_amba_driver = { .drv.name = DRIVER_NAME, .id_table = pl08x_ids, .probe = pl08x_probe, }; static int __init pl08x_init(void) { int retval; retval = amba_driver_register(&pl08x_amba_driver); if (retval) printk(KERN_WARNING DRIVER_NAME "failed to register as an AMBA device (%d)\n", retval); return retval; } subsys_initcall(pl08x_init);
gpl-2.0
CyanogenMod/android_kernel_asus_tf701t
drivers/scsi/lpfc/lpfc_mbox.c
5035
78160
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_compat.h" /** * lpfc_dump_static_vport - Dump HBA's static vport information. * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @offset: offset for dumping vport info. * * The dump mailbox command provides a method for the device driver to obtain * various types of information from the HBA device. 
* * This routine prepares the mailbox command for dumping list of static * vports to be created. **/ int lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset) { MAILBOX_t *mb; struct lpfc_dmabuf *mp; mb = &pmb->u.mb; /* Setup to dump vport info region */ memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_DUMP_MEMORY; mb->un.varDmp.type = DMP_NV_PARAMS; mb->un.varDmp.entry_index = offset; mb->un.varDmp.region_id = DMP_REGION_VPORT; mb->mbxOwner = OWN_HOST; /* For SLI3 HBAs data is embedded in mailbox */ if (phba->sli_rev != LPFC_SLI_REV4) { mb->un.varDmp.cv = 1; mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t); return 0; } /* For SLI4 HBAs driver need to allocate memory */ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (mp) mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); if (!mp || !mp->virt) { kfree(mp); lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2605 lpfc_dump_static_vport: memory" " allocation failed\n"); return 1; } memset(mp->virt, 0, LPFC_BPL_SIZE); INIT_LIST_HEAD(&mp->list); /* save address for completion */ pmb->context2 = (uint8_t *) mp; mb->un.varWords[3] = putPaddrLow(mp->phys); mb->un.varWords[4] = putPaddrHigh(mp->phys); mb->un.varDmp.sli4_length = sizeof(struct static_vport_info); return 0; } /** * lpfc_down_link - Bring down HBAs link. * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * This routine prepares a mailbox command to bring down HBA link. **/ void lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb; memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb = &pmb->u.mb; mb->mbxCommand = MBX_DOWN_LINK; mb->mbxOwner = OWN_HOST; } /** * lpfc_dump_mem - Prepare a mailbox command for reading a region. * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @offset: offset into the region. * @region_id: config region id. 
* * The dump mailbox command provides a method for the device driver to obtain * various types of information from the HBA device. * * This routine prepares the mailbox command for dumping HBA's config region. **/ void lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset, uint16_t region_id) { MAILBOX_t *mb; void *ctx; mb = &pmb->u.mb; ctx = pmb->context2; /* Setup to dump VPD region */ memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_DUMP_MEMORY; mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; mb->un.varDmp.entry_index = offset; mb->un.varDmp.region_id = region_id; mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t)); mb->un.varDmp.co = 0; mb->un.varDmp.resp_offset = 0; pmb->context2 = ctx; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * This function create a dump memory mailbox command to dump wake up * parameters. */ void lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb; void *ctx; mb = &pmb->u.mb; /* Save context so that we can restore after memset */ ctx = pmb->context2; /* Setup to dump VPD region */ memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_DUMP_MEMORY; mb->mbxOwner = OWN_HOST; mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; mb->un.varDmp.entry_index = 0; mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID; mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE; mb->un.varDmp.co = 0; mb->un.varDmp.resp_offset = 0; pmb->context2 = ctx; return; } /** * lpfc_read_nv - Prepare a mailbox command for reading HBA's NVRAM param * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. 
* * The read NVRAM mailbox command returns the HBA's non-volatile parameters * that are used as defaults when the Fibre Channel link is brought on-line. * * This routine prepares the mailbox command for reading information stored * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN. **/ void lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_NV; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_config_async - Prepare a mailbox command for enabling HBA async event * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @ring: ring number for the asynchronous event to be configured. * * The asynchronous event enable mailbox command is used to enable the * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and * specifies the default ring to which events are posted. * * This routine prepares the mailbox command for enabling HBA asynchronous * event support on a IOCB ring. **/ void lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t ring) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_ASYNCEVT_ENABLE; mb->un.varCfgAsyncEvent.ring = ring; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_heart_beat - Prepare a mailbox command for heart beat * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The heart beat mailbox command is used to detect an unresponsive HBA, which * is defined as any device where no error attention is sent and both mailbox * and rings are not processed. * * This routine prepares the mailbox command for issuing a heart beat in the * form of mailbox command to the HBA. The timely completion of the heart * beat mailbox command indicates the health of the HBA. 
**/ void lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_HEARTBEAT; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_read_topology - Prepare a mailbox command for reading HBA topology * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @mp: DMA buffer memory for reading the link attention information into. * * The read topology mailbox command is issued to read the link topology * information indicated by the HBA port when the Link Event bit of the Host * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link * Attention ACQE is received from the port (For SLI-4). A Link Event * Attention occurs based on an exception detected at the Fibre Channel link * interface. * * This routine prepares the mailbox command for reading HBA link topology * information. A DMA memory has been set aside and address passed to the * HBA through @mp for the HBA to DMA link attention information into the * memory as part of the execution of the mailbox command. * * Return codes * 0 - Success (currently always return 0) **/ int lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, struct lpfc_dmabuf *mp) { MAILBOX_t *mb; struct lpfc_sli *psli; psli = &phba->sli; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); INIT_LIST_HEAD(&mp->list); mb->mbxCommand = MBX_READ_TOPOLOGY; mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE; mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys); mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys); /* Save address for later completion and set the owner to host so that * the FW knows this mailbox is available for processing. */ pmb->context1 = (uint8_t *)mp; mb->mbxOwner = OWN_HOST; return (0); } /** * lpfc_clear_la - Prepare a mailbox command for clearing HBA link attention * @phba: pointer to lpfc hba data structure. 
* @pmb: pointer to the driver internal queue element for mailbox command. * * The clear link attention mailbox command is issued to clear the link event * attention condition indicated by the Link Event bit of the Host Attention * (HSTATT) register. The link event attention condition is cleared only if * the event tag specified matches that of the current link event counter. * The current event tag is read using the read link attention event mailbox * command. * * This routine prepares the mailbox command for clearing HBA link attention * information. **/ void lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varClearLA.eventTag = phba->fc_eventTag; mb->mbxCommand = MBX_CLEAR_LA; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_config_link - Prepare a mailbox command for configuring link on a HBA * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The configure link mailbox command is used before the initialize link * mailbox command to override default value and to configure link-oriented * parameters such as DID address and various timers. Typically, this * command would be used after an F_Port login to set the returned DID address * and the fabric timeout values. This command is not valid before a configure * port command has configured the HBA port. * * This routine prepares the mailbox command for configuring link on a HBA. **/ void lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { struct lpfc_vport *vport = phba->pport; MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); /* NEW_FEATURE * SLI-2, Coalescing Response Feature. 
*/ if (phba->cfg_cr_delay) { mb->un.varCfgLnk.cr = 1; mb->un.varCfgLnk.ci = 1; mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay; mb->un.varCfgLnk.cr_count = phba->cfg_cr_count; } mb->un.varCfgLnk.myId = vport->fc_myDID; mb->un.varCfgLnk.edtov = phba->fc_edtov; mb->un.varCfgLnk.arbtov = phba->fc_arbtov; mb->un.varCfgLnk.ratov = phba->fc_ratov; mb->un.varCfgLnk.rttov = phba->fc_rttov; mb->un.varCfgLnk.altov = phba->fc_altov; mb->un.varCfgLnk.crtov = phba->fc_crtov; mb->un.varCfgLnk.citov = phba->fc_citov; if (phba->cfg_ack0) mb->un.varCfgLnk.ack0_enable = 1; mb->mbxCommand = MBX_CONFIG_LINK; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_config_msi - Prepare a mailbox command for configuring msi-x * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The configure MSI-X mailbox command is used to configure the HBA's SLI-3 * MSI-X multi-message interrupt vector association to interrupt attention * conditions. * * Return codes * 0 - Success * -EINVAL - Failure **/ int lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; uint32_t attentionConditions[2]; /* Sanity check */ if (phba->cfg_use_msi != 2) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0475 Not configured for supporting MSI-X " "cfg_use_msi: 0x%x\n", phba->cfg_use_msi); return -EINVAL; } if (phba->sli_rev < 3) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0476 HBA not supporting SLI-3 or later " "SLI Revision: 0x%x\n", phba->sli_rev); return -EINVAL; } /* Clear mailbox command fields */ memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); /* * SLI-3, Message Signaled Interrupt Fearure. 
*/ /* Multi-message attention configuration */ attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT | HA_LATT | HA_MBATT); attentionConditions[1] = 0; mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0]; mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1]; /* * Set up message number to HA bit association */ #ifdef __BIG_ENDIAN_BITFIELD /* RA0 (FCP Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1; /* RA1 (Other Protocol Extra Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1; #else /* __LITTLE_ENDIAN_BITFIELD */ /* RA0 (FCP Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1; /* RA1 (Other Protocol Extra Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1; #endif /* Multi-message interrupt autoclear configuration*/ mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0]; mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1]; /* For now, HBA autoclear does not work reliably, disable it */ mb->un.varCfgMSI.autoClearHA[0] = 0; mb->un.varCfgMSI.autoClearHA[1] = 0; /* Set command and owner bit */ mb->mbxCommand = MBX_CONFIG_MSI; mb->mbxOwner = OWN_HOST; return 0; } /** * lpfc_init_link - Prepare a mailbox command for initialize link on a HBA * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @topology: the link topology for the link to be initialized to. * @linkspeed: the link speed for the link to be initialized to. * * The initialize link mailbox command is used to initialize the Fibre * Channel link. This command must follow a configure port command that * establishes the mode of operation. * * This routine prepares the mailbox command for initializing link on a HBA * with the specified link topology and speed. 
**/ void lpfc_init_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed) { lpfc_vpd_t *vpd; struct lpfc_sli *psli; MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); psli = &phba->sli; switch (topology) { case FLAGS_TOPOLOGY_MODE_LOOP_PT: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; break; case FLAGS_TOPOLOGY_MODE_PT_PT: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; break; case FLAGS_TOPOLOGY_MODE_LOOP: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; break; case FLAGS_TOPOLOGY_MODE_PT_LOOP: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; break; case FLAGS_LOCAL_LB: mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB; break; } /* Enable asynchronous ABTS responses from firmware */ mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT; /* NEW_FEATURE * Setting up the link speed */ vpd = &phba->vpd; if (vpd->rev.feaLevelHigh >= 0x02){ switch(linkspeed){ case LPFC_USER_LINK_SPEED_1G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_1G; break; case LPFC_USER_LINK_SPEED_2G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_2G; break; case LPFC_USER_LINK_SPEED_4G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_4G; break; case LPFC_USER_LINK_SPEED_8G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_8G; break; case LPFC_USER_LINK_SPEED_10G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_10G; break; case LPFC_USER_LINK_SPEED_16G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_16G; break; case LPFC_USER_LINK_SPEED_AUTO: default: mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO; break; } } else mb->un.varInitLnk.link_speed 
= LINK_SPEED_AUTO; mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK; mb->mbxOwner = OWN_HOST; mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA; return; } /** * lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @vpi: virtual N_Port identifier. * * The read service parameter mailbox command is used to read the HBA port * service parameters. The service parameters are read into the buffer * specified directly by a BDE in the mailbox command. These service * parameters may then be used to build the payload of an N_Port/F_POrt * login request and reply (LOGI/ACC). * * This routine prepares the mailbox command for reading HBA port service * parameters. The DMA memory is allocated in this function and the addresses * are populated into the mailbox command for the HBA to DMA the service * parameters into. * * Return codes * 0 - Success * 1 - DMA memory allocation failed **/ int lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) { struct lpfc_dmabuf *mp; MAILBOX_t *mb; struct lpfc_sli *psli; psli = &phba->sli; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxOwner = OWN_HOST; /* Get a buffer to hold the HBAs Service Parameters */ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); if (mp) mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); if (!mp || !mp->virt) { kfree(mp); mb->mbxCommand = MBX_READ_SPARM64; /* READ_SPARAM: no buffers */ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "0301 READ_SPARAM: no buffers\n"); return (1); } INIT_LIST_HEAD(&mp->list); mb->mbxCommand = MBX_READ_SPARM64; mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); if (phba->sli_rev >= LPFC_SLI_REV3) mb->un.varRdSparm.vpi = phba->vpi_ids[vpi]; /* save address for completion */ 
pmb->context1 = mp; return (0); } /** * lpfc_unreg_did - Prepare a mailbox command for unregistering DID * @phba: pointer to lpfc hba data structure. * @vpi: virtual N_Port identifier. * @did: remote port identifier. * @pmb: pointer to the driver internal queue element for mailbox command. * * The unregister DID mailbox command is used to unregister an N_Port/F_Port * login for an unknown RPI by specifying the DID of a remote port. This * command frees an RPI context in the HBA port. This has the effect of * performing an implicit N_Port/F_Port logout. * * This routine prepares the mailbox command for unregistering a remote * N_Port/F_Port (DID) login. **/ void lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregDID.did = did; mb->un.varUnregDID.vpi = vpi; if ((vpi != 0xffff) && (phba->sli_rev == LPFC_SLI_REV4)) mb->un.varUnregDID.vpi = phba->vpi_ids[vpi]; mb->mbxCommand = MBX_UNREG_D_ID; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_read_config - Prepare a mailbox command for reading HBA configuration * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The read configuration mailbox command is used to read the HBA port * configuration parameters. This mailbox command provides a method for * seeing any parameters that may have changed via various configuration * mailbox commands. * * This routine prepares the mailbox command for reading out HBA configuration * parameters. **/ void lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_CONFIG; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_read_lnk_stat - Prepare a mailbox command for reading HBA link stats * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. 
 *
 * The read link status mailbox command is used to read the link status from
 * the HBA. Link status includes all link-related error counters. These
 * counters are maintained by the HBA and originated in the link hardware
 * unit. Note that all of these counters wrap.
 *
 * This routine prepares the mailbox command for reading out HBA link status.
 **/
void
lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->mbxCommand = MBX_READ_LNK_STAT;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @did: remote port identifier.
 * @param: pointer to memory holding the server parameters.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @rpi: the rpi to use in the registration (usually only used for SLI4).
 *
 * The registration login mailbox command is used to register an N_Port or
 * F_Port login. This registration allows the HBA to cache the remote N_Port
 * service parameters internally and thereby make the appropriate FC-2
 * decisions. The remote port service parameters are handed off by the driver
 * to the HBA using a descriptor entry that directly identifies a buffer in
 * host memory. In exchange, the HBA returns an RPI identifier.
 *
 * This routine prepares the mailbox command for registering remote port login.
 * The function allocates DMA buffer for passing the service parameters to the
 * HBA with the mailbox command.
 *
 * Return codes
 *    0 - Success
 *    1 - DMA memory allocation failed
 **/
int
lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint8_t *sparam;
	struct lpfc_dmabuf *mp;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* SLI4 registers with the physical rpi from the id map;
	 * SLI3 lets the HBA assign the rpi (field left as 0).
	 */
	mb->un.varRegLogin.rpi = 0;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
	mb->un.varRegLogin.did = did;
	mb->mbxOwner = OWN_HOST;
	/* Get a buffer to hold NPorts Service Parameters */
	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		/* kfree(NULL) is safe, so this covers both failure modes */
		kfree(mp);
		mb->mbxCommand = MBX_REG_LOGIN64;
		/* REG_LOGIN: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
				"rpi x%x\n", vpi, did, rpi);
		return 1;
	}
	INIT_LIST_HEAD(&mp->list);
	sparam = mp->virt;

	/* Copy param's into a new buffer */
	memcpy(sparam, param, sizeof (struct serv_parm));

	/* save address for completion */
	pmb->context1 = (uint8_t *) mp;

	mb->mbxCommand = MBX_REG_LOGIN64;
	/* Hand the service-parameter DMA buffer to the HBA via a 64-bit BDE */
	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
	mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);

	return 0;
}

/**
 * lpfc_unreg_login - Prepare a mailbox command for unregistering remote login
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @rpi: remote port identifier
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The unregistration login mailbox command is used to unregister an N_Port
 * or F_Port login. This command frees an RPI context in the HBA. It has the
 * effect of performing an implicit N_Port/F_Port logout.
 *
 * This routine prepares the mailbox command for unregistering remote port
 * login.
 *
 * For SLI4 ports, the rpi passed to this function must be the physical
 * rpi value, not the logical index.
 **/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
		 LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varUnregLogin.rpi = rpi;
	mb->un.varUnregLogin.rsvd1 = 0;
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];

	mb->mbxCommand = MBX_UNREG_LOGIN;
	mb->mbxOwner = OWN_HOST;

	return;
}

/**
 * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
 * @vport: pointer to a vport object.
 *
 * This routine sends mailbox command to unregister all active RPIs for
 * a vport.
 **/
void
lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		/*
		 * For SLI4 functions, the rpi field is overloaded for
		 * the vport context unreg all.  This routine passes
		 * 0 for the rpi field in lpfc_unreg_login for compatibility
		 * with SLI3 and then overrides the rpi field with the
		 * expected value for SLI4.
		 */
		lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
				 mbox);
		/* 0x4000 marks this as an "unreg all rpis" request */
		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		/* Reclaim the mailbox ourselves if it was never issued */
		if (rc == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
}

/**
 * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port).
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The registration vport identifier mailbox command is used to activate a
 * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
 * N_Port_ID against the information in the selected virtual N_Port context
 * block and marks it active to allow normal processing of IOCB commands and
 * received unsolicited exchanges.
 *
 * This routine prepares the mailbox command for registering a virtual N_Port.
 **/
void
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_hba *phba = vport->phba;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	/*
	 * Set the re-reg VPI bit for f/w to update the MAC address.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
		mb->un.varRegVpi.upd = 1;

	mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
	mb->un.varRegVpi.sid = vport->fc_myDID;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
	else
		mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
	/* WWPN travels to firmware in little-endian words */
	memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
	       sizeof(struct lpfc_name));
	mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
	mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);

	mb->mbxCommand = MBX_REG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;

}

/**
 * lpfc_unreg_vpi - Prepare a mailbox command for unregistering vport id
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The unregistration vport identifier mailbox command is used to inactivate
 * a virtual N_Port. The driver must have logged out and unregistered all
 * remote N_Ports to abort any activity on the virtual N_Port. The HBA will
 * unregisters any default RPIs associated with the specified vpi, aborting
 * any active exchanges. The HBA will post the mailbox response after making
 * the virtual N_Port inactive.
 *
 * This routine prepares the mailbox command for unregistering a virtual
 * N_Port.
 **/
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* SLI3 and SLI4 place the (physical) vpi in different fields */
	if (phba->sli_rev == LPFC_SLI_REV3)
		mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
	else if (phba->sli_rev >= LPFC_SLI_REV4)
		mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];

	mb->mbxCommand = MBX_UNREG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;

}

/**
 * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets up and initializes the IOCB rings in the Port Control
 * Block (PCB).  For each ring it records the entry counts and the DMA
 * addresses of the command and response IOCB areas, which live inside the
 * slim2p DMA region.
 **/
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	PCB_t *pcbp = phba->pcb;
	dma_addr_t pdma_addr;
	uint32_t offset;
	uint32_t iocbCnt = 0;	/* running index into phba->IOCBs[] */
	int i;

	pcbp->maxRing = (psli->num_rings - 1);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* IOCB entry sizes depend on the negotiated SLI revision */
		pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
							SLI2_IOCB_CMD_SIZE;
		pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
							SLI2_IOCB_RSP_SIZE;
		/* A ring MUST have both cmd and rsp entries defined to be
		   valid */
		if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
			/* disable this ring in the PCB */
			pcbp->rdsc[i].cmdEntries = 0;
			pcbp->rdsc[i].rspEntries = 0;
			pcbp->rdsc[i].cmdAddrHigh = 0;
			pcbp->rdsc[i].rspAddrHigh = 0;
			pcbp->rdsc[i].cmdAddrLow = 0;
			pcbp->rdsc[i].rspAddrLow = 0;
			pring->cmdringaddr = NULL;
			pring->rspringaddr = NULL;
			continue;
		}
		/* Command ring setup for ring */
		pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
		pcbp->rdsc[i].cmdEntries = pring->numCiocb;

		/* DMA address = slim2p base + offset of this ring's IOCBs */
		offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
			 (uint8_t *) phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->numCiocb;

		/* Response ring setup for ring */
		pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
		pcbp->rdsc[i].rspEntries = pring->numRiocb;
		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
			 (uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->numRiocb;
	}
}

/**
 * lpfc_read_rev - Prepare a mailbox command for reading HBA revision
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The read revision mailbox command is used to read the revision levels of
 * the HBA components. These components include hardware units, resident
 * firmware, and available firmware. HBAs that supports SLI-3 mode of
 * operation provide different response information depending on the version
 * requested by the driver.
 *
 * This routine prepares the mailbox command for reading HBA revision
 * information.
 **/
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	mb->un.varRdRev.cv = 1;
	mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
	mb->mbxCommand = MBX_READ_REV;
	mb->mbxOwner = OWN_HOST;
	return;
}

/* Byte-swap the firmware name strings returned by a SLI4 READ_REV.
 * lpfc_sli_pcimem_bcopy() with identical source and destination performs
 * an in-place word swap of the 16-byte name fields.
 */
void
lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_mqe *mqe;

	switch (mb->mbxCommand) {
	case  MBX_READ_REV:
		mqe = &pmb->u.mqe;
		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
				      mqe->un.read_rev.fw_name, 16);
		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
				      mqe->un.read_rev.ulp_fw_name, 16);
		break;
	default:
		break;
	}
	return;
}

/**
 * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2
 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
 * @hbq_desc: pointer to the HBQ selection profile descriptor.
 *
 * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
 * tests the incoming frames' R_CTL/TYPE fields with words 10:15 and performs
 * the Sequence Length Test using the fields in the Selection Profile 2
 * extension in words 20:31.
 **/
static void
lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
			struct lpfc_hbq_init  *hbq_desc)
{
	hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
	hbqmb->profiles.profile2.maxlen     = hbq_desc->maxlen;
	hbqmb->profiles.profile2.seqlenoff  = hbq_desc->seqlenoff;
}

/**
 * lpfc_build_hbq_profile3 - Set up the HBQ Selection Profile 3
 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
 * @hbq_desc: pointer to the HBQ selection profile descriptor.
 *
 * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
 * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
 * the Sequence Length Test and Byte Field Test using the fields in the
 * Selection Profile 3 extension in words 20:31.
 **/
static void
lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
			struct lpfc_hbq_init  *hbq_desc)
{
	hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
	hbqmb->profiles.profile3.maxlen     = hbq_desc->maxlen;
	hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
	hbqmb->profiles.profile3.seqlenoff  = hbq_desc->seqlenoff;
	memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
	       sizeof(hbqmb->profiles.profile3.cmdmatch));
}

/**
 * lpfc_build_hbq_profile5 - Set up the HBQ Selection Profile 5
 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
 * @hbq_desc: pointer to the HBQ selection profile descriptor.
 *
 * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
 * HBA tests the initial frame of an incoming sequence using the frame's
 * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
 * and Byte Field Test using the fields in the Selection Profile 5 extension
 * words 20:31.
 **/
static void
lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
			struct lpfc_hbq_init  *hbq_desc)
{
	hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
	hbqmb->profiles.profile5.maxlen     = hbq_desc->maxlen;
	hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
	hbqmb->profiles.profile5.seqlenoff  = hbq_desc->seqlenoff;
	memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
	       sizeof(hbqmb->profiles.profile5.cmdmatch));
}

/**
 * lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ
 * @phba: pointer to lpfc hba data structure.
 * @id: HBQ identifier.
 * @hbq_desc: pointer to the HBA descriptor data structure.
 * @hbq_entry_index: index of the HBQ entry data structures.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The configure HBQ (Host Buffer Queue) mailbox command is used to configure
 * an HBQ. The configuration binds events that require buffers to a particular
 * ring and HBQ based on a selection profile.
 *
 * This routine prepares the mailbox command for configuring an HBQ.
 **/
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
		 struct lpfc_hbq_init *hbq_desc,
		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	hbqmb->hbqId = id;
	hbqmb->entry_count = hbq_desc->entry_count;   /* # entries in HBQ */
	hbqmb->recvNotify = hbq_desc->rn;             /* Receive
						       * Notification */
	hbqmb->numMask    = hbq_desc->mask_count;     /* # R_CTL/TYPE masks
						       * # in words 0-19 */
	hbqmb->profile    = hbq_desc->profile;	      /* Selection profile:
						       * 0 = all,
						       * 7 = logentry */
	hbqmb->ringMask   = hbq_desc->ring_mask;      /* Binds HBQ to a ring
						       * e.g. Ring0=b0001,
						       * ring2=b0100 */
	hbqmb->headerLen  = hbq_desc->headerLen;      /* 0 if not profile 4
						       * or 5 */
	hbqmb->logEntry   = hbq_desc->logEntry;       /* Set to 1 if this
						       * HBQ will be used
						       * for LogEntry
						       * buffers */
	/* DMA address of this HBQ's entry array inside the hbqslimp region */
	hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
		hbq_entry_index * sizeof(struct lpfc_hbq_entry);
	hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);

	mb->mbxCommand = MBX_CONFIG_HBQ;
	mb->mbxOwner = OWN_HOST;

	/* Copy info for profiles 2,3,5. Other
	 * profiles this area is reserved
	 */
	if (hbq_desc->profile == 2)
		lpfc_build_hbq_profile2(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 3)
		lpfc_build_hbq_profile3(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 5)
		lpfc_build_hbq_profile5(hbqmb, hbq_desc);

	/* Return if no rctl / type masks for this HBQ */
	if (!hbq_desc->mask_count)
		return;

	/* Otherwise we setup specific rctl / type masks for this HBQ */
	for (i = 0; i < hbq_desc->mask_count; i++) {
		hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
		hbqmb->hbqMasks[i].tmask  = hbq_desc->hbqMasks[i].tmask;
		hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
		hbqmb->hbqMasks[i].rctlmask  = hbq_desc->hbqMasks[i].rctlmask;
	}

	return;
}

/**
 * lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @ring: ring number/index
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The configure ring mailbox command is used to configure an IOCB ring. This
 * configuration binds from one to six of HBA RC_CTL/TYPE mask entries to the
 * ring. This is used to map incoming sequences to a particular ring whose
 * RC_CTL/TYPE mask entry matches that of the sequence. The driver should not
 * attempt to configure a ring whose number is greater than the number
 * specified in the Port Control Block (PCB). It is an error to issue the
 * configure ring command more than once with the same ring number. The HBA
 * returns an error if the driver attempts this.
 *
 * This routine prepares the mailbox command for configuring IOCB ring.
**/ void lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) { int i; MAILBOX_t *mb = &pmb->u.mb; struct lpfc_sli *psli; struct lpfc_sli_ring *pring; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varCfgRing.ring = ring; mb->un.varCfgRing.maxOrigXchg = 0; mb->un.varCfgRing.maxRespXchg = 0; mb->un.varCfgRing.recvNotify = 1; psli = &phba->sli; pring = &psli->ring[ring]; mb->un.varCfgRing.numMask = pring->num_mask; mb->mbxCommand = MBX_CONFIG_RING; mb->mbxOwner = OWN_HOST; /* Is this ring configured for a specific profile */ if (pring->prt[0].profile) { mb->un.varCfgRing.profile = pring->prt[0].profile; return; } /* Otherwise we setup specific rctl / type masks for this ring */ for (i = 0; i < pring->num_mask; i++) { mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ) mb->un.varCfgRing.rrRegs[i].rmask = 0xff; else mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type; mb->un.varCfgRing.rrRegs[i].tmask = 0xff; } return; } /** * lpfc_config_port - Prepare a mailbox command for configuring port * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The configure port mailbox command is used to identify the Port Control * Block (PCB) in the driver memory. After this command is issued, the * driver must not access the mailbox in the HBA without first resetting * the HBA. The HBA may copy the PCB information to internal storage for * subsequent use; the driver can not change the PCB information unless it * resets the HBA. * * This routine prepares the mailbox command for configuring port. 
 **/
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
	MAILBOX_t *mb = &pmb->u.mb;
	dma_addr_t pdma_addr;
	uint32_t bar_low, bar_high;
	size_t offset;
	struct lpfc_hgp hgp;
	int i;
	uint32_t pgp_offset;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;

	mb->un.varCfgPort.pcbLen = sizeof(PCB_t);

	/* Tell the HBA where the PCB lives inside the slim2p DMA region */
	offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
	mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);

	/* Always Host Group Pointer is in SLIM */
	mb->un.varCfgPort.hps = 1;

	/* If HBA supports SLI=3 ask for it */

	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
		if (phba->cfg_enable_bg)
			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
		if (phba->cfg_enable_dss)
			mb->un.varCfgPort.cdss = 1; /* Configure Security */
		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
		if (phba->max_vpi && phba->cfg_enable_npiv &&
		    phba->vpd.sli3Feat.cmv) {
			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
			mb->un.varCfgPort.cmv = 1;
		} else
			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
	} else
		/* SLI3 features unavailable: fall back to SLI2 mode */
		phba->sli_rev = LPFC_SLI_REV2;
	mb->un.varCfgPort.sli_mode = phba->sli_rev;

	/* If this is an SLI3 port, configure async status notification. */
	if (phba->sli_rev == LPFC_SLI_REV3)
		mb->un.varCfgPort.casabt = 1;

	/* Now setup pcb */
	phba->pcb->type = TYPE_NATIVE_SLI2;
	phba->pcb->feature = FEATURE_INITIAL_SLI2;

	/* Setup Mailbox pointers */
	phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
	offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);

	/*
	 * Setup Host Group ring pointer.
	 *
	 * For efficiency reasons, the ring get/put pointers can be
	 * placed in adapter memory (SLIM) rather than in host memory.
	 * This allows firmware to avoid PCI reads/writes when updating
	 * and checking pointers.
	 *
	 * The firmware recognizes the use of SLIM memory by comparing
	 * the address of the get/put pointers structure with that of
	 * the SLIM BAR (BAR0).
	 *
	 * Caution: be sure to use the PCI config space value of BAR0/BAR1
	 * (the hardware's view of the base address), not the OS's
	 * value of pci_resource_start() as the OS value may be a cookie
	 * for ioremap/iomap.
	 */

	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);

	/*
	 * Set up HGP - Port Memory
	 *
	 * The port expects the host get/put pointers to reside in memory
	 * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
	 * area of SLIM.  In SLI-2 mode, there's an additional 16 reserved
	 * words (0x40 bytes).  This area is not reserved if HBQs are
	 * configured in SLI-3.
	 *
	 * CR0Put    - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
	 * RR0Get                      0xc4              0x84
	 * CR1Put                      0xc8              0x88
	 * RR1Get                      0xcc              0x8c
	 * CR2Put                      0xd0              0x90
	 * RR2Get                      0xd4              0x94
	 * CR3Put                      0xd8              0x98
	 * RR3Get                      0xdc              0x9c
	 *
	 * Reserved                    0xa0-0xbf
	 *    If HBQs configured:
	 *                         HBQ 0 Put ptr  0xc0
	 *                         HBQ 1 Put ptr  0xc4
	 *                         HBQ 2 Put ptr  0xc8
	 *                         ......
	 *                         HBQ(M-1)Put Pointer 0xc0+(M-1)*4
	 *
	 */

	if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
		/* Host group pointers kept in host memory (slim2p) */
		phba->host_gp = &phba->mbox->us.s2.host[0];
		phba->hbq_put = NULL;
		offset = (uint8_t *)&phba->mbox->us.s2.host -
			(uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
		phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
	} else {
		/* Always Host Group Pointer is in SLIM */
		mb->un.varCfgPort.hps = 1;

		if (phba->sli_rev == 3) {
			phba->host_gp = &mb_slim->us.s3.host[0];
			phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
		} else {
			phba->host_gp = &mb_slim->us.s2.host[0];
			phba->hbq_put = NULL;
		}

		/* mask off BAR0's flag bits 0 - 3 */
		phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
			(void __iomem *)phba->host_gp -
			(void __iomem *)phba->MBslimaddr;
		if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
			phba->pcb->hgpAddrHigh = bar_high;
		else
			phba->pcb->hgpAddrHigh = 0;
		/* write HGP data to SLIM at the required longword offset */
		memset(&hgp, 0, sizeof(struct lpfc_hgp));

		for (i = 0; i < phba->sli.num_rings; i++) {
			lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
				    sizeof(*phba->host_gp));
		}
	}

	/* Setup Port Group offset */
	if (phba->sli_rev == 3)
		pgp_offset = offsetof(struct lpfc_sli2_slim,
				      mbx.us.s3_pgp.port);
	else
		pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
	pdma_addr = phba->slim2p.phys + pgp_offset;
	phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);

	/* Use callback routine to set up rings in the pcb */
	lpfc_config_pcb_setup(phba);

	/* special handling for LC HBAs */
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		uint32_t hbainit[5];

		lpfc_hba_init(phba, hbainit);

		memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
	}

	/* Swap PCB if needed */
	lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}

/**
 * lpfc_kill_board - Prepare a mailbox command for killing board
 * @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command. * * The kill board mailbox command is used to tell firmware to perform a * graceful shutdown of a channel on a specified board to prepare for reset. * When the kill board mailbox command is received, the ER3 bit is set to 1 * in the Host Status register and the ER Attention bit is set to 1 in the * Host Attention register of the HBA function that received the kill board * command. * * This routine prepares the mailbox command for killing the board in * preparation for a graceful shutdown. **/ void lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_KILL_BOARD; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_mbox_put - Put a mailbox cmd into the tail of driver's mailbox queue * @phba: pointer to lpfc hba data structure. * @mbq: pointer to the driver internal queue element for mailbox command. * * Driver maintains a internal mailbox command queue implemented as a linked * list. When a mailbox command is issued, it shall be put into the mailbox * command queue such that they shall be processed orderly as HBA can process * one mailbox command at a time. **/ void lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) { struct lpfc_sli *psli; psli = &phba->sli; list_add_tail(&mbq->list, &psli->mboxq); psli->mboxq_cnt++; return; } /** * lpfc_mbox_get - Remove a mailbox cmd from the head of driver's mailbox queue * @phba: pointer to lpfc hba data structure. * * Driver maintains a internal mailbox command queue implemented as a linked * list. When a mailbox command is issued, it shall be put into the mailbox * command queue such that they shall be processed orderly as HBA can process * one mailbox command at a time. After HBA finished processing a mailbox * command, the driver will remove a pending mailbox command from the head of * the mailbox command queue and send to the HBA for processing. 
* * Return codes * pointer to the driver internal queue element for mailbox command. **/ LPFC_MBOXQ_t * lpfc_mbox_get(struct lpfc_hba * phba) { LPFC_MBOXQ_t *mbq = NULL; struct lpfc_sli *psli = &phba->sli; list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list); if (mbq) psli->mboxq_cnt--; return mbq; } /** * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list * @phba: pointer to lpfc hba data structure. * @mbq: pointer to the driver internal queue element for mailbox command. * * This routine put the completed mailbox command into the mailbox command * complete list. This is the unlocked version of the routine. The mailbox * complete list is used by the driver worker thread to process mailbox * complete callback functions outside the driver interrupt handler. **/ void __lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) { list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); } /** * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list * @phba: pointer to lpfc hba data structure. * @mbq: pointer to the driver internal queue element for mailbox command. * * This routine put the completed mailbox command into the mailbox command * complete list. This is the locked version of the routine. The mailbox * complete list is used by the driver worker thread to process mailbox * complete callback functions outside the driver interrupt handler. **/ void lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) { unsigned long iflag; /* This function expects to be called from interrupt context */ spin_lock_irqsave(&phba->hbalock, iflag); __lpfc_mbox_cmpl_put(phba, mbq); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } /** * lpfc_mbox_cmd_check - Check the validality of a mailbox command * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to the driver internal queue element for mailbox command. * * This routine is to check whether a mailbox command is valid to be issued. 
* This check will be performed by both the mailbox issue API when a client * is to issue a mailbox command to the mailbox transport. * * Return 0 - pass the check, -ENODEV - fail the check **/ int lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { /* Mailbox command that have a completion handler must also have a * vport specified. */ if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) { if (!mboxq->vport) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, "1814 Mbox x%x failed, no vport\n", mboxq->u.mb.mbxCommand); dump_stack(); return -ENODEV; } } return 0; } /** * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command * @phba: pointer to lpfc hba data structure. * * This routine is to check whether the HBA device is ready for posting a * mailbox command. It is used by the mailbox transport API at the time the * to post a mailbox command to the device. * * Return 0 - pass the check, -ENODEV - fail the check **/ int lpfc_mbox_dev_check(struct lpfc_hba *phba) { /* If the PCI channel is in offline state, do not issue mbox */ if (unlikely(pci_channel_offline(phba->pcidev))) return -ENODEV; /* If the HBA is in error state, do not issue mbox */ if (phba->link_state == LPFC_HBA_ERROR) return -ENODEV; return 0; } /** * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value * @phba: pointer to lpfc hba data structure. * @cmd: mailbox command code. * * This routine retrieves the proper timeout value according to the mailbox * command code. 
* * Return codes * Timeout value to be used for the given mailbox command **/ int lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { MAILBOX_t *mbox = &mboxq->u.mb; uint8_t subsys, opcode; switch (mbox->mbxCommand) { case MBX_WRITE_NV: /* 0x03 */ case MBX_UPDATE_CFG: /* 0x1B */ case MBX_DOWN_LOAD: /* 0x1C */ case MBX_DEL_LD_ENTRY: /* 0x1D */ case MBX_LOAD_AREA: /* 0x81 */ case MBX_WRITE_WWN: /* 0x98 */ case MBX_LOAD_EXP_ROM: /* 0x9C */ return LPFC_MBOX_TMO_FLASH_CMD; case MBX_SLI4_CONFIG: /* 0x9b */ subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq); opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq); if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) { switch (opcode) { case LPFC_MBOX_OPCODE_READ_OBJECT: case LPFC_MBOX_OPCODE_WRITE_OBJECT: case LPFC_MBOX_OPCODE_READ_OBJECT_LIST: case LPFC_MBOX_OPCODE_DELETE_OBJECT: case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG: case LPFC_MBOX_OPCODE_GET_PROFILE_LIST: case LPFC_MBOX_OPCODE_SET_ACT_PROFILE: case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG: case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG: return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO; } } if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) { switch (opcode) { case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS: return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO; } } return LPFC_MBOX_SLI4_CONFIG_TMO; } return LPFC_MBOX_TMO; } /** * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command * @mbox: pointer to lpfc mbox command. * @sgentry: sge entry index. * @phyaddr: physical address for the sge * @length: Length of the sge. * * This routine sets up an entry in the non-embedded mailbox command at the sge * index location. 
 **/
void
lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
              dma_addr_t phyaddr, uint32_t length)
{
    struct lpfc_mbx_nembed_cmd *nembed_sge;

    /* The SGE array lives in the non-embedded command payload */
    nembed_sge = (struct lpfc_mbx_nembed_cmd *)
        &mbox->u.mqe.un.nembed_cmd;
    nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
    nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
    nembed_sge->sge[sgentry].length = length;
}

/**
 * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
 * @mbox: pointer to lpfc mbox command.
 * @sgentry: sge entry index.
 * @sge: sge entry to be filled in from the mailbox command.
 *
 * This routine gets an entry from the non-embedded mailbox command at the sge
 * index location.
 **/
void
lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
              struct lpfc_mbx_sge *sge)
{
    struct lpfc_mbx_nembed_cmd *nembed_sge;

    nembed_sge = (struct lpfc_mbx_nembed_cmd *)
        &mbox->u.mqe.un.nembed_cmd;
    sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
    sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
    sge->length = nembed_sge->sge[sgentry].length;
}

/**
 * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command.
 *
 * This routine frees SLI4 specific mailbox command for sending IOCTL command.
 **/
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
    struct lpfc_mbx_sli4_config *sli4_cfg;
    struct lpfc_mbx_sge sge;
    dma_addr_t phyaddr;
    uint32_t sgecount, sgentry;

    sli4_cfg = &mbox->u.mqe.un.sli4_config;

    /* For embedded mbox command, just free the mbox command */
    if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
        mempool_free(mbox, phba->mbox_mem_pool);
        return;
    }

    /* For non-embedded mbox command, we need to free the pages first */
    sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
    /* There is nothing we can do if there is no sge address array */
    if (unlikely(!mbox->sge_array)) {
        mempool_free(mbox, phba->mbox_mem_pool);
        return;
    }
    /* Each non-embedded DMA memory was allocated in the length of a page */
    for (sgentry = 0; sgentry < sgecount; sgentry++) {
        lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
        phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
        dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
                  mbox->sge_array->addr[sgentry], phyaddr);
    }
    /* Free the sge address array memory */
    kfree(mbox->sge_array);
    /* Finally, free the mailbox command itself */
    mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command.
 * @subsystem: The sli4 config sub mailbox subsystem.
 * @opcode: The sli4 config sub mailbox command opcode.
 * @length: Length of the sli4 config mailbox command (including sub-header).
 *
 * This routine sets up the header fields of SLI4 specific mailbox command
 * for sending IOCTL command.
 *
 * Return: the actual length of the mbox command allocated (mostly useful
 * for none embedded mailbox command).
 **/
int
lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
         uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
{
    struct lpfc_mbx_sli4_config *sli4_config;
    union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
    uint32_t alloc_len;
    uint32_t resid_len;
    uint32_t pagen, pcount;
    void *viraddr;
    dma_addr_t phyaddr;

    /* Set up SLI4 mailbox command header fields */
    memset(mbox, 0, sizeof(*mbox));
    bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);

    /* Set up SLI4 ioctl command header fields */
    sli4_config = &mbox->u.mqe.un.sli4_config;

    /* Setup for the embedded mbox command */
    if (emb) {
        /* Set up main header fields */
        bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
        sli4_config->header.cfg_mhdr.payload_length = length;
        /* Set up sub-header fields following main header */
        bf_set(lpfc_mbox_hdr_opcode,
            &sli4_config->header.cfg_shdr.request, opcode);
        bf_set(lpfc_mbox_hdr_subsystem,
            &sli4_config->header.cfg_shdr.request, subsystem);
        sli4_config->header.cfg_shdr.request.request_length =
            length - LPFC_MBX_CMD_HDR_LENGTH;
        return length;
    }

    /* Setup for the non-embedded mbox command: payload is carried in
     * external DMA pages referenced by SGEs, capped at the max SGE count.
     */
    pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
    pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
                LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
    /* Allocate record for keeping SGE virtual addresses */
    mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
                  GFP_KERNEL);
    if (!mbox->sge_array) {
        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                "2527 Failed to allocate non-embedded SGE "
                "array.\n");
        return 0;
    }
    for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
        /* The DMA memory is always allocated in the length of a
         * page even though the last SGE might not fill up to a
         * page, this is used as a priori size of SLI4_PAGE_SIZE for
         * the later DMA memory free.
         */
        viraddr = dma_alloc_coherent(&phba->pcidev->dev,
                         SLI4_PAGE_SIZE, &phyaddr,
                         GFP_KERNEL);
        /* In case of malloc fails, proceed with whatever we have */
        if (!viraddr)
            break;
        memset(viraddr, 0, SLI4_PAGE_SIZE);
        mbox->sge_array->addr[pagen] = viraddr;
        /* Keep the first page for later sub-header construction */
        if (pagen == 0)
            cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
        resid_len = length - alloc_len;
        if (resid_len > SLI4_PAGE_SIZE) {
            lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
                          SLI4_PAGE_SIZE);
            alloc_len += SLI4_PAGE_SIZE;
        } else {
            /* Last SGE carries only the residual length */
            lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
                          resid_len);
            alloc_len = length;
        }
    }

    /* Set up main header fields in mailbox command */
    sli4_config->header.cfg_mhdr.payload_length = alloc_len;
    bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);

    /* Set up sub-header fields into the first page */
    if (pagen > 0) {
        bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
        bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
        cfg_shdr->request.request_length =
            alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
    }
    /* The sub-header is in DMA memory, which needs endian conversion */
    if (cfg_shdr)
        lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
                      sizeof(union lpfc_sli4_cfg_shdr));
    return alloc_len;
}

/**
 * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to an allocated lpfc mbox resource.
 * @exts_count: the number of extents, if required, to allocate.
 * @rsrc_type: the resource extent type.
 * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
 *
 * This routine completes the subcommand header for SLI4 resource extent
 * mailbox commands. It is called after lpfc_sli4_config. The caller must
 * pass an allocated mailbox and the attributes required to initialize the
 * mailbox correctly.
 *
 * Return: 0 - successful, 1 - failed (bad DMA address or unsupported opcode).
 **/
int
lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
               uint16_t exts_count, uint16_t rsrc_type, bool emb)
{
    uint8_t opcode = 0;
    struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
    void *virtaddr = NULL;

    /* Set up SLI4 ioctl command header fields */
    if (emb == LPFC_SLI4_MBX_NEMBED) {
        /* Get the first SGE entry from the non-embedded DMA memory */
        virtaddr = mbox->sge_array->addr[0];
        if (virtaddr == NULL)
            return 1;
        n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
    }

    /*
     * The resource type is common to all extent Opcodes and resides in the
     * same position.
     */
    if (emb == LPFC_SLI4_MBX_EMBED)
        bf_set(lpfc_mbx_alloc_rsrc_extents_type,
               &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
               rsrc_type);
    else {
        /* This is DMA data. Byteswap is required. */
        bf_set(lpfc_mbx_alloc_rsrc_extents_type,
               n_rsrc_extnt, rsrc_type);
        lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
                      &n_rsrc_extnt->word4,
                      sizeof(uint32_t));
    }

    /* Complete the initialization for the particular Opcode. */
    opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
    switch (opcode) {
    case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
        /* Only the allocate opcode carries an extent count */
        if (emb == LPFC_SLI4_MBX_EMBED)
            bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
                   &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
                   exts_count);
        else
            bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
                   n_rsrc_extnt, exts_count);
        break;
    case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
    case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
    case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
        /* Initialization is complete.*/
        break;
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                "2929 Resource Extent Opcode x%x is "
                "unsupported\n", opcode);
        return 1;
    }

    return 0;
}

/**
 * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command queue entry.
 *
 * This routine gets the subsystem from a SLI4 specific SLI_CONFIG mailbox
 * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the
 * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall
 * be returned.
 **/
uint8_t
lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
    struct lpfc_mbx_sli4_config *sli4_cfg;
    union lpfc_sli4_cfg_shdr *cfg_shdr;

    if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
        return LPFC_MBOX_SUBSYSTEM_NA;
    sli4_cfg = &mbox->u.mqe.un.sli4_config;

    /* For embedded mbox command, get opcode from embedded sub-header*/
    if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
        cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
        return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
    }

    /* For non-embedded mbox command, get opcode from first dma page */
    if (unlikely(!mbox->sge_array))
        return LPFC_MBOX_SUBSYSTEM_NA;
    cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
    return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
}

/**
 * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command queue entry.
 *
 * This routine gets the opcode from a SLI4 specific SLI_CONFIG mailbox
 * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if
 * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) be
 * returned.
 **/
uint8_t
lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
    struct lpfc_mbx_sli4_config *sli4_cfg;
    union lpfc_sli4_cfg_shdr *cfg_shdr;

    if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
        return LPFC_MBOX_OPCODE_NA;
    sli4_cfg = &mbox->u.mqe.un.sli4_config;

    /* For embedded mbox command, get opcode from embedded sub-header*/
    if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
        cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
        return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
    }

    /* For non-embedded mbox command, get opcode from first dma page */
    if (unlikely(!mbox->sge_array))
        return LPFC_MBOX_OPCODE_NA;
    cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
    return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
}

/**
 * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to lpfc mbox command to construct into.
 * @fcf_index: index to fcf table.
 *
 * This routine constructs a non-embedded mailbox command for reading a FCF
 * table entry referred by @fcf_index.
 *
 * Return: 0 - successful, -ENOMEM - no mailbox or DMA memory available.
 **/
int
lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
               struct lpfcMboxq *mboxq,
               uint16_t fcf_index)
{
    void *virt_addr;
    dma_addr_t phys_addr;
    uint8_t *bytep;
    struct lpfc_mbx_sge sge;
    uint32_t alloc_len, req_len;
    struct lpfc_mbx_read_fcf_tbl *read_fcf;

    if (!mboxq)
        return -ENOMEM;

    req_len = sizeof(struct fcf_record) +
          sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);

    /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
    alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
                 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
                 LPFC_SLI4_MBX_NEMBED);
    if (alloc_len < req_len) {
        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                "0291 Allocated DMA memory size (x%x) is "
                "less than the requested DMA memory "
                "size (x%x)\n", alloc_len, req_len);
        return -ENOMEM;
    }

    /* Get the first SGE entry from the non-embedded DMA memory. This
     * routine only uses a single SGE.
     */
    lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
    phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
    virt_addr = mboxq->sge_array->addr[0];
    read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;

    /* Set up command fields */
    bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
    /* Perform necessary endian conversion */
    bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
    lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));

    return 0;
}

/**
 * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to lpfc mbox command.
 *
 * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
 * mailbox command.
 **/
void
lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
{
    /* Set up SLI4 mailbox command header fields */
    memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
    bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);

    /* Set up host requested features. */
    bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
    bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);

    /* Enable DIF (block guard) only if configured to do so. */
    if (phba->cfg_enable_bg)
        bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);

    /* Enable NPIV only if configured to do so. */
    if (phba->max_vpi && phba->cfg_enable_npiv)
        bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);

    return;
}

/**
 * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
 * @mbox: pointer to lpfc mbox command to initialize.
 * @vport: Vport associated with the VF.
 *
 * This routine initializes @mbox to all zeros and then fills in the mailbox
 * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
 * in the context of an FCF. The driver issues this command to setup a VFI
 * before issuing a FLOGI to login to the VSAN. The driver should also issue a
 * REG_VFI after a successful VSAN login.
 **/
void
lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
{
    struct lpfc_mbx_init_vfi *init_vfi;

    memset(mbox, 0, sizeof(*mbox));
    mbox->vport = vport;
    init_vfi = &mbox->u.mqe.un.init_vfi;
    bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
    bf_set(lpfc_init_vfi_vr, init_vfi, 1);
    bf_set(lpfc_init_vfi_vt, init_vfi, 1);
    bf_set(lpfc_init_vfi_vp, init_vfi, 1);
    /* Translate driver logical indices into adapter physical ids */
    bf_set(lpfc_init_vfi_vfi, init_vfi,
           vport->phba->sli4_hba.vfi_ids[vport->vfi]);
    bf_set(lpfc_init_vfi_vpi, init_vfi,
           vport->phba->vpi_ids[vport->vpi]);
    bf_set(lpfc_init_vfi_fcfi, init_vfi,
           vport->phba->fcf.fcfi);
}

/**
 * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
 * @mbox: pointer to lpfc mbox command to initialize.
 * @vport: vport associated with the VF.
 * @phys: BDE DMA bus address used to send the service parameters to the HBA.
 *
 * This routine initializes @mbox to all zeros and then fills in the mailbox
 * fields from @vport, and uses @phys as a DMAable buffer to send the vport's
 * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
 * fabrics identified by VFI in the context of an FCF.
 **/
void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
    struct lpfc_mbx_reg_vfi *reg_vfi;

    memset(mbox, 0, sizeof(*mbox));
    reg_vfi = &mbox->u.mqe.un.reg_vfi;
    bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
    bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
    bf_set(lpfc_reg_vfi_vfi, reg_vfi,
           vport->phba->sli4_hba.vfi_ids[vport->vfi]);
    bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
    bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
    /* WWPN is sent to the HBA in little-endian word order */
    memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
    reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
    reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
    reg_vfi->e_d_tov = vport->phba->fc_edtov;
    reg_vfi->r_a_tov = vport->phba->fc_ratov;
    /* BDE describes the DMA buffer holding the service parameters */
    reg_vfi->bde.addrHigh = putPaddrHigh(phys);
    reg_vfi->bde.addrLow = putPaddrLow(phys);
    reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
    reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
    bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
    lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
            "3134 Register VFI, mydid:x%x, fcfi:%d, "
            " vfi:%d, vpi:%d, fc_pname:%x%x\n",
            vport->fc_myDID,
            vport->phba->fcf.fcfi,
            vport->phba->sli4_hba.vfi_ids[vport->vfi],
            vport->phba->vpi_ids[vport->vpi],
            reg_vfi->wwn[0], reg_vfi->wwn[1]);
}

/**
 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
 * @phba: pointer to the hba structure to init the VPI for.
 * @mbox: pointer to lpfc mbox command to initialize.
 * @vpi: VPI to be initialized.
 *
 * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
 * command to activate a virtual N_Port. The HBA assigns a MAC address to use
 * with the virtual N Port. The SLI Host issues this command before issuing a
 * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
 * successful virtual NPort login.
**/ void lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi) { memset(mbox, 0, sizeof(*mbox)); bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, phba->vpi_ids[vpi]); bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi, phba->sli4_hba.vfi_ids[phba->pport->vfi]); } /** * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @vport: vport associated with the VF. * * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric * (logical NPort) into the inactive state. The SLI Host must have logged out * and unregistered all remote N_Ports to abort any activity on the virtual * fabric. The SLI Port posts the mailbox response after marking the virtual * fabric inactive. **/ void lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) { memset(mbox, 0, sizeof(*mbox)); bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vport->phba->sli4_hba.vfi_ids[vport->vfi]); } /** * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23 * @phba: pointer to the hba structure containing. * @mbox: pointer to lpfc mbox command to initialize. * * This function create a SLI4 dump mailbox command to dump configure * region 23. 
 **/
int
lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
    struct lpfc_dmabuf *mp = NULL;
    MAILBOX_t *mb;

    memset(mbox, 0, sizeof(*mbox));
    mb = &mbox->u.mb;

    mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
    if (mp)
        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

    if (!mp || !mp->virt) {
        /* kfree(NULL) is a no-op, so this covers both failures */
        kfree(mp);
        /* dump config region 23 failed to allocate memory */
        lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
            "2569 lpfc dump config region 23: memory"
            " allocation failed\n");
        return 1;
    }

    memset(mp->virt, 0, LPFC_BPL_SIZE);
    INIT_LIST_HEAD(&mp->list);

    /* save address for completion */
    mbox->context1 = (uint8_t *) mp;

    mb->mbxCommand = MBX_DUMP_MEMORY;
    mb->un.varDmp.type = DMP_NV_PARAMS;
    mb->un.varDmp.region_id = DMP_REGION_23;
    mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
    /* Physical address of the DMA buffer receiving the dump */
    mb->un.varWords[3] = putPaddrLow(mp->phys);
    mb->un.varWords[4] = putPaddrHigh(mp->phys);
    return 0;
}

/**
 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
 * @mbox: pointer to lpfc mbox command to initialize.
 *
 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
 * SLI Host uses the command to activate an FCF after it has acquired FCF
 * information via a READ_FCF mailbox command. This mailbox command also is used
 * to indicate where received unsolicited frames from this FCF will be sent. By
 * default this routine will set up the FCF to forward all unsolicited frames
 * to the RQ ID passed in the @phba. This can be overridden by the caller for
 * more complicated setups.
 **/
void
lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
    struct lpfc_mbx_reg_fcfi *reg_fcfi;

    memset(mbox, 0, sizeof(*mbox));
    reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
    bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
    /* Route all unsolicited frames to the header RQ by default;
     * the remaining RQ ids are left invalid.
     */
    bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
    bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
    bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
    bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
    bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
           phba->fcf.current_rec.fcf_indx);
    /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
    bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
    if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
        bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
        bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
               phba->fcf.current_rec.vlan_id);
    }
}

/**
 * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
 * @mbox: pointer to lpfc mbox command to initialize.
 * @fcfi: FCFI to be unregistered.
 *
 * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
 * The SLI Host uses the command to inactivate an FCFI.
 **/
void
lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
{
    memset(mbox, 0, sizeof(*mbox));
    bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
    bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
}

/**
 * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
 * @mbox: pointer to lpfc mbox command to initialize.
 * @ndlp: The nodelist structure that describes the RPI to resume.
 *
 * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
 * link event.
**/ void lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = ndlp->phba; struct lpfc_mbx_resume_rpi *resume_rpi; memset(mbox, 0, sizeof(*mbox)); resume_rpi = &mbox->u.mqe.un.resume_rpi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); bf_set(lpfc_resume_rpi_index, resume_rpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); resume_rpi->event_tag = ndlp->phba->fc_eventTag; } /** * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages * mailbox command. * @mbox: pointer to lpfc mbox command to initialize. * * The PORT_CAPABILITIES supported pages mailbox command is issued to * retrieve the particular feature pages supported by the port. **/ void lpfc_supported_pages(struct lpfcMboxq *mbox) { struct lpfc_mbx_supp_pages *supp_pages; memset(mbox, 0, sizeof(*mbox)); supp_pages = &mbox->u.mqe.un.supp_pages; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES); bf_set(cpn, supp_pages, LPFC_SUPP_PAGES); } /** * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd. * @mbox: pointer to lpfc mbox command to initialize. * * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to * retrieve the particular SLI4 features supported by the port. **/ void lpfc_pc_sli4_params(struct lpfcMboxq *mbox) { struct lpfc_mbx_pc_sli4_params *sli4_params; memset(mbox, 0, sizeof(*mbox)); sli4_params = &mbox->u.mqe.un.sli4_params; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES); bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS); }
gpl-2.0
StarkDroid/android_kernel_motorola_msm8610
drivers/scsi/lpfc/lpfc_mem.c
5035
15463
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/mempool.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_crtn.h" #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ /** * lpfc_mem_alloc - create and allocate all PCI and memory pools * @phba: HBA to allocate pools for * * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. * * Notes: Not interrupt-safe. 
Must be called with no locks held. If any
 * allocation fails, frees all successfully allocated memory before returning.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure (if any memory allocations fail)
 **/
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
    struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
    int i;

    /* SLI4 self-aligns the SCSI DMA buffers; earlier revs use @align */
    if (phba->sli_rev == LPFC_SLI_REV4)
        phba->lpfc_scsi_dma_buf_pool =
            pci_pool_create("lpfc_scsi_dma_buf_pool",
                phba->pcidev,
                phba->cfg_sg_dma_buf_size,
                phba->cfg_sg_dma_buf_size,
                0);
    else
        phba->lpfc_scsi_dma_buf_pool =
            pci_pool_create("lpfc_scsi_dma_buf_pool",
                phba->pcidev, phba->cfg_sg_dma_buf_size,
                align, 0);
    if (!phba->lpfc_scsi_dma_buf_pool)
        goto fail;

    phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
                            LPFC_BPL_SIZE,
                            align, 0);
    if (!phba->lpfc_mbuf_pool)
        goto fail_free_dma_buf_pool;

    /* Pre-allocate a safety pool of mbufs for MEM_PRI allocations */
    pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *
                 LPFC_MBUF_POOL_SIZE, GFP_KERNEL);
    if (!pool->elements)
        goto fail_free_lpfc_mbuf_pool;

    pool->max_count = 0;
    pool->current_count = 0;
    for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
        pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
                       GFP_KERNEL, &pool->elements[i].phys);
        if (!pool->elements[i].virt)
            goto fail_free_mbuf_pool;
        pool->max_count++;
        pool->current_count++;
    }

    phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
                             sizeof(LPFC_MBOXQ_t));
    if (!phba->mbox_mem_pool)
        goto fail_free_mbuf_pool;

    phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
                        sizeof(struct lpfc_nodelist));
    if (!phba->nlp_mem_pool)
        goto fail_free_mbox_pool;

    if (phba->sli_rev == LPFC_SLI_REV4) {
        /* SLI4 uses separate header/data receive buffer pools */
        phba->rrq_pool =
            mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
                        sizeof(struct lpfc_node_rrq));
        if (!phba->rrq_pool)
            goto fail_free_nlp_mem_pool;
        phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
                          phba->pcidev,
                          LPFC_HDR_BUF_SIZE, align, 0);
        if (!phba->lpfc_hrb_pool)
            goto fail_free_rrq_mem_pool;

        phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
                          phba->pcidev,
                          LPFC_DATA_BUF_SIZE, align, 0);
        if (!phba->lpfc_drb_pool)
            goto fail_free_hrb_pool;
        phba->lpfc_hbq_pool = NULL;
    } else {
        /* SLI3 uses a single HBQ buffer pool instead */
        phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
                          phba->pcidev, LPFC_BPL_SIZE, align, 0);
        if (!phba->lpfc_hbq_pool)
            goto fail_free_nlp_mem_pool;
        phba->lpfc_hrb_pool = NULL;
        phba->lpfc_drb_pool = NULL;
    }

    return 0;
 fail_free_hrb_pool:
    pci_pool_destroy(phba->lpfc_hrb_pool);
    phba->lpfc_hrb_pool = NULL;
 fail_free_rrq_mem_pool:
    mempool_destroy(phba->rrq_pool);
    phba->rrq_pool = NULL;
 fail_free_nlp_mem_pool:
    mempool_destroy(phba->nlp_mem_pool);
    phba->nlp_mem_pool = NULL;
 fail_free_mbox_pool:
    mempool_destroy(phba->mbox_mem_pool);
    phba->mbox_mem_pool = NULL;
 fail_free_mbuf_pool:
    /* i indexes one past the last successful safety-pool allocation */
    while (i--)
        pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
                          pool->elements[i].phys);
    kfree(pool->elements);
 fail_free_lpfc_mbuf_pool:
    pci_pool_destroy(phba->lpfc_mbuf_pool);
    phba->lpfc_mbuf_pool = NULL;
 fail_free_dma_buf_pool:
    pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
    phba->lpfc_scsi_dma_buf_pool = NULL;
 fail:
    return -ENOMEM;
}

/**
 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for
 *
 * Description: Free the memory allocated by lpfc_mem_alloc routine. This
 * routine is the counterpart of lpfc_mem_alloc.
 *
 * Returns: None
 **/
void
lpfc_mem_free(struct lpfc_hba *phba)
{
    int i;
    struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

    /* Free HBQ pools */
    lpfc_sli_hbqbuf_free_all(phba);
    if (phba->lpfc_drb_pool)
        pci_pool_destroy(phba->lpfc_drb_pool);
    phba->lpfc_drb_pool = NULL;
    if (phba->lpfc_hrb_pool)
        pci_pool_destroy(phba->lpfc_hrb_pool);
    phba->lpfc_hrb_pool = NULL;

    if (phba->lpfc_hbq_pool)
        pci_pool_destroy(phba->lpfc_hbq_pool);
    phba->lpfc_hbq_pool = NULL;

    /* Free NLP memory pool */
    mempool_destroy(phba->nlp_mem_pool);
    phba->nlp_mem_pool = NULL;

    /* Free mbox memory pool */
    mempool_destroy(phba->mbox_mem_pool);
    phba->mbox_mem_pool = NULL;

    /* Free MBUF memory pool: return safety-pool elements first */
    for (i = 0; i < pool->current_count; i++)
        pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
                  pool->elements[i].phys);
    kfree(pool->elements);

    pci_pool_destroy(phba->lpfc_mbuf_pool);
    phba->lpfc_mbuf_pool = NULL;

    /* Free DMA buffer memory pool */
    pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
    phba->lpfc_scsi_dma_buf_pool = NULL;

    return;
}

/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Free memory from PCI and driver memory pools and also those
 * used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
 * the VPI bitmask.
 *
 * Returns: None
 **/
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
    struct lpfc_sli *psli = &phba->sli;
    LPFC_MBOXQ_t *mbox, *next_mbox;
    struct lpfc_dmabuf   *mp;

    /* Free memory used in mailbox queue back to mailbox memory pool */
    list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
        mp = (struct lpfc_dmabuf *) (mbox->context1);
        if (mp) {
            lpfc_mbuf_free(phba, mp->virt, mp->phys);
            kfree(mp);
        }
        list_del(&mbox->list);
        mempool_free(mbox, phba->mbox_mem_pool);
    }
    /* Free memory used in mailbox cmpl list back to mailbox memory pool */
    list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
        mp = (struct lpfc_dmabuf *) (mbox->context1);
        if (mp) {
            lpfc_mbuf_free(phba, mp->virt, mp->phys);
            kfree(mp);
        }
        list_del(&mbox->list);
        mempool_free(mbox, phba->mbox_mem_pool);
    }
    /* Free the active mailbox command back to the mailbox memory pool.
     * Clear the ACTIVE flag under hbalock so the SLI layer stops
     * considering the command outstanding.
     */
    spin_lock_irq(&phba->hbalock);
    psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
    spin_unlock_irq(&phba->hbalock);
    if (psli->mbox_active) {
        mbox = psli->mbox_active;
        mp = (struct lpfc_dmabuf *) (mbox->context1);
        if (mp) {
            lpfc_mbuf_free(phba, mp->virt, mp->phys);
            kfree(mp);
        }
        mempool_free(mbox, phba->mbox_mem_pool);
        psli->mbox_active = NULL;
    }

    /* Free and destroy all the allocated memory pools */
    lpfc_mem_free(phba);

    /* Free the iocb lookup array */
    kfree(psli->iocbq_lookup);
    psli->iocbq_lookup = NULL;

    return;
}

/**
 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
 * @handle: used to return the DMA-mapped address of the mbuf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
 * Allocates from generic pci_pool_alloc function first and if that fails and
 * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
 * HBA's pool.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held. Takes
 * phba->hbalock.
 *
 * Returns:
 *   pointer to the allocated mbuf on success
 *   NULL on failure
 **/
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
    struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
    unsigned long iflags;
    void *ret;

    ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);

    /* Fall back to the safety pool for MEM_PRI callers */
    spin_lock_irqsave(&phba->hbalock, iflags);
    if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
        pool->current_count--;
        ret = pool->elements[pool->current_count].virt;
        *handle = pool->elements[pool->current_count].phys;
    }
    spin_unlock_irqrestore(&phba->hbalock, iflags);
    return ret;
}

/**
 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
 * it is below its max_count, frees the mbuf otherwise.
 *
 * Notes: Must be called with phba->hbalock held to synchronize access to
 * lpfc_mbuf_safety_pool.
 *
 * Returns: None
 **/
void
__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
    struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

    /* Refill the safety pool before giving memory back to the PCI pool */
    if (pool->current_count < pool->max_count) {
        pool->elements[pool->current_count].virt = virt;
        pool->elements[pool->current_count].phys = dma;
        pool->current_count++;
    } else {
        pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
    }
    return;
}

/**
 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
 * it is below its max_count, frees the mbuf otherwise.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
* * Returns: None **/ void lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) { unsigned long iflags; spin_lock_irqsave(&phba->hbalock, iflags); __lpfc_mbuf_free(phba, virt, dma); spin_unlock_irqrestore(&phba->hbalock, iflags); return; } /** * lpfc_els_hbq_alloc - Allocate an HBQ buffer * @phba: HBA to allocate HBQ buffer for * * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI * pool along a non-DMA-mapped container for it. * * Notes: Not interrupt-safe. Must be called with no locks held. * * Returns: * pointer to HBQ on success * NULL on failure **/ struct hbq_dmabuf * lpfc_els_hbq_alloc(struct lpfc_hba *phba) { struct hbq_dmabuf *hbqbp; hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); if (!hbqbp) return NULL; hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, &hbqbp->dbuf.phys); if (!hbqbp->dbuf.virt) { kfree(hbqbp); return NULL; } hbqbp->size = LPFC_BPL_SIZE; return hbqbp; } /** * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc * @phba: HBA buffer was allocated for * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc * * Description: Frees both the container and the DMA-mapped buffer returned by * lpfc_els_hbq_alloc. * * Notes: Can be called with or without locks held. * * Returns: None **/ void lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) { pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); kfree(hbqbp); return; } /** * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer * @phba: HBA to allocate a receive buffer for * * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI * pool along a non-DMA-mapped container for it. * * Notes: Not interrupt-safe. Must be called with no locks held. 
* * Returns: * pointer to HBQ on success * NULL on failure **/ struct hbq_dmabuf * lpfc_sli4_rb_alloc(struct lpfc_hba *phba) { struct hbq_dmabuf *dma_buf; dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); if (!dma_buf) return NULL; dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, &dma_buf->hbuf.phys); if (!dma_buf->hbuf.virt) { kfree(dma_buf); return NULL; } dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, &dma_buf->dbuf.phys); if (!dma_buf->dbuf.virt) { pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, dma_buf->hbuf.phys); kfree(dma_buf); return NULL; } dma_buf->size = LPFC_BPL_SIZE; return dma_buf; } /** * lpfc_sli4_rb_free - Frees a receive buffer * @phba: HBA buffer was allocated for * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc * * Description: Frees both the container and the DMA-mapped buffers returned by * lpfc_sli4_rb_alloc. * * Notes: Can be called with or without locks held. * * Returns: None **/ void lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) { pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); kfree(dmab); return; } /** * lpfc_in_buf_free - Free a DMA buffer * @phba: HBA buffer is associated with * @mp: Buffer to free * * Description: Frees the given DMA buffer in the appropriate way given if the * HBA is running in SLI3 mode with HBQs enabled. * * Notes: Takes phba->hbalock. Can be called with or without other locks held. 
 *
 * Returns: None
 **/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		/* Check whether HBQ is still in use */
		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		/* mp is embedded in the hbq_dmabuf as its dbuf member;
		 * unlink it under hbalock before freeing. */
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
		list_del(&hbq_entry->dbuf.list);
		/* NOTE(review): tag == -1 appears to mark a buffer never
		 * posted/tagged, freed via the ELS HBQ free handler —
		 * confirm against lpfc_sli.c. */
		if (hbq_entry->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		/* Non-HBQ (plain mbuf) path: free payload and container. */
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return;
}
gpl-2.0
chijure/android_kernel_lge_vee1
fs/ocfs2/dlm/dlmdomain.c
5035
62020
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmdomain.c * * defines domain join / leave apis * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/debugfs.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmdomain.h" #include "dlmdebug.h" #include "dlmver.h" #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN) #include "cluster/masklog.h" /* * ocfs2 node maps are array of long int, which limits to send them freely * across the wire due to endianness issues. To workaround this, we convert * long ints to byte arrays. 
Following 3 routines are helper functions to
 * set/test/copy bits within those array of bytes
 */
/* Set bit @nr in the byte-array bitmap @map (LSB-first within each byte). */
static inline void byte_set_bit(u8 nr, u8 map[])
{
	map[nr >> 3] |= (1UL << (nr & 7));
}

/* Return non-zero iff bit @nr is set in the byte-array bitmap @map. */
static inline int byte_test_bit(u8 nr, u8 map[])
{
	return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0;
}

/* Copy the first @sz bits of the unsigned-long bitmap @smap into the
 * endian-neutral byte-array bitmap @dmap (zeroed first). */
static inline void byte_copymap(u8 dmap[], unsigned long smap[],
			unsigned int sz)
{
	unsigned int nn;

	if (!sz)
		return;

	memset(dmap, 0, ((sz + 7) >> 3));
	for (nn = 0 ; nn < sz; nn++)
		if (test_bit(nn, smap))
			byte_set_bit(nn, dmap);
}

/* Free @pages pages previously allocated by dlm_alloc_pagevec, then the
 * vector itself. */
static void dlm_free_pagevec(void **vec, int pages)
{
	while (pages--)
		free_page((unsigned long)vec[pages]);
	kfree(vec);
}

/* Allocate a vector of @pages whole pages (used for the lockres hash).
 * Returns NULL on failure; partial allocations are unwound. */
static void **dlm_alloc_pagevec(int pages)
{
	void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL);
	int i;

	if (!vec)
		return NULL;

	for (i = 0; i < pages; i++)
		if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
			goto out_free;

	mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n", pages, (unsigned long)DLM_HASH_PAGES, (unsigned long)DLM_BUCKETS_PER_PAGE);
	return vec;
out_free:
	/* i pages were successfully allocated before the failure */
	dlm_free_pagevec(vec, i);
	return NULL;
}

/*
 *
 * spinlock lock ordering: if multiple locks are needed, obey this ordering:
 *    dlm_domain_lock
 *    struct dlm_ctxt->spinlock
 *    struct dlm_lock_resource->spinlock
 *    struct dlm_ctxt->master_lock
 *    struct dlm_ctxt->ast_lock
 *    dlm_master_list_entry->spinlock
 *    dlm_lock->spinlock
 *
 */

DEFINE_SPINLOCK(dlm_domain_lock);
LIST_HEAD(dlm_domains);
static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);

/*
 * The supported protocol version for DLM communication.  Running domains
 * will have a negotiated version with the same major number and a minor
 * number equal or smaller.  The dlm_ctxt->dlm_locking_proto field should
 * be used to determine what a running domain is actually using.
* * New in version 1.1: * - Message DLM_QUERY_REGION added to support global heartbeat * - Message DLM_QUERY_NODEINFO added to allow online node removes * New in version 1.2: * - Message DLM_BEGIN_EXIT_DOMAIN_MSG added to mark start of exit domain */ static const struct dlm_protocol_version dlm_protocol = { .pv_major = 1, .pv_minor = 2, }; #define DLM_DOMAIN_BACKOFF_MS 200 static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data); static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data); static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data); static int dlm_query_region_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data); static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data); static int dlm_protocol_compare(struct dlm_protocol_version *existing, struct dlm_protocol_version *request); static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { if (hlist_unhashed(&res->hash_node)) return; mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len, res->lockname.name); hlist_del_init(&res->hash_node); dlm_lockres_put(res); } void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { struct hlist_head *bucket; struct qstr *q; assert_spin_locked(&dlm->spinlock); q = &res->lockname; bucket = dlm_lockres_hash(dlm, q->hash); /* get a reference for our hashtable */ dlm_lockres_get(res); hlist_add_head(&res->hash_node, bucket); mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len, res->lockname.name); } struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, const char *name, unsigned int len, unsigned int hash) { struct hlist_head *bucket; struct hlist_node *list; mlog(0, "%.*s\n", len, name); assert_spin_locked(&dlm->spinlock); bucket = dlm_lockres_hash(dlm, 
hash); hlist_for_each(list, bucket) { struct dlm_lock_resource *res = hlist_entry(list, struct dlm_lock_resource, hash_node); if (res->lockname.name[0] != name[0]) continue; if (unlikely(res->lockname.len != len)) continue; if (memcmp(res->lockname.name + 1, name + 1, len - 1)) continue; dlm_lockres_get(res); return res; } return NULL; } /* intended to be called by functions which do not care about lock * resources which are being purged (most net _handler functions). * this will return NULL for any lock resource which is found but * currently in the process of dropping its mastery reference. * use __dlm_lookup_lockres_full when you need the lock resource * regardless (e.g. dlm_get_lock_resource) */ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, const char *name, unsigned int len, unsigned int hash) { struct dlm_lock_resource *res = NULL; mlog(0, "%.*s\n", len, name); assert_spin_locked(&dlm->spinlock); res = __dlm_lookup_lockres_full(dlm, name, len, hash); if (res) { spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_DROPPING_REF) { spin_unlock(&res->spinlock); dlm_lockres_put(res); return NULL; } spin_unlock(&res->spinlock); } return res; } struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, const char *name, unsigned int len) { struct dlm_lock_resource *res; unsigned int hash = dlm_lockid_hash(name, len); spin_lock(&dlm->spinlock); res = __dlm_lookup_lockres(dlm, name, len, hash); spin_unlock(&dlm->spinlock); return res; } static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len) { struct dlm_ctxt *tmp = NULL; struct list_head *iter; assert_spin_locked(&dlm_domain_lock); /* tmp->name here is always NULL terminated, * but domain may not be! 
*/ list_for_each(iter, &dlm_domains) { tmp = list_entry (iter, struct dlm_ctxt, list); if (strlen(tmp->name) == len && memcmp(tmp->name, domain, len)==0) break; tmp = NULL; } return tmp; } /* For null terminated domain strings ONLY */ static struct dlm_ctxt * __dlm_lookup_domain(const char *domain) { assert_spin_locked(&dlm_domain_lock); return __dlm_lookup_domain_full(domain, strlen(domain)); } /* returns true on one of two conditions: * 1) the domain does not exist * 2) the domain exists and it's state is "joined" */ static int dlm_wait_on_domain_helper(const char *domain) { int ret = 0; struct dlm_ctxt *tmp = NULL; spin_lock(&dlm_domain_lock); tmp = __dlm_lookup_domain(domain); if (!tmp) ret = 1; else if (tmp->dlm_state == DLM_CTXT_JOINED) ret = 1; spin_unlock(&dlm_domain_lock); return ret; } static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm) { dlm_destroy_debugfs_subroot(dlm); if (dlm->lockres_hash) dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES); if (dlm->master_hash) dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES); if (dlm->name) kfree(dlm->name); kfree(dlm); } /* A little strange - this function will be called while holding * dlm_domain_lock and is expected to be holding it on the way out. We * will however drop and reacquire it multiple times */ static void dlm_ctxt_release(struct kref *kref) { struct dlm_ctxt *dlm; dlm = container_of(kref, struct dlm_ctxt, dlm_refs); BUG_ON(dlm->num_joins); BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED); /* we may still be in the list if we hit an error during join. 
*/ list_del_init(&dlm->list); spin_unlock(&dlm_domain_lock); mlog(0, "freeing memory from domain %s\n", dlm->name); wake_up(&dlm_domain_events); dlm_free_ctxt_mem(dlm); spin_lock(&dlm_domain_lock); } void dlm_put(struct dlm_ctxt *dlm) { spin_lock(&dlm_domain_lock); kref_put(&dlm->dlm_refs, dlm_ctxt_release); spin_unlock(&dlm_domain_lock); } static void __dlm_get(struct dlm_ctxt *dlm) { kref_get(&dlm->dlm_refs); } /* given a questionable reference to a dlm object, gets a reference if * it can find it in the list, otherwise returns NULL in which case * you shouldn't trust your pointer. */ struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm) { struct list_head *iter; struct dlm_ctxt *target = NULL; spin_lock(&dlm_domain_lock); list_for_each(iter, &dlm_domains) { target = list_entry (iter, struct dlm_ctxt, list); if (target == dlm) { __dlm_get(target); break; } target = NULL; } spin_unlock(&dlm_domain_lock); return target; } int dlm_domain_fully_joined(struct dlm_ctxt *dlm) { int ret; spin_lock(&dlm_domain_lock); ret = (dlm->dlm_state == DLM_CTXT_JOINED) || (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN); spin_unlock(&dlm_domain_lock); return ret; } static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm) { if (dlm->dlm_worker) { flush_workqueue(dlm->dlm_worker); destroy_workqueue(dlm->dlm_worker); dlm->dlm_worker = NULL; } } static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm) { dlm_unregister_domain_handlers(dlm); dlm_debug_shutdown(dlm); dlm_complete_thread(dlm); dlm_complete_recovery_thread(dlm); dlm_destroy_dlm_worker(dlm); /* We've left the domain. Now we can take ourselves out of the * list and allow the kref stuff to help us free the * memory. 
*/ spin_lock(&dlm_domain_lock); list_del_init(&dlm->list); spin_unlock(&dlm_domain_lock); /* Wake up anyone waiting for us to remove this domain */ wake_up(&dlm_domain_events); } static int dlm_migrate_all_locks(struct dlm_ctxt *dlm) { int i, num, n, ret = 0; struct dlm_lock_resource *res; struct hlist_node *iter; struct hlist_head *bucket; int dropped; mlog(0, "Migrating locks from domain %s\n", dlm->name); num = 0; spin_lock(&dlm->spinlock); for (i = 0; i < DLM_HASH_BUCKETS; i++) { redo_bucket: n = 0; bucket = dlm_lockres_hash(dlm, i); iter = bucket->first; while (iter) { n++; res = hlist_entry(iter, struct dlm_lock_resource, hash_node); dlm_lockres_get(res); /* migrate, if necessary. this will drop the dlm * spinlock and retake it if it does migration. */ dropped = dlm_empty_lockres(dlm, res); spin_lock(&res->spinlock); if (dropped) __dlm_lockres_calc_usage(dlm, res); else iter = res->hash_node.next; spin_unlock(&res->spinlock); dlm_lockres_put(res); if (dropped) { cond_resched_lock(&dlm->spinlock); goto redo_bucket; } } cond_resched_lock(&dlm->spinlock); num += n; } spin_unlock(&dlm->spinlock); wake_up(&dlm->dlm_thread_wq); /* let the dlm thread take care of purging, keep scanning until * nothing remains in the hash */ if (num) { mlog(0, "%s: %d lock resources in hash last pass\n", dlm->name, num); ret = -EAGAIN; } mlog(0, "DONE Migrating locks from domain %s\n", dlm->name); return ret; } static int dlm_no_joining_node(struct dlm_ctxt *dlm) { int ret; spin_lock(&dlm->spinlock); ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN; spin_unlock(&dlm->spinlock); return ret; } static int dlm_begin_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; unsigned int node; struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf; if (!dlm_grab(dlm)) return 0; node = exit_msg->node_idx; mlog(0, "%s: Node %u sent a begin exit domain message\n", dlm->name, node); spin_lock(&dlm->spinlock); 
set_bit(node, dlm->exit_domain_map); spin_unlock(&dlm->spinlock); dlm_put(dlm); return 0; } static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm) { /* Yikes, a double spinlock! I need domain_lock for the dlm * state and the dlm spinlock for join state... Sorry! */ again: spin_lock(&dlm_domain_lock); spin_lock(&dlm->spinlock); if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { mlog(0, "Node %d is joining, we wait on it.\n", dlm->joining_node); spin_unlock(&dlm->spinlock); spin_unlock(&dlm_domain_lock); wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm)); goto again; } dlm->dlm_state = DLM_CTXT_LEAVING; spin_unlock(&dlm->spinlock); spin_unlock(&dlm_domain_lock); } static void __dlm_print_nodes(struct dlm_ctxt *dlm) { int node = -1, num = 0; assert_spin_locked(&dlm->spinlock); printk("( "); while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1)) < O2NM_MAX_NODES) { printk("%d ", node); ++num; } printk(") %u nodes\n", num); } static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; unsigned int node; struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf; mlog(0, "%p %u %p", msg, len, data); if (!dlm_grab(dlm)) return 0; node = exit_msg->node_idx; spin_lock(&dlm->spinlock); clear_bit(node, dlm->domain_map); clear_bit(node, dlm->exit_domain_map); printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name); __dlm_print_nodes(dlm); /* notify anything attached to the heartbeat events */ dlm_hb_event_notify_attached(dlm, node, 0); spin_unlock(&dlm->spinlock); dlm_put(dlm); return 0; } static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, u32 msg_type, unsigned int node) { int status; struct dlm_exit_domain leave_msg; mlog(0, "%s: Sending domain exit message %u to node %u\n", dlm->name, msg_type, node); memset(&leave_msg, 0, sizeof(leave_msg)); leave_msg.node_idx = dlm->node_num; status = o2net_send_message(msg_type, dlm->key, &leave_msg, 
sizeof(leave_msg), node, NULL); if (status < 0) mlog(ML_ERROR, "Error %d sending domain exit message %u " "to node %u on domain %s\n", status, msg_type, node, dlm->name); return status; } static void dlm_begin_exit_domain(struct dlm_ctxt *dlm) { int node = -1; /* Support for begin exit domain was added in 1.2 */ if (dlm->dlm_locking_proto.pv_major == 1 && dlm->dlm_locking_proto.pv_minor < 2) return; /* * Unlike DLM_EXIT_DOMAIN_MSG, DLM_BEGIN_EXIT_DOMAIN_MSG is purely * informational. Meaning if a node does not receive the message, * so be it. */ spin_lock(&dlm->spinlock); while (1) { node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1); if (node >= O2NM_MAX_NODES) break; if (node == dlm->node_num) continue; spin_unlock(&dlm->spinlock); dlm_send_one_domain_exit(dlm, DLM_BEGIN_EXIT_DOMAIN_MSG, node); spin_lock(&dlm->spinlock); } spin_unlock(&dlm->spinlock); } static void dlm_leave_domain(struct dlm_ctxt *dlm) { int node, clear_node, status; /* At this point we've migrated away all our locks and won't * accept mastership of new ones. The dlm is responsible for * almost nothing now. We make sure not to confuse any joining * nodes and then commence shutdown procedure. */ spin_lock(&dlm->spinlock); /* Clear ourselves from the domain map */ clear_bit(dlm->node_num, dlm->domain_map); while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0)) < O2NM_MAX_NODES) { /* Drop the dlm spinlock. This is safe wrt the domain_map. * -nodes cannot be added now as the * query_join_handlers knows to respond with OK_NO_MAP * -we catch the right network errors if a node is * removed from the map while we're sending him the * exit message. 
*/ spin_unlock(&dlm->spinlock); clear_node = 1; status = dlm_send_one_domain_exit(dlm, DLM_EXIT_DOMAIN_MSG, node); if (status < 0 && status != -ENOPROTOOPT && status != -ENOTCONN) { mlog(ML_NOTICE, "Error %d sending domain exit message " "to node %d\n", status, node); /* Not sure what to do here but lets sleep for * a bit in case this was a transient * error... */ msleep(DLM_DOMAIN_BACKOFF_MS); clear_node = 0; } spin_lock(&dlm->spinlock); /* If we're not clearing the node bit then we intend * to loop back around to try again. */ if (clear_node) clear_bit(node, dlm->domain_map); } spin_unlock(&dlm->spinlock); } int dlm_joined(struct dlm_ctxt *dlm) { int ret = 0; spin_lock(&dlm_domain_lock); if (dlm->dlm_state == DLM_CTXT_JOINED) ret = 1; spin_unlock(&dlm_domain_lock); return ret; } int dlm_shutting_down(struct dlm_ctxt *dlm) { int ret = 0; spin_lock(&dlm_domain_lock); if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN) ret = 1; spin_unlock(&dlm_domain_lock); return ret; } void dlm_unregister_domain(struct dlm_ctxt *dlm) { int leave = 0; struct dlm_lock_resource *res; spin_lock(&dlm_domain_lock); BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED); BUG_ON(!dlm->num_joins); dlm->num_joins--; if (!dlm->num_joins) { /* We mark it "in shutdown" now so new register * requests wait until we've completely left the * domain. Don't use DLM_CTXT_LEAVING yet as we still * want new domain joins to communicate with us at * least until we've completed migration of our * resources. */ dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN; leave = 1; } spin_unlock(&dlm_domain_lock); if (leave) { mlog(0, "shutting down domain %s\n", dlm->name); dlm_begin_exit_domain(dlm); /* We changed dlm state, notify the thread */ dlm_kick_thread(dlm, NULL); while (dlm_migrate_all_locks(dlm)) { /* Give dlm_thread time to purge the lockres' */ msleep(500); mlog(0, "%s: more migration to do\n", dlm->name); } /* This list should be empty. 
If not, print remaining lockres */ if (!list_empty(&dlm->tracking_list)) { mlog(ML_ERROR, "Following lockres' are still on the " "tracking list:\n"); list_for_each_entry(res, &dlm->tracking_list, tracking) dlm_print_one_lock_resource(res); } dlm_mark_domain_leaving(dlm); dlm_leave_domain(dlm); printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name); dlm_force_free_mles(dlm); dlm_complete_dlm_shutdown(dlm); } dlm_put(dlm); } EXPORT_SYMBOL_GPL(dlm_unregister_domain); static int dlm_query_join_proto_check(char *proto_type, int node, struct dlm_protocol_version *ours, struct dlm_protocol_version *request) { int rc; struct dlm_protocol_version proto = *request; if (!dlm_protocol_compare(ours, &proto)) { mlog(0, "node %u wanted to join with %s locking protocol " "%u.%u, we respond with %u.%u\n", node, proto_type, request->pv_major, request->pv_minor, proto.pv_major, proto.pv_minor); request->pv_minor = proto.pv_minor; rc = 0; } else { mlog(ML_NOTICE, "Node %u wanted to join with %s locking " "protocol %u.%u, but we have %u.%u, disallowing\n", node, proto_type, request->pv_major, request->pv_minor, ours->pv_major, ours->pv_minor); rc = 1; } return rc; } /* * struct dlm_query_join_packet is made up of four one-byte fields. They * are effectively in big-endian order already. However, little-endian * machines swap them before putting the packet on the wire (because * query_join's response is a status, and that status is treated as a u32 * on the wire). Thus, a big-endian and little-endian machines will treat * this structure differently. * * The solution is to have little-endian machines swap the structure when * converting from the structure to the u32 representation. This will * result in the structure having the correct format on the wire no matter * the host endian format. 
*/ static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet, u32 *wire) { union dlm_query_join_response response; response.packet = *packet; *wire = cpu_to_be32(response.intval); } static void dlm_query_join_wire_to_packet(u32 wire, struct dlm_query_join_packet *packet) { union dlm_query_join_response response; response.intval = cpu_to_be32(wire); *packet = response.packet; } static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_query_join_request *query; struct dlm_query_join_packet packet = { .code = JOIN_DISALLOW, }; struct dlm_ctxt *dlm = NULL; u32 response; u8 nodenum; query = (struct dlm_query_join_request *) msg->buf; mlog(0, "node %u wants to join domain %s\n", query->node_idx, query->domain); /* * If heartbeat doesn't consider the node live, tell it * to back off and try again. This gives heartbeat a chance * to catch up. */ if (!o2hb_check_node_heartbeating(query->node_idx)) { mlog(0, "node %u is not in our live map yet\n", query->node_idx); packet.code = JOIN_DISALLOW; goto respond; } packet.code = JOIN_OK_NO_MAP; spin_lock(&dlm_domain_lock); dlm = __dlm_lookup_domain_full(query->domain, query->name_len); if (!dlm) goto unlock_respond; /* * There is a small window where the joining node may not see the * node(s) that just left but still part of the cluster. DISALLOW * join request if joining node has different node map. */ nodenum=0; while (nodenum < O2NM_MAX_NODES) { if (test_bit(nodenum, dlm->domain_map)) { if (!byte_test_bit(nodenum, query->node_map)) { mlog(0, "disallow join as node %u does not " "have node %u in its nodemap\n", query->node_idx, nodenum); packet.code = JOIN_DISALLOW; goto unlock_respond; } } nodenum++; } /* Once the dlm ctxt is marked as leaving then we don't want * to be put in someone's domain map. * Also, explicitly disallow joining at certain troublesome * times (ie. during recovery). 
*/ if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { int bit = query->node_idx; spin_lock(&dlm->spinlock); if (dlm->dlm_state == DLM_CTXT_NEW && dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) { /*If this is a brand new context and we * haven't started our join process yet, then * the other node won the race. */ packet.code = JOIN_OK_NO_MAP; } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { /* Disallow parallel joins. */ packet.code = JOIN_DISALLOW; } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) { mlog(0, "node %u trying to join, but recovery " "is ongoing.\n", bit); packet.code = JOIN_DISALLOW; } else if (test_bit(bit, dlm->recovery_map)) { mlog(0, "node %u trying to join, but it " "still needs recovery.\n", bit); packet.code = JOIN_DISALLOW; } else if (test_bit(bit, dlm->domain_map)) { mlog(0, "node %u trying to join, but it " "is still in the domain! needs recovery?\n", bit); packet.code = JOIN_DISALLOW; } else { /* Alright we're fully a part of this domain * so we keep some state as to who's joining * and indicate to him that needs to be fixed * up. */ /* Make sure we speak compatible locking protocols. 
*/ if (dlm_query_join_proto_check("DLM", bit, &dlm->dlm_locking_proto, &query->dlm_proto)) { packet.code = JOIN_PROTOCOL_MISMATCH; } else if (dlm_query_join_proto_check("fs", bit, &dlm->fs_locking_proto, &query->fs_proto)) { packet.code = JOIN_PROTOCOL_MISMATCH; } else { packet.dlm_minor = query->dlm_proto.pv_minor; packet.fs_minor = query->fs_proto.pv_minor; packet.code = JOIN_OK; __dlm_set_joining_node(dlm, query->node_idx); } } spin_unlock(&dlm->spinlock); } unlock_respond: spin_unlock(&dlm_domain_lock); respond: mlog(0, "We respond with %u\n", packet.code); dlm_query_join_packet_to_wire(&packet, &response); return response; } static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_assert_joined *assert; struct dlm_ctxt *dlm = NULL; assert = (struct dlm_assert_joined *) msg->buf; mlog(0, "node %u asserts join on domain %s\n", assert->node_idx, assert->domain); spin_lock(&dlm_domain_lock); dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len); /* XXX should we consider no dlm ctxt an error? */ if (dlm) { spin_lock(&dlm->spinlock); /* Alright, this node has officially joined our * domain. Set him in the map and clean up our * leftover join state. 
*/ BUG_ON(dlm->joining_node != assert->node_idx); set_bit(assert->node_idx, dlm->domain_map); clear_bit(assert->node_idx, dlm->exit_domain_map); __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ", assert->node_idx, dlm->name); __dlm_print_nodes(dlm); /* notify anything attached to the heartbeat events */ dlm_hb_event_notify_attached(dlm, assert->node_idx, 1); spin_unlock(&dlm->spinlock); } spin_unlock(&dlm_domain_lock); return 0; } static int dlm_match_regions(struct dlm_ctxt *dlm, struct dlm_query_region *qr, char *local, int locallen) { char *remote = qr->qr_regions; char *l, *r; int localnr, i, j, foundit; int status = 0; if (!o2hb_global_heartbeat_active()) { if (qr->qr_numregions) { mlog(ML_ERROR, "Domain %s: Joining node %d has global " "heartbeat enabled but local node %d does not\n", qr->qr_domain, qr->qr_node, dlm->node_num); status = -EINVAL; } goto bail; } if (o2hb_global_heartbeat_active() && !qr->qr_numregions) { mlog(ML_ERROR, "Domain %s: Local node %d has global " "heartbeat enabled but joining node %d does not\n", qr->qr_domain, dlm->node_num, qr->qr_node); status = -EINVAL; goto bail; } r = remote; for (i = 0; i < qr->qr_numregions; ++i) { mlog(0, "Region %.*s\n", O2HB_MAX_REGION_NAME_LEN, r); r += O2HB_MAX_REGION_NAME_LEN; } localnr = min(O2NM_MAX_REGIONS, locallen/O2HB_MAX_REGION_NAME_LEN); localnr = o2hb_get_all_regions(local, (u8)localnr); /* compare local regions with remote */ l = local; for (i = 0; i < localnr; ++i) { foundit = 0; r = remote; for (j = 0; j <= qr->qr_numregions; ++j) { if (!memcmp(l, r, O2HB_MAX_REGION_NAME_LEN)) { foundit = 1; break; } r += O2HB_MAX_REGION_NAME_LEN; } if (!foundit) { status = -EINVAL; mlog(ML_ERROR, "Domain %s: Region '%.*s' registered " "in local node %d but not in joining node %d\n", qr->qr_domain, O2HB_MAX_REGION_NAME_LEN, l, dlm->node_num, qr->qr_node); goto bail; } l += O2HB_MAX_REGION_NAME_LEN; } /* compare remote with local regions */ r = 
remote; for (i = 0; i < qr->qr_numregions; ++i) { foundit = 0; l = local; for (j = 0; j < localnr; ++j) { if (!memcmp(r, l, O2HB_MAX_REGION_NAME_LEN)) { foundit = 1; break; } l += O2HB_MAX_REGION_NAME_LEN; } if (!foundit) { status = -EINVAL; mlog(ML_ERROR, "Domain %s: Region '%.*s' registered " "in joining node %d but not in local node %d\n", qr->qr_domain, O2HB_MAX_REGION_NAME_LEN, r, qr->qr_node, dlm->node_num); goto bail; } r += O2HB_MAX_REGION_NAME_LEN; } bail: return status; } static int dlm_send_regions(struct dlm_ctxt *dlm, unsigned long *node_map) { struct dlm_query_region *qr = NULL; int status, ret = 0, i; char *p; if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES) goto bail; qr = kzalloc(sizeof(struct dlm_query_region), GFP_KERNEL); if (!qr) { ret = -ENOMEM; mlog_errno(ret); goto bail; } qr->qr_node = dlm->node_num; qr->qr_namelen = strlen(dlm->name); memcpy(qr->qr_domain, dlm->name, qr->qr_namelen); /* if local hb, the numregions will be zero */ if (o2hb_global_heartbeat_active()) qr->qr_numregions = o2hb_get_all_regions(qr->qr_regions, O2NM_MAX_REGIONS); p = qr->qr_regions; for (i = 0; i < qr->qr_numregions; ++i, p += O2HB_MAX_REGION_NAME_LEN) mlog(0, "Region %.*s\n", O2HB_MAX_REGION_NAME_LEN, p); i = -1; while ((i = find_next_bit(node_map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { if (i == dlm->node_num) continue; mlog(0, "Sending regions to node %d\n", i); ret = o2net_send_message(DLM_QUERY_REGION, DLM_MOD_KEY, qr, sizeof(struct dlm_query_region), i, &status); if (ret >= 0) ret = status; if (ret) { mlog(ML_ERROR, "Region mismatch %d, node %d\n", ret, i); break; } } bail: kfree(qr); return ret; } static int dlm_query_region_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_query_region *qr; struct dlm_ctxt *dlm = NULL; char *local = NULL; int status = 0; int locked = 0; qr = (struct dlm_query_region *) msg->buf; mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node, qr->qr_domain); /* buffer 
used in dlm_mast_regions() */ local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL); if (!local) { status = -ENOMEM; goto bail; } status = -EINVAL; spin_lock(&dlm_domain_lock); dlm = __dlm_lookup_domain_full(qr->qr_domain, qr->qr_namelen); if (!dlm) { mlog(ML_ERROR, "Node %d queried hb regions on domain %s " "before join domain\n", qr->qr_node, qr->qr_domain); goto bail; } spin_lock(&dlm->spinlock); locked = 1; if (dlm->joining_node != qr->qr_node) { mlog(ML_ERROR, "Node %d queried hb regions on domain %s " "but joining node is %d\n", qr->qr_node, qr->qr_domain, dlm->joining_node); goto bail; } /* Support for global heartbeat was added in 1.1 */ if (dlm->dlm_locking_proto.pv_major == 1 && dlm->dlm_locking_proto.pv_minor == 0) { mlog(ML_ERROR, "Node %d queried hb regions on domain %s " "but active dlm protocol is %d.%d\n", qr->qr_node, qr->qr_domain, dlm->dlm_locking_proto.pv_major, dlm->dlm_locking_proto.pv_minor); goto bail; } status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions)); bail: if (locked) spin_unlock(&dlm->spinlock); spin_unlock(&dlm_domain_lock); kfree(local); return status; } static int dlm_match_nodes(struct dlm_ctxt *dlm, struct dlm_query_nodeinfo *qn) { struct o2nm_node *local; struct dlm_node_info *remote; int i, j; int status = 0; for (j = 0; j < qn->qn_numnodes; ++j) mlog(0, "Node %3d, %pI4:%u\n", qn->qn_nodes[j].ni_nodenum, &(qn->qn_nodes[j].ni_ipv4_address), ntohs(qn->qn_nodes[j].ni_ipv4_port)); for (i = 0; i < O2NM_MAX_NODES && !status; ++i) { local = o2nm_get_node_by_num(i); remote = NULL; for (j = 0; j < qn->qn_numnodes; ++j) { if (qn->qn_nodes[j].ni_nodenum == i) { remote = &(qn->qn_nodes[j]); break; } } if (!local && !remote) continue; if ((local && !remote) || (!local && remote)) status = -EINVAL; if (!status && ((remote->ni_nodenum != local->nd_num) || (remote->ni_ipv4_port != local->nd_ipv4_port) || (remote->ni_ipv4_address != local->nd_ipv4_address))) status = -EINVAL; if (status) { if (remote && !local) mlog(ML_ERROR, 
"Domain %s: Node %d (%pI4:%u) " "registered in joining node %d but not in " "local node %d\n", qn->qn_domain, remote->ni_nodenum, &(remote->ni_ipv4_address), ntohs(remote->ni_ipv4_port), qn->qn_nodenum, dlm->node_num); if (local && !remote) mlog(ML_ERROR, "Domain %s: Node %d (%pI4:%u) " "registered in local node %d but not in " "joining node %d\n", qn->qn_domain, local->nd_num, &(local->nd_ipv4_address), ntohs(local->nd_ipv4_port), dlm->node_num, qn->qn_nodenum); BUG_ON((!local && !remote)); } if (local) o2nm_node_put(local); } return status; } static int dlm_send_nodeinfo(struct dlm_ctxt *dlm, unsigned long *node_map) { struct dlm_query_nodeinfo *qn = NULL; struct o2nm_node *node; int ret = 0, status, count, i; if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES) goto bail; qn = kzalloc(sizeof(struct dlm_query_nodeinfo), GFP_KERNEL); if (!qn) { ret = -ENOMEM; mlog_errno(ret); goto bail; } for (i = 0, count = 0; i < O2NM_MAX_NODES; ++i) { node = o2nm_get_node_by_num(i); if (!node) continue; qn->qn_nodes[count].ni_nodenum = node->nd_num; qn->qn_nodes[count].ni_ipv4_port = node->nd_ipv4_port; qn->qn_nodes[count].ni_ipv4_address = node->nd_ipv4_address; mlog(0, "Node %3d, %pI4:%u\n", node->nd_num, &(node->nd_ipv4_address), ntohs(node->nd_ipv4_port)); ++count; o2nm_node_put(node); } qn->qn_nodenum = dlm->node_num; qn->qn_numnodes = count; qn->qn_namelen = strlen(dlm->name); memcpy(qn->qn_domain, dlm->name, qn->qn_namelen); i = -1; while ((i = find_next_bit(node_map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { if (i == dlm->node_num) continue; mlog(0, "Sending nodeinfo to node %d\n", i); ret = o2net_send_message(DLM_QUERY_NODEINFO, DLM_MOD_KEY, qn, sizeof(struct dlm_query_nodeinfo), i, &status); if (ret >= 0) ret = status; if (ret) { mlog(ML_ERROR, "node mismatch %d, node %d\n", ret, i); break; } } bail: kfree(qn); return ret; } static int dlm_query_nodeinfo_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_query_nodeinfo 
*qn; struct dlm_ctxt *dlm = NULL; int locked = 0, status = -EINVAL; qn = (struct dlm_query_nodeinfo *) msg->buf; mlog(0, "Node %u queries nodes on domain %s\n", qn->qn_nodenum, qn->qn_domain); spin_lock(&dlm_domain_lock); dlm = __dlm_lookup_domain_full(qn->qn_domain, qn->qn_namelen); if (!dlm) { mlog(ML_ERROR, "Node %d queried nodes on domain %s before " "join domain\n", qn->qn_nodenum, qn->qn_domain); goto bail; } spin_lock(&dlm->spinlock); locked = 1; if (dlm->joining_node != qn->qn_nodenum) { mlog(ML_ERROR, "Node %d queried nodes on domain %s but " "joining node is %d\n", qn->qn_nodenum, qn->qn_domain, dlm->joining_node); goto bail; } /* Support for node query was added in 1.1 */ if (dlm->dlm_locking_proto.pv_major == 1 && dlm->dlm_locking_proto.pv_minor == 0) { mlog(ML_ERROR, "Node %d queried nodes on domain %s " "but active dlm protocol is %d.%d\n", qn->qn_nodenum, qn->qn_domain, dlm->dlm_locking_proto.pv_major, dlm->dlm_locking_proto.pv_minor); goto bail; } status = dlm_match_nodes(dlm, qn); bail: if (locked) spin_unlock(&dlm->spinlock); spin_unlock(&dlm_domain_lock); return status; } static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_cancel_join *cancel; struct dlm_ctxt *dlm = NULL; cancel = (struct dlm_cancel_join *) msg->buf; mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx, cancel->domain); spin_lock(&dlm_domain_lock); dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len); if (dlm) { spin_lock(&dlm->spinlock); /* Yikes, this guy wants to cancel his join. No * problem, we simply cleanup our join state. 
*/ BUG_ON(dlm->joining_node != cancel->node_idx); __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); spin_unlock(&dlm->spinlock); } spin_unlock(&dlm_domain_lock); return 0; } static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm, unsigned int node) { int status; struct dlm_cancel_join cancel_msg; memset(&cancel_msg, 0, sizeof(cancel_msg)); cancel_msg.node_idx = dlm->node_num; cancel_msg.name_len = strlen(dlm->name); memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len); status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY, &cancel_msg, sizeof(cancel_msg), node, NULL); if (status < 0) { mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " "node %u\n", status, DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY, node); goto bail; } bail: return status; } /* map_size should be in bytes. */ static int dlm_send_join_cancels(struct dlm_ctxt *dlm, unsigned long *node_map, unsigned int map_size) { int status, tmpstat; unsigned int node; if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long))) { mlog(ML_ERROR, "map_size %u != BITS_TO_LONGS(O2NM_MAX_NODES) %u\n", map_size, (unsigned)BITS_TO_LONGS(O2NM_MAX_NODES)); return -EINVAL; } status = 0; node = -1; while ((node = find_next_bit(node_map, O2NM_MAX_NODES, node + 1)) < O2NM_MAX_NODES) { if (node == dlm->node_num) continue; tmpstat = dlm_send_one_join_cancel(dlm, node); if (tmpstat) { mlog(ML_ERROR, "Error return %d cancelling join on " "node %d\n", tmpstat, node); if (!status) status = tmpstat; } } if (status) mlog_errno(status); return status; } static int dlm_request_join(struct dlm_ctxt *dlm, int node, enum dlm_query_join_response_code *response) { int status; struct dlm_query_join_request join_msg; struct dlm_query_join_packet packet; u32 join_resp; mlog(0, "querying node %d\n", node); memset(&join_msg, 0, sizeof(join_msg)); join_msg.node_idx = dlm->node_num; join_msg.name_len = strlen(dlm->name); memcpy(join_msg.domain, dlm->name, join_msg.name_len); join_msg.dlm_proto = 
dlm->dlm_locking_proto; join_msg.fs_proto = dlm->fs_locking_proto; /* copy live node map to join message */ byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES); status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg, sizeof(join_msg), node, &join_resp); if (status < 0 && status != -ENOPROTOOPT) { mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " "node %u\n", status, DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, node); goto bail; } dlm_query_join_wire_to_packet(join_resp, &packet); /* -ENOPROTOOPT from the net code means the other side isn't listening for our message type -- that's fine, it means his dlm isn't up, so we can consider him a 'yes' but not joined into the domain. */ if (status == -ENOPROTOOPT) { status = 0; *response = JOIN_OK_NO_MAP; } else if (packet.code == JOIN_DISALLOW || packet.code == JOIN_OK_NO_MAP) { *response = packet.code; } else if (packet.code == JOIN_PROTOCOL_MISMATCH) { mlog(ML_NOTICE, "This node requested DLM locking protocol %u.%u and " "filesystem locking protocol %u.%u. 
At least one of " "the protocol versions on node %d is not compatible, " "disconnecting\n", dlm->dlm_locking_proto.pv_major, dlm->dlm_locking_proto.pv_minor, dlm->fs_locking_proto.pv_major, dlm->fs_locking_proto.pv_minor, node); status = -EPROTO; *response = packet.code; } else if (packet.code == JOIN_OK) { *response = packet.code; /* Use the same locking protocol as the remote node */ dlm->dlm_locking_proto.pv_minor = packet.dlm_minor; dlm->fs_locking_proto.pv_minor = packet.fs_minor; mlog(0, "Node %d responds JOIN_OK with DLM locking protocol " "%u.%u and fs locking protocol %u.%u\n", node, dlm->dlm_locking_proto.pv_major, dlm->dlm_locking_proto.pv_minor, dlm->fs_locking_proto.pv_major, dlm->fs_locking_proto.pv_minor); } else { status = -EINVAL; mlog(ML_ERROR, "invalid response %d from node %u\n", packet.code, node); } mlog(0, "status %d, node %d response is %d\n", status, node, *response); bail: return status; } static int dlm_send_one_join_assert(struct dlm_ctxt *dlm, unsigned int node) { int status; struct dlm_assert_joined assert_msg; mlog(0, "Sending join assert to node %u\n", node); memset(&assert_msg, 0, sizeof(assert_msg)); assert_msg.node_idx = dlm->node_num; assert_msg.name_len = strlen(dlm->name); memcpy(assert_msg.domain, dlm->name, assert_msg.name_len); status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY, &assert_msg, sizeof(assert_msg), node, NULL); if (status < 0) mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " "node %u\n", status, DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY, node); return status; } static void dlm_send_join_asserts(struct dlm_ctxt *dlm, unsigned long *node_map) { int status, node, live; status = 0; node = -1; while ((node = find_next_bit(node_map, O2NM_MAX_NODES, node + 1)) < O2NM_MAX_NODES) { if (node == dlm->node_num) continue; do { /* It is very important that this message be * received so we spin until either the node * has died or it gets the message. 
*/ status = dlm_send_one_join_assert(dlm, node); spin_lock(&dlm->spinlock); live = test_bit(node, dlm->live_nodes_map); spin_unlock(&dlm->spinlock); if (status) { mlog(ML_ERROR, "Error return %d asserting " "join on node %d\n", status, node); /* give us some time between errors... */ if (live) msleep(DLM_DOMAIN_BACKOFF_MS); } } while (status && live); } } struct domain_join_ctxt { unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; }; static int dlm_should_restart_join(struct dlm_ctxt *dlm, struct domain_join_ctxt *ctxt, enum dlm_query_join_response_code response) { int ret; if (response == JOIN_DISALLOW) { mlog(0, "Latest response of disallow -- should restart\n"); return 1; } spin_lock(&dlm->spinlock); /* For now, we restart the process if the node maps have * changed at all */ ret = memcmp(ctxt->live_map, dlm->live_nodes_map, sizeof(dlm->live_nodes_map)); spin_unlock(&dlm->spinlock); if (ret) mlog(0, "Node maps changed -- should restart\n"); return ret; } static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) { int status = 0, tmpstat, node; struct domain_join_ctxt *ctxt; enum dlm_query_join_response_code response = JOIN_DISALLOW; mlog(0, "%p", dlm); ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) { status = -ENOMEM; mlog_errno(status); goto bail; } /* group sem locking should work for us here -- we're already * registered for heartbeat events so filling this should be * atomic wrt getting those handlers called. 
*/ o2hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map)); spin_lock(&dlm->spinlock); memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map)); __dlm_set_joining_node(dlm, dlm->node_num); spin_unlock(&dlm->spinlock); node = -1; while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES, node + 1)) < O2NM_MAX_NODES) { if (node == dlm->node_num) continue; status = dlm_request_join(dlm, node, &response); if (status < 0) { mlog_errno(status); goto bail; } /* Ok, either we got a response or the node doesn't have a * dlm up. */ if (response == JOIN_OK) set_bit(node, ctxt->yes_resp_map); if (dlm_should_restart_join(dlm, ctxt, response)) { status = -EAGAIN; goto bail; } } mlog(0, "Yay, done querying nodes!\n"); /* Yay, everyone agree's we can join the domain. My domain is * comprised of all nodes who were put in the * yes_resp_map. Copy that into our domain map and send a join * assert message to clean up everyone elses state. */ spin_lock(&dlm->spinlock); memcpy(dlm->domain_map, ctxt->yes_resp_map, sizeof(ctxt->yes_resp_map)); set_bit(dlm->node_num, dlm->domain_map); spin_unlock(&dlm->spinlock); /* Support for global heartbeat and node info was added in 1.1 */ if (dlm->dlm_locking_proto.pv_major > 1 || dlm->dlm_locking_proto.pv_minor > 0) { status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); if (status) { mlog_errno(status); goto bail; } status = dlm_send_regions(dlm, ctxt->yes_resp_map); if (status) { mlog_errno(status); goto bail; } } dlm_send_join_asserts(dlm, ctxt->yes_resp_map); /* Joined state *must* be set before the joining node * information, otherwise the query_join handler may read no * current joiner but a state of NEW and tell joining nodes * we're not in the domain. 
*/ spin_lock(&dlm_domain_lock); dlm->dlm_state = DLM_CTXT_JOINED; dlm->num_joins++; spin_unlock(&dlm_domain_lock); bail: spin_lock(&dlm->spinlock); __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); if (!status) { printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name); __dlm_print_nodes(dlm); } spin_unlock(&dlm->spinlock); if (ctxt) { /* Do we need to send a cancel message to any nodes? */ if (status < 0) { tmpstat = dlm_send_join_cancels(dlm, ctxt->yes_resp_map, sizeof(ctxt->yes_resp_map)); if (tmpstat < 0) mlog_errno(tmpstat); } kfree(ctxt); } mlog(0, "returning %d\n", status); return status; } static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm) { o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up); o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down); o2net_unregister_handler_list(&dlm->dlm_domain_handlers); } static int dlm_register_domain_handlers(struct dlm_ctxt *dlm) { int status; mlog(0, "registering handlers.\n"); o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB, dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI); status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down); if (status) goto bail; o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB, dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI); status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up); if (status) goto bail; status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key, sizeof(struct dlm_master_request), dlm_master_request_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key, sizeof(struct dlm_assert_master), dlm_assert_master_handler, dlm, dlm_assert_master_post_handler, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key, sizeof(struct dlm_create_lock), dlm_create_lock_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key, 
DLM_CONVERT_LOCK_MAX_LEN, dlm_convert_lock_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key, DLM_UNLOCK_LOCK_MAX_LEN, dlm_unlock_lock_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key, DLM_PROXY_AST_MAX_LEN, dlm_proxy_ast_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key, sizeof(struct dlm_exit_domain), dlm_exit_domain_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key, sizeof(struct dlm_deref_lockres), dlm_deref_lockres_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key, sizeof(struct dlm_migrate_request), dlm_migrate_request_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key, DLM_MIG_LOCKRES_MAX_LEN, dlm_mig_lockres_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key, sizeof(struct dlm_master_requery), dlm_master_requery_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key, sizeof(struct dlm_lock_request), dlm_request_all_locks_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key, sizeof(struct dlm_reco_data_done), dlm_reco_data_done_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key, sizeof(struct dlm_begin_reco), dlm_begin_reco_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = 
o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key, sizeof(struct dlm_finalize_reco), dlm_finalize_reco_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; status = o2net_register_handler(DLM_BEGIN_EXIT_DOMAIN_MSG, dlm->key, sizeof(struct dlm_exit_domain), dlm_begin_exit_domain_handler, dlm, NULL, &dlm->dlm_domain_handlers); if (status) goto bail; bail: if (status) dlm_unregister_domain_handlers(dlm); return status; } static int dlm_join_domain(struct dlm_ctxt *dlm) { int status; unsigned int backoff; unsigned int total_backoff = 0; BUG_ON(!dlm); mlog(0, "Join domain %s\n", dlm->name); status = dlm_register_domain_handlers(dlm); if (status) { mlog_errno(status); goto bail; } status = dlm_debug_init(dlm); if (status < 0) { mlog_errno(status); goto bail; } status = dlm_launch_thread(dlm); if (status < 0) { mlog_errno(status); goto bail; } status = dlm_launch_recovery_thread(dlm); if (status < 0) { mlog_errno(status); goto bail; } dlm->dlm_worker = create_singlethread_workqueue("dlm_wq"); if (!dlm->dlm_worker) { status = -ENOMEM; mlog_errno(status); goto bail; } do { status = dlm_try_to_join_domain(dlm); /* If we're racing another node to the join, then we * need to back off temporarily and let them * complete. */ #define DLM_JOIN_TIMEOUT_MSECS 90000 if (status == -EAGAIN) { if (signal_pending(current)) { status = -ERESTARTSYS; goto bail; } if (total_backoff > msecs_to_jiffies(DLM_JOIN_TIMEOUT_MSECS)) { status = -ERESTARTSYS; mlog(ML_NOTICE, "Timed out joining dlm domain " "%s after %u msecs\n", dlm->name, jiffies_to_msecs(total_backoff)); goto bail; } /* * <chip> After you! * <dale> No, after you! * <chip> I insist! * <dale> But you first! * ... 
*/ backoff = (unsigned int)(jiffies & 0x3); backoff *= DLM_DOMAIN_BACKOFF_MS; total_backoff += backoff; mlog(0, "backoff %d\n", backoff); msleep(backoff); } } while (status == -EAGAIN); if (status < 0) { mlog_errno(status); goto bail; } status = 0; bail: wake_up(&dlm_domain_events); if (status) { dlm_unregister_domain_handlers(dlm); dlm_debug_shutdown(dlm); dlm_complete_thread(dlm); dlm_complete_recovery_thread(dlm); dlm_destroy_dlm_worker(dlm); } return status; } static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, u32 key) { int i; int ret; struct dlm_ctxt *dlm = NULL; dlm = kzalloc(sizeof(*dlm), GFP_KERNEL); if (!dlm) { mlog_errno(-ENOMEM); goto leave; } dlm->name = kstrdup(domain, GFP_KERNEL); if (dlm->name == NULL) { mlog_errno(-ENOMEM); kfree(dlm); dlm = NULL; goto leave; } dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES); if (!dlm->lockres_hash) { mlog_errno(-ENOMEM); kfree(dlm->name); kfree(dlm); dlm = NULL; goto leave; } for (i = 0; i < DLM_HASH_BUCKETS; i++) INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i)); dlm->master_hash = (struct hlist_head **) dlm_alloc_pagevec(DLM_HASH_PAGES); if (!dlm->master_hash) { mlog_errno(-ENOMEM); dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES); kfree(dlm->name); kfree(dlm); dlm = NULL; goto leave; } for (i = 0; i < DLM_HASH_BUCKETS; i++) INIT_HLIST_HEAD(dlm_master_hash(dlm, i)); dlm->key = key; dlm->node_num = o2nm_this_node(); ret = dlm_create_debugfs_subroot(dlm); if (ret < 0) { dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES); dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES); kfree(dlm->name); kfree(dlm); dlm = NULL; goto leave; } spin_lock_init(&dlm->spinlock); spin_lock_init(&dlm->master_lock); spin_lock_init(&dlm->ast_lock); spin_lock_init(&dlm->track_lock); INIT_LIST_HEAD(&dlm->list); INIT_LIST_HEAD(&dlm->dirty_list); INIT_LIST_HEAD(&dlm->reco.resources); INIT_LIST_HEAD(&dlm->reco.received); INIT_LIST_HEAD(&dlm->reco.node_data); 
INIT_LIST_HEAD(&dlm->purge_list); INIT_LIST_HEAD(&dlm->dlm_domain_handlers); INIT_LIST_HEAD(&dlm->tracking_list); dlm->reco.state = 0; INIT_LIST_HEAD(&dlm->pending_asts); INIT_LIST_HEAD(&dlm->pending_basts); mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n", dlm->recovery_map, &(dlm->recovery_map[0])); memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map)); memset(dlm->live_nodes_map, 0, sizeof(dlm->live_nodes_map)); memset(dlm->domain_map, 0, sizeof(dlm->domain_map)); dlm->dlm_thread_task = NULL; dlm->dlm_reco_thread_task = NULL; dlm->dlm_worker = NULL; init_waitqueue_head(&dlm->dlm_thread_wq); init_waitqueue_head(&dlm->dlm_reco_thread_wq); init_waitqueue_head(&dlm->reco.event); init_waitqueue_head(&dlm->ast_wq); init_waitqueue_head(&dlm->migration_wq); INIT_LIST_HEAD(&dlm->mle_hb_events); dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN; init_waitqueue_head(&dlm->dlm_join_events); dlm->reco.new_master = O2NM_INVALID_NODE_NUM; dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; atomic_set(&dlm->res_tot_count, 0); atomic_set(&dlm->res_cur_count, 0); for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) { atomic_set(&dlm->mle_tot_count[i], 0); atomic_set(&dlm->mle_cur_count[i], 0); } spin_lock_init(&dlm->work_lock); INIT_LIST_HEAD(&dlm->work_list); INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work); kref_init(&dlm->dlm_refs); dlm->dlm_state = DLM_CTXT_NEW; INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks); mlog(0, "context init: refcount %u\n", atomic_read(&dlm->dlm_refs.refcount)); leave: return dlm; } /* * Compare a requested locking protocol version against the current one. * * If the major numbers are different, they are incompatible. * If the current minor is greater than the request, they are incompatible. * If the current minor is less than or equal to the request, they are * compatible, and the requester should run at the current minor version. 
*/
static int dlm_protocol_compare(struct dlm_protocol_version *existing,
				struct dlm_protocol_version *request)
{
	/* Different major versions are always incompatible. */
	if (existing->pv_major != request->pv_major)
		return 1;

	/* A requester asking for an older minor than we run cannot be
	 * supported. */
	if (existing->pv_minor > request->pv_minor)
		return 1;

	/* Compatible: downgrade the request in place so the caller sees
	 * the negotiated (current) minor version. */
	if (existing->pv_minor < request->pv_minor)
		request->pv_minor = existing->pv_minor;

	return 0;
}

/*
 * dlm_register_domain: one-time setup per "domain".
 *
 * The filesystem passes in the requested locking version via proto.
 * If registration was successful, proto will contain the negotiated
 * locking protocol.
 *
 * Returns the (referenced) dlm context on success, or an ERR_PTR()
 * encoding a negative errno on failure.
 */
struct dlm_ctxt * dlm_register_domain(const char *domain,
			       u32 key,
			       struct dlm_protocol_version *fs_proto)
{
	int ret;
	struct dlm_ctxt *dlm = NULL;
	struct dlm_ctxt *new_ctxt = NULL;

	if (strlen(domain) >= O2NM_MAX_NAME_LEN) {
		ret = -ENAMETOOLONG;
		mlog(ML_ERROR, "domain name length too long\n");
		goto leave;
	}

	mlog(0, "register called for domain \"%s\"\n", domain);

retry:
	dlm = NULL;
	if (signal_pending(current)) {
		ret = -ERESTARTSYS;
		mlog_errno(ret);
		goto leave;
	}

	spin_lock(&dlm_domain_lock);
	dlm = __dlm_lookup_domain(domain);
	if (dlm) {
		/* A context for this domain already exists.  If it is
		 * still joining, wait for it and retry the lookup. */
		if (dlm->dlm_state != DLM_CTXT_JOINED) {
			spin_unlock(&dlm_domain_lock);
			mlog(0, "This ctxt is not joined yet!\n");
			wait_event_interruptible(dlm_domain_events,
						 dlm_wait_on_domain_helper(
							 domain));
			goto retry;
		}

		if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
			spin_unlock(&dlm_domain_lock);
			mlog(ML_ERROR,
			     "Requested locking protocol version is not "
			     "compatible with already registered domain "
			     "\"%s\"\n", domain);
			ret = -EPROTO;
			goto leave;
		}

		/* Existing, compatible domain: just take a reference. */
		__dlm_get(dlm);
		dlm->num_joins++;
		spin_unlock(&dlm_domain_lock);

		ret = 0;
		goto leave;
	}

	/* doesn't exist -- allocate outside the lock, then retry the
	 * lookup in case we raced another registration. */
	if (!new_ctxt) {
		spin_unlock(&dlm_domain_lock);

		new_ctxt = dlm_alloc_ctxt(domain, key);
		if (new_ctxt)
			goto retry;

		ret = -ENOMEM;
		mlog_errno(ret);
		goto leave;
	}

	/* a little variable switch-a-roo here... */
	dlm = new_ctxt;
	new_ctxt = NULL;

	/* add the new domain */
	list_add_tail(&dlm->list, &dlm_domains);
	spin_unlock(&dlm_domain_lock);

	/*
	 * Pass the locking protocol version into the join.  If the join
	 * succeeds, it will have the negotiated protocol set.
	 */
	dlm->dlm_locking_proto = dlm_protocol;
	dlm->fs_locking_proto = *fs_proto;

	ret = dlm_join_domain(dlm);
	if (ret) {
		mlog_errno(ret);
		dlm_put(dlm);
		goto leave;
	}

	/* Tell the caller what locking protocol we negotiated */
	*fs_proto = dlm->fs_locking_proto;

	ret = 0;
leave:
	if (new_ctxt)
		dlm_free_ctxt_mem(new_ctxt);

	if (ret < 0)
		dlm = ERR_PTR(ret);

	return dlm;
}
EXPORT_SYMBOL_GPL(dlm_register_domain);

/* Handlers for the module-global (pre-domain) join messages. */
static LIST_HEAD(dlm_join_handlers);

static void dlm_unregister_net_handlers(void)
{
	o2net_unregister_handler_list(&dlm_join_handlers);
}

/* Register the o2net message handlers used during domain join.
 * On any failure, everything registered so far is torn down. */
static int dlm_register_net_handlers(void)
{
	int status = 0;

	status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
					sizeof(struct dlm_query_join_request),
					dlm_query_join_handler,
					NULL, NULL, &dlm_join_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
					sizeof(struct dlm_assert_joined),
					dlm_assert_joined_handler,
					NULL, NULL, &dlm_join_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
					sizeof(struct dlm_cancel_join),
					dlm_cancel_join_handler,
					NULL, NULL, &dlm_join_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_QUERY_REGION, DLM_MOD_KEY,
					sizeof(struct dlm_query_region),
					dlm_query_region_handler,
					NULL, NULL, &dlm_join_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_QUERY_NODEINFO, DLM_MOD_KEY,
					sizeof(struct dlm_query_nodeinfo),
					dlm_query_nodeinfo_handler,
					NULL, NULL, &dlm_join_handlers);
bail:
	if (status < 0)
		dlm_unregister_net_handlers();

	return status;
}

/* Domain eviction callback handling.
* * The file system requires notification of node death *before* the * dlm completes it's recovery work, otherwise it may be able to * acquire locks on resources requiring recovery. Since the dlm can * evict a node from it's domain *before* heartbeat fires, a similar * mechanism is required. */ /* Eviction is not expected to happen often, so a per-domain lock is * not necessary. Eviction callbacks are allowed to sleep for short * periods of time. */ static DECLARE_RWSEM(dlm_callback_sem); void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, int node_num) { struct list_head *iter; struct dlm_eviction_cb *cb; down_read(&dlm_callback_sem); list_for_each(iter, &dlm->dlm_eviction_callbacks) { cb = list_entry(iter, struct dlm_eviction_cb, ec_item); cb->ec_func(node_num, cb->ec_data); } up_read(&dlm_callback_sem); } void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb, dlm_eviction_func *f, void *data) { INIT_LIST_HEAD(&cb->ec_item); cb->ec_func = f; cb->ec_data = data; } EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb); void dlm_register_eviction_cb(struct dlm_ctxt *dlm, struct dlm_eviction_cb *cb) { down_write(&dlm_callback_sem); list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks); up_write(&dlm_callback_sem); } EXPORT_SYMBOL_GPL(dlm_register_eviction_cb); void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb) { down_write(&dlm_callback_sem); list_del_init(&cb->ec_item); up_write(&dlm_callback_sem); } EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb); static int __init dlm_init(void) { int status; dlm_print_version(); status = dlm_init_mle_cache(); if (status) { mlog(ML_ERROR, "Could not create o2dlm_mle slabcache\n"); goto error; } status = dlm_init_master_caches(); if (status) { mlog(ML_ERROR, "Could not create o2dlm_lockres and " "o2dlm_lockname slabcaches\n"); goto error; } status = dlm_init_lock_cache(); if (status) { mlog(ML_ERROR, "Count not create o2dlm_lock slabcache\n"); goto error; } status = dlm_register_net_handlers(); if (status) { mlog(ML_ERROR, 
"Unable to register network handlers\n"); goto error; } status = dlm_create_debugfs_root(); if (status) goto error; return 0; error: dlm_unregister_net_handlers(); dlm_destroy_lock_cache(); dlm_destroy_master_caches(); dlm_destroy_mle_cache(); return -1; } static void __exit dlm_exit (void) { dlm_destroy_debugfs_root(); dlm_unregister_net_handlers(); dlm_destroy_lock_cache(); dlm_destroy_master_caches(); dlm_destroy_mle_cache(); } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); module_init(dlm_init); module_exit(dlm_exit);
gpl-2.0
TeamOrion-Devices/kernel_htc_msm8974
arch/mn10300/unit-asb2364/irq-fpga.c
10155
2646
/* ASB2364 FPGA interrupt multiplexing
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <unit/fpga-regs.h>

/*
 * FPGA PIC operations
 *
 * The FPGA multiplexes several board interrupt sources onto one CPU
 * IRQ line.  Each source has its own MASK and IRQ (ack) registers,
 * indexed by (irq - NR_CPU_IRQS).  Writing 0x0001 to MASK disables a
 * source and 0x0000 enables it; writing 0x0001 to IRQ acknowledges a
 * pending interrupt.  SyncExBus() after every write flushes it out to
 * the external bus before we proceed.
 */

/* Disable delivery of one FPGA-multiplexed interrupt source. */
static void asb2364_fpga_mask(struct irq_data *d)
{
	ASB2364_FPGA_REG_MASK(d->irq - NR_CPU_IRQS) = 0x0001;
	SyncExBus();
}

/* Acknowledge a pending interrupt from one source. */
static void asb2364_fpga_ack(struct irq_data *d)
{
	ASB2364_FPGA_REG_IRQ(d->irq - NR_CPU_IRQS) = 0x0001;
	SyncExBus();
}

/* Mask and then acknowledge one source (mask first, so no new
 * delivery can race the ack). */
static void asb2364_fpga_mask_ack(struct irq_data *d)
{
	ASB2364_FPGA_REG_MASK(d->irq - NR_CPU_IRQS) = 0x0001;
	SyncExBus();
	ASB2364_FPGA_REG_IRQ(d->irq - NR_CPU_IRQS) = 0x0001;
	SyncExBus();
}

/* Re-enable delivery of one FPGA-multiplexed interrupt source. */
static void asb2364_fpga_unmask(struct irq_data *d)
{
	ASB2364_FPGA_REG_MASK(d->irq - NR_CPU_IRQS) = 0x0000;
	SyncExBus();
}

static struct irq_chip asb2364_fpga_pic = {
	.name		= "fpga",
	.irq_ack	= asb2364_fpga_ack,
	.irq_mask	= asb2364_fpga_mask,
	.irq_mask_ack	= asb2364_fpga_mask_ack,
	.irq_unmask	= asb2364_fpga_unmask,
};

/*
 * FPGA PIC interrupt handler
 *
 * Demultiplex the shared CPU IRQ: a source's status register reading
 * with bit 0 clear indicates that source is asserting (the bit
 * appears to be active-low -- NOTE(review): confirm against the FPGA
 * datasheet), in which case we hand off to its virtual IRQ.
 */
static irqreturn_t fpga_interrupt(int irq, void *_mask)
{
	if ((ASB2364_FPGA_REG_IRQ_LAN & 0x0001) != 0x0001)
		generic_handle_irq(FPGA_LAN_IRQ);
	if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) != 0x0001)
		generic_handle_irq(FPGA_UART_IRQ);
	if ((ASB2364_FPGA_REG_IRQ_I2C & 0x0001) != 0x0001)
		generic_handle_irq(FPGA_I2C_IRQ);
	if ((ASB2364_FPGA_REG_IRQ_USB & 0x0001) != 0x0001)
		generic_handle_irq(FPGA_USB_IRQ);
	if ((ASB2364_FPGA_REG_IRQ_FPGA & 0x0001) != 0x0001)
		generic_handle_irq(FPGA_FPGA_IRQ);

	return IRQ_HANDLED;
}

/*
 * Define an interrupt action for each FPGA PIC output
 */
static struct irqaction fpga_irq[]  = {
	[0] = {
		.handler	= fpga_interrupt,
		.flags		= IRQF_DISABLED | IRQF_SHARED,
		.name		= "fpga",
	},
};

/*
 * Initialise the FPGA's PIC
 *
 * Mask every source, attach the level-triggered chip to each
 * multiplexed virtual IRQ, then hook the demux handler onto the CPU
 * pin the FPGA drives.
 */
void __init irq_fpga_init(void)
{
	int irq;

	ASB2364_FPGA_REG_MASK_LAN = 0x0001;
	SyncExBus();

	ASB2364_FPGA_REG_MASK_UART = 0x0001;
	SyncExBus();

	ASB2364_FPGA_REG_MASK_I2C = 0x0001;
	SyncExBus();

	ASB2364_FPGA_REG_MASK_USB = 0x0001;
	SyncExBus();

	ASB2364_FPGA_REG_MASK_FPGA = 0x0001;
	SyncExBus();

	for (irq = NR_CPU_IRQS; irq < NR_IRQS; irq++)
		irq_set_chip_and_handler(irq, &asb2364_fpga_pic,
					 handle_level_irq);

	/* the FPGA drives the XIRQ1 input on the CPU PIC */
	setup_irq(XIRQ1, &fpga_irq[0]);
}
gpl-2.0
arunkuttiyara/linux
drivers/staging/lustre/lustre/obdclass/obd_config.c
172
47494
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2011, 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. 
* * lustre/obdclass/obd_config.c * * Config API */ #define DEBUG_SUBSYSTEM S_CLASS #include "../include/obd_class.h" #include <linux/string.h> #include "../include/lustre_log.h" #include "../include/lprocfs_status.h" #include "../include/lustre_param.h" #include "llog_internal.h" static cfs_hash_ops_t uuid_hash_ops; static cfs_hash_ops_t nid_hash_ops; /*********** string parsing utils *********/ /* returns 0 if we find this key in the buffer, else 1 */ int class_find_param(char *buf, char *key, char **valp) { char *ptr; if (!buf) return 1; ptr = strstr(buf, key); if (ptr == NULL) return 1; if (valp) *valp = ptr + strlen(key); return 0; } EXPORT_SYMBOL(class_find_param); /** * Check whether the proc parameter \a param is an old parameter or not from * the array \a ptr which contains the mapping from old parameters to new ones. * If it's an old one, then return the pointer to the cfg_interop_param struc- * ture which contains both the old and new parameters. * * \param param proc parameter * \param ptr an array which contains the mapping from * old parameters to new ones * * \retval valid-pointer pointer to the cfg_interop_param structure * which contains the old and new parameters * \retval NULL \a param or \a ptr is NULL, * or \a param is not an old parameter */ struct cfg_interop_param *class_find_old_param(const char *param, struct cfg_interop_param *ptr) { char *value = NULL; int name_len = 0; if (param == NULL || ptr == NULL) return NULL; value = strchr(param, '='); if (value == NULL) name_len = strlen(param); else name_len = value - param; while (ptr->old_param != NULL) { if (strncmp(param, ptr->old_param, name_len) == 0 && name_len == strlen(ptr->old_param)) return ptr; ptr++; } return NULL; } EXPORT_SYMBOL(class_find_old_param); /** * Finds a parameter in \a params and copies it to \a copy. * * Leading spaces are skipped. 
Next space or end of string is the * parameter terminator with the exception that spaces inside single or double * quotes get included into a parameter. The parameter is copied into \a copy * which has to be allocated big enough by a caller, quotes are stripped in * the copy and the copy is terminated by 0. * * On return \a params is set to next parameter or to NULL if last * parameter is returned. * * \retval 0 if parameter is returned in \a copy * \retval 1 otherwise * \retval -EINVAL if unbalanced quota is found */ int class_get_next_param(char **params, char *copy) { char *q1, *q2, *str; int len; str = *params; while (*str == ' ') str++; if (*str == '\0') { *params = NULL; return 1; } while (1) { q1 = strpbrk(str, " '\""); if (q1 == NULL) { len = strlen(str); memcpy(copy, str, len); copy[len] = '\0'; *params = NULL; return 0; } len = q1 - str; if (*q1 == ' ') { memcpy(copy, str, len); copy[len] = '\0'; *params = str + len; return 0; } memcpy(copy, str, len); copy += len; /* search for the matching closing quote */ str = q1 + 1; q2 = strchr(str, *q1); if (q2 == NULL) { CERROR("Unbalanced quota in parameters: \"%s\"\n", *params); return -EINVAL; } len = q2 - str; memcpy(copy, str, len); copy += len; str = q2 + 1; } return 1; } EXPORT_SYMBOL(class_get_next_param); /* returns 0 if this is the first key in the buffer, else 1. valp points to first char after key. 
*/ int class_match_param(char *buf, char *key, char **valp) { if (!buf) return 1; if (memcmp(buf, key, strlen(key)) != 0) return 1; if (valp) *valp = buf + strlen(key); return 0; } EXPORT_SYMBOL(class_match_param); static int parse_nid(char *buf, void *value, int quiet) { lnet_nid_t *nid = (lnet_nid_t *)value; *nid = libcfs_str2nid(buf); if (*nid != LNET_NID_ANY) return 0; if (!quiet) LCONSOLE_ERROR_MSG(0x159, "Can't parse NID '%s'\n", buf); return -EINVAL; } static int parse_net(char *buf, void *value) { __u32 *net = (__u32 *)value; *net = libcfs_str2net(buf); CDEBUG(D_INFO, "Net %s\n", libcfs_net2str(*net)); return 0; } enum { CLASS_PARSE_NID = 1, CLASS_PARSE_NET, }; /* 0 is good nid, 1 not found < 0 error endh is set to next separator */ static int class_parse_value(char *buf, int opc, void *value, char **endh, int quiet) { char *endp; char tmp; int rc = 0; if (!buf) return 1; while (*buf == ',' || *buf == ':') buf++; if (*buf == ' ' || *buf == '/' || *buf == '\0') return 1; /* nid separators or end of nids */ endp = strpbrk(buf, ",: /"); if (endp == NULL) endp = buf + strlen(buf); tmp = *endp; *endp = '\0'; switch (opc) { default: LBUG(); case CLASS_PARSE_NID: rc = parse_nid(buf, value, quiet); break; case CLASS_PARSE_NET: rc = parse_net(buf, value); break; } *endp = tmp; if (rc != 0) return rc; if (endh) *endh = endp; return 0; } int class_parse_nid(char *buf, lnet_nid_t *nid, char **endh) { return class_parse_value(buf, CLASS_PARSE_NID, (void *)nid, endh, 0); } EXPORT_SYMBOL(class_parse_nid); int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh) { return class_parse_value(buf, CLASS_PARSE_NID, (void *)nid, endh, 1); } EXPORT_SYMBOL(class_parse_nid_quiet); int class_parse_net(char *buf, __u32 *net, char **endh) { return class_parse_value(buf, CLASS_PARSE_NET, (void *)net, endh, 0); } EXPORT_SYMBOL(class_parse_net); /* 1 param contains key and match * 0 param contains key and not match * -1 param does not contain key */ int class_match_nid(char 
*buf, char *key, lnet_nid_t nid) { lnet_nid_t tmp; int rc = -1; while (class_find_param(buf, key, &buf) == 0) { /* please restrict to the nids pertaining to * the specified nids */ while (class_parse_nid(buf, &tmp, &buf) == 0) { if (tmp == nid) return 1; } rc = 0; } return rc; } EXPORT_SYMBOL(class_match_nid); int class_match_net(char *buf, char *key, __u32 net) { __u32 tmp; int rc = -1; while (class_find_param(buf, key, &buf) == 0) { /* please restrict to the nids pertaining to * the specified networks */ while (class_parse_net(buf, &tmp, &buf) == 0) { if (tmp == net) return 1; } rc = 0; } return rc; } EXPORT_SYMBOL(class_match_net); /********************** class fns **********************/ /** * Create a new obd device and set the type, name and uuid. If successful, * the new device can be accessed by either name or uuid. */ int class_attach(struct lustre_cfg *lcfg) { struct obd_device *obd = NULL; char *typename, *name, *uuid; int rc, len; if (!LUSTRE_CFG_BUFLEN(lcfg, 1)) { CERROR("No type passed!\n"); return -EINVAL; } typename = lustre_cfg_string(lcfg, 1); if (!LUSTRE_CFG_BUFLEN(lcfg, 0)) { CERROR("No name passed!\n"); return -EINVAL; } name = lustre_cfg_string(lcfg, 0); if (!LUSTRE_CFG_BUFLEN(lcfg, 2)) { CERROR("No UUID passed!\n"); return -EINVAL; } uuid = lustre_cfg_string(lcfg, 2); CDEBUG(D_IOCTL, "attach type %s name: %s uuid: %s\n", MKSTR(typename), MKSTR(name), MKSTR(uuid)); obd = class_newdev(typename, name); if (IS_ERR(obd)) { /* Already exists or out of obds */ rc = PTR_ERR(obd); obd = NULL; CERROR("Cannot create device %s of type %s : %d\n", name, typename, rc); goto out; } LASSERTF(obd != NULL, "Cannot get obd device %s of type %s\n", name, typename); LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08X != %08X\n", obd, obd->obd_magic, OBD_DEVICE_MAGIC); LASSERTF(strncmp(obd->obd_name, name, strlen(name)) == 0, "%p obd_name %s != %s\n", obd, obd->obd_name, name); rwlock_init(&obd->obd_pool_lock); obd->obd_pool_limit = 0; 
obd->obd_pool_slv = 0; INIT_LIST_HEAD(&obd->obd_exports); INIT_LIST_HEAD(&obd->obd_unlinked_exports); INIT_LIST_HEAD(&obd->obd_delayed_exports); INIT_LIST_HEAD(&obd->obd_exports_timed); spin_lock_init(&obd->obd_nid_lock); spin_lock_init(&obd->obd_dev_lock); mutex_init(&obd->obd_dev_mutex); spin_lock_init(&obd->obd_osfs_lock); /* obd->obd_osfs_age must be set to a value in the distant * past to guarantee a fresh statfs is fetched on mount. */ obd->obd_osfs_age = cfs_time_shift_64(-1000); /* XXX belongs in setup not attach */ init_rwsem(&obd->obd_observer_link_sem); /* recovery data */ cfs_init_timer(&obd->obd_recovery_timer); spin_lock_init(&obd->obd_recovery_task_lock); init_waitqueue_head(&obd->obd_next_transno_waitq); init_waitqueue_head(&obd->obd_evict_inprogress_waitq); INIT_LIST_HEAD(&obd->obd_req_replay_queue); INIT_LIST_HEAD(&obd->obd_lock_replay_queue); INIT_LIST_HEAD(&obd->obd_final_req_queue); INIT_LIST_HEAD(&obd->obd_evict_list); llog_group_init(&obd->obd_olg, FID_SEQ_LLOG); obd->obd_conn_inprogress = 0; len = strlen(uuid); if (len >= sizeof(obd->obd_uuid)) { CERROR("uuid must be < %d bytes long\n", (int)sizeof(obd->obd_uuid)); rc = -EINVAL; goto out; } memcpy(obd->obd_uuid.uuid, uuid, len); /* do the attach */ if (OBP(obd, attach)) { rc = OBP(obd, attach)(obd, sizeof(*lcfg), lcfg); if (rc) { rc = -EINVAL; goto out; } } /* Detach drops this */ spin_lock(&obd->obd_dev_lock); atomic_set(&obd->obd_refcount, 1); spin_unlock(&obd->obd_dev_lock); lu_ref_init(&obd->obd_reference); lu_ref_add(&obd->obd_reference, "attach", obd); obd->obd_attached = 1; CDEBUG(D_IOCTL, "OBD: dev %d attached type %s with refcount %d\n", obd->obd_minor, typename, atomic_read(&obd->obd_refcount)); return 0; out: if (obd != NULL) { class_release_dev(obd); } return rc; } EXPORT_SYMBOL(class_attach); /** Create hashes, self-export, and call type-specific setup. * Setup is effectively the "start this obd" call. 
*/ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg) { int err = 0; struct obd_export *exp; LASSERT(obd != NULL); LASSERTF(obd == class_num2obd(obd->obd_minor), "obd %p != obd_devs[%d] %p\n", obd, obd->obd_minor, class_num2obd(obd->obd_minor)); LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n", obd, obd->obd_magic, OBD_DEVICE_MAGIC); /* have we attached a type to this device? */ if (!obd->obd_attached) { CERROR("Device %d not attached\n", obd->obd_minor); return -ENODEV; } if (obd->obd_set_up) { CERROR("Device %d already setup (type %s)\n", obd->obd_minor, obd->obd_type->typ_name); return -EEXIST; } /* is someone else setting us up right now? (attach inits spinlock) */ spin_lock(&obd->obd_dev_lock); if (obd->obd_starting) { spin_unlock(&obd->obd_dev_lock); CERROR("Device %d setup in progress (type %s)\n", obd->obd_minor, obd->obd_type->typ_name); return -EEXIST; } /* just leave this on forever. I can't use obd_set_up here because other fns check that status, and we're not actually set up yet. 
*/ obd->obd_starting = 1; obd->obd_uuid_hash = NULL; obd->obd_nid_hash = NULL; spin_unlock(&obd->obd_dev_lock); /* create an uuid-export lustre hash */ obd->obd_uuid_hash = cfs_hash_create("UUID_HASH", HASH_UUID_CUR_BITS, HASH_UUID_MAX_BITS, HASH_UUID_BKT_BITS, 0, CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA, &uuid_hash_ops, CFS_HASH_DEFAULT); if (!obd->obd_uuid_hash) { err = -ENOMEM; goto err_hash; } /* create a nid-export lustre hash */ obd->obd_nid_hash = cfs_hash_create("NID_HASH", HASH_NID_CUR_BITS, HASH_NID_MAX_BITS, HASH_NID_BKT_BITS, 0, CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA, &nid_hash_ops, CFS_HASH_DEFAULT); if (!obd->obd_nid_hash) { err = -ENOMEM; goto err_hash; } exp = class_new_export(obd, &obd->obd_uuid); if (IS_ERR(exp)) { err = PTR_ERR(exp); goto err_hash; } obd->obd_self_export = exp; list_del_init(&exp->exp_obd_chain_timed); class_export_put(exp); err = obd_setup(obd, lcfg); if (err) goto err_exp; obd->obd_set_up = 1; spin_lock(&obd->obd_dev_lock); /* cleanup drops this */ class_incref(obd, "setup", obd); spin_unlock(&obd->obd_dev_lock); CDEBUG(D_IOCTL, "finished setup of obd %s (uuid %s)\n", obd->obd_name, obd->obd_uuid.uuid); return 0; err_exp: if (obd->obd_self_export) { class_unlink_export(obd->obd_self_export); obd->obd_self_export = NULL; } err_hash: if (obd->obd_uuid_hash) { cfs_hash_putref(obd->obd_uuid_hash); obd->obd_uuid_hash = NULL; } if (obd->obd_nid_hash) { cfs_hash_putref(obd->obd_nid_hash); obd->obd_nid_hash = NULL; } obd->obd_starting = 0; CERROR("setup %s failed (%d)\n", obd->obd_name, err); return err; } EXPORT_SYMBOL(class_setup); /** We have finished using this obd and are ready to destroy it. * There can be no more references to this obd. 
*/ int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg) { if (obd->obd_set_up) { CERROR("OBD device %d still set up\n", obd->obd_minor); return -EBUSY; } spin_lock(&obd->obd_dev_lock); if (!obd->obd_attached) { spin_unlock(&obd->obd_dev_lock); CERROR("OBD device %d not attached\n", obd->obd_minor); return -ENODEV; } obd->obd_attached = 0; spin_unlock(&obd->obd_dev_lock); CDEBUG(D_IOCTL, "detach on obd %s (uuid %s)\n", obd->obd_name, obd->obd_uuid.uuid); class_decref(obd, "attach", obd); return 0; } EXPORT_SYMBOL(class_detach); /** Start shutting down the obd. There may be in-progress ops when * this is called. We tell them to start shutting down with a call * to class_disconnect_exports(). */ int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg) { int err = 0; char *flag; OBD_RACE(OBD_FAIL_LDLM_RECOV_CLIENTS); if (!obd->obd_set_up) { CERROR("Device %d not setup\n", obd->obd_minor); return -ENODEV; } spin_lock(&obd->obd_dev_lock); if (obd->obd_stopping) { spin_unlock(&obd->obd_dev_lock); CERROR("OBD %d already stopping\n", obd->obd_minor); return -ENODEV; } /* Leave this on forever */ obd->obd_stopping = 1; /* wait for already-arrived-connections to finish. */ while (obd->obd_conn_inprogress > 0) { spin_unlock(&obd->obd_dev_lock); cond_resched(); spin_lock(&obd->obd_dev_lock); } spin_unlock(&obd->obd_dev_lock); if (lcfg->lcfg_bufcount >= 2 && LUSTRE_CFG_BUFLEN(lcfg, 1) > 0) { for (flag = lustre_cfg_string(lcfg, 1); *flag != 0; flag++) switch (*flag) { case 'F': obd->obd_force = 1; break; case 'A': LCONSOLE_WARN("Failing over %s\n", obd->obd_name); obd->obd_fail = 1; obd->obd_no_transno = 1; obd->obd_no_recov = 1; if (OBP(obd, iocontrol)) { obd_iocontrol(OBD_IOC_SYNC, obd->obd_self_export, 0, NULL, NULL); } break; default: CERROR("Unrecognised flag '%c'\n", *flag); } } LASSERT(obd->obd_self_export); /* The three references that should be remaining are the * obd_self_export and the attach and setup references. 
*/ if (atomic_read(&obd->obd_refcount) > 3) { /* refcount - 3 might be the number of real exports (excluding self export). But class_incref is called by other things as well, so don't count on it. */ CDEBUG(D_IOCTL, "%s: forcing exports to disconnect: %d\n", obd->obd_name, atomic_read(&obd->obd_refcount) - 3); dump_exports(obd, 0); class_disconnect_exports(obd); } /* Precleanup, we must make sure all exports get destroyed. */ err = obd_precleanup(obd, OBD_CLEANUP_EXPORTS); if (err) CERROR("Precleanup %s returned %d\n", obd->obd_name, err); /* destroy an uuid-export hash body */ if (obd->obd_uuid_hash) { cfs_hash_putref(obd->obd_uuid_hash); obd->obd_uuid_hash = NULL; } /* destroy a nid-export hash body */ if (obd->obd_nid_hash) { cfs_hash_putref(obd->obd_nid_hash); obd->obd_nid_hash = NULL; } class_decref(obd, "setup", obd); obd->obd_set_up = 0; return 0; } EXPORT_SYMBOL(class_cleanup); struct obd_device *class_incref(struct obd_device *obd, const char *scope, const void *source) { lu_ref_add_atomic(&obd->obd_reference, scope, source); atomic_inc(&obd->obd_refcount); CDEBUG(D_INFO, "incref %s (%p) now %d\n", obd->obd_name, obd, atomic_read(&obd->obd_refcount)); return obd; } EXPORT_SYMBOL(class_incref); void class_decref(struct obd_device *obd, const char *scope, const void *source) { int err; int refs; spin_lock(&obd->obd_dev_lock); atomic_dec(&obd->obd_refcount); refs = atomic_read(&obd->obd_refcount); spin_unlock(&obd->obd_dev_lock); lu_ref_del(&obd->obd_reference, scope, source); CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs); if ((refs == 1) && obd->obd_stopping) { /* All exports have been destroyed; there should be no more in-progress ops by this point.*/ spin_lock(&obd->obd_self_export->exp_lock); obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd); spin_unlock(&obd->obd_self_export->exp_lock); /* note that we'll recurse into class_decref again */ class_unlink_export(obd->obd_self_export); return; } if (refs == 0) { 
CDEBUG(D_CONFIG, "finishing cleanup of obd %s (%s)\n", obd->obd_name, obd->obd_uuid.uuid); LASSERT(!obd->obd_attached); if (obd->obd_stopping) { /* If we're not stopping, we were never set up */ err = obd_cleanup(obd); if (err) CERROR("Cleanup %s returned %d\n", obd->obd_name, err); } if (OBP(obd, detach)) { err = OBP(obd, detach)(obd); if (err) CERROR("Detach returned %d\n", err); } class_release_dev(obd); } } EXPORT_SYMBOL(class_decref); /** Add a failover nid location. * Client obd types contact server obd types using this nid list. */ int class_add_conn(struct obd_device *obd, struct lustre_cfg *lcfg) { struct obd_import *imp; struct obd_uuid uuid; int rc; if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 || LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) { CERROR("invalid conn_uuid\n"); return -EINVAL; } if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) && strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) && strcmp(obd->obd_type->typ_name, LUSTRE_OSP_NAME) && strcmp(obd->obd_type->typ_name, LUSTRE_LWP_NAME) && strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) { CERROR("can't add connection on non-client dev\n"); return -EINVAL; } imp = obd->u.cli.cl_import; if (!imp) { CERROR("try to add conn on immature client dev\n"); return -EINVAL; } obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1)); rc = obd_add_conn(imp, &uuid, lcfg->lcfg_num); return rc; } EXPORT_SYMBOL(class_add_conn); /** Remove a failover nid location. 
*/ int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg) { struct obd_import *imp; struct obd_uuid uuid; int rc; if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 || LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) { CERROR("invalid conn_uuid\n"); return -EINVAL; } if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) && strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME)) { CERROR("can't del connection on non-client dev\n"); return -EINVAL; } imp = obd->u.cli.cl_import; if (!imp) { CERROR("try to del conn on immature client dev\n"); return -EINVAL; } obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1)); rc = obd_del_conn(imp, &uuid); return rc; } LIST_HEAD(lustre_profile_list); struct lustre_profile *class_get_profile(const char *prof) { struct lustre_profile *lprof; list_for_each_entry(lprof, &lustre_profile_list, lp_list) { if (!strcmp(lprof->lp_profile, prof)) { return lprof; } } return NULL; } EXPORT_SYMBOL(class_get_profile); /** Create a named "profile". * This defines the mdc and osc names to use for a client. * This also is used to define the lov to be used by a mdt. 
*/ int class_add_profile(int proflen, char *prof, int osclen, char *osc, int mdclen, char *mdc) { struct lustre_profile *lprof; int err = 0; CDEBUG(D_CONFIG, "Add profile %s\n", prof); lprof = kzalloc(sizeof(*lprof), GFP_NOFS); if (lprof == NULL) return -ENOMEM; INIT_LIST_HEAD(&lprof->lp_list); LASSERT(proflen == (strlen(prof) + 1)); lprof->lp_profile = kmemdup(prof, proflen, GFP_NOFS); if (lprof->lp_profile == NULL) { err = -ENOMEM; goto free_lprof; } LASSERT(osclen == (strlen(osc) + 1)); lprof->lp_dt = kmemdup(osc, osclen, GFP_NOFS); if (lprof->lp_dt == NULL) { err = -ENOMEM; goto free_lp_profile; } if (mdclen > 0) { LASSERT(mdclen == (strlen(mdc) + 1)); lprof->lp_md = kmemdup(mdc, mdclen, GFP_NOFS); if (lprof->lp_md == NULL) { err = -ENOMEM; goto free_lp_dt; } } list_add(&lprof->lp_list, &lustre_profile_list); return err; free_lp_dt: kfree(lprof->lp_dt); free_lp_profile: kfree(lprof->lp_profile); free_lprof: kfree(lprof); return err; } void class_del_profile(const char *prof) { struct lustre_profile *lprof; CDEBUG(D_CONFIG, "Del profile %s\n", prof); lprof = class_get_profile(prof); if (lprof) { list_del(&lprof->lp_list); kfree(lprof->lp_profile); kfree(lprof->lp_dt); kfree(lprof->lp_md); kfree(lprof); } } EXPORT_SYMBOL(class_del_profile); /* COMPAT_146 */ void class_del_profiles(void) { struct lustre_profile *lprof, *n; list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) { list_del(&lprof->lp_list); kfree(lprof->lp_profile); kfree(lprof->lp_dt); kfree(lprof->lp_md); kfree(lprof); } } EXPORT_SYMBOL(class_del_profiles); static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg) { if (class_match_param(ptr, PARAM_AT_MIN, NULL) == 0) at_min = val; else if (class_match_param(ptr, PARAM_AT_MAX, NULL) == 0) at_max = val; else if (class_match_param(ptr, PARAM_AT_EXTRA, NULL) == 0) at_extra = val; else if (class_match_param(ptr, PARAM_AT_EARLY_MARGIN, NULL) == 0) at_early_margin = val; else if (class_match_param(ptr, PARAM_AT_HISTORY, NULL) 
== 0) at_history = val; else if (class_match_param(ptr, PARAM_JOBID_VAR, NULL) == 0) strlcpy(obd_jobid_var, lustre_cfg_string(lcfg, 2), JOBSTATS_JOBID_VAR_MAX_LEN + 1); else return -EINVAL; CDEBUG(D_IOCTL, "global %s = %d\n", ptr, val); return 0; } /* We can't call ll_process_config or lquota_process_config directly because * it lives in a module that must be loaded after this one. */ static int (*client_process_config)(struct lustre_cfg *lcfg) = NULL; static int (*quota_process_config)(struct lustre_cfg *lcfg) = NULL; void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg)) { client_process_config = cpc; } EXPORT_SYMBOL(lustre_register_client_process_config); /** * Rename the proc parameter in \a cfg with a new name \a new_name. * * \param cfg config structure which contains the proc parameter * \param new_name new name of the proc parameter * * \retval valid-pointer pointer to the newly-allocated config structure * which contains the renamed proc parameter * \retval ERR_PTR(-EINVAL) if \a cfg or \a new_name is NULL, or \a cfg does * not contain a proc parameter * \retval ERR_PTR(-ENOMEM) if memory allocation failure occurs */ struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg, const char *new_name) { struct lustre_cfg_bufs *bufs = NULL; struct lustre_cfg *new_cfg = NULL; char *param = NULL; char *new_param = NULL; char *value = NULL; int name_len = 0; int new_len = 0; if (cfg == NULL || new_name == NULL) return ERR_PTR(-EINVAL); param = lustre_cfg_string(cfg, 1); if (param == NULL) return ERR_PTR(-EINVAL); value = strchr(param, '='); if (value == NULL) name_len = strlen(param); else name_len = value - param; new_len = LUSTRE_CFG_BUFLEN(cfg, 1) + strlen(new_name) - name_len; new_param = kzalloc(new_len, GFP_NOFS); if (new_param == NULL) return ERR_PTR(-ENOMEM); strcpy(new_param, new_name); if (value != NULL) strcat(new_param, value); bufs = kzalloc(sizeof(*bufs), GFP_NOFS); if (bufs == NULL) { kfree(new_param); return 
ERR_PTR(-ENOMEM); } lustre_cfg_bufs_reset(bufs, NULL); lustre_cfg_bufs_init(bufs, cfg); lustre_cfg_bufs_set_string(bufs, 1, new_param); new_cfg = lustre_cfg_new(cfg->lcfg_command, bufs); kfree(new_param); kfree(bufs); if (new_cfg == NULL) return ERR_PTR(-ENOMEM); new_cfg->lcfg_num = cfg->lcfg_num; new_cfg->lcfg_flags = cfg->lcfg_flags; new_cfg->lcfg_nid = cfg->lcfg_nid; new_cfg->lcfg_nal = cfg->lcfg_nal; return new_cfg; } EXPORT_SYMBOL(lustre_cfg_rename); static int process_param2_config(struct lustre_cfg *lcfg) { char *param = lustre_cfg_string(lcfg, 1); char *upcall = lustre_cfg_string(lcfg, 2); char *argv[] = { [0] = "/usr/sbin/lctl", [1] = "set_param", [2] = param, [3] = NULL }; struct timeval start; struct timeval end; int rc; /* Add upcall processing here. Now only lctl is supported */ if (strcmp(upcall, LCTL_UPCALL) != 0) { CERROR("Unsupported upcall %s\n", upcall); return -EINVAL; } do_gettimeofday(&start); rc = call_usermodehelper(argv[0], argv, NULL, 1); do_gettimeofday(&end); if (rc < 0) { CERROR( "lctl: error invoking upcall %s %s %s: rc = %d; time %ldus\n", argv[0], argv[1], argv[2], rc, cfs_timeval_sub(&end, &start, NULL)); } else { CDEBUG(D_HA, "lctl: invoked upcall %s %s %s, time %ldus\n", argv[0], argv[1], argv[2], cfs_timeval_sub(&end, &start, NULL)); rc = 0; } return rc; } void lustre_register_quota_process_config(int (*qpc)(struct lustre_cfg *lcfg)) { quota_process_config = qpc; } EXPORT_SYMBOL(lustre_register_quota_process_config); /** Process configuration commands given in lustre_cfg form. * These may come from direct calls (e.g. class_manual_cleanup) * or processing the config llog, or ioctl from lctl. 
*/ int class_process_config(struct lustre_cfg *lcfg) { struct obd_device *obd; int err; LASSERT(lcfg && !IS_ERR(lcfg)); CDEBUG(D_IOCTL, "processing cmd: %x\n", lcfg->lcfg_command); /* Commands that don't need a device */ switch (lcfg->lcfg_command) { case LCFG_ATTACH: { err = class_attach(lcfg); goto out; } case LCFG_ADD_UUID: { CDEBUG(D_IOCTL, "adding mapping from uuid %s to nid %#llx (%s)\n", lustre_cfg_string(lcfg, 1), lcfg->lcfg_nid, libcfs_nid2str(lcfg->lcfg_nid)); err = class_add_uuid(lustre_cfg_string(lcfg, 1), lcfg->lcfg_nid); goto out; } case LCFG_DEL_UUID: { CDEBUG(D_IOCTL, "removing mappings for uuid %s\n", (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) == 0) ? "<all uuids>" : lustre_cfg_string(lcfg, 1)); err = class_del_uuid(lustre_cfg_string(lcfg, 1)); goto out; } case LCFG_MOUNTOPT: { CDEBUG(D_IOCTL, "mountopt: profile %s osc %s mdc %s\n", lustre_cfg_string(lcfg, 1), lustre_cfg_string(lcfg, 2), lustre_cfg_string(lcfg, 3)); /* set these mount options somewhere, so ll_fill_super * can find them. 
*/ err = class_add_profile(LUSTRE_CFG_BUFLEN(lcfg, 1), lustre_cfg_string(lcfg, 1), LUSTRE_CFG_BUFLEN(lcfg, 2), lustre_cfg_string(lcfg, 2), LUSTRE_CFG_BUFLEN(lcfg, 3), lustre_cfg_string(lcfg, 3)); goto out; } case LCFG_DEL_MOUNTOPT: { CDEBUG(D_IOCTL, "mountopt: profile %s\n", lustre_cfg_string(lcfg, 1)); class_del_profile(lustre_cfg_string(lcfg, 1)); err = 0; goto out; } case LCFG_SET_TIMEOUT: { CDEBUG(D_IOCTL, "changing lustre timeout from %d to %d\n", obd_timeout, lcfg->lcfg_num); obd_timeout = max(lcfg->lcfg_num, 1U); obd_timeout_set = 1; err = 0; goto out; } case LCFG_SET_LDLM_TIMEOUT: { CDEBUG(D_IOCTL, "changing lustre ldlm_timeout from %d to %d\n", ldlm_timeout, lcfg->lcfg_num); ldlm_timeout = max(lcfg->lcfg_num, 1U); if (ldlm_timeout >= obd_timeout) ldlm_timeout = max(obd_timeout / 3, 1U); ldlm_timeout_set = 1; err = 0; goto out; } case LCFG_SET_UPCALL: { LCONSOLE_ERROR_MSG(0x15a, "recovery upcall is deprecated\n"); /* COMPAT_146 Don't fail on old configs */ err = 0; goto out; } case LCFG_MARKER: { struct cfg_marker *marker; marker = lustre_cfg_buf(lcfg, 1); CDEBUG(D_IOCTL, "marker %d (%#x) %.16s %s\n", marker->cm_step, marker->cm_flags, marker->cm_tgtname, marker->cm_comment); err = 0; goto out; } case LCFG_PARAM: { char *tmp; /* llite has no obd */ if ((class_match_param(lustre_cfg_string(lcfg, 1), PARAM_LLITE, NULL) == 0) && client_process_config) { err = (*client_process_config)(lcfg); goto out; } else if ((class_match_param(lustre_cfg_string(lcfg, 1), PARAM_SYS, &tmp) == 0)) { /* Global param settings */ err = class_set_global(tmp, lcfg->lcfg_num, lcfg); /* * Client or server should not fail to mount if * it hits an unknown configuration parameter. 
*/ if (err != 0) CWARN("Ignoring unknown param %s\n", tmp); err = 0; goto out; } else if ((class_match_param(lustre_cfg_string(lcfg, 1), PARAM_QUOTA, &tmp) == 0) && quota_process_config) { err = (*quota_process_config)(lcfg); goto out; } break; } case LCFG_SET_PARAM: { err = process_param2_config(lcfg); goto out; } } /* Commands that require a device */ obd = class_name2obd(lustre_cfg_string(lcfg, 0)); if (obd == NULL) { if (!LUSTRE_CFG_BUFLEN(lcfg, 0)) CERROR("this lcfg command requires a device name\n"); else CERROR("no device for: %s\n", lustre_cfg_string(lcfg, 0)); err = -EINVAL; goto out; } switch (lcfg->lcfg_command) { case LCFG_SETUP: { err = class_setup(obd, lcfg); goto out; } case LCFG_DETACH: { err = class_detach(obd, lcfg); err = 0; goto out; } case LCFG_CLEANUP: { err = class_cleanup(obd, lcfg); err = 0; goto out; } case LCFG_ADD_CONN: { err = class_add_conn(obd, lcfg); err = 0; goto out; } case LCFG_DEL_CONN: { err = class_del_conn(obd, lcfg); err = 0; goto out; } case LCFG_POOL_NEW: { err = obd_pool_new(obd, lustre_cfg_string(lcfg, 2)); err = 0; goto out; } case LCFG_POOL_ADD: { err = obd_pool_add(obd, lustre_cfg_string(lcfg, 2), lustre_cfg_string(lcfg, 3)); err = 0; goto out; } case LCFG_POOL_REM: { err = obd_pool_rem(obd, lustre_cfg_string(lcfg, 2), lustre_cfg_string(lcfg, 3)); err = 0; goto out; } case LCFG_POOL_DEL: { err = obd_pool_del(obd, lustre_cfg_string(lcfg, 2)); err = 0; goto out; } default: { err = obd_process_config(obd, sizeof(*lcfg), lcfg); goto out; } } out: if ((err < 0) && !(lcfg->lcfg_command & LCFG_REQUIRED)) { CWARN("Ignoring error %d on optional command %#x\n", err, lcfg->lcfg_command); err = 0; } return err; } EXPORT_SYMBOL(class_process_config); int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, struct lustre_cfg *lcfg, void *data) { struct lprocfs_vars *var; struct file fakefile; struct seq_file fake_seqfile; char *key, *sval; int i, keylen, vallen; int matched = 0, j = 0; int rc = 0; int skip = 0; if 
(lcfg->lcfg_command != LCFG_PARAM) { CERROR("Unknown command: %d\n", lcfg->lcfg_command); return -EINVAL; } /* fake a seq file so that var->fops->write can work... */ fakefile.private_data = &fake_seqfile; fake_seqfile.private = data; /* e.g. tunefs.lustre --param mdt.group_upcall=foo /r/tmp/lustre-mdt or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 */ for (i = 1; i < lcfg->lcfg_bufcount; i++) { key = lustre_cfg_buf(lcfg, i); /* Strip off prefix */ class_match_param(key, prefix, &key); sval = strchr(key, '='); if (!sval || (*(sval + 1) == 0)) { CERROR("Can't parse param %s (missing '=')\n", key); /* rc = -EINVAL; continue parsing other params */ continue; } keylen = sval - key; sval++; vallen = strlen(sval); matched = 0; j = 0; /* Search proc entries */ while (lvars[j].name) { var = &lvars[j]; if (class_match_param(key, (char *)var->name, NULL) == 0 && keylen == strlen(var->name)) { matched++; rc = -EROFS; if (var->fops && var->fops->write) { mm_segment_t oldfs; oldfs = get_fs(); set_fs(KERNEL_DS); rc = (var->fops->write)(&fakefile, sval, vallen, NULL); set_fs(oldfs); } break; } j++; } if (!matched) { /* If the prefix doesn't match, return error so we can pass it down the stack */ if (strnchr(key, keylen, '.')) return -ENOSYS; CERROR("%s: unknown param %s\n", (char *)lustre_cfg_string(lcfg, 0), key); /* rc = -EINVAL; continue parsing other params */ skip++; } else if (rc < 0) { CERROR("writing proc entry %s err %d\n", var->name, rc); rc = 0; } else { CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n", lustre_cfg_string(lcfg, 0), (int)strlen(prefix) - 1, prefix, (int)(sval - key - 1), key, sval); } } if (rc > 0) rc = 0; if (!rc && skip) rc = skip; return rc; } EXPORT_SYMBOL(class_process_proc_param); extern int lustre_check_exclusion(struct super_block *sb, char *svname); /** Parse a configuration llog, doing various manipulations on them * for various reasons, (modifications for compatibility, skip 
obsolete * records, change uuids, etc), then class_process_config() resulting * net records. */ int class_config_llog_handler(const struct lu_env *env, struct llog_handle *handle, struct llog_rec_hdr *rec, void *data) { struct config_llog_instance *clli = data; int cfg_len = rec->lrh_len; char *cfg_buf = (char *) (rec + 1); int rc = 0; //class_config_dump_handler(handle, rec, data); switch (rec->lrh_type) { case OBD_CFG_REC: { struct lustre_cfg *lcfg, *lcfg_new; struct lustre_cfg_bufs bufs; char *inst_name = NULL; int inst_len = 0; int inst = 0, swab = 0; lcfg = (struct lustre_cfg *)cfg_buf; if (lcfg->lcfg_version == __swab32(LUSTRE_CFG_VERSION)) { lustre_swab_lustre_cfg(lcfg); swab = 1; } rc = lustre_cfg_sanity_check(cfg_buf, cfg_len); if (rc) goto out; /* Figure out config state info */ if (lcfg->lcfg_command == LCFG_MARKER) { struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1); lustre_swab_cfg_marker(marker, swab, LUSTRE_CFG_BUFLEN(lcfg, 1)); CDEBUG(D_CONFIG, "Marker, inst_flg=%#x mark_flg=%#x\n", clli->cfg_flags, marker->cm_flags); if (marker->cm_flags & CM_START) { /* all previous flags off */ clli->cfg_flags = CFG_F_MARKER; if (marker->cm_flags & CM_SKIP) { clli->cfg_flags |= CFG_F_SKIP; CDEBUG(D_CONFIG, "SKIP #%d\n", marker->cm_step); } else if ((marker->cm_flags & CM_EXCLUDE) || (clli->cfg_sb && lustre_check_exclusion(clli->cfg_sb, marker->cm_tgtname))) { clli->cfg_flags |= CFG_F_EXCLUDE; CDEBUG(D_CONFIG, "EXCLUDE %d\n", marker->cm_step); } } else if (marker->cm_flags & CM_END) { clli->cfg_flags = 0; } } /* A config command without a start marker before it is illegal (post 146) */ if (!(clli->cfg_flags & CFG_F_COMPAT146) && !(clli->cfg_flags & CFG_F_MARKER) && (lcfg->lcfg_command != LCFG_MARKER)) { CWARN("Config not inside markers, ignoring! 
(inst: %p, uuid: %s, flags: %#x)\n", clli->cfg_instance, clli->cfg_uuid.uuid, clli->cfg_flags); clli->cfg_flags |= CFG_F_SKIP; } if (clli->cfg_flags & CFG_F_SKIP) { CDEBUG(D_CONFIG, "skipping %#x\n", clli->cfg_flags); rc = 0; /* No processing! */ break; } /* * For interoperability between 1.8 and 2.0, * rename "mds" obd device type to "mdt". */ { char *typename = lustre_cfg_string(lcfg, 1); char *index = lustre_cfg_string(lcfg, 2); if ((lcfg->lcfg_command == LCFG_ATTACH && typename && strcmp(typename, "mds") == 0)) { CWARN("For 1.8 interoperability, rename obd type from mds to mdt\n"); typename[2] = 't'; } if ((lcfg->lcfg_command == LCFG_SETUP && index && strcmp(index, "type") == 0)) { CDEBUG(D_INFO, "For 1.8 interoperability, set this index to '0'\n"); index[0] = '0'; index[1] = 0; } } if (clli->cfg_flags & CFG_F_EXCLUDE) { CDEBUG(D_CONFIG, "cmd: %x marked EXCLUDED\n", lcfg->lcfg_command); if (lcfg->lcfg_command == LCFG_LOV_ADD_OBD) /* Add inactive instead */ lcfg->lcfg_command = LCFG_LOV_ADD_INA; } lustre_cfg_bufs_init(&bufs, lcfg); if (clli && clli->cfg_instance && LUSTRE_CFG_BUFLEN(lcfg, 0) > 0){ inst = 1; inst_len = LUSTRE_CFG_BUFLEN(lcfg, 0) + sizeof(clli->cfg_instance) * 2 + 4; inst_name = kzalloc(inst_len, GFP_NOFS); if (inst_name == NULL) { rc = -ENOMEM; goto out; } sprintf(inst_name, "%s-%p", lustre_cfg_string(lcfg, 0), clli->cfg_instance); lustre_cfg_bufs_set_string(&bufs, 0, inst_name); CDEBUG(D_CONFIG, "cmd %x, instance name: %s\n", lcfg->lcfg_command, inst_name); } /* we override the llog's uuid for clients, to insure they are unique */ if (clli && clli->cfg_instance != NULL && lcfg->lcfg_command == LCFG_ATTACH) { lustre_cfg_bufs_set_string(&bufs, 2, clli->cfg_uuid.uuid); } /* * sptlrpc config record, we expect 2 data segments: * [0]: fs_name/target_name, * [1]: rule string * moving them to index [1] and [2], and insert MGC's * obdname at index [0]. 
*/ if (clli && clli->cfg_instance == NULL && lcfg->lcfg_command == LCFG_SPTLRPC_CONF) { lustre_cfg_bufs_set(&bufs, 2, bufs.lcfg_buf[1], bufs.lcfg_buflen[1]); lustre_cfg_bufs_set(&bufs, 1, bufs.lcfg_buf[0], bufs.lcfg_buflen[0]); lustre_cfg_bufs_set_string(&bufs, 0, clli->cfg_obdname); } lcfg_new = lustre_cfg_new(lcfg->lcfg_command, &bufs); lcfg_new->lcfg_num = lcfg->lcfg_num; lcfg_new->lcfg_flags = lcfg->lcfg_flags; /* XXX Hack to try to remain binary compatible with * pre-newconfig logs */ if (lcfg->lcfg_nal != 0 && /* pre-newconfig log? */ (lcfg->lcfg_nid >> 32) == 0) { __u32 addr = (__u32)(lcfg->lcfg_nid & 0xffffffff); lcfg_new->lcfg_nid = LNET_MKNID(LNET_MKNET(lcfg->lcfg_nal, 0), addr); CWARN("Converted pre-newconfig NAL %d NID %x to %s\n", lcfg->lcfg_nal, addr, libcfs_nid2str(lcfg_new->lcfg_nid)); } else { lcfg_new->lcfg_nid = lcfg->lcfg_nid; } lcfg_new->lcfg_nal = 0; /* illegal value for obsolete field */ rc = class_process_config(lcfg_new); lustre_cfg_free(lcfg_new); if (inst) kfree(inst_name); break; } default: CERROR("Unknown llog record type %#x encountered\n", rec->lrh_type); break; } out: if (rc) { CERROR("%s: cfg command failed: rc = %d\n", handle->lgh_ctxt->loc_obd->obd_name, rc); class_config_dump_handler(NULL, handle, rec, data); } return rc; } EXPORT_SYMBOL(class_config_llog_handler); int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt, char *name, struct config_llog_instance *cfg) { struct llog_process_cat_data cd = {0, 0}; struct llog_handle *llh; llog_cb_t callback; int rc; CDEBUG(D_INFO, "looking up llog %s\n", name); rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS); if (rc) return rc; rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL); if (rc) goto parse_out; /* continue processing from where we last stopped to end-of-log */ if (cfg) { cd.lpcd_first_idx = cfg->cfg_last_idx; callback = cfg->cfg_callback; LASSERT(callback != NULL); } else { callback = class_config_llog_handler; } cd.lpcd_last_idx = 0; rc 
= llog_process(env, llh, callback, cfg, &cd); CDEBUG(D_CONFIG, "Processed log %s gen %d-%d (rc=%d)\n", name, cd.lpcd_first_idx + 1, cd.lpcd_last_idx, rc); if (cfg) cfg->cfg_last_idx = cd.lpcd_last_idx; parse_out: llog_close(env, llh); return rc; } EXPORT_SYMBOL(class_config_parse_llog); /** * parse config record and output dump in supplied buffer. * This is separated from class_config_dump_handler() to use * for ioctl needs as well */ int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf, int size) { struct lustre_cfg *lcfg = (struct lustre_cfg *)(rec + 1); char *ptr = buf; char *end = buf + size; int rc = 0; LASSERT(rec->lrh_type == OBD_CFG_REC); rc = lustre_cfg_sanity_check(lcfg, rec->lrh_len); if (rc < 0) return rc; ptr += snprintf(ptr, end-ptr, "cmd=%05x ", lcfg->lcfg_command); if (lcfg->lcfg_flags) ptr += snprintf(ptr, end-ptr, "flags=%#08x ", lcfg->lcfg_flags); if (lcfg->lcfg_num) ptr += snprintf(ptr, end-ptr, "num=%#08x ", lcfg->lcfg_num); if (lcfg->lcfg_nid) ptr += snprintf(ptr, end-ptr, "nid=%s(%#llx)\n ", libcfs_nid2str(lcfg->lcfg_nid), lcfg->lcfg_nid); if (lcfg->lcfg_command == LCFG_MARKER) { struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1); ptr += snprintf(ptr, end-ptr, "marker=%d(%#x)%s '%s'", marker->cm_step, marker->cm_flags, marker->cm_tgtname, marker->cm_comment); } else { int i; for (i = 0; i < lcfg->lcfg_bufcount; i++) { ptr += snprintf(ptr, end-ptr, "%d:%s ", i, lustre_cfg_string(lcfg, i)); } } /* return consumed bytes */ rc = ptr - buf; return rc; } int class_config_dump_handler(const struct lu_env *env, struct llog_handle *handle, struct llog_rec_hdr *rec, void *data) { char *outstr; int rc = 0; outstr = kzalloc(256, GFP_NOFS); if (outstr == NULL) return -ENOMEM; if (rec->lrh_type == OBD_CFG_REC) { class_config_parse_rec(rec, outstr, 256); LCONSOLE(D_WARNING, " %s\n", outstr); } else { LCONSOLE(D_WARNING, "unhandled lrh_type: %#x\n", rec->lrh_type); rc = -EINVAL; } kfree(outstr); return rc; } int class_config_dump_llog(const struct 
lu_env *env, struct llog_ctxt *ctxt, char *name, struct config_llog_instance *cfg) { struct llog_handle *llh; int rc; LCONSOLE_INFO("Dumping config log %s\n", name); rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS); if (rc) return rc; rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL); if (rc) goto parse_out; rc = llog_process(env, llh, class_config_dump_handler, cfg, NULL); parse_out: llog_close(env, llh); LCONSOLE_INFO("End config log %s\n", name); return rc; } EXPORT_SYMBOL(class_config_dump_llog); /** Call class_cleanup and class_detach. * "Manual" only in the sense that we're faking lcfg commands. */ int class_manual_cleanup(struct obd_device *obd) { char flags[3] = ""; struct lustre_cfg *lcfg; struct lustre_cfg_bufs bufs; int rc; if (!obd) { CERROR("empty cleanup\n"); return -EALREADY; } if (obd->obd_force) strcat(flags, "F"); if (obd->obd_fail) strcat(flags, "A"); CDEBUG(D_CONFIG, "Manual cleanup of %s (flags='%s')\n", obd->obd_name, flags); lustre_cfg_bufs_reset(&bufs, obd->obd_name); lustre_cfg_bufs_set_string(&bufs, 1, flags); lcfg = lustre_cfg_new(LCFG_CLEANUP, &bufs); if (!lcfg) return -ENOMEM; rc = class_process_config(lcfg); if (rc) { CERROR("cleanup failed %d: %s\n", rc, obd->obd_name); goto out; } /* the lcfg is almost the same for both ops */ lcfg->lcfg_command = LCFG_DETACH; rc = class_process_config(lcfg); if (rc) CERROR("detach failed %d: %s\n", rc, obd->obd_name); out: lustre_cfg_free(lcfg); return rc; } EXPORT_SYMBOL(class_manual_cleanup); /* * uuid<->export lustre hash operations */ static unsigned uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask) { return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid, sizeof(((struct obd_uuid *)key)->uuid), mask); } static void * uuid_key(struct hlist_node *hnode) { struct obd_export *exp; exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash); return &exp->exp_client_uuid; } /* * NOTE: It is impossible to find an export that is in failed * state with this function */ 
static int uuid_keycmp(const void *key, struct hlist_node *hnode) { struct obd_export *exp; LASSERT(key); exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash); return obd_uuid_equals(key, &exp->exp_client_uuid) && !exp->exp_failed; } static void * uuid_export_object(struct hlist_node *hnode) { return hlist_entry(hnode, struct obd_export, exp_uuid_hash); } static void uuid_export_get(struct cfs_hash *hs, struct hlist_node *hnode) { struct obd_export *exp; exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash); class_export_get(exp); } static void uuid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode) { struct obd_export *exp; exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash); class_export_put(exp); } static cfs_hash_ops_t uuid_hash_ops = { .hs_hash = uuid_hash, .hs_key = uuid_key, .hs_keycmp = uuid_keycmp, .hs_object = uuid_export_object, .hs_get = uuid_export_get, .hs_put_locked = uuid_export_put_locked, }; /* * nid<->export hash operations */ static unsigned nid_hash(struct cfs_hash *hs, const void *key, unsigned mask) { return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask); } static void * nid_key(struct hlist_node *hnode) { struct obd_export *exp; exp = hlist_entry(hnode, struct obd_export, exp_nid_hash); return &exp->exp_connection->c_peer.nid; } /* * NOTE: It is impossible to find an export that is in failed * state with this function */ static int nid_kepcmp(const void *key, struct hlist_node *hnode) { struct obd_export *exp; LASSERT(key); exp = hlist_entry(hnode, struct obd_export, exp_nid_hash); return exp->exp_connection->c_peer.nid == *(lnet_nid_t *)key && !exp->exp_failed; } static void * nid_export_object(struct hlist_node *hnode) { return hlist_entry(hnode, struct obd_export, exp_nid_hash); } static void nid_export_get(struct cfs_hash *hs, struct hlist_node *hnode) { struct obd_export *exp; exp = hlist_entry(hnode, struct obd_export, exp_nid_hash); class_export_get(exp); } static void nid_export_put_locked(struct 
cfs_hash *hs, struct hlist_node *hnode) { struct obd_export *exp; exp = hlist_entry(hnode, struct obd_export, exp_nid_hash); class_export_put(exp); } static cfs_hash_ops_t nid_hash_ops = { .hs_hash = nid_hash, .hs_key = nid_key, .hs_keycmp = nid_kepcmp, .hs_object = nid_export_object, .hs_get = nid_export_get, .hs_put_locked = nid_export_put_locked, };
gpl-2.0
mythos234/cmkernel_zeroltexx
drivers/video/decon_display/decon_pm_core.c
172
17895
/* linux/drivers/video/decon_display/decon_pm_core.c * * Copyright (c) 2013 Samsung Electronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/fb.h> #include <linux/pm_runtime.h> #include <linux/of_gpio.h> #include <linux/delay.h> #include <linux/clk-private.h> #include <linux/exynos_iovmm.h> #include <linux/platform_device.h> #include "decon_display_driver.h" #include "decon_mipi_dsi.h" #include "decon_dt.h" #include "decon_pm_exynos.h" #include "decon_pm.h" #if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433) #include "regs-decon.h" #include "decon_fb.h" #else #include "regs-fimd.h" #include "fimd_fb.h" #endif #include <mach/cpufreq.h> #include <../drivers/clk/samsung/clk.h> #define GATE_LOCK_CNT 2 int decon_dbg = 5; module_param(decon_dbg, int, 0644); #define pm_info(fmt, args...) \ do { \ if (decon_dbg >= 5) \ printk("[INFO]%s: "fmt "\n", \ __func__, ##args); \ } while (0) #define pm_debug(fmt, args...) \ do { \ if (decon_dbg >= 6) \ printk("[DEBUG]%s: "fmt "\n", \ __func__, ##args); \ } while (0) #define call_pm_ops(q, ip, op, args...) \ (((q)->ip.ops->op) ? ((q)->ip.ops->op(args)) : 0) #define call_block_pm_ops(q, op, args...) \ (((q)->pm_status.ops->op) ? 
((q)->pm_status.ops->op(args)) : 0) /* following values are for debugging */ unsigned int frame_done_count; unsigned int te_count; void display_block_clock_on(struct display_driver *dispdrv); void display_block_clock_off(struct display_driver *dispdrv); int display_hibernation_power_on(struct display_driver *dispdrv); int display_hibernation_power_off(struct display_driver *dispdrv); static void decon_clock_gating_handler(struct kthread_work *work); static void decon_power_gating_handler(struct kthread_work *work); static void request_dynamic_hotplug(bool hotplug); struct pm_ops display_block_ops = { .clk_on = display_block_clock_on, .clk_off = display_block_clock_off, .pwr_on = display_hibernation_power_on, .pwr_off = display_hibernation_power_off, }; #ifdef CONFIG_FB_HIBERNATION_DISPLAY extern struct pm_ops decon_pm_ops; #ifdef CONFIG_DECON_MIC extern struct pm_ops mic_pm_ops; #endif extern struct pm_ops dsi_pm_ops; #endif extern struct mipi_dsim_device *dsim_for_decon; int disp_pm_set_plat_status(struct display_driver *dispdrv, bool platform_on) { if (platform_on) dispdrv->platform_status = DISP_STATUS_PM1; else dispdrv->platform_status = DISP_STATUS_PM0; return 0; } int init_display_pm_status(struct display_driver *dispdrv) { dispdrv->pm_status.clock_enabled = 0; atomic_set(&dispdrv->pm_status.lock_count, 0); dispdrv->pm_status.clk_idle_count = 0; dispdrv->pm_status.pwr_idle_count = 0; dispdrv->platform_status = DISP_STATUS_PM0; disp_pm_set_plat_status(dispdrv, false); te_count = 0; frame_done_count = 0; return 0; } int init_display_pm(struct display_driver *dispdrv) { init_display_pm_status(dispdrv); spin_lock_init(&dispdrv->pm_status.slock); mutex_init(&dispdrv->pm_status.pm_lock); mutex_init(&dispdrv->pm_status.clk_lock); #ifdef CONFIG_FB_HIBERNATION_DISPLAY set_default_hibernation_mode(dispdrv); dispdrv->pm_status.hotplug_delay_msec = MAX_HOTPLUG_DELAY_MSEC; #else dispdrv->pm_status.clock_gating_on = false; dispdrv->pm_status.power_gating_on = false; 
dispdrv->pm_status.hotplug_gating_on = false; #endif init_kthread_worker(&dispdrv->pm_status.control_clock_gating); dispdrv->pm_status.control_clock_gating_thread = kthread_run(kthread_worker_fn, &dispdrv->pm_status.control_clock_gating, "decon_clk_thread"); if (IS_ERR(dispdrv->pm_status.control_clock_gating_thread)) { int err = PTR_ERR(dispdrv->pm_status.control_clock_gating_thread); dispdrv->pm_status.control_clock_gating_thread = NULL; pr_err("failed to run control_clock_gating_thread\n"); return err; } init_kthread_work(&dispdrv->pm_status.control_clock_gating_work, decon_clock_gating_handler); init_kthread_worker(&dispdrv->pm_status.control_power_gating); dispdrv->pm_status.control_power_gating_thread = kthread_run(kthread_worker_fn, &dispdrv->pm_status.control_power_gating, "decon_power_thread"); if (IS_ERR(dispdrv->pm_status.control_power_gating_thread)) { int err = PTR_ERR(dispdrv->pm_status.control_power_gating_thread); dispdrv->pm_status.control_power_gating_thread = NULL; pr_err("failed to run control_power_gating_thread\n"); return err; } init_kthread_work(&dispdrv->pm_status.control_power_gating_work, decon_power_gating_handler); #ifdef CONFIG_FB_HIBERNATION_DISPLAY dispdrv->pm_status.ops = &display_block_ops; dispdrv->decon_driver.ops = &decon_pm_ops; dispdrv->dsi_driver.ops = &dsi_pm_ops; #ifdef CONFIG_DECON_MIC dispdrv->mic_driver.ops = &mic_pm_ops; #endif #endif return 0; } void disp_debug_power_info(void) { struct display_driver *dispdrv = get_display_driver(); pm_info("clk: %d, \ lock: %d, clk_idle: %d, pwr_idle: %d\n, \ output_on: %d, pwr_state: %d, vsync: %lld, frame_cnt: %d, te_cnt: %d", dispdrv->pm_status.clock_enabled, atomic_read(&dispdrv->pm_status.lock_count), dispdrv->pm_status.clk_idle_count, dispdrv->pm_status.pwr_idle_count, dispdrv->decon_driver.sfb->output_on, dispdrv->decon_driver.sfb->power_state, ktime_to_ns(dispdrv->decon_driver.sfb->vsync_info.timestamp), frame_done_count, te_count); } void disp_pm_gate_lock(struct 
display_driver *dispdrv, bool increase) { if (increase) atomic_inc(&dispdrv->pm_status.lock_count); else if(atomic_read(&dispdrv->pm_status.lock_count) > 0) atomic_dec(&dispdrv->pm_status.lock_count); } static void init_gating_idle_count(struct display_driver *dispdrv) { unsigned long flags; if (dispdrv->pm_status.clk_idle_count != 0 || dispdrv->pm_status.pwr_idle_count != 0) { spin_lock_irqsave(&dispdrv->pm_status.slock, flags); dispdrv->pm_status.clk_idle_count = 0; dispdrv->pm_status.pwr_idle_count = 0; spin_unlock_irqrestore(&dispdrv->pm_status.slock, flags); } } void debug_function(struct display_driver *dispdrv, const char *buf) { long input_time; #ifndef CONFIG_FB_HIBERNATION_DISPLAY pm_info("%s: does not support", __func__); return; #endif pm_info("calls [%s] to control gating function\n", buf); if (!kstrtol(buf, 10, &input_time)) { if (input_time == 0) { request_dynamic_hotplug(false); dispdrv->pm_status.hotplug_gating_on = false; } else { dispdrv->pm_status.hotplug_delay_msec = input_time; dispdrv->pm_status.hotplug_gating_on = true; if (dispdrv->decon_driver.sfb->power_state == POWER_HIBER_DOWN) { request_dynamic_hotplug(true); } } pm_info("Hotplug delay time is : %ld ms\n", input_time); pm_info("HOTPLUG GATING MODE: %s\n", dispdrv->pm_status.hotplug_gating_on == true? 
"TRUE":"FALSE"); return; } if (!strcmp(buf, "clk-gate-on")) { dispdrv->pm_status.clock_gating_on = true; } else if (!strcmp(buf, "clk-gate-off")) { dispdrv->pm_status.clock_gating_on = false; } else if (!strcmp(buf, "pwr-gate-on")) { dispdrv->pm_status.power_gating_on = true; } else if (!strcmp(buf, "pwr-gate-off")) { dispdrv->pm_status.power_gating_on = false; } else if (!strcmp(buf, "hotplug-gate-on")) { dispdrv->pm_status.hotplug_gating_on = true; } else if (!strcmp(buf, "hotplug-gate-off")) { request_dynamic_hotplug(false); dispdrv->pm_status.hotplug_gating_on = false; } else { pr_err("INVALID parameter: '%s'\n", buf); } pm_info("CLOCK GATING MODE: %s\n", dispdrv->pm_status.clock_gating_on == true? "TRUE":"FALSE"); pm_info("POWER GATING MODE: %s\n", dispdrv->pm_status.power_gating_on == true? "TRUE":"FALSE"); pm_info("HOTPLUG GATING MODE: %s\n", dispdrv->pm_status.hotplug_gating_on == true? "TRUE":"FALSE"); } int disp_pm_runtime_enable(struct display_driver *dispdrv) { #ifdef DISP_RUNTIME_PM_DEBUG pm_debug("runtime pm for disp-driver enabled\n"); #endif pm_runtime_enable(dispdrv->display_driver); return 0; } int disp_pm_runtime_get_sync(struct display_driver *dispdrv) { if (!dispdrv->pm_status.clock_gating_on) { pm_runtime_get_sync(dispdrv->display_driver); return 0; } init_gating_idle_count(dispdrv); /* guarantee clock and power gating */ flush_kthread_worker(&dispdrv->pm_status.control_clock_gating); flush_kthread_worker(&dispdrv->pm_status.control_power_gating); pm_runtime_get_sync(dispdrv->display_driver); display_block_clock_on(dispdrv); return 0; } int disp_pm_runtime_put_sync(struct display_driver *dispdrv) { if (!dispdrv->pm_status.clock_gating_on) { pm_runtime_put_sync(dispdrv->display_driver); return 0; } flush_kthread_worker(&dispdrv->pm_status.control_clock_gating); pm_runtime_put_sync(dispdrv->display_driver); return 0; } /* disp_pm_te_triggered - check clock gating or not. 
* this function is called in the TE interrupt handler */ void disp_pm_te_triggered(struct display_driver *dispdrv) { te_count++; if (!dispdrv->pm_status.clock_gating_on) return; spin_lock(&dispdrv->pm_status.slock); if (dispdrv->platform_status > DISP_STATUS_PM0 && atomic_read(&dispdrv->pm_status.lock_count) == 0) { if (dispdrv->pm_status.clock_enabled && MAX_CLK_GATING_COUNT > 0) { ++dispdrv->pm_status.clk_idle_count; if (dispdrv->pm_status.clk_idle_count > MAX_CLK_GATING_COUNT) { disp_pm_gate_lock(dispdrv, true); pm_debug("display_block_clock_off +"); queue_kthread_work(&dispdrv->pm_status.control_clock_gating, &dispdrv->pm_status.control_clock_gating_work); } } else { ++dispdrv->pm_status.pwr_idle_count; if (dispdrv->pm_status.power_gating_on && dispdrv->pm_status.pwr_idle_count > MAX_PWR_GATING_COUNT) { disp_pm_gate_lock(dispdrv, true); queue_kthread_work(&dispdrv->pm_status.control_power_gating, &dispdrv->pm_status.control_power_gating_work); } } } spin_unlock(&dispdrv->pm_status.slock); } /* disp_pm_sched_power_on - it is called in the early start of the * fb_ioctl to exit HDM */ int disp_pm_sched_power_on(struct display_driver *dispdrv, unsigned int cmd) { struct s3c_fb *sfb = dispdrv->decon_driver.sfb; init_gating_idle_count(dispdrv); /* First WIN_CONFIG should be on clock and power-gating */ if (dispdrv->platform_status < DISP_STATUS_PM1) { if (cmd == S3CFB_WIN_CONFIG) disp_pm_set_plat_status(dispdrv, true); } flush_kthread_worker(&dispdrv->pm_status.control_power_gating); if (sfb->power_state == POWER_HIBER_DOWN) { switch (cmd) { case S3CFB_PLATFORM_RESET: disp_pm_gate_lock(dispdrv, true); queue_kthread_work(&dispdrv->pm_status.control_power_gating, &dispdrv->pm_status.control_power_gating_work); /* Prevent next clock and power-gating */ disp_pm_set_plat_status(dispdrv, false); break; case S3CFB_WIN_PSR_EXIT: case S3CFB_WIN_CONFIG: request_dynamic_hotplug(false); disp_pm_gate_lock(dispdrv, true); 
queue_kthread_work(&dispdrv->pm_status.control_power_gating, &dispdrv->pm_status.control_power_gating_work); break; default: return -EBUSY; } } else { switch (cmd) { case S3CFB_PLATFORM_RESET: /* Prevent next clock and power-gating */ disp_pm_set_plat_status(dispdrv, false); break; } } return 0; } void disp_set_pm_status(int flag) { struct display_driver *dispdrv = get_display_driver(); if ((flag >= DISP_STATUS_PM0) || (flag < DISP_STATUS_PM_MAX)) dispdrv->platform_status = flag; } /* disp_pm_add_refcount - it is called in the early start of the * update_reg_handler */ int disp_pm_add_refcount(struct display_driver *dispdrv) { if (dispdrv->platform_status == DISP_STATUS_PM0) return 0; if (!dispdrv->pm_status.clock_gating_on) return 0; if (dispdrv->decon_driver.sfb->power_state == POWER_DOWN) return 0; init_gating_idle_count(dispdrv); flush_kthread_worker(&dispdrv->pm_status.control_clock_gating); flush_kthread_worker(&dispdrv->pm_status.control_power_gating); if (dispdrv->decon_driver.sfb->power_state == POWER_HIBER_DOWN) { request_dynamic_hotplug(false); display_hibernation_power_on(dispdrv); } display_block_clock_on(dispdrv); return 0; } /* disp_pm_dec_refcount - it is called at the DSI frame done */ int disp_pm_dec_refcount(struct display_driver *dispdrv) { ++frame_done_count; if (!dispdrv->pm_status.clock_gating_on) return 0; return 0; } static void decon_clock_gating_handler(struct kthread_work *work) { struct display_driver *dispdrv = get_display_driver(); if (dispdrv->pm_status.clk_idle_count > MAX_CLK_GATING_COUNT) display_block_clock_off(dispdrv); init_gating_idle_count(dispdrv); disp_pm_gate_lock(dispdrv, false); pm_debug("display_block_clock_off -"); } static void decon_power_gating_handler(struct kthread_work *work) { struct display_driver *dispdrv = get_display_driver(); if (dispdrv->pm_status.pwr_idle_count > MAX_PWR_GATING_COUNT) { if (!check_camera_is_running()) { display_hibernation_power_off(dispdrv); init_gating_idle_count(dispdrv); } } else if 
(dispdrv->decon_driver.sfb->power_state == POWER_HIBER_DOWN) { display_hibernation_power_on(dispdrv); } disp_pm_gate_lock(dispdrv, false); } static int __display_hibernation_power_on(struct display_driver *dispdrv) { /* already clocks are on */ /* DSIM -> MIC -> DECON */ call_pm_ops(dispdrv, dsi_driver, pwr_on, dispdrv); #ifdef CONFIG_DECON_MIC call_pm_ops(dispdrv, mic_driver, pwr_on, dispdrv); #endif call_pm_ops(dispdrv, decon_driver, pwr_on, dispdrv); return 0; } static int __display_hibernation_power_off(struct display_driver *dispdrv) { call_block_pm_ops(dispdrv, clk_on, dispdrv); /* DECON -> MIC -> DSIM */ call_pm_ops(dispdrv, decon_driver, pwr_off, dispdrv); #ifdef CONFIG_DECON_MIC call_pm_ops(dispdrv, mic_driver, pwr_off, dispdrv); #endif call_pm_ops(dispdrv, dsi_driver, pwr_off, dispdrv); return 0; } static void __display_block_clock_on(struct display_driver *dispdrv) { /* DSIM -> MIC -> DECON -> SMMU */ call_pm_ops(dispdrv, dsi_driver, clk_on, dispdrv); #ifdef CONFIG_DECON_MIC call_pm_ops(dispdrv, mic_driver, clk_on, dispdrv); #endif call_pm_ops(dispdrv, decon_driver, clk_on, dispdrv); #ifdef CONFIG_ION_EXYNOS if (dispdrv->platform_status > DISP_STATUS_PM0) { if (iovmm_activate(dispdrv->decon_driver.sfb->dev) < 0) pr_err("%s: failed to reactivate vmm\n", __func__); } #endif } static int __display_block_clock_off(struct display_driver *dispdrv) { if (get_display_line_count(dispdrv)) { pm_debug("wait until last frame is totally transferred %d:", get_display_line_count(dispdrv)); return -EBUSY; } /* SMMU -> DECON -> MIC -> DSIM */ #ifdef CONFIG_ION_EXYNOS if (dispdrv->platform_status > DISP_STATUS_PM0) iovmm_deactivate(dispdrv->decon_driver.sfb->dev); #endif call_pm_ops(dispdrv, decon_driver, clk_off, dispdrv); #ifdef CONFIG_DECON_MIC call_pm_ops(dispdrv, mic_driver, clk_off, dispdrv); #endif call_pm_ops(dispdrv, dsi_driver, clk_off, dispdrv); return 0; } static void request_dynamic_hotplug(bool hotplug) { #ifdef CONFIG_EXYNOS5_DYNAMIC_CPU_HOTPLUG struct 
display_driver *dispdrv = get_display_driver(); if ((dispdrv->pm_status.hotplug_gating_on) && (dispdrv->platform_status == DISP_STATUS_PM1)) force_dynamic_hotplug(hotplug, dispdrv->pm_status.hotplug_delay_msec); #endif } int display_hibernation_power_on(struct display_driver *dispdrv) { int ret = 0; struct s3c_fb *sfb = dispdrv->decon_driver.sfb; pm_info("##### +"); disp_pm_gate_lock(dispdrv, true); mutex_lock(&dispdrv->pm_status.pm_lock); if (sfb->power_state == POWER_ON) { pr_info("%s, DECON are already power on state\n", __func__); goto done; } pm_runtime_get_sync(dispdrv->display_driver); __display_hibernation_power_on(dispdrv); sfb->power_state = POWER_ON; done: mutex_unlock(&dispdrv->pm_status.pm_lock); disp_pm_gate_lock(dispdrv, false); pm_info("##### -\n"); return ret; } int display_hibernation_power_off(struct display_driver *dispdrv) { int ret = 0; struct s3c_fb *sfb = dispdrv->decon_driver.sfb; disp_pm_gate_lock(dispdrv, true); mutex_lock(&dispdrv->pm_status.pm_lock); if (sfb->power_state == POWER_DOWN) { pr_info("%s, DECON are already power off state\n", __func__); goto done; } if (atomic_read(&dispdrv->pm_status.lock_count) > GATE_LOCK_CNT) { pr_info("%s, DECON does not need power-off\n", __func__); goto done; } /* Should be clock on before check a H/W LINECNT */ display_block_clock_on(dispdrv); if (get_display_line_count(dispdrv)) { pm_debug("wait until last frame is totally transferred %d:", get_display_line_count(dispdrv)); goto done; } pm_info("##### +"); sfb->power_state = POWER_HIBER_DOWN; __display_hibernation_power_off(dispdrv); disp_pm_runtime_put_sync(dispdrv); request_dynamic_hotplug(true); pm_info("##### -\n"); done: mutex_unlock(&dispdrv->pm_status.pm_lock); disp_pm_gate_lock(dispdrv, false); return ret; } void display_block_clock_on(struct display_driver *dispdrv) { if (!get_display_power_status()) { pm_info("Requested a pm_runtime_get_sync, but power still off"); pm_runtime_get_sync(dispdrv->display_driver); if 
(!get_display_power_status()) BUG(); } mutex_lock(&dispdrv->pm_status.clk_lock); if (!dispdrv->pm_status.clock_enabled) { pm_debug("+"); __display_block_clock_on(dispdrv); dispdrv->pm_status.clock_enabled = 1; pm_debug("-"); } mutex_unlock(&dispdrv->pm_status.clk_lock); } void display_block_clock_off(struct display_driver *dispdrv) { mutex_lock(&dispdrv->pm_status.clk_lock); if (dispdrv->pm_status.clock_enabled) { pm_debug("+"); if (__display_block_clock_off(dispdrv) == 0) dispdrv->pm_status.clock_enabled = 0; pm_debug("-"); } mutex_unlock(&dispdrv->pm_status.clk_lock); }
gpl-2.0
Kernel-Saram/ef30s-ics-kernel
sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
940
3635
/* sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c * * Copyright 2009 Simtec Electronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <plat/audio-simtec.h> #include "s3c-dma.h" #include "s3c24xx-i2s.h" #include "s3c24xx_simtec.h" #include "../codecs/tlv320aic23.h" /* supported machines: * * Machine Connections AMP * ------- ----------- --- * BAST MIC, HPOUT, LOUT, LIN TPA2001D1 (HPOUTL,R) (gain hardwired) * VR1000 HPOUT, LIN None * VR2000 LIN, LOUT, MIC, HP LM4871 (HPOUTL,R) * DePicture LIN, LOUT, MIC, HP LM4871 (HPOUTL,R) * Anubis LIN, LOUT, MIC, HP TPA2001D1 (HPOUTL,R) */ static const struct snd_soc_dapm_widget dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_LINE("Line In", NULL), SND_SOC_DAPM_LINE("Line Out", NULL), SND_SOC_DAPM_MIC("Mic Jack", NULL), }; static const struct snd_soc_dapm_route base_map[] = { { "Headphone Jack", NULL, "LHPOUT"}, { "Headphone Jack", NULL, "RHPOUT"}, { "Line Out", NULL, "LOUT" }, { "Line Out", NULL, "ROUT" }, { "LLINEIN", NULL, "Line In"}, { "RLINEIN", NULL, "Line In"}, { "MICIN", NULL, "Mic Jack"}, }; /** * simtec_tlv320aic23_init - initialise and add controls * @codec; The codec instance to attach to. * * Attach our controls and configure the necessary codec * mappings for our sound card instance. 
*/ static int simtec_tlv320aic23_init(struct snd_soc_codec *codec) { snd_soc_dapm_new_controls(codec, dapm_widgets, ARRAY_SIZE(dapm_widgets)); snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map)); snd_soc_dapm_enable_pin(codec, "Headphone Jack"); snd_soc_dapm_enable_pin(codec, "Line In"); snd_soc_dapm_enable_pin(codec, "Line Out"); snd_soc_dapm_enable_pin(codec, "Mic Jack"); simtec_audio_init(codec); snd_soc_dapm_sync(codec); return 0; } static struct snd_soc_dai_link simtec_dai_aic23 = { .name = "tlv320aic23", .stream_name = "TLV320AIC23", .cpu_dai = &s3c24xx_i2s_dai, .codec_dai = &tlv320aic23_dai, .init = simtec_tlv320aic23_init, }; /* simtec audio machine driver */ static struct snd_soc_card snd_soc_machine_simtec_aic23 = { .name = "Simtec", .platform = &s3c24xx_soc_platform, .dai_link = &simtec_dai_aic23, .num_links = 1, }; /* simtec audio subsystem */ static struct snd_soc_device simtec_snd_devdata_aic23 = { .card = &snd_soc_machine_simtec_aic23, .codec_dev = &soc_codec_dev_tlv320aic23, }; static int __devinit simtec_audio_tlv320aic23_probe(struct platform_device *pd) { return simtec_audio_core_probe(pd, &simtec_snd_devdata_aic23); } static struct platform_driver simtec_audio_tlv320aic23_platdrv = { .driver = { .owner = THIS_MODULE, .name = "s3c24xx-simtec-tlv320aic23", .pm = simtec_audio_pm, }, .probe = simtec_audio_tlv320aic23_probe, .remove = __devexit_p(simtec_audio_remove), }; MODULE_ALIAS("platform:s3c24xx-simtec-tlv320aic23"); static int __init simtec_tlv320aic23_modinit(void) { return platform_driver_register(&simtec_audio_tlv320aic23_platdrv); } static void __exit simtec_tlv320aic23_modexit(void) { platform_driver_unregister(&simtec_audio_tlv320aic23_platdrv); } module_init(simtec_tlv320aic23_modinit); module_exit(simtec_tlv320aic23_modexit); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("ALSA SoC Simtec Audio support"); MODULE_LICENSE("GPL");
gpl-2.0
linusw/linux-bfq
net/ipv4/netfilter/nf_log_arp.c
940
3829
/* * (C) 2014 by Pablo Neira Ayuso <pablo@netfilter.org> * * Based on code from ebt_log from: * * Bart De Schuymer <bdschuym@pandora.be> * Harald Welte <laforge@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ip.h> #include <net/route.h> #include <linux/netfilter.h> #include <linux/netfilter/xt_LOG.h> #include <net/netfilter/nf_log.h> static struct nf_loginfo default_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = LOGLEVEL_NOTICE, .logflags = NF_LOG_MASK, }, }, }; struct arppayload { unsigned char mac_src[ETH_ALEN]; unsigned char ip_src[4]; unsigned char mac_dst[ETH_ALEN]; unsigned char ip_dst[4]; }; static void dump_arp_packet(struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int nhoff) { const struct arphdr *ah; struct arphdr _arph; const struct arppayload *ap; struct arppayload _arpp; ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); if (ah == NULL) { nf_log_buf_add(m, "TRUNCATED"); return; } nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d", ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op)); /* If it's for Ethernet and the lengths are OK, then log the ARP * payload. 
*/ if (ah->ar_hrd != htons(1) || ah->ar_hln != ETH_ALEN || ah->ar_pln != sizeof(__be32)) return; ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp); if (ap == NULL) { nf_log_buf_add(m, " INCOMPLETE [%Zu bytes]", skb->len - sizeof(_arph)); return; } nf_log_buf_add(m, " MACSRC=%pM IPSRC=%pI4 MACDST=%pM IPDST=%pI4", ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst); } static void nf_log_arp_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct nf_log_buf *m; /* FIXME: Disabled from containers until syslog ns is supported */ if (!net_eq(net, &init_net)) return; m = nf_log_buf_open(); if (!loginfo) loginfo = &default_loginfo; nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix); dump_arp_packet(m, loginfo, skb, 0); nf_log_buf_close(m); } static struct nf_logger nf_arp_logger __read_mostly = { .name = "nf_log_arp", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_arp_packet, .me = THIS_MODULE, }; static int __net_init nf_log_arp_net_init(struct net *net) { nf_log_set(net, NFPROTO_ARP, &nf_arp_logger); return 0; } static void __net_exit nf_log_arp_net_exit(struct net *net) { nf_log_unset(net, &nf_arp_logger); } static struct pernet_operations nf_log_arp_net_ops = { .init = nf_log_arp_net_init, .exit = nf_log_arp_net_exit, }; static int __init nf_log_arp_init(void) { int ret; ret = register_pernet_subsys(&nf_log_arp_net_ops); if (ret < 0) return ret; ret = nf_log_register(NFPROTO_ARP, &nf_arp_logger); if (ret < 0) { pr_err("failed to register logger\n"); goto err1; } return 0; err1: unregister_pernet_subsys(&nf_log_arp_net_ops); return ret; } static void __exit nf_log_arp_exit(void) { unregister_pernet_subsys(&nf_log_arp_net_ops); nf_log_unregister(&nf_arp_logger); } module_init(nf_log_arp_init); module_exit(nf_log_arp_exit); MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); 
MODULE_DESCRIPTION("Netfilter ARP packet logging"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NF_LOGGER(3, 0);
gpl-2.0
MSM8916-Samsung/android_kernel_samsung_e53g
arch/arm/mach-ep93xx/core.c
940
27194
/* * arch/arm/mach-ep93xx/core.c * Core routines for Cirrus EP93xx chips. * * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org> * * Thanks go to Michael Burian and Ray Lehtiniemi for their key * role in the ep93xx linux community. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/timex.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/termios.h> #include <linux/amba/bus.h> #include <linux/amba/serial.h> #include <linux/mtd/physmap.h> #include <linux/i2c.h> #include <linux/i2c-gpio.h> #include <linux/spi/spi.h> #include <linux/export.h> #include <linux/irqchip/arm-vic.h> #include <linux/reboot.h> #include <mach/hardware.h> #include <linux/platform_data/video-ep93xx.h> #include <linux/platform_data/keypad-ep93xx.h> #include <linux/platform_data/spi-ep93xx.h> #include <mach/gpio-ep93xx.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include "soc.h" /************************************************************************* * Static I/O mappings that are needed for all EP93xx platforms *************************************************************************/ static struct map_desc ep93xx_io_desc[] __initdata = { { .virtual = EP93XX_AHB_VIRT_BASE, .pfn = __phys_to_pfn(EP93XX_AHB_PHYS_BASE), .length = EP93XX_AHB_SIZE, .type = MT_DEVICE, }, { .virtual = EP93XX_APB_VIRT_BASE, .pfn = __phys_to_pfn(EP93XX_APB_PHYS_BASE), .length = EP93XX_APB_SIZE, .type = MT_DEVICE, }, }; void __init ep93xx_map_io(void) { 
iotable_init(ep93xx_io_desc, ARRAY_SIZE(ep93xx_io_desc)); } /************************************************************************* * Timer handling for EP93xx ************************************************************************* * The ep93xx has four internal timers. Timers 1, 2 (both 16 bit) and * 3 (32 bit) count down at 508 kHz, are self-reloading, and can generate * an interrupt on underflow. Timer 4 (40 bit) counts down at 983.04 kHz, * is free-running, and can't generate interrupts. * * The 508 kHz timers are ideal for use for the timer interrupt, as the * most common values of HZ divide 508 kHz nicely. We pick one of the 16 * bit timers (timer 1) since we don't need more than 16 bits of reload * value as long as HZ >= 8. * * The higher clock rate of timer 4 makes it a better choice than the * other timers for use in gettimeoffset(), while the fact that it can't * generate interrupts means we don't have to worry about not being able * to use this timer for something else. We also use timer 4 for keeping * track of lost jiffies. 
*/ #define EP93XX_TIMER_REG(x) (EP93XX_TIMER_BASE + (x)) #define EP93XX_TIMER1_LOAD EP93XX_TIMER_REG(0x00) #define EP93XX_TIMER1_VALUE EP93XX_TIMER_REG(0x04) #define EP93XX_TIMER1_CONTROL EP93XX_TIMER_REG(0x08) #define EP93XX_TIMER123_CONTROL_ENABLE (1 << 7) #define EP93XX_TIMER123_CONTROL_MODE (1 << 6) #define EP93XX_TIMER123_CONTROL_CLKSEL (1 << 3) #define EP93XX_TIMER1_CLEAR EP93XX_TIMER_REG(0x0c) #define EP93XX_TIMER2_LOAD EP93XX_TIMER_REG(0x20) #define EP93XX_TIMER2_VALUE EP93XX_TIMER_REG(0x24) #define EP93XX_TIMER2_CONTROL EP93XX_TIMER_REG(0x28) #define EP93XX_TIMER2_CLEAR EP93XX_TIMER_REG(0x2c) #define EP93XX_TIMER4_VALUE_LOW EP93XX_TIMER_REG(0x60) #define EP93XX_TIMER4_VALUE_HIGH EP93XX_TIMER_REG(0x64) #define EP93XX_TIMER4_VALUE_HIGH_ENABLE (1 << 8) #define EP93XX_TIMER3_LOAD EP93XX_TIMER_REG(0x80) #define EP93XX_TIMER3_VALUE EP93XX_TIMER_REG(0x84) #define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88) #define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c) #define EP93XX_TIMER123_CLOCK 508469 #define EP93XX_TIMER4_CLOCK 983040 #define TIMER1_RELOAD ((EP93XX_TIMER123_CLOCK / HZ) - 1) #define TIMER4_TICKS_PER_JIFFY DIV_ROUND_CLOSEST(CLOCK_TICK_RATE, HZ) static unsigned int last_jiffy_time; static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id) { /* Writing any value clears the timer interrupt */ __raw_writel(1, EP93XX_TIMER1_CLEAR); /* Recover lost jiffies */ while ((signed long) (__raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time) >= TIMER4_TICKS_PER_JIFFY) { last_jiffy_time += TIMER4_TICKS_PER_JIFFY; timer_tick(); } return IRQ_HANDLED; } static struct irqaction ep93xx_timer_irq = { .name = "ep93xx timer", .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = ep93xx_timer_interrupt, }; static u32 ep93xx_gettimeoffset(void) { int offset; offset = __raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time; /* * Timer 4 is based on a 983.04 kHz reference clock, * so dividing by 983040 gives the fraction of a second, * so dividing by 0.983040 
converts to uS. * Refactor the calculation to avoid overflow. * Finally, multiply by 1000 to give nS. */ return (offset + (53 * offset / 3072)) * 1000; } void __init ep93xx_timer_init(void) { u32 tmode = EP93XX_TIMER123_CONTROL_MODE | EP93XX_TIMER123_CONTROL_CLKSEL; arch_gettimeoffset = ep93xx_gettimeoffset; /* Enable periodic HZ timer. */ __raw_writel(tmode, EP93XX_TIMER1_CONTROL); __raw_writel(TIMER1_RELOAD, EP93XX_TIMER1_LOAD); __raw_writel(tmode | EP93XX_TIMER123_CONTROL_ENABLE, EP93XX_TIMER1_CONTROL); /* Enable lost jiffy timer. */ __raw_writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE, EP93XX_TIMER4_VALUE_HIGH); setup_irq(IRQ_EP93XX_TIMER1, &ep93xx_timer_irq); } /************************************************************************* * EP93xx IRQ handling *************************************************************************/ void __init ep93xx_init_irq(void) { vic_init(EP93XX_VIC1_BASE, 0, EP93XX_VIC1_VALID_IRQ_MASK, 0); vic_init(EP93XX_VIC2_BASE, 32, EP93XX_VIC2_VALID_IRQ_MASK, 0); } /************************************************************************* * EP93xx System Controller Software Locked register handling *************************************************************************/ /* * syscon_swlock prevents anything else from writing to the syscon * block while a software locked register is being written. 
*/ static DEFINE_SPINLOCK(syscon_swlock); void ep93xx_syscon_swlocked_write(unsigned int val, void __iomem *reg) { unsigned long flags; spin_lock_irqsave(&syscon_swlock, flags); __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); __raw_writel(val, reg); spin_unlock_irqrestore(&syscon_swlock, flags); } void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits) { unsigned long flags; unsigned int val; spin_lock_irqsave(&syscon_swlock, flags); val = __raw_readl(EP93XX_SYSCON_DEVCFG); val &= ~clear_bits; val |= set_bits; __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); __raw_writel(val, EP93XX_SYSCON_DEVCFG); spin_unlock_irqrestore(&syscon_swlock, flags); } /** * ep93xx_chip_revision() - returns the EP93xx chip revision * * See <mach/platform.h> for more information. */ unsigned int ep93xx_chip_revision(void) { unsigned int v; v = __raw_readl(EP93XX_SYSCON_SYSCFG); v &= EP93XX_SYSCON_SYSCFG_REV_MASK; v >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT; return v; } /************************************************************************* * EP93xx GPIO *************************************************************************/ static struct resource ep93xx_gpio_resource[] = { DEFINE_RES_MEM(EP93XX_GPIO_PHYS_BASE, 0xcc), }; static struct platform_device ep93xx_gpio_device = { .name = "gpio-ep93xx", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_gpio_resource), .resource = ep93xx_gpio_resource, }; /************************************************************************* * EP93xx peripheral handling *************************************************************************/ #define EP93XX_UART_MCR_OFFSET (0x0100) static void ep93xx_uart_set_mctrl(struct amba_device *dev, void __iomem *base, unsigned int mctrl) { unsigned int mcr; mcr = 0; if (mctrl & TIOCM_RTS) mcr |= 2; if (mctrl & TIOCM_DTR) mcr |= 1; __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET); } static struct amba_pl010_data ep93xx_uart_data = { .set_mctrl = ep93xx_uart_set_mctrl, }; static AMBA_APB_DEVICE(uart1, "apb:uart1", 
0x00041010, EP93XX_UART1_PHYS_BASE, { IRQ_EP93XX_UART1 }, &ep93xx_uart_data); static AMBA_APB_DEVICE(uart2, "apb:uart2", 0x00041010, EP93XX_UART2_PHYS_BASE, { IRQ_EP93XX_UART2 }, &ep93xx_uart_data); static AMBA_APB_DEVICE(uart3, "apb:uart3", 0x00041010, EP93XX_UART3_PHYS_BASE, { IRQ_EP93XX_UART3 }, &ep93xx_uart_data); static struct resource ep93xx_rtc_resource[] = { DEFINE_RES_MEM(EP93XX_RTC_PHYS_BASE, 0x10c), }; static struct platform_device ep93xx_rtc_device = { .name = "ep93xx-rtc", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_rtc_resource), .resource = ep93xx_rtc_resource, }; static struct resource ep93xx_ohci_resources[] = { DEFINE_RES_MEM(EP93XX_USB_PHYS_BASE, 0x1000), DEFINE_RES_IRQ(IRQ_EP93XX_USB), }; static struct platform_device ep93xx_ohci_device = { .name = "ep93xx-ohci", .id = -1, .dev = { .dma_mask = &ep93xx_ohci_device.dev.coherent_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(ep93xx_ohci_resources), .resource = ep93xx_ohci_resources, }; /************************************************************************* * EP93xx physmap'ed flash *************************************************************************/ static struct physmap_flash_data ep93xx_flash_data; static struct resource ep93xx_flash_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device ep93xx_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &ep93xx_flash_data, }, .num_resources = 1, .resource = &ep93xx_flash_resource, }; /** * ep93xx_register_flash() - Register the external flash device. 
* @width: bank width in octets * @start: resource start address * @size: resource size */ void __init ep93xx_register_flash(unsigned int width, resource_size_t start, resource_size_t size) { ep93xx_flash_data.width = width; ep93xx_flash_resource.start = start; ep93xx_flash_resource.end = start + size - 1; platform_device_register(&ep93xx_flash); } /************************************************************************* * EP93xx ethernet peripheral handling *************************************************************************/ static struct ep93xx_eth_data ep93xx_eth_data; static struct resource ep93xx_eth_resource[] = { DEFINE_RES_MEM(EP93XX_ETHERNET_PHYS_BASE, 0x10000), DEFINE_RES_IRQ(IRQ_EP93XX_ETHERNET), }; static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32); static struct platform_device ep93xx_eth_device = { .name = "ep93xx-eth", .id = -1, .dev = { .platform_data = &ep93xx_eth_data, .coherent_dma_mask = DMA_BIT_MASK(32), .dma_mask = &ep93xx_eth_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_eth_resource), .resource = ep93xx_eth_resource, }; /** * ep93xx_register_eth - Register the built-in ethernet platform device. * @data: platform specific ethernet configuration (__initdata) * @copy_addr: flag indicating that the MAC address should be copied * from the IndAd registers (as programmed by the bootloader) */ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr) { if (copy_addr) memcpy_fromio(data->dev_addr, EP93XX_ETHERNET_BASE + 0x50, 6); ep93xx_eth_data = *data; platform_device_register(&ep93xx_eth_device); } /************************************************************************* * EP93xx i2c peripheral handling *************************************************************************/ static struct i2c_gpio_platform_data ep93xx_i2c_data; static struct platform_device ep93xx_i2c_device = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = &ep93xx_i2c_data, }, }; /** * ep93xx_register_i2c - Register the i2c platform device. 
* @data: platform specific i2c-gpio configuration (__initdata) * @devices: platform specific i2c bus device information (__initdata) * @num: the number of devices on the i2c bus */ void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data, struct i2c_board_info *devices, int num) { /* * Set the EEPROM interface pin drive type control. * Defines the driver type for the EECLK and EEDAT pins as either * open drain, which will require an external pull-up, or a normal * CMOS driver. */ if (data->sda_is_open_drain && data->sda_pin != EP93XX_GPIO_LINE_EEDAT) pr_warning("sda != EEDAT, open drain has no effect\n"); if (data->scl_is_open_drain && data->scl_pin != EP93XX_GPIO_LINE_EECLK) pr_warning("scl != EECLK, open drain has no effect\n"); __raw_writel((data->sda_is_open_drain << 1) | (data->scl_is_open_drain << 0), EP93XX_GPIO_EEDRIVE); ep93xx_i2c_data = *data; i2c_register_board_info(0, devices, num); platform_device_register(&ep93xx_i2c_device); } /************************************************************************* * EP93xx SPI peripheral handling *************************************************************************/ static struct ep93xx_spi_info ep93xx_spi_master_data; static struct resource ep93xx_spi_resources[] = { DEFINE_RES_MEM(EP93XX_SPI_PHYS_BASE, 0x18), DEFINE_RES_IRQ(IRQ_EP93XX_SSP), }; static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32); static struct platform_device ep93xx_spi_device = { .name = "ep93xx-spi", .id = 0, .dev = { .platform_data = &ep93xx_spi_master_data, .coherent_dma_mask = DMA_BIT_MASK(32), .dma_mask = &ep93xx_spi_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_spi_resources), .resource = ep93xx_spi_resources, }; /** * ep93xx_register_spi() - registers spi platform device * @info: ep93xx board specific spi master info (__initdata) * @devices: SPI devices to register (__initdata) * @num: number of SPI devices to register * * This function registers platform device for the EP93xx SPI controller and * also makes sure that SPI 
pins are muxed so that I2S is not using those pins. */ void __init ep93xx_register_spi(struct ep93xx_spi_info *info, struct spi_board_info *devices, int num) { /* * When SPI is used, we need to make sure that I2S is muxed off from * SPI pins. */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONSSP); ep93xx_spi_master_data = *info; spi_register_board_info(devices, num); platform_device_register(&ep93xx_spi_device); } /************************************************************************* * EP93xx LEDs *************************************************************************/ static const struct gpio_led ep93xx_led_pins[] __initconst = { { .name = "platform:grled", .gpio = EP93XX_GPIO_LINE_GRLED, }, { .name = "platform:rdled", .gpio = EP93XX_GPIO_LINE_RDLED, }, }; static const struct gpio_led_platform_data ep93xx_led_data __initconst = { .num_leds = ARRAY_SIZE(ep93xx_led_pins), .leds = ep93xx_led_pins, }; /************************************************************************* * EP93xx pwm peripheral handling *************************************************************************/ static struct resource ep93xx_pwm0_resource[] = { DEFINE_RES_MEM(EP93XX_PWM_PHYS_BASE, 0x10), }; static struct platform_device ep93xx_pwm0_device = { .name = "ep93xx-pwm", .id = 0, .num_resources = ARRAY_SIZE(ep93xx_pwm0_resource), .resource = ep93xx_pwm0_resource, }; static struct resource ep93xx_pwm1_resource[] = { DEFINE_RES_MEM(EP93XX_PWM_PHYS_BASE + 0x20, 0x10), }; static struct platform_device ep93xx_pwm1_device = { .name = "ep93xx-pwm", .id = 1, .num_resources = ARRAY_SIZE(ep93xx_pwm1_resource), .resource = ep93xx_pwm1_resource, }; void __init ep93xx_register_pwm(int pwm0, int pwm1) { if (pwm0) platform_device_register(&ep93xx_pwm0_device); /* NOTE: EP9307 does not have PWMOUT1 (pin EGPIO14) */ if (pwm1) platform_device_register(&ep93xx_pwm1_device); } int ep93xx_pwm_acquire_gpio(struct platform_device *pdev) { int err; if (pdev->id == 0) { err = 0; } else if (pdev->id == 1) 
{ err = gpio_request(EP93XX_GPIO_LINE_EGPIO14, dev_name(&pdev->dev)); if (err) return err; err = gpio_direction_output(EP93XX_GPIO_LINE_EGPIO14, 0); if (err) goto fail; /* PWM 1 output on EGPIO[14] */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_PONG); } else { err = -ENODEV; } return err; fail: gpio_free(EP93XX_GPIO_LINE_EGPIO14); return err; } EXPORT_SYMBOL(ep93xx_pwm_acquire_gpio); void ep93xx_pwm_release_gpio(struct platform_device *pdev) { if (pdev->id == 1) { gpio_direction_input(EP93XX_GPIO_LINE_EGPIO14); gpio_free(EP93XX_GPIO_LINE_EGPIO14); /* EGPIO[14] used for GPIO */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_PONG); } } EXPORT_SYMBOL(ep93xx_pwm_release_gpio); /************************************************************************* * EP93xx video peripheral handling *************************************************************************/ static struct ep93xxfb_mach_info ep93xxfb_data; static struct resource ep93xx_fb_resource[] = { DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE, 0x800), }; static struct platform_device ep93xx_fb_device = { .name = "ep93xx-fb", .id = -1, .dev = { .platform_data = &ep93xxfb_data, .coherent_dma_mask = DMA_BIT_MASK(32), .dma_mask = &ep93xx_fb_device.dev.coherent_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_fb_resource), .resource = ep93xx_fb_resource, }; /* The backlight use a single register in the framebuffer's register space */ #define EP93XX_RASTER_REG_BRIGHTNESS 0x20 static struct resource ep93xx_bl_resources[] = { DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE + EP93XX_RASTER_REG_BRIGHTNESS, 0x04), }; static struct platform_device ep93xx_bl_device = { .name = "ep93xx-bl", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_bl_resources), .resource = ep93xx_bl_resources, }; /** * ep93xx_register_fb - Register the framebuffer platform device. 
* @data: platform specific framebuffer configuration (__initdata) */ void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data) { ep93xxfb_data = *data; platform_device_register(&ep93xx_fb_device); platform_device_register(&ep93xx_bl_device); } /************************************************************************* * EP93xx matrix keypad peripheral handling *************************************************************************/ static struct ep93xx_keypad_platform_data ep93xx_keypad_data; static struct resource ep93xx_keypad_resource[] = { DEFINE_RES_MEM(EP93XX_KEY_MATRIX_PHYS_BASE, 0x0c), DEFINE_RES_IRQ(IRQ_EP93XX_KEY), }; static struct platform_device ep93xx_keypad_device = { .name = "ep93xx-keypad", .id = -1, .dev = { .platform_data = &ep93xx_keypad_data, }, .num_resources = ARRAY_SIZE(ep93xx_keypad_resource), .resource = ep93xx_keypad_resource, }; /** * ep93xx_register_keypad - Register the keypad platform device. * @data: platform specific keypad configuration (__initdata) */ void __init ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data) { ep93xx_keypad_data = *data; platform_device_register(&ep93xx_keypad_device); } int ep93xx_keypad_acquire_gpio(struct platform_device *pdev) { int err; int i; for (i = 0; i < 8; i++) { err = gpio_request(EP93XX_GPIO_LINE_C(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_c; err = gpio_request(EP93XX_GPIO_LINE_D(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_d; } /* Enable the keypad controller; GPIO ports C and D used for keypad */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_KEYS | EP93XX_SYSCON_DEVCFG_GONK); return 0; fail_gpio_d: gpio_free(EP93XX_GPIO_LINE_C(i)); fail_gpio_c: for (--i; i >= 0; --i) { gpio_free(EP93XX_GPIO_LINE_C(i)); gpio_free(EP93XX_GPIO_LINE_D(i)); } return err; } EXPORT_SYMBOL(ep93xx_keypad_acquire_gpio); void ep93xx_keypad_release_gpio(struct platform_device *pdev) { int i; for (i = 0; i < 8; i++) { gpio_free(EP93XX_GPIO_LINE_C(i)); gpio_free(EP93XX_GPIO_LINE_D(i)); } /* 
Disable the keypad controller; GPIO ports C and D used for GPIO */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS | EP93XX_SYSCON_DEVCFG_GONK); } EXPORT_SYMBOL(ep93xx_keypad_release_gpio); /************************************************************************* * EP93xx I2S audio peripheral handling *************************************************************************/ static struct resource ep93xx_i2s_resource[] = { DEFINE_RES_MEM(EP93XX_I2S_PHYS_BASE, 0x100), }; static struct platform_device ep93xx_i2s_device = { .name = "ep93xx-i2s", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_i2s_resource), .resource = ep93xx_i2s_resource, }; static struct platform_device ep93xx_pcm_device = { .name = "ep93xx-pcm-audio", .id = -1, }; void __init ep93xx_register_i2s(void) { platform_device_register(&ep93xx_i2s_device); platform_device_register(&ep93xx_pcm_device); } #define EP93XX_SYSCON_DEVCFG_I2S_MASK (EP93XX_SYSCON_DEVCFG_I2SONSSP | \ EP93XX_SYSCON_DEVCFG_I2SONAC97) #define EP93XX_I2SCLKDIV_MASK (EP93XX_SYSCON_I2SCLKDIV_ORIDE | \ EP93XX_SYSCON_I2SCLKDIV_SPOL) int ep93xx_i2s_acquire(void) { unsigned val; ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_I2SONAC97, EP93XX_SYSCON_DEVCFG_I2S_MASK); /* * This is potentially racy with the clock api for i2s_mclk, sclk and * lrclk. Since the i2s driver is the only user of those clocks we * rely on it to prevent parallel use of this function and the * clock api for the i2s clocks. 
*/ val = __raw_readl(EP93XX_SYSCON_I2SCLKDIV); val &= ~EP93XX_I2SCLKDIV_MASK; val |= EP93XX_SYSCON_I2SCLKDIV_ORIDE | EP93XX_SYSCON_I2SCLKDIV_SPOL; ep93xx_syscon_swlocked_write(val, EP93XX_SYSCON_I2SCLKDIV); return 0; } EXPORT_SYMBOL(ep93xx_i2s_acquire); void ep93xx_i2s_release(void) { ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2S_MASK); } EXPORT_SYMBOL(ep93xx_i2s_release); /************************************************************************* * EP93xx AC97 audio peripheral handling *************************************************************************/ static struct resource ep93xx_ac97_resources[] = { DEFINE_RES_MEM(EP93XX_AAC_PHYS_BASE, 0xac), DEFINE_RES_IRQ(IRQ_EP93XX_AACINTR), }; static struct platform_device ep93xx_ac97_device = { .name = "ep93xx-ac97", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_ac97_resources), .resource = ep93xx_ac97_resources, }; void __init ep93xx_register_ac97(void) { /* * Make sure that the AC97 pins are not used by I2S. */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97); platform_device_register(&ep93xx_ac97_device); platform_device_register(&ep93xx_pcm_device); } /************************************************************************* * EP93xx Watchdog *************************************************************************/ static struct resource ep93xx_wdt_resources[] = { DEFINE_RES_MEM(EP93XX_WATCHDOG_PHYS_BASE, 0x08), }; static struct platform_device ep93xx_wdt_device = { .name = "ep93xx-wdt", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_wdt_resources), .resource = ep93xx_wdt_resources, }; /************************************************************************* * EP93xx IDE *************************************************************************/ static struct resource ep93xx_ide_resources[] = { DEFINE_RES_MEM(EP93XX_IDE_PHYS_BASE, 0x38), DEFINE_RES_IRQ(IRQ_EP93XX_EXT3), }; static struct platform_device ep93xx_ide_device = { .name = "ep93xx-ide", .id = -1, .dev = { .dma_mask = 
&ep93xx_ide_device.dev.coherent_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(ep93xx_ide_resources), .resource = ep93xx_ide_resources, }; void __init ep93xx_register_ide(void) { platform_device_register(&ep93xx_ide_device); } int ep93xx_ide_acquire_gpio(struct platform_device *pdev) { int err; int i; err = gpio_request(EP93XX_GPIO_LINE_EGPIO2, dev_name(&pdev->dev)); if (err) return err; err = gpio_request(EP93XX_GPIO_LINE_EGPIO15, dev_name(&pdev->dev)); if (err) goto fail_egpio15; for (i = 2; i < 8; i++) { err = gpio_request(EP93XX_GPIO_LINE_E(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_e; } for (i = 4; i < 8; i++) { err = gpio_request(EP93XX_GPIO_LINE_G(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_g; } for (i = 0; i < 8; i++) { err = gpio_request(EP93XX_GPIO_LINE_H(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_h; } /* GPIO ports E[7:2], G[7:4] and H used by IDE */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_EONIDE | EP93XX_SYSCON_DEVCFG_GONIDE | EP93XX_SYSCON_DEVCFG_HONIDE); return 0; fail_gpio_h: for (--i; i >= 0; --i) gpio_free(EP93XX_GPIO_LINE_H(i)); i = 8; fail_gpio_g: for (--i; i >= 4; --i) gpio_free(EP93XX_GPIO_LINE_G(i)); i = 8; fail_gpio_e: for (--i; i >= 2; --i) gpio_free(EP93XX_GPIO_LINE_E(i)); gpio_free(EP93XX_GPIO_LINE_EGPIO15); fail_egpio15: gpio_free(EP93XX_GPIO_LINE_EGPIO2); return err; } EXPORT_SYMBOL(ep93xx_ide_acquire_gpio); void ep93xx_ide_release_gpio(struct platform_device *pdev) { int i; for (i = 2; i < 8; i++) gpio_free(EP93XX_GPIO_LINE_E(i)); for (i = 4; i < 8; i++) gpio_free(EP93XX_GPIO_LINE_G(i)); for (i = 0; i < 8; i++) gpio_free(EP93XX_GPIO_LINE_H(i)); gpio_free(EP93XX_GPIO_LINE_EGPIO15); gpio_free(EP93XX_GPIO_LINE_EGPIO2); /* GPIO ports E[7:2], G[7:4] and H used by GPIO */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_EONIDE | EP93XX_SYSCON_DEVCFG_GONIDE | EP93XX_SYSCON_DEVCFG_HONIDE); } EXPORT_SYMBOL(ep93xx_ide_release_gpio); void __init ep93xx_init_devices(void) { /* Disallow access 
to MaverickCrunch initially */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA); /* Default all ports to GPIO */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS | EP93XX_SYSCON_DEVCFG_GONK | EP93XX_SYSCON_DEVCFG_EONIDE | EP93XX_SYSCON_DEVCFG_GONIDE | EP93XX_SYSCON_DEVCFG_HONIDE); /* Get the GPIO working early, other devices need it */ platform_device_register(&ep93xx_gpio_device); amba_device_register(&uart1_device, &iomem_resource); amba_device_register(&uart2_device, &iomem_resource); amba_device_register(&uart3_device, &iomem_resource); platform_device_register(&ep93xx_rtc_device); platform_device_register(&ep93xx_ohci_device); platform_device_register(&ep93xx_wdt_device); gpio_led_register_device(-1, &ep93xx_led_data); } void ep93xx_restart(enum reboot_mode mode, const char *cmd) { /* * Set then clear the SWRST bit to initiate a software reset */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_SWRST); ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_SWRST); while (1) ; } void __init ep93xx_init_late(void) { crunch_init(); }
gpl-2.0
jgcaap/boeffla
arch/xtensa/kernel/signal.c
940
13616
/*
 * arch/xtensa/kernel/signal.c
 *
 * Default platform functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005, 2006 Tensilica Inc.
 * Copyright (C) 1991, 1992 Linus Torvalds
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 */

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/freezer.h>

#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/coprocessor.h>
#include <asm/unistd.h>

#define DEBUG_SIG 0

/* Signals that may never be blocked by a signal mask. */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);

extern struct task_struct *coproc_owners[];

/*
 * Layout of the signal frame pushed onto the user stack.  'retcode' holds
 * the generated sigreturn trampoline (see gen_return_code()); 'xtregs'
 * holds the extended register state referenced from uc_mcontext.
 */
struct rt_sigframe
{
	struct siginfo info;
	struct ucontext uc;
	struct {
		xtregs_opt_t opt;
		xtregs_user_t user;
#if XTENSA_HAVE_COPROCESSORS
		xtregs_coprocessor_t cp;
#endif
	} xtregs;
	unsigned char retcode[6];
	unsigned int window[4];
};

/*
 * Flush register windows stored in pt_regs to stack.
 * Returns 1 for errors.
 *
 * Walks the rotated window mask and spills each live call frame's
 * registers to the user stack locations defined by the Xtensa windowed
 * calling convention (a0..a3 below the next frame's SP; call8/call12
 * extras below the previous SP).
 */
int
flush_window_regs_user(struct pt_regs *regs)
{
	const unsigned long ws = regs->windowstart;
	const unsigned long wb = regs->windowbase;
	unsigned long sp = 0;
	unsigned long wm;
	int err = 1;
	int base;

	/* Return if no other frames. */

	if (regs->wmask == 1)
		return 0;

	/* Rotate windowmask and skip empty frames. */

	wm = (ws >> wb) | (ws << (XCHAL_NUM_AREGS / 4 - wb));
	base = (XCHAL_NUM_AREGS / 4) - (regs->wmask >> 4);

	/* For call8 or call12 frames, we need the previous stack pointer. */

	if ((regs->wmask & 2) == 0)
		if (__get_user(sp, (int*)(regs->areg[base * 4 + 1] - 12)))
			goto errout;

	/* Spill frames to stack. */

	while (base < XCHAL_NUM_AREGS / 4) {
		int m = (wm >> base);
		int inc = 0;

		/* Save registers a4..a7 (call8) or a4...a11 (call12) */

		if (m & 2) {			/* call4 */
			inc = 1;

		} else if (m & 4) {		/* call8 */
			if (copy_to_user((void*)(sp - 32),
					 &regs->areg[(base + 1) * 4], 16))
				goto errout;
			inc = 2;

		} else if (m & 8) {	/* call12 */
			if (copy_to_user((void*)(sp - 48),
					 &regs->areg[(base + 1) * 4], 32))
				goto errout;
			inc = 3;
		}

		/* Save current frame a0..a3 under next SP */

		sp = regs->areg[((base + inc) * 4 + 1) % XCHAL_NUM_AREGS];
		if (copy_to_user((void*)(sp - 16), &regs->areg[base * 4], 16))
			goto errout;

		/* Get current stack pointer for next loop iteration. */

		sp = regs->areg[base * 4 + 1];
		base += inc;
	}

	/* All frames are now on the stack: only the current window remains. */
	regs->wmask = 1;
	regs->windowstart = 1 << wb;

	return 0;

errout:
	return err;
}

/*
 * Note: We don't copy double exception 'regs', we have to finish double exc.
 * first before we return to signal handler! This dbl.exc.handler might cause
 * another double exception, but I think we are fine as the situation is the
 * same as if we had returned to the signal handler and got an interrupt
 * immediately...
 *
 * Saves the CPU state (special registers, a0..a15, and extended/coprocessor
 * register sets) into the user-space signal frame.  Returns non-zero on any
 * failed user-space access.
 */
static int
setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
{
	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
	struct thread_info *ti = current_thread_info();
	int err = 0;

#define COPY(x)	err |= __put_user(regs->x, &sc->sc_##x)
	COPY(pc);
	COPY(ps);
	COPY(lbeg);
	COPY(lend);
	COPY(lcount);
	COPY(sar);
#undef COPY

	err |= flush_window_regs_user(regs);
	err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);

	/* sc_xtregs stays NULL unless the extended state below was written
	 * successfully; restore_sigcontext relies on frame->xtregs instead. */
	err |= __put_user(0, &sc->sc_xtregs);

	if (err)
		return err;

#if XTENSA_HAVE_COPROCESSORS
	coprocessor_flush_all(ti);
	coprocessor_release_all(ti);
	err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
			      sizeof (frame->xtregs.cp));
#endif
	err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
			      sizeof (xtregs_opt_t));
	err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
			      sizeof (xtregs_user_t));

	err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);

	return err;
}

/*
 * Restores CPU and extended register state from the signal frame built by
 * setup_sigcontext().  Returns non-zero on a failed user access or if the
 * restored loop registers fail the sanity checks below.
 */
static int
restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
{
	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
	struct thread_info *ti = current_thread_info();
	unsigned int err = 0;
	unsigned long ps;

#define COPY(x)	err |= __get_user(regs->x, &sc->sc_##x)
	COPY(pc);
	COPY(lbeg);
	COPY(lend);
	COPY(lcount);
	COPY(sar);
#undef COPY

	/* All registers were flushed to stack. Start with a pristine frame. */

	regs->wmask = 1;
	regs->windowbase = 0;
	regs->windowstart = 1;

	regs->syscall = -1;		/* disable syscall checks */

	/* For PS, restore only PS.CALLINC.
	 * Assume that all other bits are either the same as for the signal
	 * handler, or the user mode value doesn't matter (e.g. PS.OWB).
	 */
	err |= __get_user(ps, &sc->sc_ps);
	regs->ps = (regs->ps & ~PS_CALLINC_MASK) | (ps & PS_CALLINC_MASK);

	/* Additional corruption checks */

	if ((regs->lcount > 0)
	    && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
		err = 1;

	err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);

	if (err)
		return err;

	/* The signal handler may have used coprocessors in which
	 * case they are still enabled. We disable them to force a
	 * reloading of the original task's CP state by the lazy
	 * context-switching mechanisms of CP exception handling.
	 * Also, we essentially discard any coprocessor state that the
	 * signal handler created. */

#if XTENSA_HAVE_COPROCESSORS
	coprocessor_release_all(ti);
	err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
				sizeof (frame->xtregs.cp));
#endif
	err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
				sizeof (xtregs_user_t));
	err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
				sizeof (xtregs_opt_t));

	return err;
}


/*
 * Do a signal return; undo the signal stack.
 *
 * The frame pointer is recovered from the user stack pointer (a1); any
 * inconsistency in the frame delivers SIGSEGV instead of returning.
 */
asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
				    long a4, long a5, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	int ret;

	if (regs->depc > 64)
		panic("rt_sigreturn in double exception!\n");

	frame = (struct rt_sigframe __user *) regs->areg[1];

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	set_current_blocked(&set);

	if (restore_sigcontext(regs, frame))
		goto badframe;

	/* Return value is whatever a2 was restored to, i.e. the value the
	 * interrupted context expects in the return register. */
	ret = regs->areg[2];

	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->areg[1]) == -EFAULT)
		goto badframe;

	return ret;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}



/*
 * Set up a signal frame.
 */

/*
 * Writes a 6-byte "MOVI a2, __NR_rt_sigreturn; SYSCALL" trampoline into the
 * user-space buffer 'codemem' and makes it visible to the instruction fetch
 * path by flushing/invalidating the relevant cache lines.
 */
static int
gen_return_code(unsigned char *codemem)
{
	int err = 0;

	/*
	 * The 12-bit immediate is really split up within the 24-bit MOVI
	 * instruction.  As long as the above system call numbers fit within
	 * 8-bits, the following code works fine. See the Xtensa ISA for
	 * details.
	 */

#if __NR_rt_sigreturn > 255
# error Generating the MOVI instruction below breaks!
#endif

#ifdef __XTENSA_EB__   /* Big Endian version */
	/* Generate instruction:  MOVI a2, __NR_rt_sigreturn */
	err |= __put_user(0x22, &codemem[0]);
	err |= __put_user(0x0a, &codemem[1]);
	err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
	/* Generate instruction:  SYSCALL */
	err |= __put_user(0x00, &codemem[3]);
	err |= __put_user(0x05, &codemem[4]);
	err |= __put_user(0x00, &codemem[5]);

#elif defined __XTENSA_EL__   /* Little Endian version */
	/* Generate instruction:  MOVI a2, __NR_rt_sigreturn */
	err |= __put_user(0x22, &codemem[0]);
	err |= __put_user(0xa0, &codemem[1]);
	err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
	/* Generate instruction:  SYSCALL */
	err |= __put_user(0x00, &codemem[3]);
	err |= __put_user(0x50, &codemem[4]);
	err |= __put_user(0x00, &codemem[5]);
#else
# error Must use compiler for Xtensa processors.
#endif

	/* Flush generated code out of the data cache */

	if (err == 0) {
		__invalidate_icache_range((unsigned long)codemem, 6UL);
		__flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
	}

	return err;
}


/*
 * Builds the rt_sigframe on the user stack (honouring SA_ONSTACK) and
 * rewrites 'regs' so that execution resumes in the signal handler with a
 * call4 frame whose return address is either sa_restorer or the generated
 * trampoline.  Returns 0 on success, -EFAULT (after forcing SIGSEGV) on any
 * user-access failure.
 */
static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
		       sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe *frame;
	int err = 0;
	int signal;
	unsigned long sp, ra;

	sp = regs->areg[1];

	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
		sp = current->sas_ss_sp + current->sas_ss_size;
	}

	/* 16-byte align the frame below the chosen stack pointer. */
	frame = (void *)((sp - sizeof(*frame)) & -16ul);

	if (regs->depc > 64)
		panic ("Double exception sys_sigreturn\n");

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
		goto give_sigsegv;
	}

	/* Translate the signal number through the exec domain, if any. */
	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	if (ka->sa.sa_flags & SA_SIGINFO) {
		err |= copy_siginfo_to_user(&frame->info, info);
	}

	/* Create the user context.  */

	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user((void *)current->sas_ss_sp,
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->areg[1]),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(frame, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (ka->sa.sa_flags & SA_RESTORER) {
		ra = (unsigned long)ka->sa.sa_restorer;
	} else {

		/* Create sys_rt_sigreturn syscall in stack frame */

		err |= gen_return_code(frame->retcode);

		if (err) {
			goto give_sigsegv;
		}
		ra = (unsigned long) frame->retcode;
	}

	/*
	 * Create signal handler execution context.
	 * Return context not modified until this point.
	 */

	/* Set up registers for signal handler */
	start_thread(regs, (unsigned long) ka->sa.sa_handler,
		     (unsigned long) frame);

	/* Set up a stack frame for a call4
	 * Note: PS.CALLINC is set to one by start_thread
	 */
	regs->areg[4] = (((unsigned long) ra) & 0x3fffffff) | 0x40000000;
	regs->areg[6] = (unsigned long) signal;
	regs->areg[7] = (unsigned long) &frame->info;
	regs->areg[8] = (unsigned long) &frame->uc;

	/* Set access mode to USER_DS.  Nomenclature is outdated, but
	 * functionality is used in uaccess.h
	 */
	set_fs(USER_DS);

#if DEBUG_SIG
	printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
		current->comm, current->pid, signal, frame, regs->pc);
#endif

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage long xtensa_rt_sigsuspend(sigset_t __user *unewset,
				     size_t sigsetsize,
				     long a2, long a3, long a4, long a5,
				     struct pt_regs *regs)
{
	sigset_t saveset, newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;

	sigdelsetmask(&newset, ~_BLOCKABLE);
	saveset = current->blocked;
	set_current_blocked(&newset);

	regs->areg[2] = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(regs, &saveset))
			return -EINTR;
	}
}

asmlinkage long xtensa_sigaltstack(const stack_t __user *uss,
				   stack_t __user *uoss,
				   long a2, long a3, long a4, long a5,
				   struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->areg[1]);
}



/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
	siginfo_t info;
	int signr;
	struct k_sigaction ka;

	if (!user_mode(regs))
		return 0;

	if (try_to_freeze())
		goto no_signal;

	if (!oldset)
		oldset = &current->blocked;

	task_pt_regs(current)->icountlevel = 0;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	if (signr > 0) {
		int ret;

		/* Are we from a system call? */

		if ((signed)regs->syscall >= 0) {

			/* If so, check system call restarting.. */

			switch (regs->areg[2]) {
				case -ERESTARTNOHAND:
				case -ERESTART_RESTARTBLOCK:
					regs->areg[2] = -EINTR;
					break;

				case -ERESTARTSYS:
					if (!(ka.sa.sa_flags & SA_RESTART)) {
						regs->areg[2] = -EINTR;
						break;
					}
					/* fallthrough */
				case -ERESTARTNOINTR:
					/* Rewind PC by one 3-byte SYSCALL
					 * instruction to re-issue the call. */
					regs->areg[2] = regs->syscall;
					regs->pc -= 3;
					break;

				default:
					/* nothing to do */
					if (regs->areg[2] != 0)
					break;
			}
		}

		/* Whee!  Actually deliver the signal.  */
		/* Set up the stack frame */
		ret = setup_frame(signr, &ka, &info, oldset, regs);
		if (ret)
			return ret;

		block_sigmask(&ka, signr);
		if (current->ptrace & PT_SINGLESTEP)
			task_pt_regs(current)->icountlevel = 1;

		return 1;
	}

no_signal:
	/* Did we come from a system call? */
	if ((signed) regs->syscall >= 0) {
		/* Restart the system call - no handlers present */
		switch (regs->areg[2]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->areg[2] = regs->syscall;
			regs->pc -= 3;
			break;
		case -ERESTART_RESTARTBLOCK:
			regs->areg[2] = __NR_restart_syscall;
			regs->pc -= 3;
			break;
		}
	}
	if (current->ptrace & PT_SINGLESTEP)
		task_pt_regs(current)->icountlevel = 1;
	return 0;
}
gpl-2.0
HarveyHunt/CI20_linux
drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
940
3602
/* * Copyright 2012 The Nouveau community * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Martin Peres */ #include "priv.h" #include <core/object.h> #include <core/device.h> #include <subdev/gpio.h> #include <subdev/timer.h> struct nouveau_fantog_priv { struct nouveau_fan base; struct nouveau_alarm alarm; spinlock_t lock; u32 period_us; u32 percent; struct dcb_gpio_func func; }; static void nouveau_fantog_update(struct nouveau_fantog_priv *priv, int percent) { struct nouveau_therm_priv *tpriv = (void *)priv->base.parent; struct nouveau_timer *ptimer = nouveau_timer(tpriv); struct nouveau_gpio *gpio = nouveau_gpio(tpriv); unsigned long flags; int duty; spin_lock_irqsave(&priv->lock, flags); if (percent < 0) percent = priv->percent; priv->percent = percent; duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff); gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty); if (list_empty(&priv->alarm.head) && percent != (duty * 100)) { u64 next_change = (percent * priv->period_us) / 100; if (!duty) next_change = priv->period_us - next_change; ptimer->alarm(ptimer, next_change * 1000, &priv->alarm); } spin_unlock_irqrestore(&priv->lock, flags); } static void nouveau_fantog_alarm(struct nouveau_alarm *alarm) { struct nouveau_fantog_priv *priv = container_of(alarm, struct nouveau_fantog_priv, alarm); nouveau_fantog_update(priv, -1); } static int nouveau_fantog_get(struct nouveau_therm *therm) { struct nouveau_therm_priv *tpriv = (void *)therm; struct nouveau_fantog_priv *priv = (void *)tpriv->fan; return priv->percent; } static int nouveau_fantog_set(struct nouveau_therm *therm, int percent) { struct nouveau_therm_priv *tpriv = (void *)therm; struct nouveau_fantog_priv *priv = (void *)tpriv->fan; if (therm->pwm_ctrl) therm->pwm_ctrl(therm, priv->func.line, false); nouveau_fantog_update(priv, percent); return 0; } int nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func) { struct nouveau_therm_priv *tpriv = (void *)therm; struct nouveau_fantog_priv *priv; int ret; if (therm->pwm_ctrl) { ret = therm->pwm_ctrl(therm, func->line, false); if (ret) 
return ret; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); tpriv->fan = &priv->base; if (!priv) return -ENOMEM; priv->base.type = "toggle"; priv->base.get = nouveau_fantog_get; priv->base.set = nouveau_fantog_set; nouveau_alarm_init(&priv->alarm, nouveau_fantog_alarm); priv->period_us = 100000; /* 10Hz */ priv->percent = 100; priv->func = *func; spin_lock_init(&priv->lock); return 0; }
gpl-2.0
NooNameR/Sense4.0-kernel
kernel/user-return-notifier.c
1196
1352
#include <linux/user-return-notifier.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/module.h>

/* Per-CPU list of notifiers to run when this CPU next returns to user mode. */
static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);

/*
 * Arm a user-return notification for the current CPU.  Caller must be in
 * atomic context; the notifier callback also runs in atomic context.  The
 * TIF flag tells the return-to-user path that the list is non-empty.
 */
void user_return_notifier_register(struct user_return_notifier *urn)
{
	set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
	hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
}
EXPORT_SYMBOL_GPL(user_return_notifier_register);

/*
 * Disarm a previously registered notifier.  Must run in atomic context on
 * the same CPU where registration occurred; the TIF flag is cleared once
 * the per-CPU list drains to empty.
 */
void user_return_notifier_unregister(struct user_return_notifier *urn)
{
	hlist_del(&urn->link);
	if (hlist_empty(&__get_cpu_var(return_notifier_list)))
		clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
}
EXPORT_SYMBOL_GPL(user_return_notifier_unregister);

/* Invoke every notifier queued on this CPU.  The _safe iterator is used
 * because a callback may unregister (unlink) itself while we walk. */
void fire_user_return_notifiers(void)
{
	struct user_return_notifier *urn;
	struct hlist_node *pos, *next;
	struct hlist_head *list;

	list = &get_cpu_var(return_notifier_list);
	hlist_for_each_entry_safe(urn, pos, next, list, link)
		urn->on_user_return(urn);
	put_cpu_var(return_notifier_list);
}
gpl-2.0
Stane1983/android_kernel_xiaomi_dior_DEPRECATED
drivers/usb/gadget/u_bam.c
1452
38984
/* Copyright (c) 2011-2013, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/termios.h> #include <mach/msm_smd.h> #include <linux/netdevice.h> #include <mach/bam_dmux.h> #include <linux/debugfs.h> #include <linux/bitops.h> #include <linux/termios.h> #include <mach/usb_gadget_xport.h> #include <linux/usb/msm_hsusb.h> #include <mach/usb_bam.h> #include "u_rmnet.h" #define BAM_N_PORTS 1 #define BAM2BAM_N_PORTS 3 static struct workqueue_struct *gbam_wq; static int n_bam_ports; static int n_bam2bam_ports; static unsigned n_tx_req_queued; static unsigned bam_ch_ids[] = { 8 }; static const char *bam_ch_names[] = { "bam_dmux_ch_8" }; #define BAM_PENDING_LIMIT 220 #define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000 #define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500 #define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300 #define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1 #define BAM_MUX_HDR 8 #define BAM_MUX_RX_Q_SIZE 16 #define BAM_MUX_TX_Q_SIZE 200 #define BAM_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */ #define DL_INTR_THRESHOLD 20 static unsigned int bam_pending_limit = BAM_PENDING_LIMIT; module_param(bam_pending_limit, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD; module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD; module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR); 
static unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT; module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD; module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE; module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE; module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE; module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR); static unsigned int dl_intr_threshold = DL_INTR_THRESHOLD; module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR); #define BAM_CH_OPENED BIT(0) #define BAM_CH_READY BIT(1) struct bam_ch_info { unsigned long flags; unsigned id; struct list_head tx_idle; struct sk_buff_head tx_skb_q; struct list_head rx_idle; struct sk_buff_head rx_skb_q; struct gbam_port *port; struct work_struct write_tobam_w; struct work_struct write_tohost_w; struct usb_request *rx_req; struct usb_request *tx_req; u32 src_pipe_idx; u32 dst_pipe_idx; u8 src_connection_idx; u8 dst_connection_idx; enum transport_type trans; struct usb_bam_connect_ipa_params ipa_params; /* stats */ unsigned int pending_with_bam; unsigned int tohost_drp_cnt; unsigned int tomodem_drp_cnt; unsigned int tx_len; unsigned int rx_len; unsigned long to_modem; unsigned long to_host; unsigned int rx_flow_control_disable; unsigned int rx_flow_control_enable; unsigned int rx_flow_control_triggered; unsigned int max_num_pkts_pending_with_bam; }; struct gbam_port { unsigned port_num; spinlock_t port_lock_ul; spinlock_t port_lock_dl; struct grmnet *port_usb; struct grmnet *gr; struct bam_ch_info data_ch; struct work_struct connect_w; struct work_struct disconnect_w; struct work_struct suspend_w; struct work_struct resume_w; }; static struct bam_portmaster { struct gbam_port *port; struct 
platform_driver pdrv; } bam_ports[BAM_N_PORTS]; struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS]; static void gbam_start_rx(struct gbam_port *port); static void gbam_start_endless_rx(struct gbam_port *port); static void gbam_start_endless_tx(struct gbam_port *port); static int gbam_peer_reset_cb(void *param); /*---------------misc functions---------------- */ static void gbam_free_requests(struct usb_ep *ep, struct list_head *head) { struct usb_request *req; while (!list_empty(head)) { req = list_entry(head->next, struct usb_request, list); list_del(&req->list); usb_ep_free_request(ep, req); } } static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head, int num, void (*cb)(struct usb_ep *ep, struct usb_request *), gfp_t flags) { int i; struct usb_request *req; pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__, ep, head, num, cb); for (i = 0; i < num; i++) { req = usb_ep_alloc_request(ep, flags); if (!req) { pr_debug("%s: req allocated:%d\n", __func__, i); return list_empty(head) ? 
-ENOMEM : 0; } req->complete = cb; list_add(&req->list, head); } return 0; } /*--------------------------------------------- */ /*------------data_path----------------------------*/ static void gbam_write_data_tohost(struct gbam_port *port) { unsigned long flags; struct bam_ch_info *d = &port->data_ch; struct sk_buff *skb; int ret; struct usb_request *req; struct usb_ep *ep; spin_lock_irqsave(&port->port_lock_dl, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } ep = port->port_usb->in; while (!list_empty(&d->tx_idle)) { skb = __skb_dequeue(&d->tx_skb_q); if (!skb) { spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } req = list_first_entry(&d->tx_idle, struct usb_request, list); req->context = skb; req->buf = skb->data; req->length = skb->len; n_tx_req_queued++; if (n_tx_req_queued == dl_intr_threshold) { req->no_interrupt = 0; n_tx_req_queued = 0; } else { req->no_interrupt = 1; } /* Send ZLP in case packet length is multiple of maxpacksize */ req->zero = 1; list_del(&req->list); spin_unlock(&port->port_lock_dl); ret = usb_ep_queue(ep, req, GFP_ATOMIC); spin_lock(&port->port_lock_dl); if (ret) { pr_err("%s: usb epIn failed with %d\n", __func__, ret); list_add(&req->list, &d->tx_idle); dev_kfree_skb_any(skb); break; } d->to_host++; } spin_unlock_irqrestore(&port->port_lock_dl, flags); } static void gbam_write_data_tohost_w(struct work_struct *w) { struct bam_ch_info *d; struct gbam_port *port; d = container_of(w, struct bam_ch_info, write_tohost_w); port = d->port; gbam_write_data_tohost(port); } void gbam_data_recv_cb(void *p, struct sk_buff *skb) { struct gbam_port *port = p; struct bam_ch_info *d = &port->data_ch; unsigned long flags; if (!skb) return; pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__, port, port->port_num, d, skb->len); spin_lock_irqsave(&port->port_lock_dl, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock_dl, flags); dev_kfree_skb_any(skb); return; } if 
(d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) { d->tohost_drp_cnt++; if (printk_ratelimit()) pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n", __func__, d->tohost_drp_cnt); spin_unlock_irqrestore(&port->port_lock_dl, flags); dev_kfree_skb_any(skb); return; } __skb_queue_tail(&d->tx_skb_q, skb); spin_unlock_irqrestore(&port->port_lock_dl, flags); gbam_write_data_tohost(port); } void gbam_data_write_done(void *p, struct sk_buff *skb) { struct gbam_port *port = p; struct bam_ch_info *d = &port->data_ch; unsigned long flags; if (!skb) return; dev_kfree_skb_any(skb); spin_lock_irqsave(&port->port_lock_ul, flags); d->pending_with_bam--; pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__, port, d, d->to_modem, d->pending_with_bam, port->port_num); spin_unlock_irqrestore(&port->port_lock_ul, flags); queue_work(gbam_wq, &d->write_tobam_w); } static void gbam_data_write_tobam(struct work_struct *w) { struct gbam_port *port; struct bam_ch_info *d; struct sk_buff *skb; unsigned long flags; int ret; int qlen; d = container_of(w, struct bam_ch_info, write_tobam_w); port = d->port; spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } while (d->pending_with_bam < bam_pending_limit) { skb = __skb_dequeue(&d->rx_skb_q); if (!skb) break; d->pending_with_bam++; d->to_modem++; pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__, port, d, d->to_modem, d->pending_with_bam, port->port_num); spin_unlock_irqrestore(&port->port_lock_ul, flags); ret = msm_bam_dmux_write(d->id, skb); spin_lock_irqsave(&port->port_lock_ul, flags); if (ret) { pr_debug("%s: write error:%d\n", __func__, ret); d->pending_with_bam--; d->to_modem--; d->tomodem_drp_cnt++; dev_kfree_skb_any(skb); break; } if (d->pending_with_bam > d->max_num_pkts_pending_with_bam) d->max_num_pkts_pending_with_bam = d->pending_with_bam; } qlen = d->rx_skb_q.qlen; spin_unlock_irqrestore(&port->port_lock_ul, flags); if (qlen < 
bam_mux_rx_fctrl_dis_thld) { if (d->rx_flow_control_triggered) { d->rx_flow_control_disable++; d->rx_flow_control_triggered = 0; } gbam_start_rx(port); } } /*-------------------------------------------------------------*/ static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req) { struct gbam_port *port = ep->driver_data; struct bam_ch_info *d; struct sk_buff *skb = req->context; int status = req->status; switch (status) { case 0: /* successful completion */ break; case -ECONNRESET: case -ESHUTDOWN: /* connection gone */ dev_kfree_skb_any(skb); usb_ep_free_request(ep, req); return; default: pr_err("%s: data tx ep error %d\n", __func__, status); break; } dev_kfree_skb_any(skb); if (!port) return; spin_lock(&port->port_lock_dl); d = &port->data_ch; list_add_tail(&req->list, &d->tx_idle); spin_unlock(&port->port_lock_dl); queue_work(gbam_wq, &d->write_tohost_w); } static void gbam_epout_complete(struct usb_ep *ep, struct usb_request *req) { struct gbam_port *port = ep->driver_data; struct bam_ch_info *d = &port->data_ch; struct sk_buff *skb = req->context; int status = req->status; int queue = 0; switch (status) { case 0: skb_put(skb, req->actual); queue = 1; break; case -ECONNRESET: case -ESHUTDOWN: /* cable disconnection */ dev_kfree_skb_any(skb); req->buf = 0; usb_ep_free_request(ep, req); return; default: if (printk_ratelimit()) pr_err("%s: %s response error %d, %d/%d\n", __func__, ep->name, status, req->actual, req->length); dev_kfree_skb_any(skb); break; } spin_lock(&port->port_lock_ul); if (queue) { __skb_queue_tail(&d->rx_skb_q, skb); queue_work(gbam_wq, &d->write_tobam_w); } /* TODO: Handle flow control gracefully by having * having call back mechanism from bam driver */ if (bam_mux_rx_fctrl_support && d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) { if (!d->rx_flow_control_triggered) { d->rx_flow_control_triggered = 1; d->rx_flow_control_enable++; } list_add_tail(&req->list, &d->rx_idle); spin_unlock(&port->port_lock_ul); return; } 
spin_unlock(&port->port_lock_ul); skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC); if (!skb) { spin_lock(&port->port_lock_ul); list_add_tail(&req->list, &d->rx_idle); spin_unlock(&port->port_lock_ul); return; } skb_reserve(skb, BAM_MUX_HDR); req->buf = skb->data; req->length = bam_mux_rx_req_size; req->context = skb; status = usb_ep_queue(ep, req, GFP_ATOMIC); if (status) { dev_kfree_skb_any(skb); if (printk_ratelimit()) pr_err("%s: data rx enqueue err %d\n", __func__, status); spin_lock(&port->port_lock_ul); list_add_tail(&req->list, &d->rx_idle); spin_unlock(&port->port_lock_ul); } } static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req) { int status = req->status; pr_debug("%s status: %d\n", __func__, status); } static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req) { int status = req->status; pr_debug("%s status: %d\n", __func__, status); } static void gbam_start_rx(struct gbam_port *port) { struct usb_request *req; struct bam_ch_info *d; struct usb_ep *ep; unsigned long flags; int ret; struct sk_buff *skb; spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } d = &port->data_ch; ep = port->port_usb->out; while (port->port_usb && !list_empty(&d->rx_idle)) { if (bam_mux_rx_fctrl_support && d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) break; req = list_first_entry(&d->rx_idle, struct usb_request, list); skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC); if (!skb) break; skb_reserve(skb, BAM_MUX_HDR); list_del(&req->list); req->buf = skb->data; req->length = bam_mux_rx_req_size; req->context = skb; spin_unlock_irqrestore(&port->port_lock_ul, flags); ret = usb_ep_queue(ep, req, GFP_ATOMIC); spin_lock_irqsave(&port->port_lock_ul, flags); if (ret) { dev_kfree_skb_any(skb); if (printk_ratelimit()) pr_err("%s: rx queue failed %d\n", __func__, ret); if (port->port_usb) list_add(&req->list, &d->rx_idle); else 
usb_ep_free_request(ep, req); break; } } spin_unlock_irqrestore(&port->port_lock_ul, flags); } static void gbam_start_endless_rx(struct gbam_port *port) { struct bam_ch_info *d = &port->data_ch; int status; spin_lock(&port->port_lock_ul); if (!port->port_usb) { spin_unlock(&port->port_lock_ul); pr_err("%s: port->port_usb is NULL", __func__); return; } pr_debug("%s: enqueue\n", __func__); status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC); if (status) pr_err("%s: error enqueuing transfer, %d\n", __func__, status); spin_unlock(&port->port_lock_ul); } static void gbam_start_endless_tx(struct gbam_port *port) { struct bam_ch_info *d = &port->data_ch; int status; spin_lock(&port->port_lock_dl); if (!port->port_usb) { spin_unlock(&port->port_lock_dl); pr_err("%s: port->port_usb is NULL", __func__); return; } pr_debug("%s: enqueue\n", __func__); status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC); if (status) pr_err("%s: error enqueuing transfer, %d\n", __func__, status); spin_unlock(&port->port_lock_dl); } static void gbam_stop_endless_rx(struct gbam_port *port) { struct bam_ch_info *d = &port->data_ch; int status; spin_lock(&port->port_lock_ul); if (!port->port_usb) { spin_unlock(&port->port_lock_ul); pr_err("%s: port->port_usb is NULL", __func__); return; } pr_debug("%s: dequeue\n", __func__); status = usb_ep_dequeue(port->port_usb->out, d->rx_req); if (status) pr_err("%s: error dequeuing transfer, %d\n", __func__, status); spin_unlock(&port->port_lock_ul); } static void gbam_stop_endless_tx(struct gbam_port *port) { struct bam_ch_info *d = &port->data_ch; int status; spin_lock(&port->port_lock_dl); if (!port->port_usb) { spin_unlock(&port->port_lock_dl); pr_err("%s: port->port_usb is NULL", __func__); return; } pr_debug("%s: dequeue\n", __func__); status = usb_ep_dequeue(port->port_usb->in, d->tx_req); if (status) pr_err("%s: error dequeuing transfer, %d\n", __func__, status); spin_unlock(&port->port_lock_dl); } static void gbam_start(void 
*param, enum usb_bam_pipe_dir dir) { struct gbam_port *port = param; if (dir == USB_TO_PEER_PERIPHERAL) gbam_start_endless_rx(port); else gbam_start_endless_tx(port); } static void gbam_stop(void *param, enum usb_bam_pipe_dir dir) { struct gbam_port *port = param; if (dir == USB_TO_PEER_PERIPHERAL) gbam_stop_endless_rx(port); else gbam_stop_endless_tx(port); } static void gbam_start_io(struct gbam_port *port) { unsigned long flags; struct usb_ep *ep; int ret; struct bam_ch_info *d; pr_debug("%s: port:%p\n", __func__, port); spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } d = &port->data_ch; ep = port->port_usb->out; ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size, gbam_epout_complete, GFP_ATOMIC); if (ret) { pr_err("%s: rx req allocation failed\n", __func__); spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } spin_unlock_irqrestore(&port->port_lock_ul, flags); spin_lock_irqsave(&port->port_lock_dl, flags); if (!port->port_usb) { gbam_free_requests(ep, &d->rx_idle); spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } ep = port->port_usb->in; ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size, gbam_epin_complete, GFP_ATOMIC); if (ret) { pr_err("%s: tx req allocation failed\n", __func__); gbam_free_requests(ep, &d->rx_idle); spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } spin_unlock_irqrestore(&port->port_lock_dl, flags); /* queue out requests */ gbam_start_rx(port); } static void gbam_notify(void *p, int event, unsigned long data) { switch (event) { case BAM_DMUX_RECEIVE: gbam_data_recv_cb(p, (struct sk_buff *)(data)); break; case BAM_DMUX_WRITE_DONE: gbam_data_write_done(p, (struct sk_buff *)(data)); break; } } static void gbam_free_buffers(struct gbam_port *port) { struct sk_buff *skb; unsigned long flags; struct bam_ch_info *d; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); if (!port || 
!port->port_usb) goto free_buf_out; d = &port->data_ch; gbam_free_requests(port->port_usb->in, &d->tx_idle); gbam_free_requests(port->port_usb->out, &d->rx_idle); while ((skb = __skb_dequeue(&d->tx_skb_q))) dev_kfree_skb_any(skb); while ((skb = __skb_dequeue(&d->rx_skb_q))) dev_kfree_skb_any(skb); free_buf_out: spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); } static void gbam_disconnect_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, disconnect_w); struct bam_ch_info *d = &port->data_ch; if (!test_bit(BAM_CH_OPENED, &d->flags)) return; msm_bam_dmux_close(d->id); clear_bit(BAM_CH_OPENED, &d->flags); } static void gbam2bam_disconnect_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, disconnect_w); struct bam_ch_info *d = &port->data_ch; int ret; if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) { ret = usb_bam_disconnect_ipa(&d->ipa_params); if (ret) pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n", __func__, ret); teth_bridge_disconnect(); } } static void gbam_connect_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, connect_w); struct bam_ch_info *d = &port->data_ch; int ret; unsigned long flags; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); if (!port->port_usb) { spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); if (!test_bit(BAM_CH_READY, &d->flags)) return; ret = msm_bam_dmux_open(d->id, port, gbam_notify); if (ret) { pr_err("%s: unable open bam ch:%d err:%d\n", __func__, d->id, ret); return; } set_bit(BAM_CH_OPENED, &d->flags); gbam_start_io(port); pr_debug("%s: done\n", __func__); } static void gbam2bam_connect_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, connect_w); struct teth_bridge_connect_params 
connect_params; struct bam_ch_info *d = &port->data_ch; u32 sps_params; ipa_notify_cb usb_notify_cb; void *priv; int ret; unsigned long flags; if (d->trans == USB_GADGET_XPORT_BAM2BAM) { usb_bam_reset_complete(); ret = usb_bam_connect(d->src_connection_idx, &d->src_pipe_idx); if (ret) { pr_err("%s: usb_bam_connect (src) failed: err:%d\n", __func__, ret); return; } ret = usb_bam_connect(d->dst_connection_idx, &d->dst_pipe_idx); if (ret) { pr_err("%s: usb_bam_connect (dst) failed: err:%d\n", __func__, ret); return; } } else if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) { ret = teth_bridge_init(&usb_notify_cb, &priv); if (ret) { pr_err("%s:teth_bridge_init() failed\n", __func__); return; } d->ipa_params.notify = usb_notify_cb; d->ipa_params.priv = priv; d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC; d->ipa_params.client = IPA_CLIENT_USB_PROD; d->ipa_params.dir = USB_TO_PEER_PERIPHERAL; ret = usb_bam_connect_ipa(&d->ipa_params); if (ret) { pr_err("%s: usb_bam_connect_ipa failed: err:%d\n", __func__, ret); return; } d->ipa_params.client = IPA_CLIENT_USB_CONS; d->ipa_params.dir = PEER_PERIPHERAL_TO_USB; ret = usb_bam_connect_ipa(&d->ipa_params); if (ret) { pr_err("%s: usb_bam_connect_ipa failed: err:%d\n", __func__, ret); return; } connect_params.ipa_usb_pipe_hdl = d->ipa_params.prod_clnt_hdl; connect_params.usb_ipa_pipe_hdl = d->ipa_params.cons_clnt_hdl; connect_params.tethering_mode = TETH_TETHERING_MODE_RMNET; ret = teth_bridge_connect(&connect_params); if (ret) { pr_err("%s:teth_bridge_connect() failed\n", __func__); return; } } spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); if (!port->port_usb) { pr_debug("%s: usb cable is disconnected, exiting\n", __func__); spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_ATOMIC); if (!d->rx_req) { spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); pr_err("%s: 
out of memory\n", __func__); return; } d->rx_req->context = port; d->rx_req->complete = gbam_endless_rx_complete; d->rx_req->length = 0; d->rx_req->no_interrupt = 1; sps_params = (MSM_SPS_MODE | d->src_pipe_idx | MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER; d->rx_req->udc_priv = sps_params; d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_ATOMIC); spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); if (!d->tx_req) { pr_err("%s: out of memory\n", __func__); return; } d->tx_req->context = port; d->tx_req->complete = gbam_endless_tx_complete; d->tx_req->length = 0; d->tx_req->no_interrupt = 1; sps_params = (MSM_SPS_MODE | d->dst_pipe_idx | MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER; d->tx_req->udc_priv = sps_params; /* queue in & out requests */ gbam_start_endless_rx(port); gbam_start_endless_tx(port); if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0) { /* Register for peer reset callback */ usb_bam_register_peer_reset_cb(gbam_peer_reset_cb, port); ret = usb_bam_client_ready(true); if (ret) { pr_err("%s: usb_bam_client_ready failed: err:%d\n", __func__, ret); return; } } pr_debug("%s: done\n", __func__); } static int gbam_wake_cb(void *param) { struct gbam_port *port = (struct gbam_port *)param; struct bam_ch_info *d; struct f_rmnet *dev; dev = port_to_rmnet(port->gr); d = &port->data_ch; pr_debug("%s: woken up by peer\n", __func__); return usb_gadget_wakeup(dev->cdev->gadget); } static void gbam2bam_suspend_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, suspend_w); struct bam_ch_info *d = &port->data_ch; pr_debug("%s: suspend work started\n", __func__); usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port); if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) { usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port); usb_bam_suspend(&d->ipa_params); } } static void gbam2bam_resume_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct 
gbam_port, resume_w); struct bam_ch_info *d = &port->data_ch; pr_debug("%s: resume work started\n", __func__); usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL); if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) usb_bam_resume(&d->ipa_params); } static int gbam_peer_reset_cb(void *param) { struct gbam_port *port = (struct gbam_port *)param; struct bam_ch_info *d; struct f_rmnet *dev; struct usb_gadget *gadget; int ret; bool reenable_eps = false; dev = port_to_rmnet(port->gr); d = &port->data_ch; gadget = dev->cdev->gadget; pr_debug("%s: reset by peer\n", __func__); /* Disable the relevant EPs if currently EPs are enabled */ if (port->port_usb && port->port_usb->in && port->port_usb->in->driver_data) { usb_ep_disable(port->port_usb->out); usb_ep_disable(port->port_usb->in); port->port_usb->in->driver_data = NULL; port->port_usb->out->driver_data = NULL; reenable_eps = true; } /* Disable BAM */ msm_hw_bam_disable(1); /* Reset BAM */ ret = usb_bam_a2_reset(0); if (ret) { pr_err("%s: BAM reset failed %d\n", __func__, ret); goto reenable_eps; } /* Enable BAM */ msm_hw_bam_disable(0); reenable_eps: /* Re-Enable the relevant EPs, if EPs were originally enabled */ if (reenable_eps) { ret = usb_ep_enable(port->port_usb->in); if (ret) { pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", __func__, port->port_usb->in); return ret; } port->port_usb->in->driver_data = port; ret = usb_ep_enable(port->port_usb->out); if (ret) { pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", __func__, port->port_usb->out); port->port_usb->in->driver_data = 0; return ret; } port->port_usb->out->driver_data = port; gbam_start_endless_rx(port); gbam_start_endless_tx(port); } /* Unregister the peer reset callback */ if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0) usb_bam_register_peer_reset_cb(NULL, NULL); return 0; } /* BAM data channel ready, allow attempt to open */ static int gbam_data_ch_probe(struct platform_device *pdev) { struct gbam_port *port; struct bam_ch_info 
*d; int i; unsigned long flags; pr_debug("%s: name:%s\n", __func__, pdev->name); for (i = 0; i < n_bam_ports; i++) { port = bam_ports[i].port; d = &port->data_ch; if (!strncmp(bam_ch_names[i], pdev->name, BAM_DMUX_CH_NAME_MAX_LEN)) { set_bit(BAM_CH_READY, &d->flags); /* if usb is online, try opening bam_ch */ spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); if (port->port_usb) queue_work(gbam_wq, &port->connect_w); spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); break; } } return 0; } /* BAM data channel went inactive, so close it */ static int gbam_data_ch_remove(struct platform_device *pdev) { struct gbam_port *port; struct bam_ch_info *d; struct usb_ep *ep_in = NULL; struct usb_ep *ep_out = NULL; unsigned long flags; int i; pr_debug("%s: name:%s\n", __func__, pdev->name); for (i = 0; i < n_bam_ports; i++) { if (!strncmp(bam_ch_names[i], pdev->name, BAM_DMUX_CH_NAME_MAX_LEN)) { port = bam_ports[i].port; d = &port->data_ch; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); if (port->port_usb) { ep_in = port->port_usb->in; ep_out = port->port_usb->out; } spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); if (ep_in) usb_ep_fifo_flush(ep_in); if (ep_out) usb_ep_fifo_flush(ep_out); gbam_free_buffers(port); msm_bam_dmux_close(d->id); /* bam dmux will free all pending skbs */ d->pending_with_bam = 0; clear_bit(BAM_CH_READY, &d->flags); clear_bit(BAM_CH_OPENED, &d->flags); } } return 0; } static void gbam_port_free(int portno) { struct gbam_port *port = bam_ports[portno].port; struct platform_driver *pdrv = &bam_ports[portno].pdrv; if (port) { kfree(port); platform_driver_unregister(pdrv); } } static void gbam2bam_port_free(int portno) { struct gbam_port *port = bam2bam_ports[portno]; kfree(port); } static int gbam_port_alloc(int portno) { struct gbam_port *port; struct bam_ch_info *d; struct platform_driver *pdrv; port = 
kzalloc(sizeof(struct gbam_port), GFP_KERNEL); if (!port) return -ENOMEM; port->port_num = portno; /* port initialization */ spin_lock_init(&port->port_lock_ul); spin_lock_init(&port->port_lock_dl); INIT_WORK(&port->connect_w, gbam_connect_work); INIT_WORK(&port->disconnect_w, gbam_disconnect_work); /* data ch */ d = &port->data_ch; d->port = port; INIT_LIST_HEAD(&d->tx_idle); INIT_LIST_HEAD(&d->rx_idle); INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam); INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w); skb_queue_head_init(&d->tx_skb_q); skb_queue_head_init(&d->rx_skb_q); d->id = bam_ch_ids[portno]; bam_ports[portno].port = port; pdrv = &bam_ports[portno].pdrv; pdrv->probe = gbam_data_ch_probe; pdrv->remove = gbam_data_ch_remove; pdrv->driver.name = bam_ch_names[portno]; pdrv->driver.owner = THIS_MODULE; platform_driver_register(pdrv); pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); return 0; } static int gbam2bam_port_alloc(int portno) { struct gbam_port *port; struct bam_ch_info *d; port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL); if (!port) return -ENOMEM; port->port_num = portno; /* port initialization */ spin_lock_init(&port->port_lock_ul); spin_lock_init(&port->port_lock_dl); INIT_WORK(&port->connect_w, gbam2bam_connect_work); INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work); INIT_WORK(&port->suspend_w, gbam2bam_suspend_work); INIT_WORK(&port->resume_w, gbam2bam_resume_work); /* data ch */ d = &port->data_ch; d->port = port; bam2bam_ports[portno] = port; pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); return 0; } #if defined(CONFIG_DEBUG_FS) #define DEBUG_BUF_SIZE 1024 static ssize_t gbam_read_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct gbam_port *port; struct bam_ch_info *d; char *buf; unsigned long flags; int ret; int i; int temp = 0; buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; for (i = 0; i < n_bam_ports; i++) { port = 
bam_ports[i].port; if (!port) continue; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); d = &port->data_ch; temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, "#PORT:%d port:%p data_ch:%p#\n" "dpkts_to_usbhost: %lu\n" "dpkts_to_modem: %lu\n" "dpkts_pwith_bam: %u\n" "to_usbhost_dcnt: %u\n" "tomodem__dcnt: %u\n" "rx_flow_control_disable_count: %u\n" "rx_flow_control_enable_count: %u\n" "rx_flow_control_triggered: %u\n" "max_num_pkts_pending_with_bam: %u\n" "tx_buf_len: %u\n" "rx_buf_len: %u\n" "data_ch_open: %d\n" "data_ch_ready: %d\n", i, port, &port->data_ch, d->to_host, d->to_modem, d->pending_with_bam, d->tohost_drp_cnt, d->tomodem_drp_cnt, d->rx_flow_control_disable, d->rx_flow_control_enable, d->rx_flow_control_triggered, d->max_num_pkts_pending_with_bam, d->tx_skb_q.qlen, d->rx_skb_q.qlen, test_bit(BAM_CH_OPENED, &d->flags), test_bit(BAM_CH_READY, &d->flags)); spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); } ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); kfree(buf); return ret; } static ssize_t gbam_reset_stats(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct gbam_port *port; struct bam_ch_info *d; int i; unsigned long flags; for (i = 0; i < n_bam_ports; i++) { port = bam_ports[i].port; if (!port) continue; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); d = &port->data_ch; d->to_host = 0; d->to_modem = 0; d->pending_with_bam = 0; d->tohost_drp_cnt = 0; d->tomodem_drp_cnt = 0; d->rx_flow_control_disable = 0; d->rx_flow_control_enable = 0; d->rx_flow_control_triggered = 0; d->max_num_pkts_pending_with_bam = 0; spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); } return count; } const struct file_operations gbam_stats_ops = { .read = gbam_read_stats, .write = gbam_reset_stats, }; struct dentry *gbam_dent; static void gbam_debugfs_init(void) { struct dentry *dfile; gbam_dent = 
debugfs_create_dir("usb_rmnet", 0); if (!gbam_dent || IS_ERR(gbam_dent)) return; dfile = debugfs_create_file("status", 0444, gbam_dent, 0, &gbam_stats_ops); if (!dfile || IS_ERR(dfile)) { debugfs_remove(gbam_dent); gbam_dent = NULL; return; } } static void gbam_debugfs_remove(void) { debugfs_remove_recursive(gbam_dent); } #else static inline void gbam_debugfs_init(void) {} static inline void gbam_debugfs_remove(void) {} #endif void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans) { struct gbam_port *port; unsigned long flags; struct bam_ch_info *d; pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num); if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) { pr_err("%s: invalid bam portno#%d\n", __func__, port_num); return; } if ((trans == USB_GADGET_XPORT_BAM2BAM || trans == USB_GADGET_XPORT_BAM2BAM_IPA) && port_num >= n_bam2bam_ports) { pr_err("%s: invalid bam2bam portno#%d\n", __func__, port_num); return; } if (!gr) { pr_err("%s: grmnet port is null\n", __func__); return; } if (trans == USB_GADGET_XPORT_BAM) port = bam_ports[port_num].port; else port = bam2bam_ports[port_num]; d = &port->data_ch; port->gr = gr; if (trans == USB_GADGET_XPORT_BAM) gbam_free_buffers(port); spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); port->port_usb = 0; n_tx_req_queued = 0; spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); /* disable endpoints */ usb_ep_disable(gr->out); usb_ep_disable(gr->in); gr->in->driver_data = NULL; gr->out->driver_data = NULL; if (trans == USB_GADGET_XPORT_BAM || trans == USB_GADGET_XPORT_BAM2BAM_IPA) queue_work(gbam_wq, &port->disconnect_w); else if (trans == USB_GADGET_XPORT_BAM2BAM) { if (port_num == 0) { if (usb_bam_client_ready(false)) { pr_err("%s: usb_bam_client_ready failed\n", __func__); } } } } int gbam_connect(struct grmnet *gr, u8 port_num, enum transport_type trans, u8 src_connection_idx, u8 dst_connection_idx) { struct gbam_port *port; 
struct bam_ch_info *d; int ret; unsigned long flags; pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num); if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) { pr_err("%s: invalid portno#%d\n", __func__, port_num); return -ENODEV; } if ((trans == USB_GADGET_XPORT_BAM2BAM || trans == USB_GADGET_XPORT_BAM2BAM_IPA) && port_num >= n_bam2bam_ports) { pr_err("%s: invalid portno#%d\n", __func__, port_num); return -ENODEV; } if (!gr) { pr_err("%s: grmnet port is null\n", __func__); return -ENODEV; } if (trans == USB_GADGET_XPORT_BAM) port = bam_ports[port_num].port; else port = bam2bam_ports[port_num]; d = &port->data_ch; ret = usb_ep_enable(gr->in); if (ret) { pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", __func__, gr->in); return ret; } gr->in->driver_data = port; ret = usb_ep_enable(gr->out); if (ret) { pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", __func__, gr->out); gr->in->driver_data = 0; return ret; } gr->out->driver_data = port; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); port->port_usb = gr; if (trans == USB_GADGET_XPORT_BAM) { d->to_host = 0; d->to_modem = 0; d->pending_with_bam = 0; d->tohost_drp_cnt = 0; d->tomodem_drp_cnt = 0; d->rx_flow_control_disable = 0; d->rx_flow_control_enable = 0; d->rx_flow_control_triggered = 0; d->max_num_pkts_pending_with_bam = 0; } spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); if (trans == USB_GADGET_XPORT_BAM2BAM) { port->gr = gr; d->src_connection_idx = src_connection_idx; d->dst_connection_idx = dst_connection_idx; } else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) { port->gr = gr; d->ipa_params.src_pipe = &(d->src_pipe_idx); d->ipa_params.dst_pipe = &(d->dst_pipe_idx); d->ipa_params.src_idx = src_connection_idx; d->ipa_params.dst_idx = dst_connection_idx; } d->trans = trans; queue_work(gbam_wq, &port->connect_w); return 0; } int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port) { int i; int ret; 
pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n", __func__, no_bam_port, no_bam2bam_port); if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS || no_bam2bam_port > BAM2BAM_N_PORTS) { pr_err("%s: Invalid num of ports count:%d,%d\n", __func__, no_bam_port, no_bam2bam_port); return -EINVAL; } gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1); if (!gbam_wq) { pr_err("%s: Unable to create workqueue gbam_wq\n", __func__); return -ENOMEM; } for (i = 0; i < no_bam_port; i++) { n_bam_ports++; ret = gbam_port_alloc(i); if (ret) { n_bam_ports--; pr_err("%s: Unable to alloc port:%d\n", __func__, i); goto free_bam_ports; } } for (i = 0; i < no_bam2bam_port; i++) { n_bam2bam_ports++; ret = gbam2bam_port_alloc(i); if (ret) { n_bam2bam_ports--; pr_err("%s: Unable to alloc port:%d\n", __func__, i); goto free_bam_ports; } } gbam_debugfs_init(); return 0; free_bam_ports: for (i = 0; i < n_bam_ports; i++) gbam_port_free(i); for (i = 0; i < n_bam2bam_ports; i++) gbam2bam_port_free(i); destroy_workqueue(gbam_wq); return ret; } void gbam_cleanup(void) { gbam_debugfs_remove(); } void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans) { struct gbam_port *port; struct bam_ch_info *d; if (trans != USB_GADGET_XPORT_BAM2BAM && trans != USB_GADGET_XPORT_BAM2BAM_IPA) return; port = bam2bam_ports[port_num]; d = &port->data_ch; pr_debug("%s: suspended port %d\n", __func__, port_num); queue_work(gbam_wq, &port->suspend_w); } void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans) { struct gbam_port *port; struct bam_ch_info *d; if (trans != USB_GADGET_XPORT_BAM2BAM && trans != USB_GADGET_XPORT_BAM2BAM_IPA) return; port = bam2bam_ports[port_num]; d = &port->data_ch; pr_debug("%s: resumed port %d\n", __func__, port_num); queue_work(gbam_wq, &port->resume_w); }
gpl-2.0
krizky82/Xperia-2011-Kernel-2.6.32.X-ICS
drivers/net/wan/c101.c
1452
11248
/* * Moxa C101 synchronous serial card driver for Linux * * Copyright (C) 2000-2003 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/> * * Sources of information: * Hitachi HD64570 SCA User's Manual * Moxa C101 User's Manual */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/capability.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/hdlc.h> #include <linux/delay.h> #include <asm/io.h> #include "hd64570.h" static const char* version = "Moxa C101 driver version: 1.15"; static const char* devname = "C101"; #undef DEBUG_PKT #define DEBUG_RINGS #define C101_PAGE 0x1D00 #define C101_DTR 0x1E00 #define C101_SCA 0x1F00 #define C101_WINDOW_SIZE 0x2000 #define C101_MAPPED_RAM_SIZE 0x4000 #define RAM_SIZE (256 * 1024) #define TX_RING_BUFFERS 10 #define RX_RING_BUFFERS ((RAM_SIZE - C101_WINDOW_SIZE) / \ (sizeof(pkt_desc) + HDLC_MAX_MRU) - TX_RING_BUFFERS) #define CLOCK_BASE 9830400 /* 9.8304 MHz */ #define PAGE0_ALWAYS_MAPPED static char *hw; /* pointer to hw=xxx command line string */ typedef struct card_s { struct net_device *dev; spinlock_t lock; /* TX lock */ u8 __iomem *win0base; /* ISA window base address */ u32 phy_winbase; /* ISA physical base address */ sync_serial_settings settings; int rxpart; /* partial frame received, next frame invalid*/ unsigned short encoding; unsigned short parity; u16 rx_ring_buffers; /* number of buffers in a ring */ u16 tx_ring_buffers; u16 buff_offset; /* offset of first buffer of first channel */ u16 rxin; /* rx ring buffer 'in' pointer */ u16 txin; /* tx ring buffer 'in' and 'last' pointers */ u16 txlast; u8 rxs, txs, tmc; /* 
SCA registers */ u8 irq; /* IRQ (3-15) */ u8 page; struct card_s *next_card; }card_t; typedef card_t port_t; static card_t *first_card; static card_t **new_card = &first_card; #define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg)) #define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg)) #define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg)) /* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */ #define sca_outw(value, reg, card) do { \ writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \ writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg + 1));\ } while(0) #define port_to_card(port) (port) #define log_node(port) (0) #define phy_node(port) (0) #define winsize(card) (C101_WINDOW_SIZE) #define win0base(card) ((card)->win0base) #define winbase(card) ((card)->win0base + 0x2000) #define get_port(card, port) (card) static void sca_msci_intr(port_t *port); static inline u8 sca_get_page(card_t *card) { return card->page; } static inline void openwin(card_t *card, u8 page) { card->page = page; writeb(page, card->win0base + C101_PAGE); } #include "hd64570.c" static inline void set_carrier(port_t *port) { if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD)) netif_carrier_on(port_to_dev(port)); else netif_carrier_off(port_to_dev(port)); } static void sca_msci_intr(port_t *port) { u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */ /* Reset MSCI TX underrun and CDCD (ignored) status bit */ sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port); if (stat & ST1_UDRN) { /* TX Underrun error detected */ port_to_dev(port)->stats.tx_errors++; port_to_dev(port)->stats.tx_fifo_errors++; } stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */ /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); if (stat & ST1_CDCD) set_carrier(port); } static void c101_set_iface(port_t *port) { u8 rxs = port->rxs & 
CLK_BRG_MASK; u8 txs = port->txs & CLK_BRG_MASK; switch(port->settings.clock_type) { case CLOCK_INT: rxs |= CLK_BRG_RX; /* TX clock */ txs |= CLK_RXCLK_TX; /* BRG output */ break; case CLOCK_TXINT: rxs |= CLK_LINE_RX; /* RXC input */ txs |= CLK_BRG_TX; /* BRG output */ break; case CLOCK_TXFROMRX: rxs |= CLK_LINE_RX; /* RXC input */ txs |= CLK_RXCLK_TX; /* RX clock */ break; default: /* EXTernal clock */ rxs |= CLK_LINE_RX; /* RXC input */ txs |= CLK_LINE_TX; /* TXC input */ } port->rxs = rxs; port->txs = txs; sca_out(rxs, MSCI1_OFFSET + RXS, port); sca_out(txs, MSCI1_OFFSET + TXS, port); sca_set_port(port); } static int c101_open(struct net_device *dev) { port_t *port = dev_to_port(dev); int result; result = hdlc_open(dev); if (result) return result; writeb(1, port->win0base + C101_DTR); sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */ sca_open(dev); /* DCD is connected to port 2 !@#$%^& - disable MSCI0 CDCD interrupt */ sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port); sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); set_carrier(port); /* enable MSCI1 CDCD interrupt */ sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); sca_out(IE0_RXINTA, MSCI1_OFFSET + IE0, port); sca_out(0x48, IER0, port); /* TXINT #0 and RXINT #1 */ c101_set_iface(port); return 0; } static int c101_close(struct net_device *dev) { port_t *port = dev_to_port(dev); sca_close(dev); writeb(0, port->win0base + C101_DTR); sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port); hdlc_close(dev); return 0; } static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { const size_t size = sizeof(sync_serial_settings); sync_serial_settings new_line; sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; port_t *port = dev_to_port(dev); #ifdef DEBUG_RINGS if (cmd == SIOCDEVPRIVATE) { sca_dump_rings(dev); printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n", sca_in(MSCI1_OFFSET + ST0, port), sca_in(MSCI1_OFFSET + ST1, port), sca_in(MSCI1_OFFSET + ST2, port), sca_in(MSCI1_OFFSET + ST3, 
port)); return 0; } #endif if (cmd != SIOCWANDEV) return hdlc_ioctl(dev, ifr, cmd); switch(ifr->ifr_settings.type) { case IF_GET_IFACE: ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; if (ifr->ifr_settings.size < size) { ifr->ifr_settings.size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(line, &port->settings, size)) return -EFAULT; return 0; case IF_IFACE_SYNC_SERIAL: if(!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&new_line, line, size)) return -EFAULT; if (new_line.clock_type != CLOCK_EXT && new_line.clock_type != CLOCK_TXFROMRX && new_line.clock_type != CLOCK_INT && new_line.clock_type != CLOCK_TXINT) return -EINVAL; /* No such clock setting */ if (new_line.loopback != 0 && new_line.loopback != 1) return -EINVAL; memcpy(&port->settings, &new_line, size); /* Update settings */ c101_set_iface(port); return 0; default: return hdlc_ioctl(dev, ifr, cmd); } } static void c101_destroy_card(card_t *card) { readb(card->win0base + C101_PAGE); /* Resets SCA? */ if (card->irq) free_irq(card->irq, card); if (card->win0base) { iounmap(card->win0base); release_mem_region(card->phy_winbase, C101_MAPPED_RAM_SIZE); } free_netdev(card->dev); kfree(card); } static const struct net_device_ops c101_ops = { .ndo_open = c101_open, .ndo_stop = c101_close, .ndo_change_mtu = hdlc_change_mtu, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = c101_ioctl, }; static int __init c101_run(unsigned long irq, unsigned long winbase) { struct net_device *dev; hdlc_device *hdlc; card_t *card; int result; if (irq<3 || irq>15 || irq == 6) /* FIXME */ { printk(KERN_ERR "c101: invalid IRQ value\n"); return -ENODEV; } if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) !=0) { printk(KERN_ERR "c101: invalid RAM value\n"); return -ENODEV; } card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "c101: unable to allocate memory\n"); return -ENOBUFS; } card->dev = alloc_hdlcdev(card); if (!card->dev) { printk(KERN_ERR "c101: unable to 
allocate memory\n"); kfree(card); return -ENOBUFS; } if (request_irq(irq, sca_intr, 0, devname, card)) { printk(KERN_ERR "c101: could not allocate IRQ\n"); c101_destroy_card(card); return -EBUSY; } card->irq = irq; if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) { printk(KERN_ERR "c101: could not request RAM window\n"); c101_destroy_card(card); return -EBUSY; } card->phy_winbase = winbase; card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE); if (!card->win0base) { printk(KERN_ERR "c101: could not map I/O address\n"); c101_destroy_card(card); return -EFAULT; } card->tx_ring_buffers = TX_RING_BUFFERS; card->rx_ring_buffers = RX_RING_BUFFERS; card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */ readb(card->win0base + C101_PAGE); /* Resets SCA? */ udelay(100); writeb(0, card->win0base + C101_PAGE); writeb(0, card->win0base + C101_DTR); /* Power-up for RAM? */ sca_init(card, 0); dev = port_to_dev(card); hdlc = dev_to_hdlc(dev); spin_lock_init(&card->lock); dev->irq = irq; dev->mem_start = winbase; dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1; dev->tx_queue_len = 50; dev->netdev_ops = &c101_ops; hdlc->attach = sca_attach; hdlc->xmit = sca_xmit; card->settings.clock_type = CLOCK_EXT; result = register_hdlc_device(dev); if (result) { printk(KERN_WARNING "c101: unable to register hdlc device\n"); c101_destroy_card(card); return result; } sca_init_port(card); /* Set up C101 memory */ set_carrier(card); printk(KERN_INFO "%s: Moxa C101 on IRQ%u," " using %u TX + %u RX packets rings\n", dev->name, card->irq, card->tx_ring_buffers, card->rx_ring_buffers); *new_card = card; new_card = &card->next_card; return 0; } static int __init c101_init(void) { if (hw == NULL) { #ifdef MODULE printk(KERN_INFO "c101: no card initialized\n"); #endif return -EINVAL; /* no parameters specified, abort */ } printk(KERN_INFO "%s\n", version); do { unsigned long irq, ram; irq = simple_strtoul(hw, &hw, 0); if (*hw++ != ',') break; ram = simple_strtoul(hw, 
&hw, 0); if (*hw == ':' || *hw == '\x0') c101_run(irq, ram); if (*hw == '\x0') return first_card ? 0 : -EINVAL; }while(*hw++ == ':'); printk(KERN_ERR "c101: invalid hardware parameters\n"); return first_card ? 0 : -EINVAL; } static void __exit c101_cleanup(void) { card_t *card = first_card; while (card) { card_t *ptr = card; card = card->next_card; unregister_hdlc_device(port_to_dev(ptr)); c101_destroy_card(ptr); } } module_init(c101_init); module_exit(c101_cleanup); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("Moxa C101 serial port driver"); MODULE_LICENSE("GPL v2"); module_param(hw, charp, 0444); MODULE_PARM_DESC(hw, "irq,ram:irq,...");
gpl-2.0
MeiDahua/htc-kernel-tattoo
arch/mips/sgi-ip27/ip27-klconfig.c
1708
2945
/* * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/param.h> #include <linux/timex.h> #include <linux/mm.h> #include <asm/sn/klconfig.h> #include <asm/sn/arch.h> #include <asm/sn/gda.h> klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type) { int index, j; if (kli == (klinfo_t *)NULL) { index = 0; } else { for (j = 0; j < KLCF_NUM_COMPS(brd); j++) if (kli == KLCF_COMP(brd, j)) break; index = j; if (index == KLCF_NUM_COMPS(brd)) { printk("find_component: Bad pointer: 0x%p\n", kli); return (klinfo_t *)NULL; } index++; /* next component */ } for (; index < KLCF_NUM_COMPS(brd); index++) { kli = KLCF_COMP(brd, index); if (KLCF_COMP_TYPE(kli) == struct_type) return kli; } /* Didn't find it. */ return (klinfo_t *)NULL; } klinfo_t *find_first_component(lboard_t *brd, unsigned char struct_type) { return find_component(brd, (klinfo_t *)NULL, struct_type); } lboard_t * find_lboard(lboard_t *start, unsigned char brd_type) { /* Search all boards stored on this node. */ while (start) { if (start->brd_type == brd_type) return start; start = KLCF_NEXT(start); } /* Didn't find it. */ return (lboard_t *)NULL; } lboard_t * find_lboard_class(lboard_t *start, unsigned char brd_type) { /* Search all boards stored on this node. */ while (start) { if (KLCLASS(start->brd_type) == KLCLASS(brd_type)) return start; start = KLCF_NEXT(start); } /* Didn't find it. 
*/ return (lboard_t *)NULL; } cnodeid_t get_cpu_cnode(cpuid_t cpu) { return CPUID_TO_COMPACT_NODEID(cpu); } klcpu_t * nasid_slice_to_cpuinfo(nasid_t nasid, int slice) { lboard_t *brd; klcpu_t *acpu; if (!(brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27))) return (klcpu_t *)NULL; if (!(acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU))) return (klcpu_t *)NULL; do { if ((acpu->cpu_info.physid) == slice) return acpu; } while ((acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu, KLSTRUCT_CPU))); return (klcpu_t *)NULL; } klcpu_t * sn_get_cpuinfo(cpuid_t cpu) { nasid_t nasid; int slice; klcpu_t *acpu; gda_t *gdap = GDA; cnodeid_t cnode; if (!(cpu < MAXCPUS)) { printk("sn_get_cpuinfo: illegal cpuid 0x%lx\n", cpu); return NULL; } cnode = get_cpu_cnode(cpu); if (cnode == INVALID_CNODEID) return NULL; if ((nasid = gdap->g_nasidtable[cnode]) == INVALID_NASID) return NULL; for (slice = 0; slice < CPUS_PER_NODE; slice++) { acpu = nasid_slice_to_cpuinfo(nasid, slice); if (acpu && acpu->cpu_info.virtid == cpu) return acpu; } return NULL; } int get_cpu_slice(cpuid_t cpu) { klcpu_t *acpu; if ((acpu = sn_get_cpuinfo(cpu)) == NULL) return -1; return acpu->cpu_info.physid; }
gpl-2.0
cattleprod/for-sense
security/keys/request_key_auth.c
1708
7746
/* request_key_auth.c: request key authorisation controlling key def
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/keys-request-key.txt
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "internal.h"

static int request_key_auth_instantiate(struct key *, const void *, size_t);
static void request_key_auth_describe(const struct key *, struct seq_file *);
static void request_key_auth_revoke(struct key *);
static void request_key_auth_destroy(struct key *);
static long request_key_auth_read(const struct key *, char __user *, size_t);

/*
 * the request-key authorisation key type definition
 */
struct key_type key_type_request_key_auth = {
	.name		= ".request_key_auth",
	.def_datalen	= sizeof(struct request_key_auth),
	.instantiate	= request_key_auth_instantiate,
	.describe	= request_key_auth_describe,
	.revoke		= request_key_auth_revoke,
	.destroy	= request_key_auth_destroy,
	.read		= request_key_auth_read,
};

/*****************************************************************************/
/*
 * instantiate a request-key authorisation key
 * - the payload is the struct request_key_auth prebuilt by
 *   request_key_auth_new(); ownership of it passes to the key, which
 *   releases it via request_key_auth_destroy()
 */
static int request_key_auth_instantiate(struct key *key,
					const void *data,
					size_t datalen)
{
	key->payload.data = (struct request_key_auth *) data;
	return 0;

} /* end request_key_auth_instantiate() */

/*****************************************************************************/
/*
 * describe an authorisation token key (e.g. for /proc/keys)
 * - emits "key:<desc> pid:<pid> ci:<callout_len>"
 * - NOTE(review): the old comment here ("reading a request-key
 *   authorisation key retrieves the callout information") actually
 *   described the read op below, not this describe op
 */
static void request_key_auth_describe(const struct key *key,
				      struct seq_file *m)
{
	struct request_key_auth *rka = key->payload.data;

	seq_puts(m, "key:");
	seq_puts(m, key->description);
	seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);

} /* end request_key_auth_describe() */

/*****************************************************************************/
/*
 * read the callout_info data
 * - the key's semaphore is read-locked
 * - returns the full callout_info length even if the caller's buffer is
 *   smaller; only min(buflen, callout_len) bytes are copied out
 */
static long request_key_auth_read(const struct key *key,
				  char __user *buffer, size_t buflen)
{
	struct request_key_auth *rka = key->payload.data;
	size_t datalen;
	long ret;

	datalen = rka->callout_len;
	ret = datalen;

	/* we can return the data as is */
	if (buffer && buflen > 0) {
		if (buflen > datalen)
			buflen = datalen;

		if (copy_to_user(buffer, rka->callout_info, buflen) != 0)
			ret = -EFAULT;
	}

	return ret;

} /* end request_key_auth_read() */

/*****************************************************************************/
/*
 * handle revocation of an authorisation token key
 * - called with the key sem write-locked
 * - drops the credentials reference early so that revoking the authkey
 *   releases the requester's creds without waiting for key destruction
 */
static void request_key_auth_revoke(struct key *key)
{
	struct request_key_auth *rka = key->payload.data;

	kenter("{%d}", key->serial);

	if (rka->cred) {
		put_cred(rka->cred);
		rka->cred = NULL;
	}

} /* end request_key_auth_revoke() */

/*****************************************************************************/
/*
 * destroy an instantiation authorisation token key
 * - releases everything request_key_auth_new() acquired: the cred ref
 *   (unless already dropped by revoke), the target key, the destination
 *   keyring and the callout buffer
 */
static void request_key_auth_destroy(struct key *key)
{
	struct request_key_auth *rka = key->payload.data;

	kenter("{%d}", key->serial);

	if (rka->cred) {
		put_cred(rka->cred);
		rka->cred = NULL;
	}

	key_put(rka->target_key);
	key_put(rka->dest_keyring);
	kfree(rka->callout_info);
	kfree(rka);

} /* end request_key_auth_destroy() */

/*****************************************************************************/
/*
 * create an authorisation token for /sbin/request-key or whoever to gain
 * access to the caller's security data
 * - target: the key awaiting instantiation (a reference is taken)
 * - callout_info/callout_len: opaque data copied for the instantiator
 * - dest_keyring: where the instantiated key should be linked (ref taken)
 * - returns the new authkey or an ERR_PTR (-ENOMEM, -EKEYREVOKED, or the
 *   key_alloc/key_instantiate_and_link error)
 */
struct key *request_key_auth_new(struct key *target, const void *callout_info,
				 size_t callout_len, struct key *dest_keyring)
{
	struct request_key_auth *rka, *irka;
	const struct cred *cred = current->cred;
	struct key *authkey = NULL;
	/* big enough for a 32-bit serial rendered with "%x" (8 hex digits) */
	char desc[20];
	int ret;

	kenter("%d,", target->serial);

	/* allocate a auth record */
	rka = kmalloc(sizeof(*rka), GFP_KERNEL);
	if (!rka) {
		kleave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}
	rka->callout_info = kmalloc(callout_len, GFP_KERNEL);
	if (!rka->callout_info) {
		kleave(" = -ENOMEM");
		kfree(rka);
		return ERR_PTR(-ENOMEM);
	}

	/* see if the calling process is already servicing the key request of
	 * another process */
	if (cred->request_key_auth) {
		/* it is - use that instantiation context here too */
		down_read(&cred->request_key_auth->sem);

		/* if the auth key has been revoked, then the key we're
		 * servicing is already instantiated */
		if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags))
			goto auth_key_revoked;

		irka = cred->request_key_auth->payload.data;
		rka->cred = get_cred(irka->cred);
		rka->pid = irka->pid;

		up_read(&cred->request_key_auth->sem);
	}
	else {
		/* it isn't - use this process as the context */
		rka->cred = get_cred(cred);
		rka->pid = current->pid;
	}

	rka->target_key = key_get(target);
	rka->dest_keyring = key_get(dest_keyring);
	memcpy(rka->callout_info, callout_info, callout_len);
	rka->callout_len = callout_len;

	/* allocate the auth key
	 * - named after the target's serial so the instantiator can find it */
	sprintf(desc, "%x", target->serial);

	authkey = key_alloc(&key_type_request_key_auth, desc,
			    cred->fsuid, cred->fsgid, cred,
			    KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
			    KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(authkey)) {
		ret = PTR_ERR(authkey);
		goto error_alloc;
	}

	/* construct the auth key */
	ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL);
	if (ret < 0)
		goto error_inst;

	kleave(" = {%d,%d}", authkey->serial, atomic_read(&authkey->usage));
	return authkey;

auth_key_revoked:
	up_read(&cred->request_key_auth->sem);
	kfree(rka->callout_info);
	kfree(rka);
	kleave("= -EKEYREVOKED");
	return ERR_PTR(-EKEYREVOKED);

error_inst:
	key_revoke(authkey);
	key_put(authkey);
error_alloc:
	key_put(rka->target_key);
	key_put(rka->dest_keyring);
	kfree(rka->callout_info);
	kfree(rka);
	kleave("= %d", ret);
	return ERR_PTR(ret);

} /* end request_key_auth_new() */

/*****************************************************************************/
/*
 * see if an authorisation key is associated with a particular key
 * - match function for the keyring search in
 *   key_get_instantiation_authkey(): matches on the target key's serial
 */
static int key_get_instantiation_authkey_match(const struct key *key,
					       const void *_id)
{
	struct request_key_auth *rka = key->payload.data;
	key_serial_t id = (key_serial_t)(unsigned long) _id;

	return rka->target_key->serial == id;

} /* end key_get_instantiation_authkey_match() */

/*****************************************************************************/
/*
 * get the authorisation key for instantiation of a specific key if attached to
 * the current process's keyrings
 * - this key is inserted into a keyring and that is set as /sbin/request-key's
 *   session keyring
 * - a target_id of zero specifies any valid token
 * - returns the authkey with a reference held, or ERR_PTR on failure
 *   (-EKEYREVOKED if the found authkey has already been revoked)
 */
struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
	const struct cred *cred = current_cred();
	struct key *authkey;
	key_ref_t authkey_ref;

	authkey_ref = search_process_keyrings(
		&key_type_request_key_auth,
		(void *) (unsigned long) target_id,
		key_get_instantiation_authkey_match,
		cred);

	if (IS_ERR(authkey_ref)) {
		authkey = ERR_CAST(authkey_ref);
		goto error;
	}

	authkey = key_ref_to_ptr(authkey_ref);
	if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) {
		key_put(authkey);
		authkey = ERR_PTR(-EKEYREVOKED);
	}

error:
	return authkey;

} /* end key_get_instantiation_authkey() */
gpl-2.0
DeltaDroidTeam/android_kernel_lenovo_msm8x25q
net/ipv6/ndisc.c
2220
47357
/* * Neighbour Discovery for IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * Mike Shaver <shaver@ingenia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * Changes: * * Pierre Ynard : export userland ND options * through netlink (RDNSS support) * Lars Fenneberg : fixed MTU setting on receipt * of an RA. * Janos Farkas : kmalloc failure checks * Alexey Kuznetsov : state machine reworked * and moved to net/core. * Pekka Savola : RFC2461 validation * YOSHIFUJI Hideaki @USAGI : Verify ND options properly */ /* Set to 3 to get tracing... */ #define ND_DEBUG 1 #define ND_PRINTK(fmt, args...) do { if (net_ratelimit()) { printk(fmt, ## args); } } while(0) #define ND_NOPRINTK(x...) do { ; } while(0) #define ND_PRINTK0 ND_PRINTK #define ND_PRINTK1 ND_NOPRINTK #define ND_PRINTK2 ND_NOPRINTK #define ND_PRINTK3 ND_NOPRINTK #if ND_DEBUG >= 1 #undef ND_PRINTK1 #define ND_PRINTK1 ND_PRINTK #endif #if ND_DEBUG >= 2 #undef ND_PRINTK2 #define ND_PRINTK2 ND_PRINTK #endif #if ND_DEBUG >= 3 #undef ND_PRINTK3 #define ND_PRINTK3 ND_PRINTK #endif #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/sched.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/route.h> #include <linux/init.h> #include <linux/rcupdate.h> #include <linux/slab.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #include <linux/if_addr.h> #include <linux/if_arp.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/jhash.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/ndisc.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/icmp.h> #include <net/netlink.h> #include 
<linux/rtnetlink.h> #include <net/flow.h> #include <net/ip6_checksum.h> #include <net/inet_common.h> #include <linux/proc_fs.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> static u32 ndisc_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd); static int ndisc_constructor(struct neighbour *neigh); static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb); static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb); static int pndisc_constructor(struct pneigh_entry *n); static void pndisc_destructor(struct pneigh_entry *n); static void pndisc_redo(struct sk_buff *skb); static const struct neigh_ops ndisc_generic_ops = { .family = AF_INET6, .solicit = ndisc_solicit, .error_report = ndisc_error_report, .output = neigh_resolve_output, .connected_output = neigh_connected_output, }; static const struct neigh_ops ndisc_hh_ops = { .family = AF_INET6, .solicit = ndisc_solicit, .error_report = ndisc_error_report, .output = neigh_resolve_output, .connected_output = neigh_resolve_output, }; static const struct neigh_ops ndisc_direct_ops = { .family = AF_INET6, .output = neigh_direct_output, .connected_output = neigh_direct_output, }; struct neigh_table nd_tbl = { .family = AF_INET6, .key_len = sizeof(struct in6_addr), .hash = ndisc_hash, .constructor = ndisc_constructor, .pconstructor = pndisc_constructor, .pdestructor = pndisc_destructor, .proxy_redo = pndisc_redo, .id = "ndisc_cache", .parms = { .tbl = &nd_tbl, .base_reachable_time = ND_REACHABLE_TIME, .retrans_time = ND_RETRANS_TIMER, .gc_staletime = 60 * HZ, .reachable_time = ND_REACHABLE_TIME, .delay_probe_time = 5 * HZ, .queue_len_bytes = 64*1024, .ucast_probes = 3, .mcast_probes = 3, .anycast_delay = 1 * HZ, .proxy_delay = (8 * HZ) / 10, .proxy_qlen = 64, }, .gc_interval = 30 * HZ, .gc_thresh1 = 128, .gc_thresh2 = 512, .gc_thresh3 = 1024, }; /* ND options */ struct ndisc_options { struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX]; #ifdef 
CONFIG_IPV6_ROUTE_INFO struct nd_opt_hdr *nd_opts_ri; struct nd_opt_hdr *nd_opts_ri_end; #endif struct nd_opt_hdr *nd_useropts; struct nd_opt_hdr *nd_useropts_end; }; #define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR] #define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR] #define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO] #define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END] #define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR] #define nd_opts_mtu nd_opt_array[ND_OPT_MTU] #define NDISC_OPT_SPACE(len) (((len)+2+7)&~7) /* * Return the padding between the option length and the start of the * link addr. Currently only IP-over-InfiniBand needs this, although * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may * also need a pad of 2. */ static int ndisc_addr_option_pad(unsigned short type) { switch (type) { case ARPHRD_INFINIBAND: return 2; default: return 0; } } static inline int ndisc_opt_addr_space(struct net_device *dev) { return NDISC_OPT_SPACE(dev->addr_len + ndisc_addr_option_pad(dev->type)); } static u8 *ndisc_fill_addr_option(u8 *opt, int type, void *data, int data_len, unsigned short addr_type) { int space = NDISC_OPT_SPACE(data_len); int pad = ndisc_addr_option_pad(addr_type); opt[0] = type; opt[1] = space>>3; memset(opt + 2, 0, pad); opt += pad; space -= pad; memcpy(opt+2, data, data_len); data_len += 2; opt += data_len; if ((space -= data_len) > 0) memset(opt, 0, space); return opt + space; } static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, struct nd_opt_hdr *end) { int type; if (!cur || !end || cur >= end) return NULL; type = cur->nd_opt_type; do { cur = ((void *)cur) + (cur->nd_opt_len << 3); } while(cur < end && cur->nd_opt_type != type); return cur <= end && cur->nd_opt_type == type ? 
cur : NULL; } static inline int ndisc_is_useropt(struct nd_opt_hdr *opt) { return opt->nd_opt_type == ND_OPT_RDNSS; } static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, struct nd_opt_hdr *end) { if (!cur || !end || cur >= end) return NULL; do { cur = ((void *)cur) + (cur->nd_opt_len << 3); } while(cur < end && !ndisc_is_useropt(cur)); return cur <= end && ndisc_is_useropt(cur) ? cur : NULL; } static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, struct ndisc_options *ndopts) { struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)opt; if (!nd_opt || opt_len < 0 || !ndopts) return NULL; memset(ndopts, 0, sizeof(*ndopts)); while (opt_len) { int l; if (opt_len < sizeof(struct nd_opt_hdr)) return NULL; l = nd_opt->nd_opt_len << 3; if (opt_len < l || l == 0) return NULL; switch (nd_opt->nd_opt_type) { case ND_OPT_SOURCE_LL_ADDR: case ND_OPT_TARGET_LL_ADDR: case ND_OPT_MTU: case ND_OPT_REDIRECT_HDR: if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { ND_PRINTK2(KERN_WARNING "%s(): duplicated ND6 option found: type=%d\n", __func__, nd_opt->nd_opt_type); } else { ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; } break; case ND_OPT_PREFIX_INFO: ndopts->nd_opts_pi_end = nd_opt; if (!ndopts->nd_opt_array[nd_opt->nd_opt_type]) ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; break; #ifdef CONFIG_IPV6_ROUTE_INFO case ND_OPT_ROUTE_INFO: ndopts->nd_opts_ri_end = nd_opt; if (!ndopts->nd_opts_ri) ndopts->nd_opts_ri = nd_opt; break; #endif default: if (ndisc_is_useropt(nd_opt)) { ndopts->nd_useropts_end = nd_opt; if (!ndopts->nd_useropts) ndopts->nd_useropts = nd_opt; } else { /* * Unknown options must be silently ignored, * to accommodate future extension to the * protocol. 
*/ ND_PRINTK2(KERN_NOTICE "%s(): ignored unsupported option; type=%d, len=%d\n", __func__, nd_opt->nd_opt_type, nd_opt->nd_opt_len); } } opt_len -= l; nd_opt = ((void *)nd_opt) + l; } return ndopts; } static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p, struct net_device *dev) { u8 *lladdr = (u8 *)(p + 1); int lladdrlen = p->nd_opt_len << 3; int prepad = ndisc_addr_option_pad(dev->type); if (lladdrlen != NDISC_OPT_SPACE(dev->addr_len + prepad)) return NULL; return lladdr + prepad; } int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir) { switch (dev->type) { case ARPHRD_ETHER: case ARPHRD_IEEE802: /* Not sure. Check it later. --ANK */ case ARPHRD_FDDI: ipv6_eth_mc_map(addr, buf); return 0; case ARPHRD_IEEE802_TR: ipv6_tr_mc_map(addr,buf); return 0; case ARPHRD_ARCNET: ipv6_arcnet_mc_map(addr, buf); return 0; case ARPHRD_INFINIBAND: ipv6_ib_mc_map(addr, dev->broadcast, buf); return 0; case ARPHRD_IPGRE: return ipv6_ipgre_mc_map(addr, dev->broadcast, buf); default: if (dir) { memcpy(buf, dev->broadcast, dev->addr_len); return 0; } } return -EINVAL; } EXPORT_SYMBOL(ndisc_mc_map); static u32 ndisc_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd) { return ndisc_hashfn(pkey, dev, hash_rnd); } static int ndisc_constructor(struct neighbour *neigh) { struct in6_addr *addr = (struct in6_addr*)&neigh->primary_key; struct net_device *dev = neigh->dev; struct inet6_dev *in6_dev; struct neigh_parms *parms; int is_multicast = ipv6_addr_is_multicast(addr); in6_dev = in6_dev_get(dev); if (in6_dev == NULL) { return -EINVAL; } parms = in6_dev->nd_parms; __neigh_parms_put(neigh->parms); neigh->parms = neigh_parms_clone(parms); neigh->type = is_multicast ? 
RTN_MULTICAST : RTN_UNICAST; if (!dev->header_ops) { neigh->nud_state = NUD_NOARP; neigh->ops = &ndisc_direct_ops; neigh->output = neigh_direct_output; } else { if (is_multicast) { neigh->nud_state = NUD_NOARP; ndisc_mc_map(addr, neigh->ha, dev, 1); } else if (dev->flags&(IFF_NOARP|IFF_LOOPBACK)) { neigh->nud_state = NUD_NOARP; memcpy(neigh->ha, dev->dev_addr, dev->addr_len); if (dev->flags&IFF_LOOPBACK) neigh->type = RTN_LOCAL; } else if (dev->flags&IFF_POINTOPOINT) { neigh->nud_state = NUD_NOARP; memcpy(neigh->ha, dev->broadcast, dev->addr_len); } if (dev->header_ops->cache) neigh->ops = &ndisc_hh_ops; else neigh->ops = &ndisc_generic_ops; if (neigh->nud_state&NUD_VALID) neigh->output = neigh->ops->connected_output; else neigh->output = neigh->ops->output; } in6_dev_put(in6_dev); return 0; } static int pndisc_constructor(struct pneigh_entry *n) { struct in6_addr *addr = (struct in6_addr*)&n->key; struct in6_addr maddr; struct net_device *dev = n->dev; if (dev == NULL || __in6_dev_get(dev) == NULL) return -EINVAL; addrconf_addr_solict_mult(addr, &maddr); ipv6_dev_mc_inc(dev, &maddr); return 0; } static void pndisc_destructor(struct pneigh_entry *n) { struct in6_addr *addr = (struct in6_addr*)&n->key; struct in6_addr maddr; struct net_device *dev = n->dev; if (dev == NULL || __in6_dev_get(dev) == NULL) return; addrconf_addr_solict_mult(addr, &maddr); ipv6_dev_mc_dec(dev, &maddr); } struct sk_buff *ndisc_build_skb(struct net_device *dev, const struct in6_addr *daddr, const struct in6_addr *saddr, struct icmp6hdr *icmp6h, const struct in6_addr *target, int llinfo) { struct net *net = dev_net(dev); struct sock *sk = net->ipv6.ndisc_sk; struct sk_buff *skb; struct icmp6hdr *hdr; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; int len; int err; u8 *opt; if (!dev->addr_len) llinfo = 0; len = sizeof(struct icmp6hdr) + (target ? 
sizeof(*target) : 0); if (llinfo) len += ndisc_opt_addr_space(dev); skb = sock_alloc_send_skb(sk, (MAX_HEADER + sizeof(struct ipv6hdr) + len + hlen + tlen), 1, &err); if (!skb) { ND_PRINTK0(KERN_ERR "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n", __func__, err); return NULL; } skb_reserve(skb, hlen); ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len); skb->transport_header = skb->tail; skb_put(skb, len); hdr = (struct icmp6hdr *)skb_transport_header(skb); memcpy(hdr, icmp6h, sizeof(*hdr)); opt = skb_transport_header(skb) + sizeof(struct icmp6hdr); if (target) { *(struct in6_addr *)opt = *target; opt += sizeof(*target); } if (llinfo) ndisc_fill_addr_option(opt, llinfo, dev->dev_addr, dev->addr_len, dev->type); hdr->icmp6_cksum = csum_ipv6_magic(saddr, daddr, len, IPPROTO_ICMPV6, csum_partial(hdr, len, 0)); return skb; } EXPORT_SYMBOL(ndisc_build_skb); void ndisc_send_skb(struct sk_buff *skb, struct net_device *dev, struct neighbour *neigh, const struct in6_addr *daddr, const struct in6_addr *saddr, struct icmp6hdr *icmp6h) { struct flowi6 fl6; struct dst_entry *dst; struct net *net = dev_net(dev); struct sock *sk = net->ipv6.ndisc_sk; struct inet6_dev *idev; int err; u8 type; type = icmp6h->icmp6_type; icmpv6_flow_init(sk, &fl6, type, saddr, daddr, dev->ifindex); dst = icmp6_dst_alloc(dev, neigh, &fl6); if (IS_ERR(dst)) { kfree_skb(skb); return; } skb_dst_set(skb, dst); rcu_read_lock(); idev = __in6_dev_get(dst->dev); IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, dst_output); if (!err) { ICMP6MSGOUT_INC_STATS(net, idev, type); ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); } rcu_read_unlock(); } EXPORT_SYMBOL(ndisc_send_skb); /* * Send a Neighbour Discover packet */ static void __ndisc_send(struct net_device *dev, struct neighbour *neigh, const struct in6_addr *daddr, const struct in6_addr *saddr, struct icmp6hdr *icmp6h, const struct in6_addr *target, int llinfo) { struct 
sk_buff *skb; skb = ndisc_build_skb(dev, daddr, saddr, icmp6h, target, llinfo); if (!skb) return; ndisc_send_skb(skb, dev, neigh, daddr, saddr, icmp6h); } static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, const struct in6_addr *daddr, const struct in6_addr *solicited_addr, int router, int solicited, int override, int inc_opt) { struct in6_addr tmpaddr; struct inet6_ifaddr *ifp; const struct in6_addr *src_addr; struct icmp6hdr icmp6h = { .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT, }; /* for anycast or proxy, solicited_addr != src_addr */ ifp = ipv6_get_ifaddr(dev_net(dev), solicited_addr, dev, 1); if (ifp) { src_addr = solicited_addr; if (ifp->flags & IFA_F_OPTIMISTIC) override = 0; inc_opt |= ifp->idev->cnf.force_tllao; in6_ifa_put(ifp); } else { if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr, inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs, &tmpaddr)) return; src_addr = &tmpaddr; } icmp6h.icmp6_router = router; icmp6h.icmp6_solicited = solicited; icmp6h.icmp6_override = override; __ndisc_send(dev, neigh, daddr, src_addr, &icmp6h, solicited_addr, inc_opt ? 
ND_OPT_TARGET_LL_ADDR : 0); } static void ndisc_send_unsol_na(struct net_device *dev) { struct inet6_dev *idev; struct inet6_ifaddr *ifa; struct in6_addr mcaddr; idev = in6_dev_get(dev); if (!idev) return; read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { addrconf_addr_solict_mult(&ifa->addr, &mcaddr); ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr, /*router=*/ !!idev->cnf.forwarding, /*solicited=*/ false, /*override=*/ true, /*inc_opt=*/ true); } read_unlock_bh(&idev->lock); in6_dev_put(idev); } void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, const struct in6_addr *solicit, const struct in6_addr *daddr, const struct in6_addr *saddr) { struct in6_addr addr_buf; struct icmp6hdr icmp6h = { .icmp6_type = NDISC_NEIGHBOUR_SOLICITATION, }; if (saddr == NULL) { if (ipv6_get_lladdr(dev, &addr_buf, (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC))) return; saddr = &addr_buf; } __ndisc_send(dev, neigh, daddr, saddr, &icmp6h, solicit, !ipv6_addr_any(saddr) ? ND_OPT_SOURCE_LL_ADDR : 0); } void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr) { struct icmp6hdr icmp6h = { .icmp6_type = NDISC_ROUTER_SOLICITATION, }; int send_sllao = dev->addr_len; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD /* * According to section 2.2 of RFC 4429, we must not * send router solicitations with a sllao from * optimistic addresses, but we may send the solicitation * if we don't include the sllao. So here we check * if our address is optimistic, and if so, we * suppress the inclusion of the sllao. */ if (send_sllao) { struct inet6_ifaddr *ifp = ipv6_get_ifaddr(dev_net(dev), saddr, dev, 1); if (ifp) { if (ifp->flags & IFA_F_OPTIMISTIC) { send_sllao = 0; } in6_ifa_put(ifp); } else { send_sllao = 0; } } #endif __ndisc_send(dev, NULL, daddr, saddr, &icmp6h, NULL, send_sllao ? 
ND_OPT_SOURCE_LL_ADDR : 0); } static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb) { /* * "The sender MUST return an ICMP * destination unreachable" */ dst_link_failure(skb); kfree_skb(skb); } /* Called with locked neigh: either read or both */ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) { struct in6_addr *saddr = NULL; struct in6_addr mcaddr; struct net_device *dev = neigh->dev; struct in6_addr *target = (struct in6_addr *)&neigh->primary_key; int probes = atomic_read(&neigh->probes); if (skb && ipv6_chk_addr(dev_net(dev), &ipv6_hdr(skb)->saddr, dev, 1)) saddr = &ipv6_hdr(skb)->saddr; if ((probes -= neigh->parms->ucast_probes) < 0) { if (!(neigh->nud_state & NUD_VALID)) { ND_PRINTK1(KERN_DEBUG "%s(): trying to ucast probe in NUD_INVALID: %pI6\n", __func__, target); } ndisc_send_ns(dev, neigh, target, target, saddr); } else if ((probes -= neigh->parms->app_probes) < 0) { #ifdef CONFIG_ARPD neigh_app_ns(neigh); #endif } else { addrconf_addr_solict_mult(target, &mcaddr); ndisc_send_ns(dev, NULL, target, &mcaddr, saddr); } } static int pndisc_is_router(const void *pkey, struct net_device *dev) { struct pneigh_entry *n; int ret = -1; read_lock_bh(&nd_tbl.lock); n = __pneigh_lookup(&nd_tbl, dev_net(dev), pkey, dev); if (n) ret = !!(n->flags & NTF_ROUTER); read_unlock_bh(&nd_tbl.lock); return ret; } static void ndisc_recv_ns(struct sk_buff *skb) { struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; u32 ndoptlen = skb->tail - (skb->transport_header + offsetof(struct nd_msg, opt)); struct ndisc_options ndopts; struct net_device *dev = skb->dev; struct inet6_ifaddr *ifp; struct inet6_dev *idev = NULL; struct neighbour *neigh; int dad = ipv6_addr_any(saddr); int inc; int is_router = -1; if (ipv6_addr_is_multicast(&msg->target)) { ND_PRINTK2(KERN_WARNING "ICMPv6 NS: multicast target 
address"); return; } /* * RFC2461 7.1.1: * DAD has to be destined for solicited node multicast address. */ if (dad && !(daddr->s6_addr32[0] == htonl(0xff020000) && daddr->s6_addr32[1] == htonl(0x00000000) && daddr->s6_addr32[2] == htonl(0x00000001) && daddr->s6_addr [12] == 0xff )) { ND_PRINTK2(KERN_WARNING "ICMPv6 NS: bad DAD packet (wrong destination)\n"); return; } if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) { ND_PRINTK2(KERN_WARNING "ICMPv6 NS: invalid ND options\n"); return; } if (ndopts.nd_opts_src_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev); if (!lladdr) { ND_PRINTK2(KERN_WARNING "ICMPv6 NS: invalid link-layer address length\n"); return; } /* RFC2461 7.1.1: * If the IP source address is the unspecified address, * there MUST NOT be source link-layer address option * in the message. */ if (dad) { ND_PRINTK2(KERN_WARNING "ICMPv6 NS: bad DAD packet (link-layer address option)\n"); return; } } inc = ipv6_addr_is_multicast(daddr); ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1); if (ifp) { if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { if (dad) { if (dev->type == ARPHRD_IEEE802_TR) { const unsigned char *sadr; sadr = skb_mac_header(skb); if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 && sadr[9] == dev->dev_addr[1] && sadr[10] == dev->dev_addr[2] && sadr[11] == dev->dev_addr[3] && sadr[12] == dev->dev_addr[4] && sadr[13] == dev->dev_addr[5]) { /* looped-back to us */ goto out; } } /* * We are colliding with another node * who is doing DAD * so fail our DAD process */ addrconf_dad_failure(ifp); return; } else { /* * This is not a dad solicitation. * If we are an optimistic node, * we should respond. * Otherwise, we should ignore it. */ if (!(ifp->flags & IFA_F_OPTIMISTIC)) goto out; } } idev = ifp->idev; } else { struct net *net = dev_net(dev); idev = in6_dev_get(dev); if (!idev) { /* XXX: count this drop? 
*/ return; } if (ipv6_chk_acast_addr(net, dev, &msg->target) || (idev->cnf.forwarding && (net->ipv6.devconf_all->proxy_ndp || idev->cnf.proxy_ndp) && (is_router = pndisc_is_router(&msg->target, dev)) >= 0)) { if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) && skb->pkt_type != PACKET_HOST && inc != 0 && idev->nd_parms->proxy_delay != 0) { /* * for anycast or proxy, * sender should delay its response * by a random time between 0 and * MAX_ANYCAST_DELAY_TIME seconds. * (RFC2461) -- yoshfuji */ struct sk_buff *n = skb_clone(skb, GFP_ATOMIC); if (n) pneigh_enqueue(&nd_tbl, idev->nd_parms, n); goto out; } } else goto out; } if (is_router < 0) is_router = !!idev->cnf.forwarding; if (dad) { ndisc_send_na(dev, NULL, &in6addr_linklocal_allnodes, &msg->target, is_router, 0, (ifp != NULL), 1); goto out; } if (inc) NEIGH_CACHE_STAT_INC(&nd_tbl, rcv_probes_mcast); else NEIGH_CACHE_STAT_INC(&nd_tbl, rcv_probes_ucast); /* * update / create cache entry * for the source address */ neigh = __neigh_lookup(&nd_tbl, saddr, dev, !inc || lladdr || !dev->addr_len); if (neigh) neigh_update(neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_WEAK_OVERRIDE| NEIGH_UPDATE_F_OVERRIDE); if (neigh || !dev->header_ops) { ndisc_send_na(dev, neigh, saddr, &msg->target, is_router, 1, (ifp != NULL && inc), inc); if (neigh) neigh_release(neigh); } out: if (ifp) in6_ifa_put(ifp); else in6_dev_put(idev); } static void ndisc_recv_na(struct sk_buff *skb) { struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; u32 ndoptlen = skb->tail - (skb->transport_header + offsetof(struct nd_msg, opt)); struct ndisc_options ndopts; struct net_device *dev = skb->dev; struct inet6_ifaddr *ifp; struct neighbour *neigh; if (skb->len < sizeof(struct nd_msg)) { ND_PRINTK2(KERN_WARNING "ICMPv6 NA: packet too short\n"); return; } if (ipv6_addr_is_multicast(&msg->target)) { ND_PRINTK2(KERN_WARNING "ICMPv6 
NA: target address is multicast.\n"); return; } if (ipv6_addr_is_multicast(daddr) && msg->icmph.icmp6_solicited) { ND_PRINTK2(KERN_WARNING "ICMPv6 NA: solicited NA is multicasted.\n"); return; } if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) { ND_PRINTK2(KERN_WARNING "ICMPv6 NS: invalid ND option\n"); return; } if (ndopts.nd_opts_tgt_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, dev); if (!lladdr) { ND_PRINTK2(KERN_WARNING "ICMPv6 NA: invalid link-layer address length\n"); return; } } ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1); if (ifp) { if (skb->pkt_type != PACKET_LOOPBACK && (ifp->flags & IFA_F_TENTATIVE)) { addrconf_dad_failure(ifp); return; } /* What should we make now? The advertisement is invalid, but ndisc specs say nothing about it. It could be misconfiguration, or an smart proxy agent tries to help us :-) We should not print the error if NA has been received from loopback - it is just our own unsolicited advertisement. */ if (skb->pkt_type != PACKET_LOOPBACK) ND_PRINTK1(KERN_WARNING "ICMPv6 NA: someone advertises our address %pI6 on %s!\n", &ifp->addr, ifp->idev->dev->name); in6_ifa_put(ifp); return; } neigh = neigh_lookup(&nd_tbl, &msg->target, dev); if (neigh) { u8 old_flags = neigh->flags; struct net *net = dev_net(dev); if (neigh->nud_state & NUD_FAILED) goto out; /* * Don't update the neighbor cache entry on a proxy NA from * ourselves because either the proxied node is off link or it * has already sent a NA to us. */ if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp && pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) { /* XXX: idev->cnf.prixy_ndp */ goto out; } neigh_update(neigh, lladdr, msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE, NEIGH_UPDATE_F_WEAK_OVERRIDE| (msg->icmph.icmp6_override ? NEIGH_UPDATE_F_OVERRIDE : 0)| NEIGH_UPDATE_F_OVERRIDE_ISROUTER| (msg->icmph.icmp6_router ? 
NEIGH_UPDATE_F_ISROUTER : 0)); if ((old_flags & ~neigh->flags) & NTF_ROUTER) { /* * Change: router to host */ struct rt6_info *rt; rt = rt6_get_dflt_router(saddr, dev); if (rt) ip6_del_rt(rt); } out: neigh_release(neigh); } } static void ndisc_recv_rs(struct sk_buff *skb) { struct rs_msg *rs_msg = (struct rs_msg *)skb_transport_header(skb); unsigned long ndoptlen = skb->len - sizeof(*rs_msg); struct neighbour *neigh; struct inet6_dev *idev; const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; struct ndisc_options ndopts; u8 *lladdr = NULL; if (skb->len < sizeof(*rs_msg)) return; idev = __in6_dev_get(skb->dev); if (!idev) { if (net_ratelimit()) ND_PRINTK1("ICMP6 RS: can't find in6 device\n"); return; } /* Don't accept RS if we're not in router mode */ if (!idev->cnf.forwarding) goto out; /* * Don't update NCE if src = ::; * this implies that the source node has no ip address assigned yet. */ if (ipv6_addr_any(saddr)) goto out; /* Parse ND options */ if (!ndisc_parse_options(rs_msg->opt, ndoptlen, &ndopts)) { if (net_ratelimit()) ND_PRINTK2("ICMP6 NS: invalid ND option, ignored\n"); goto out; } if (ndopts.nd_opts_src_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, skb->dev); if (!lladdr) goto out; } neigh = __neigh_lookup(&nd_tbl, saddr, skb->dev, 1); if (neigh) { neigh_update(neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_WEAK_OVERRIDE| NEIGH_UPDATE_F_OVERRIDE| NEIGH_UPDATE_F_OVERRIDE_ISROUTER); neigh_release(neigh); } out: return; } static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt) { struct icmp6hdr *icmp6h = (struct icmp6hdr *)skb_transport_header(ra); struct sk_buff *skb; struct nlmsghdr *nlh; struct nduseroptmsg *ndmsg; struct net *net = dev_net(ra->dev); int err; int base_size = NLMSG_ALIGN(sizeof(struct nduseroptmsg) + (opt->nd_opt_len << 3)); size_t msg_size = base_size + nla_total_size(sizeof(struct in6_addr)); skb = nlmsg_new(msg_size, GFP_ATOMIC); if (skb == NULL) { err = -ENOBUFS; goto errout; } nlh = nlmsg_put(skb, 0, 
0, RTM_NEWNDUSEROPT, base_size, 0); if (nlh == NULL) { goto nla_put_failure; } ndmsg = nlmsg_data(nlh); ndmsg->nduseropt_family = AF_INET6; ndmsg->nduseropt_ifindex = ra->dev->ifindex; ndmsg->nduseropt_icmp_type = icmp6h->icmp6_type; ndmsg->nduseropt_icmp_code = icmp6h->icmp6_code; ndmsg->nduseropt_opts_len = opt->nd_opt_len << 3; memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3); NLA_PUT(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr), &ipv6_hdr(ra)->saddr); nlmsg_end(skb, nlh); rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC); return; nla_put_failure: nlmsg_free(skb); err = -EMSGSIZE; errout: rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err); } static inline int accept_ra(struct inet6_dev *in6_dev) { /* * If forwarding is enabled, RA are not accepted unless the special * hybrid mode (accept_ra=2) is enabled. */ if (in6_dev->cnf.forwarding && in6_dev->cnf.accept_ra < 2) return 0; return in6_dev->cnf.accept_ra; } static void ndisc_router_discovery(struct sk_buff *skb) { struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb); struct neighbour *neigh = NULL; struct inet6_dev *in6_dev; struct rt6_info *rt = NULL; int lifetime; struct ndisc_options ndopts; int optlen; unsigned int pref = 0; __u8 * opt = (__u8 *)(ra_msg + 1); optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg); if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING "ICMPv6 RA: source address is not link-local.\n"); return; } if (optlen < 0) { ND_PRINTK2(KERN_WARNING "ICMPv6 RA: packet too short\n"); return; } #ifdef CONFIG_IPV6_NDISC_NODETYPE if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) { ND_PRINTK2(KERN_WARNING "ICMPv6 RA: from host or unauthorized router\n"); return; } #endif /* * set the RA_RECV flag in the interface */ in6_dev = __in6_dev_get(skb->dev); if (in6_dev == NULL) { ND_PRINTK0(KERN_ERR "ICMPv6 RA: can't find inet6 device for %s.\n", skb->dev->name); return; } if (!ndisc_parse_options(opt, optlen, &ndopts)) { 
ND_PRINTK2(KERN_WARNING "ICMP6 RA: invalid ND options\n"); return; } if (!accept_ra(in6_dev)) goto skip_linkparms; #ifdef CONFIG_IPV6_NDISC_NODETYPE /* skip link-specific parameters from interior routers */ if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) goto skip_linkparms; #endif if (in6_dev->if_flags & IF_RS_SENT) { /* * flag that an RA was received after an RS was sent * out on this interface. */ in6_dev->if_flags |= IF_RA_RCVD; } /* * Remember the managed/otherconf flags from most recently * received RA message (RFC 2462) -- yoshfuji */ in6_dev->if_flags = (in6_dev->if_flags & ~(IF_RA_MANAGED | IF_RA_OTHERCONF)) | (ra_msg->icmph.icmp6_addrconf_managed ? IF_RA_MANAGED : 0) | (ra_msg->icmph.icmp6_addrconf_other ? IF_RA_OTHERCONF : 0); if (!in6_dev->cnf.accept_ra_defrtr) goto skip_defrtr; if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0)) goto skip_defrtr; lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime); #ifdef CONFIG_IPV6_ROUTER_PREF pref = ra_msg->icmph.icmp6_router_pref; /* 10b is handled as if it were 00b (medium) */ if (pref == ICMPV6_ROUTER_PREF_INVALID || !in6_dev->cnf.accept_ra_rtr_pref) pref = ICMPV6_ROUTER_PREF_MEDIUM; #endif rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev); if (rt) { neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr); if (!neigh) { ND_PRINTK0(KERN_ERR "ICMPv6 RA: %s() got default router without neighbour.\n", __func__); dst_release(&rt->dst); return; } } if (rt && lifetime == 0) { ip6_del_rt(rt); rt = NULL; } if (rt == NULL && lifetime) { ND_PRINTK3(KERN_DEBUG "ICMPv6 RA: adding default router.\n"); rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref); if (rt == NULL) { ND_PRINTK0(KERN_ERR "ICMPv6 RA: %s() failed to add default route.\n", __func__); return; } neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr); if (neigh == NULL) { ND_PRINTK0(KERN_ERR "ICMPv6 RA: %s() got default router without neighbour.\n", __func__); dst_release(&rt->dst); return; } neigh->flags |= 
NTF_ROUTER; } else if (rt) { rt->rt6i_flags = (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); } if (rt) rt6_set_expires(rt, jiffies + (HZ * lifetime)); if (ra_msg->icmph.icmp6_hop_limit) { in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; if (rt) dst_metric_set(&rt->dst, RTAX_HOPLIMIT, ra_msg->icmph.icmp6_hop_limit); } skip_defrtr: /* * Update Reachable Time and Retrans Timer */ if (in6_dev->nd_parms) { unsigned long rtime = ntohl(ra_msg->retrans_timer); if (rtime && rtime/1000 < MAX_SCHEDULE_TIMEOUT/HZ) { rtime = (rtime*HZ)/1000; if (rtime < HZ/10) rtime = HZ/10; in6_dev->nd_parms->retrans_time = rtime; in6_dev->tstamp = jiffies; inet6_ifinfo_notify(RTM_NEWLINK, in6_dev); } rtime = ntohl(ra_msg->reachable_time); if (rtime && rtime/1000 < MAX_SCHEDULE_TIMEOUT/(3*HZ)) { rtime = (rtime*HZ)/1000; if (rtime < HZ/10) rtime = HZ/10; if (rtime != in6_dev->nd_parms->base_reachable_time) { in6_dev->nd_parms->base_reachable_time = rtime; in6_dev->nd_parms->gc_staletime = 3 * rtime; in6_dev->nd_parms->reachable_time = neigh_rand_reach_time(rtime); in6_dev->tstamp = jiffies; inet6_ifinfo_notify(RTM_NEWLINK, in6_dev); } } } skip_linkparms: /* * Process options. 
*/ if (!neigh) neigh = __neigh_lookup(&nd_tbl, &ipv6_hdr(skb)->saddr, skb->dev, 1); if (neigh) { u8 *lladdr = NULL; if (ndopts.nd_opts_src_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, skb->dev); if (!lladdr) { ND_PRINTK2(KERN_WARNING "ICMPv6 RA: invalid link-layer address length\n"); goto out; } } neigh_update(neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_WEAK_OVERRIDE| NEIGH_UPDATE_F_OVERRIDE| NEIGH_UPDATE_F_OVERRIDE_ISROUTER| NEIGH_UPDATE_F_ISROUTER); } if (!accept_ra(in6_dev)) goto out; #ifdef CONFIG_IPV6_ROUTE_INFO if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0)) goto skip_routeinfo; if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) { struct nd_opt_hdr *p; for (p = ndopts.nd_opts_ri; p; p = ndisc_next_option(p, ndopts.nd_opts_ri_end)) { struct route_info *ri = (struct route_info *)p; #ifdef CONFIG_IPV6_NDISC_NODETYPE if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT && ri->prefix_len == 0) continue; #endif if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) continue; rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3, &ipv6_hdr(skb)->saddr); } } skip_routeinfo: #endif #ifdef CONFIG_IPV6_NDISC_NODETYPE /* skip link-specific ndopts from interior routers */ if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) goto out; #endif if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) { struct nd_opt_hdr *p; for (p = ndopts.nd_opts_pi; p; p = ndisc_next_option(p, ndopts.nd_opts_pi_end)) { addrconf_prefix_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3, ndopts.nd_opts_src_lladdr != NULL); } } if (ndopts.nd_opts_mtu) { __be32 n; u32 mtu; memcpy(&n, ((u8*)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu)); mtu = ntohl(n); if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) { ND_PRINTK2(KERN_WARNING "ICMPv6 RA: invalid mtu: %d\n", mtu); } else if (in6_dev->cnf.mtu6 != mtu) { in6_dev->cnf.mtu6 = mtu; if (rt) dst_metric_set(&rt->dst, RTAX_MTU, mtu); rt6_mtu_change(skb->dev, mtu); } } if (ndopts.nd_useropts) { struct nd_opt_hdr *p; 
for (p = ndopts.nd_useropts; p; p = ndisc_next_useropt(p, ndopts.nd_useropts_end)) { ndisc_ra_useropt(skb, p); } } if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) { ND_PRINTK2(KERN_WARNING "ICMPv6 RA: invalid RA options"); } out: if (rt) dst_release(&rt->dst); if (neigh) neigh_release(neigh); } static void ndisc_redirect_rcv(struct sk_buff *skb) { struct inet6_dev *in6_dev; struct icmp6hdr *icmph; const struct in6_addr *dest; const struct in6_addr *target; /* new first hop to destination */ struct neighbour *neigh; int on_link = 0; struct ndisc_options ndopts; int optlen; u8 *lladdr = NULL; #ifdef CONFIG_IPV6_NDISC_NODETYPE switch (skb->ndisc_nodetype) { case NDISC_NODETYPE_HOST: case NDISC_NODETYPE_NODEFAULT: ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: from host or unauthorized router\n"); return; } #endif if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: source address is not link-local.\n"); return; } optlen = skb->tail - skb->transport_header; optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); if (optlen < 0) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: packet too short\n"); return; } icmph = icmp6_hdr(skb); target = (const struct in6_addr *) (icmph + 1); dest = target + 1; if (ipv6_addr_is_multicast(dest)) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: destination address is multicast.\n"); return; } if (ipv6_addr_equal(dest, target)) { on_link = 1; } else if (ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: target address is not link-local unicast.\n"); return; } in6_dev = __in6_dev_get(skb->dev); if (!in6_dev) return; if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) return; /* RFC2461 8.1: * The IP source address of the Redirect MUST be the same as the current * first-hop router for the specified ICMP Destination Address. 
*/ if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: invalid ND options\n"); return; } if (ndopts.nd_opts_tgt_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, skb->dev); if (!lladdr) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: invalid link-layer address length\n"); return; } } neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1); if (neigh) { rt6_redirect(dest, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr, neigh, lladdr, on_link); neigh_release(neigh); } } void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) { struct net_device *dev = skb->dev; struct net *net = dev_net(dev); struct sock *sk = net->ipv6.ndisc_sk; int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); struct sk_buff *buff; struct icmp6hdr *icmph; struct in6_addr saddr_buf; struct in6_addr *addrp; struct rt6_info *rt; struct dst_entry *dst; struct inet6_dev *idev; struct flowi6 fl6; u8 *opt; int hlen, tlen; int rd_len; int err; u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: no link-local address on %s\n", dev->name); return; } if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) && ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: target address is not link-local unicast.\n"); return; } icmpv6_flow_init(sk, &fl6, NDISC_REDIRECT, &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex); dst = ip6_route_output(net, NULL, &fl6); if (dst->error) { dst_release(dst); return; } dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); if (IS_ERR(dst)) return; rt = (struct rt6_info *) dst; if (rt->rt6i_flags & RTF_GATEWAY) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: destination is not a neighbour.\n"); goto release; } if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) goto release; if (dev->addr_len) { struct neighbour 
*neigh = dst_neigh_lookup(skb_dst(skb), target); if (!neigh) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: no neigh for target address\n"); goto release; } read_lock_bh(&neigh->lock); if (neigh->nud_state & NUD_VALID) { memcpy(ha_buf, neigh->ha, dev->addr_len); read_unlock_bh(&neigh->lock); ha = ha_buf; len += ndisc_opt_addr_space(dev); } else read_unlock_bh(&neigh->lock); neigh_release(neigh); } rd_len = min_t(unsigned int, IPV6_MIN_MTU-sizeof(struct ipv6hdr)-len, skb->len + 8); rd_len &= ~0x7; len += rd_len; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; buff = sock_alloc_send_skb(sk, (MAX_HEADER + sizeof(struct ipv6hdr) + len + hlen + tlen), 1, &err); if (buff == NULL) { ND_PRINTK0(KERN_ERR "ICMPv6 Redirect: %s() failed to allocate an skb, err=%d.\n", __func__, err); goto release; } skb_reserve(buff, hlen); ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr, IPPROTO_ICMPV6, len); skb_set_transport_header(buff, skb_tail_pointer(buff) - buff->data); skb_put(buff, len); icmph = icmp6_hdr(buff); memset(icmph, 0, sizeof(struct icmp6hdr)); icmph->icmp6_type = NDISC_REDIRECT; /* * copy target and destination addresses */ addrp = (struct in6_addr *)(icmph + 1); *addrp = *target; addrp++; *addrp = ipv6_hdr(skb)->daddr; opt = (u8*) (addrp + 1); /* * include target_address option */ if (ha) opt = ndisc_fill_addr_option(opt, ND_OPT_TARGET_LL_ADDR, ha, dev->addr_len, dev->type); /* * build redirect option and copy skb over to the new packet. 
*/ memset(opt, 0, 8); *(opt++) = ND_OPT_REDIRECT_HDR; *(opt++) = (rd_len >> 3); opt += 6; memcpy(opt, ipv6_hdr(skb), rd_len - 8); icmph->icmp6_cksum = csum_ipv6_magic(&saddr_buf, &ipv6_hdr(skb)->saddr, len, IPPROTO_ICMPV6, csum_partial(icmph, len, 0)); skb_dst_set(buff, dst); rcu_read_lock(); idev = __in6_dev_get(dst->dev); IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev, dst_output); if (!err) { ICMP6MSGOUT_INC_STATS(net, idev, NDISC_REDIRECT); ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); } rcu_read_unlock(); return; release: dst_release(dst); } static void pndisc_redo(struct sk_buff *skb) { ndisc_recv_ns(skb); kfree_skb(skb); } int ndisc_rcv(struct sk_buff *skb) { struct nd_msg *msg; if (!pskb_may_pull(skb, skb->len)) return 0; msg = (struct nd_msg *)skb_transport_header(skb); __skb_push(skb, skb->data - skb_transport_header(skb)); if (ipv6_hdr(skb)->hop_limit != 255) { ND_PRINTK2(KERN_WARNING "ICMPv6 NDISC: invalid hop-limit: %d\n", ipv6_hdr(skb)->hop_limit); return 0; } if (msg->icmph.icmp6_code != 0) { ND_PRINTK2(KERN_WARNING "ICMPv6 NDISC: invalid ICMPv6 code: %d\n", msg->icmph.icmp6_code); return 0; } memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); switch (msg->icmph.icmp6_type) { case NDISC_NEIGHBOUR_SOLICITATION: ndisc_recv_ns(skb); break; case NDISC_NEIGHBOUR_ADVERTISEMENT: ndisc_recv_na(skb); break; case NDISC_ROUTER_SOLICITATION: ndisc_recv_rs(skb); break; case NDISC_ROUTER_ADVERTISEMENT: ndisc_router_discovery(skb); break; case NDISC_REDIRECT: ndisc_redirect_rcv(skb); break; } return 0; } static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct net *net = dev_net(dev); switch (event) { case NETDEV_CHANGEADDR: neigh_changeaddr(&nd_tbl, dev); fib6_run_gc(~0UL, net); break; case NETDEV_DOWN: neigh_ifdown(&nd_tbl, dev); fib6_run_gc(~0UL, net); break; case NETDEV_NOTIFY_PEERS: ndisc_send_unsol_na(dev); 
break; default: break; } return NOTIFY_DONE; } static struct notifier_block ndisc_netdev_notifier = { .notifier_call = ndisc_netdev_event, }; #ifdef CONFIG_SYSCTL static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl, const char *func, const char *dev_name) { static char warncomm[TASK_COMM_LEN]; static int warned; if (strcmp(warncomm, current->comm) && warned < 5) { strcpy(warncomm, current->comm); printk(KERN_WARNING "process `%s' is using deprecated sysctl (%s) " "net.ipv6.neigh.%s.%s; " "Use net.ipv6.neigh.%s.%s_ms " "instead.\n", warncomm, func, dev_name, ctl->procname, dev_name, ctl->procname); warned++; } } int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct net_device *dev = ctl->extra1; struct inet6_dev *idev; int ret; if ((strcmp(ctl->procname, "retrans_time") == 0) || (strcmp(ctl->procname, "base_reachable_time") == 0)) ndisc_warn_deprecated_sysctl(ctl, "syscall", dev ? dev->name : "default"); if (strcmp(ctl->procname, "retrans_time") == 0) ret = proc_dointvec(ctl, write, buffer, lenp, ppos); else if (strcmp(ctl->procname, "base_reachable_time") == 0) ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) || (strcmp(ctl->procname, "base_reachable_time_ms") == 0)) ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); else ret = -1; if (write && ret == 0 && dev && (idev = in6_dev_get(dev)) != NULL) { if (ctl->data == &idev->nd_parms->base_reachable_time) idev->nd_parms->reachable_time = neigh_rand_reach_time(idev->nd_parms->base_reachable_time); idev->tstamp = jiffies; inet6_ifinfo_notify(RTM_NEWLINK, idev); in6_dev_put(idev); } return ret; } #endif static int __net_init ndisc_net_init(struct net *net) { struct ipv6_pinfo *np; struct sock *sk; int err; err = inet_ctl_sock_create(&sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { ND_PRINTK0(KERN_ERR "ICMPv6 NDISC: Failed to initialize the control 
socket (err %d).\n", err); return err; } net->ipv6.ndisc_sk = sk; np = inet6_sk(sk); np->hop_limit = 255; /* Do not loopback ndisc messages */ np->mc_loop = 0; return 0; } static void __net_exit ndisc_net_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv6.ndisc_sk); } static struct pernet_operations ndisc_net_ops = { .init = ndisc_net_init, .exit = ndisc_net_exit, }; int __init ndisc_init(void) { int err; err = register_pernet_subsys(&ndisc_net_ops); if (err) return err; /* * Initialize the neighbour table */ neigh_table_init(&nd_tbl); #ifdef CONFIG_SYSCTL err = neigh_sysctl_register(NULL, &nd_tbl.parms, "ipv6", &ndisc_ifinfo_sysctl_change); if (err) goto out_unregister_pernet; #endif err = register_netdevice_notifier(&ndisc_netdev_notifier); if (err) goto out_unregister_sysctl; out: return err; out_unregister_sysctl: #ifdef CONFIG_SYSCTL neigh_sysctl_unregister(&nd_tbl.parms); out_unregister_pernet: #endif unregister_pernet_subsys(&ndisc_net_ops); goto out; } void ndisc_cleanup(void) { unregister_netdevice_notifier(&ndisc_netdev_notifier); #ifdef CONFIG_SYSCTL neigh_sysctl_unregister(&nd_tbl.parms); #endif neigh_table_clear(&nd_tbl); unregister_pernet_subsys(&ndisc_net_ops); }
gpl-2.0
instantinfrastructure/linux-yocto-3.10
drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
2476
10991
/* * Host AP crypt: host-based CCMP encryption implementation for Host AP driver * * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt //#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <linux/string.h> #include <linux/wireless.h> #include "ieee80211.h" #include <linux/crypto.h> #include <linux/scatterlist.h> MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Host AP crypt: CCMP"); MODULE_LICENSE("GPL"); #define AES_BLOCK_LEN 16 #define CCMP_HDR_LEN 8 #define CCMP_MIC_LEN 8 #define CCMP_TK_LEN 16 #define CCMP_PN_LEN 6 struct ieee80211_ccmp_data { u8 key[CCMP_TK_LEN]; int key_set; u8 tx_pn[CCMP_PN_LEN]; u8 rx_pn[CCMP_PN_LEN]; u32 dot11RSNAStatsCCMPFormatErrors; u32 dot11RSNAStatsCCMPReplays; u32 dot11RSNAStatsCCMPDecryptErrors; int key_idx; struct crypto_tfm *tfm; /* scratch buffers for virt_to_page() (crypto API) */ u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN]; u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; }; void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm, const u8 pt[16], u8 ct[16]) { crypto_cipher_encrypt_one((void *)tfm, ct, pt); } static void *ieee80211_ccmp_init(int key_idx) { struct ieee80211_ccmp_data *priv; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = key_idx; priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tfm)) { pr_debug("could not allocate crypto API aes\n"); priv->tfm = NULL; goto fail; } return priv; fail: if (priv) { if 
(priv->tfm) crypto_free_cipher((void *)priv->tfm); kfree(priv); } return NULL; } static void ieee80211_ccmp_deinit(void *priv) { struct ieee80211_ccmp_data *_priv = priv; if (_priv && _priv->tfm) crypto_free_cipher((void *)_priv->tfm); kfree(priv); } static inline void xor_block(u8 *b, u8 *a, size_t len) { int i; for (i = 0; i < len; i++) b[i] ^= a[i]; } static void ccmp_init_blocks(struct crypto_tfm *tfm, struct ieee80211_hdr_4addr *hdr, u8 *pn, size_t dlen, u8 *b0, u8 *auth, u8 *s0) { u8 *pos, qc = 0; size_t aad_len; u16 fc; int a4_included, qc_included; u8 aad[2 * AES_BLOCK_LEN]; fc = le16_to_cpu(hdr->frame_ctl); a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)); /* qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) && (WLAN_FC_GET_STYPE(fc) & 0x08)); */ // fixed by David :2006.9.6 qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) && (WLAN_FC_GET_STYPE(fc) & 0x80)); aad_len = 22; if (a4_included) aad_len += 6; if (qc_included) { pos = (u8 *) &hdr->addr4; if (a4_included) pos += 6; qc = *pos & 0x0f; aad_len += 2; } /* CCM Initial Block: * Flag (Include authentication header, M=3 (8-octet MIC), * L=1 (2-octet Dlen)) * Nonce: 0x00 | A2 | PN * Dlen */ b0[0] = 0x59; b0[1] = qc; memcpy(b0 + 2, hdr->addr2, ETH_ALEN); memcpy(b0 + 8, pn, CCMP_PN_LEN); b0[14] = (dlen >> 8) & 0xff; b0[15] = dlen & 0xff; /* AAD: * FC with bits 4..6 and 11..13 masked to zero; 14 is always one * A1 | A2 | A3 * SC with bits 4..15 (seq#) masked to zero * A4 (if present) * QC (if present) */ pos = (u8 *) hdr; aad[0] = 0; /* aad_len >> 8 */ aad[1] = aad_len & 0xff; aad[2] = pos[0] & 0x8f; aad[3] = pos[1] & 0xc7; memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN); pos = (u8 *) &hdr->seq_ctl; aad[22] = pos[0] & 0x0f; aad[23] = 0; /* all bits masked */ memset(aad + 24, 0, 8); if (a4_included) memcpy(aad + 24, hdr->addr4, ETH_ALEN); if (qc_included) { aad[a4_included ? 
30 : 24] = qc; /* rest of QC masked */ } /* Start with the first block and AAD */ ieee80211_ccmp_aes_encrypt(tfm, b0, auth); xor_block(auth, aad, AES_BLOCK_LEN); ieee80211_ccmp_aes_encrypt(tfm, auth, auth); xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN); ieee80211_ccmp_aes_encrypt(tfm, auth, auth); b0[0] &= 0x07; b0[14] = b0[15] = 0; ieee80211_ccmp_aes_encrypt(tfm, b0, s0); } static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_ccmp_data *key = priv; int data_len, i; u8 *pos; struct ieee80211_hdr_4addr *hdr; int blocks, last, len; u8 *mic; u8 *b0 = key->tx_b0; u8 *b = key->tx_b; u8 *e = key->tx_e; u8 *s0 = key->tx_s0; if (skb_headroom(skb) < CCMP_HDR_LEN || skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) return -1; data_len = skb->len - hdr_len; pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdr_len); pos += hdr_len; // mic = skb_put(skb, CCMP_MIC_LEN); i = CCMP_PN_LEN - 1; while (i >= 0) { key->tx_pn[i]++; if (key->tx_pn[i] != 0) break; i--; } *pos++ = key->tx_pn[5]; *pos++ = key->tx_pn[4]; *pos++ = 0; *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */; *pos++ = key->tx_pn[3]; *pos++ = key->tx_pn[2]; *pos++ = key->tx_pn[1]; *pos++ = key->tx_pn[0]; hdr = (struct ieee80211_hdr_4addr *)skb->data; //mic is moved to here by john mic = skb_put(skb, CCMP_MIC_LEN); ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { len = (i == blocks && last) ? 
last : AES_BLOCK_LEN; /* Authentication */ xor_block(b, pos, len); ieee80211_ccmp_aes_encrypt(key->tfm, b, b); /* Encryption, with counter */ b0[14] = (i >> 8) & 0xff; b0[15] = i & 0xff; ieee80211_ccmp_aes_encrypt(key->tfm, b0, e); xor_block(pos, e, len); pos += len; } for (i = 0; i < CCMP_MIC_LEN; i++) mic[i] = b[i] ^ s0[i]; return 0; } static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_ccmp_data *key = priv; u8 keyidx, *pos; struct ieee80211_hdr_4addr *hdr; u8 pn[6]; size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN; u8 *mic = skb->data + skb->len - CCMP_MIC_LEN; u8 *b0 = key->rx_b0; u8 *b = key->rx_b; u8 *a = key->rx_a; int i, blocks, last, len; if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) { key->dot11RSNAStatsCCMPFormatErrors++; return -1; } hdr = (struct ieee80211_hdr_4addr *)skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { if (net_ratelimit()) { pr_debug("received packet without ExtIV flag from %pM\n", hdr->addr2); } key->dot11RSNAStatsCCMPFormatErrors++; return -2; } keyidx >>= 6; if (key->key_idx != keyidx) { pr_debug("RX tkey->key_idx=%d frame keyidx=%d priv=%p\n", key->key_idx, keyidx, priv); return -6; } if (!key->key_set) { if (net_ratelimit()) { pr_debug("received packet from %pM with keyid=%d that does not have a configured key\n", hdr->addr2, keyidx); } return -3; } pn[0] = pos[7]; pn[1] = pos[6]; pn[2] = pos[5]; pn[3] = pos[4]; pn[4] = pos[1]; pn[5] = pos[0]; pos += 8; if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) { if (net_ratelimit()) { pr_debug("replay detected: STA=%pM previous PN %pm received PN %pm\n", hdr->addr2, key->rx_pn, pn); } key->dot11RSNAStatsCCMPReplays++; return -4; } ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b); xor_block(mic, b, CCMP_MIC_LEN); blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { len = (i == blocks && last) ? 
last : AES_BLOCK_LEN; /* Decrypt, with counter */ b0[14] = (i >> 8) & 0xff; b0[15] = i & 0xff; ieee80211_ccmp_aes_encrypt(key->tfm, b0, b); xor_block(pos, b, len); /* Authentication */ xor_block(a, pos, len); ieee80211_ccmp_aes_encrypt(key->tfm, a, a); pos += len; } if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { if (net_ratelimit()) pr_debug("decrypt failed: STA=%pM\n", hdr->addr2); key->dot11RSNAStatsCCMPDecryptErrors++; return -5; } memcpy(key->rx_pn, pn, CCMP_PN_LEN); /* Remove hdr and MIC */ memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len); skb_pull(skb, CCMP_HDR_LEN); skb_trim(skb, skb->len - CCMP_MIC_LEN); return keyidx; } static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv) { struct ieee80211_ccmp_data *data = priv; int keyidx; struct crypto_tfm *tfm = data->tfm; keyidx = data->key_idx; memset(data, 0, sizeof(*data)); data->key_idx = keyidx; data->tfm = tfm; if (len == CCMP_TK_LEN) { memcpy(data->key, key, CCMP_TK_LEN); data->key_set = 1; if (seq) { data->rx_pn[0] = seq[5]; data->rx_pn[1] = seq[4]; data->rx_pn[2] = seq[3]; data->rx_pn[3] = seq[2]; data->rx_pn[4] = seq[1]; data->rx_pn[5] = seq[0]; } crypto_cipher_setkey((void *)data->tfm, data->key, CCMP_TK_LEN); } else if (len == 0) data->key_set = 0; else return -1; return 0; } static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv) { struct ieee80211_ccmp_data *data = priv; if (len < CCMP_TK_LEN) return -1; if (!data->key_set) return 0; memcpy(key, data->key, CCMP_TK_LEN); if (seq) { seq[0] = data->tx_pn[5]; seq[1] = data->tx_pn[4]; seq[2] = data->tx_pn[3]; seq[3] = data->tx_pn[2]; seq[4] = data->tx_pn[1]; seq[5] = data->tx_pn[0]; } return CCMP_TK_LEN; } static char *ieee80211_ccmp_print_stats(char *p, void *priv) { struct ieee80211_ccmp_data *ccmp = priv; p += sprintf(p, "key[%d] alg=CCMP key_set=%d " "tx_pn=%pm rx_pn=%pm " "format_errors=%d replays=%d decrypt_errors=%d\n", ccmp->key_idx, ccmp->key_set, ccmp->tx_pn, ccmp->rx_pn, ccmp->dot11RSNAStatsCCMPFormatErrors, 
ccmp->dot11RSNAStatsCCMPReplays, ccmp->dot11RSNAStatsCCMPDecryptErrors); return p; } void ieee80211_ccmp_null(void) { // printk("============>%s()\n", __func__); return; } static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = { .name = "CCMP", .init = ieee80211_ccmp_init, .deinit = ieee80211_ccmp_deinit, .encrypt_mpdu = ieee80211_ccmp_encrypt, .decrypt_mpdu = ieee80211_ccmp_decrypt, .encrypt_msdu = NULL, .decrypt_msdu = NULL, .set_key = ieee80211_ccmp_set_key, .get_key = ieee80211_ccmp_get_key, .print_stats = ieee80211_ccmp_print_stats, .extra_prefix_len = CCMP_HDR_LEN, .extra_postfix_len = CCMP_MIC_LEN, .owner = THIS_MODULE, }; int ieee80211_crypto_ccmp_init(void) { return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp); } void ieee80211_crypto_ccmp_exit(void) { ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp); }
gpl-2.0
revjunkie/i9505
drivers/net/wireless/zd1201.c
3756
46765
/* * Driver for ZyDAS zd1201 based wireless USB devices. * * Copyright (c) 2004, 2005 Jeroen Vreeken (pe1rxq@amsat.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * Parts of this driver have been derived from a wlan-ng version * modified by ZyDAS. They also made documentation available, thanks! * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. */ #include <linux/module.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <net/iw_handler.h> #include <linux/string.h> #include <linux/if_arp.h> #include <linux/firmware.h> #include "zd1201.h" static struct usb_device_id zd1201_table[] = { {USB_DEVICE(0x0586, 0x3400)}, /* Peabird Wireless USB Adapter */ {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */ {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */ {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */ {USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */ {USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */ {} }; static int ap; /* Are we an AP or a normal station? 
*/ #define ZD1201_VERSION "0.15" MODULE_AUTHOR("Jeroen Vreeken <pe1rxq@amsat.org>"); MODULE_DESCRIPTION("Driver for ZyDAS ZD1201 based USB Wireless adapters"); MODULE_VERSION(ZD1201_VERSION); MODULE_LICENSE("GPL"); module_param(ap, int, 0); MODULE_PARM_DESC(ap, "If non-zero Access Point firmware will be loaded"); MODULE_DEVICE_TABLE(usb, zd1201_table); static int zd1201_fw_upload(struct usb_device *dev, int apfw) { const struct firmware *fw_entry; const char *data; unsigned long len; int err; unsigned char ret; char *buf; char *fwfile; if (apfw) fwfile = "zd1201-ap.fw"; else fwfile = "zd1201.fw"; err = request_firmware(&fw_entry, fwfile, &dev->dev); if (err) { dev_err(&dev->dev, "Failed to load %s firmware file!\n", fwfile); dev_err(&dev->dev, "Make sure the hotplug firmware loader is installed.\n"); dev_err(&dev->dev, "Goto http://linux-lc100020.sourceforge.net for more info.\n"); return err; } data = fw_entry->data; len = fw_entry->size; buf = kmalloc(1024, GFP_ATOMIC); if (!buf) goto exit; while (len > 0) { int translen = (len > 1024) ? 
1024 : len; memcpy(buf, data, translen); err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0, USB_DIR_OUT | 0x40, 0, 0, buf, translen, ZD1201_FW_TIMEOUT); if (err < 0) goto exit; len -= translen; data += translen; } err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x2, USB_DIR_OUT | 0x40, 0, 0, NULL, 0, ZD1201_FW_TIMEOUT); if (err < 0) goto exit; err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); if (err < 0) goto exit; if (ret & 0x80) { err = -EIO; goto exit; } err = 0; exit: kfree(buf); release_firmware(fw_entry); return err; } MODULE_FIRMWARE("zd1201-ap.fw"); MODULE_FIRMWARE("zd1201.fw"); static void zd1201_usbfree(struct urb *urb) { struct zd1201 *zd = urb->context; switch(urb->status) { case -EILSEQ: case -ENODEV: case -ETIME: case -ENOENT: case -EPIPE: case -EOVERFLOW: case -ESHUTDOWN: dev_warn(&zd->usb->dev, "%s: urb failed: %d\n", zd->dev->name, urb->status); } kfree(urb->transfer_buffer); usb_free_urb(urb); } /* cmdreq message: u32 type u16 cmd u16 parm0 u16 parm1 u16 parm2 u8 pad[4] total: 4 + 2 + 2 + 2 + 2 + 4 = 16 */ static int zd1201_docmd(struct zd1201 *zd, int cmd, int parm0, int parm1, int parm2) { unsigned char *command; int ret; struct urb *urb; command = kmalloc(16, GFP_ATOMIC); if (!command) return -ENOMEM; *((__le32*)command) = cpu_to_le32(ZD1201_USB_CMDREQ); *((__le16*)&command[4]) = cpu_to_le16(cmd); *((__le16*)&command[6]) = cpu_to_le16(parm0); *((__le16*)&command[8]) = cpu_to_le16(parm1); *((__le16*)&command[10])= cpu_to_le16(parm2); urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { kfree(command); return -ENOMEM; } usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2), command, 16, zd1201_usbfree, zd); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { kfree(command); usb_free_urb(urb); } return ret; } /* Callback after sending out a packet */ static void zd1201_usbtx(struct urb *urb) { struct zd1201 *zd = urb->context; netif_wake_queue(zd->dev); } /* 
Incoming data */ static void zd1201_usbrx(struct urb *urb) { struct zd1201 *zd = urb->context; int free = 0; unsigned char *data = urb->transfer_buffer; struct sk_buff *skb; unsigned char type; if (!zd) return; switch(urb->status) { case -EILSEQ: case -ENODEV: case -ETIME: case -ENOENT: case -EPIPE: case -EOVERFLOW: case -ESHUTDOWN: dev_warn(&zd->usb->dev, "%s: rx urb failed: %d\n", zd->dev->name, urb->status); free = 1; goto exit; } if (urb->status != 0 || urb->actual_length == 0) goto resubmit; type = data[0]; if (type == ZD1201_PACKET_EVENTSTAT || type == ZD1201_PACKET_RESOURCE) { memcpy(zd->rxdata, data, urb->actual_length); zd->rxlen = urb->actual_length; zd->rxdatas = 1; wake_up(&zd->rxdataq); } /* Info frame */ if (type == ZD1201_PACKET_INQUIRE) { int i = 0; unsigned short infotype, framelen, copylen; framelen = le16_to_cpu(*(__le16*)&data[4]); infotype = le16_to_cpu(*(__le16*)&data[6]); if (infotype == ZD1201_INF_LINKSTATUS) { short linkstatus; linkstatus = le16_to_cpu(*(__le16*)&data[8]); switch(linkstatus) { case 1: netif_carrier_on(zd->dev); break; case 2: netif_carrier_off(zd->dev); break; case 3: netif_carrier_off(zd->dev); break; case 4: netif_carrier_on(zd->dev); break; default: netif_carrier_off(zd->dev); } goto resubmit; } if (infotype == ZD1201_INF_ASSOCSTATUS) { short status = le16_to_cpu(*(__le16*)(data+8)); int event; union iwreq_data wrqu; switch (status) { case ZD1201_ASSOCSTATUS_STAASSOC: case ZD1201_ASSOCSTATUS_REASSOC: event = IWEVREGISTERED; break; case ZD1201_ASSOCSTATUS_DISASSOC: case ZD1201_ASSOCSTATUS_ASSOCFAIL: case ZD1201_ASSOCSTATUS_AUTHFAIL: default: event = IWEVEXPIRED; } memcpy(wrqu.addr.sa_data, data+10, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; /* Send event to user space */ wireless_send_event(zd->dev, event, &wrqu, NULL); goto resubmit; } if (infotype == ZD1201_INF_AUTHREQ) { union iwreq_data wrqu; memcpy(wrqu.addr.sa_data, data+8, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; /* There isn't a event that trully fits 
this request. We assume that userspace will be smart enough to see a new station being expired and sends back a authstation ioctl to authorize it. */ wireless_send_event(zd->dev, IWEVEXPIRED, &wrqu, NULL); goto resubmit; } /* Other infotypes are handled outside this handler */ zd->rxlen = 0; while (i < urb->actual_length) { copylen = le16_to_cpu(*(__le16*)&data[i+2]); /* Sanity check, sometimes we get junk */ if (copylen+zd->rxlen > sizeof(zd->rxdata)) break; memcpy(zd->rxdata+zd->rxlen, data+i+4, copylen); zd->rxlen += copylen; i += 64; } if (i >= urb->actual_length) { zd->rxdatas = 1; wake_up(&zd->rxdataq); } goto resubmit; } /* Actual data */ if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) { int datalen = urb->actual_length-1; unsigned short len, fc, seq; struct hlist_node *node; len = ntohs(*(__be16 *)&data[datalen-2]); if (len>datalen) len=datalen; fc = le16_to_cpu(*(__le16 *)&data[datalen-16]); seq = le16_to_cpu(*(__le16 *)&data[datalen-24]); if (zd->monitor) { if (datalen < 24) goto resubmit; if (!(skb = dev_alloc_skb(datalen+24))) goto resubmit; memcpy(skb_put(skb, 2), &data[datalen-16], 2); memcpy(skb_put(skb, 2), &data[datalen-2], 2); memcpy(skb_put(skb, 6), &data[datalen-14], 6); memcpy(skb_put(skb, 6), &data[datalen-22], 6); memcpy(skb_put(skb, 6), &data[datalen-8], 6); memcpy(skb_put(skb, 2), &data[datalen-24], 2); memcpy(skb_put(skb, len), data, len); skb->protocol = eth_type_trans(skb, zd->dev); zd->dev->stats.rx_packets++; zd->dev->stats.rx_bytes += skb->len; netif_rx(skb); goto resubmit; } if ((seq & IEEE80211_SCTL_FRAG) || (fc & IEEE80211_FCTL_MOREFRAGS)) { struct zd1201_frag *frag = NULL; char *ptr; if (datalen<14) goto resubmit; if ((seq & IEEE80211_SCTL_FRAG) == 0) { frag = kmalloc(sizeof(*frag), GFP_ATOMIC); if (!frag) goto resubmit; skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN +14+2); if (!skb) { kfree(frag); goto resubmit; } frag->skb = skb; frag->seq = seq & IEEE80211_SCTL_SEQ; skb_reserve(skb, 2); memcpy(skb_put(skb, 12), 
&data[datalen-14], 12); memcpy(skb_put(skb, 2), &data[6], 2); memcpy(skb_put(skb, len), data+8, len); hlist_add_head(&frag->fnode, &zd->fraglist); goto resubmit; } hlist_for_each_entry(frag, node, &zd->fraglist, fnode) if (frag->seq == (seq&IEEE80211_SCTL_SEQ)) break; if (!frag) goto resubmit; skb = frag->skb; ptr = skb_put(skb, len); if (ptr) memcpy(ptr, data+8, len); if (fc & IEEE80211_FCTL_MOREFRAGS) goto resubmit; hlist_del_init(&frag->fnode); kfree(frag); } else { if (datalen<14) goto resubmit; skb = dev_alloc_skb(len + 14 + 2); if (!skb) goto resubmit; skb_reserve(skb, 2); memcpy(skb_put(skb, 12), &data[datalen-14], 12); memcpy(skb_put(skb, 2), &data[6], 2); memcpy(skb_put(skb, len), data+8, len); } skb->protocol = eth_type_trans(skb, zd->dev); zd->dev->stats.rx_packets++; zd->dev->stats.rx_bytes += skb->len; netif_rx(skb); } resubmit: memset(data, 0, ZD1201_RXSIZE); urb->status = 0; urb->dev = zd->usb; if(usb_submit_urb(urb, GFP_ATOMIC)) free = 1; exit: if (free) { zd->rxlen = 0; zd->rxdatas = 1; wake_up(&zd->rxdataq); kfree(urb->transfer_buffer); } } static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata, unsigned int riddatalen) { int err; int i = 0; int code; int rid_fid; int length; unsigned char *pdata; zd->rxdatas = 0; err = zd1201_docmd(zd, ZD1201_CMDCODE_ACCESS, rid, 0, 0); if (err) return err; wait_event_interruptible(zd->rxdataq, zd->rxdatas); if (!zd->rxlen) return -EIO; code = le16_to_cpu(*(__le16*)(&zd->rxdata[4])); rid_fid = le16_to_cpu(*(__le16*)(&zd->rxdata[6])); length = le16_to_cpu(*(__le16*)(&zd->rxdata[8])); if (length > zd->rxlen) length = zd->rxlen-6; /* If access bit is not on, then error */ if ((code & ZD1201_ACCESSBIT) != ZD1201_ACCESSBIT || rid_fid != rid ) return -EINVAL; /* Not enough buffer for allocating data */ if (riddatalen != (length - 4)) { dev_dbg(&zd->usb->dev, "riddatalen mismatches, expected=%u, (packet=%u) length=%u, rid=0x%04X, rid_fid=0x%04X\n", riddatalen, zd->rxlen, length, rid, rid_fid); return 
-ENODATA; } zd->rxdatas = 0; /* Issue SetRxRid commnd */ err = zd1201_docmd(zd, ZD1201_CMDCODE_SETRXRID, rid, 0, length); if (err) return err; /* Receive RID record from resource packets */ wait_event_interruptible(zd->rxdataq, zd->rxdatas); if (!zd->rxlen) return -EIO; if (zd->rxdata[zd->rxlen - 1] != ZD1201_PACKET_RESOURCE) { dev_dbg(&zd->usb->dev, "Packet type mismatch: 0x%x not 0x3\n", zd->rxdata[zd->rxlen-1]); return -EINVAL; } /* Set the data pointer and received data length */ pdata = zd->rxdata; length = zd->rxlen; do { int actual_length; actual_length = (length > 64) ? 64 : length; if (pdata[0] != 0x3) { dev_dbg(&zd->usb->dev, "Rx Resource packet type error: %02X\n", pdata[0]); return -EINVAL; } if (actual_length != 64) { /* Trim the last packet type byte */ actual_length--; } /* Skip the 4 bytes header (RID length and RID) */ if (i == 0) { pdata += 8; actual_length -= 8; } else { pdata += 4; actual_length -= 4; } memcpy(riddata, pdata, actual_length); riddata += actual_length; pdata += actual_length; length -= 64; i++; } while (length > 0); return 0; } /* * resreq: * byte type * byte sequence * u16 reserved * byte data[12] * total: 16 */ static int zd1201_setconfig(struct zd1201 *zd, int rid, void *buf, int len, int wait) { int err; unsigned char *request; int reqlen; char seq=0; struct urb *urb; gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC; len += 4; /* first 4 are for header */ zd->rxdatas = 0; zd->rxlen = 0; for (seq=0; len > 0; seq++) { request = kmalloc(16, gfp_mask); if (!request) return -ENOMEM; urb = usb_alloc_urb(0, gfp_mask); if (!urb) { kfree(request); return -ENOMEM; } memset(request, 0, 16); reqlen = len>12 ? 
12 : len; request[0] = ZD1201_USB_RESREQ; request[1] = seq; request[2] = 0; request[3] = 0; if (request[1] == 0) { /* add header */ *(__le16*)&request[4] = cpu_to_le16((len-2+1)/2); *(__le16*)&request[6] = cpu_to_le16(rid); memcpy(request+8, buf, reqlen-4); buf += reqlen-4; } else { memcpy(request+4, buf, reqlen); buf += reqlen; } len -= reqlen; usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2), request, 16, zd1201_usbfree, zd); err = usb_submit_urb(urb, gfp_mask); if (err) goto err; } request = kmalloc(16, gfp_mask); if (!request) return -ENOMEM; urb = usb_alloc_urb(0, gfp_mask); if (!urb) { kfree(request); return -ENOMEM; } *((__le32*)request) = cpu_to_le32(ZD1201_USB_CMDREQ); *((__le16*)&request[4]) = cpu_to_le16(ZD1201_CMDCODE_ACCESS|ZD1201_ACCESSBIT); *((__le16*)&request[6]) = cpu_to_le16(rid); *((__le16*)&request[8]) = cpu_to_le16(0); *((__le16*)&request[10]) = cpu_to_le16(0); usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2), request, 16, zd1201_usbfree, zd); err = usb_submit_urb(urb, gfp_mask); if (err) goto err; if (wait) { wait_event_interruptible(zd->rxdataq, zd->rxdatas); if (!zd->rxlen || le16_to_cpu(*(__le16*)&zd->rxdata[6]) != rid) { dev_dbg(&zd->usb->dev, "wrong or no RID received\n"); } } return 0; err: kfree(request); usb_free_urb(urb); return err; } static inline int zd1201_getconfig16(struct zd1201 *zd, int rid, short *val) { int err; __le16 zdval; err = zd1201_getconfig(zd, rid, &zdval, sizeof(__le16)); if (err) return err; *val = le16_to_cpu(zdval); return 0; } static inline int zd1201_setconfig16(struct zd1201 *zd, int rid, short val) { __le16 zdval = cpu_to_le16(val); return (zd1201_setconfig(zd, rid, &zdval, sizeof(__le16), 1)); } static int zd1201_drvr_start(struct zd1201 *zd) { int err, i; short max; __le16 zdmax; unsigned char *buffer; buffer = kzalloc(ZD1201_RXSIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; usb_fill_bulk_urb(zd->rx_urb, zd->usb, usb_rcvbulkpipe(zd->usb, zd->endp_in), buffer, 
ZD1201_RXSIZE, zd1201_usbrx, zd); err = usb_submit_urb(zd->rx_urb, GFP_KERNEL); if (err) goto err_buffer; err = zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0); if (err) goto err_urb; err = zd1201_getconfig(zd, ZD1201_RID_CNFMAXTXBUFFERNUMBER, &zdmax, sizeof(__le16)); if (err) goto err_urb; max = le16_to_cpu(zdmax); for (i=0; i<max; i++) { err = zd1201_docmd(zd, ZD1201_CMDCODE_ALLOC, 1514, 0, 0); if (err) goto err_urb; } return 0; err_urb: usb_kill_urb(zd->rx_urb); return err; err_buffer: kfree(buffer); return err; } /* Magic alert: The firmware doesn't seem to like the MAC state being * toggled in promisc (aka monitor) mode. * (It works a number of times, but will halt eventually) * So we turn it of before disabling and on after enabling if needed. */ static int zd1201_enable(struct zd1201 *zd) { int err; if (zd->mac_enabled) return 0; err = zd1201_docmd(zd, ZD1201_CMDCODE_ENABLE, 0, 0, 0); if (!err) zd->mac_enabled = 1; if (zd->monitor) err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 1); return err; } static int zd1201_disable(struct zd1201 *zd) { int err; if (!zd->mac_enabled) return 0; if (zd->monitor) { err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 0); if (err) return err; } err = zd1201_docmd(zd, ZD1201_CMDCODE_DISABLE, 0, 0, 0); if (!err) zd->mac_enabled = 0; return err; } static int zd1201_mac_reset(struct zd1201 *zd) { if (!zd->mac_enabled) return 0; zd1201_disable(zd); return zd1201_enable(zd); } static int zd1201_join(struct zd1201 *zd, char *essid, int essidlen) { int err, val; char buf[IW_ESSID_MAX_SIZE+2]; err = zd1201_disable(zd); if (err) return err; val = ZD1201_CNFAUTHENTICATION_OPENSYSTEM; val |= ZD1201_CNFAUTHENTICATION_SHAREDKEY; err = zd1201_setconfig16(zd, ZD1201_RID_CNFAUTHENTICATION, val); if (err) return err; *(__le16 *)buf = cpu_to_le16(essidlen); memcpy(buf+2, essid, essidlen); if (!zd->ap) { /* Normal station */ err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf, IW_ESSID_MAX_SIZE+2, 1); if (err) return err; } 
else { /* AP */ err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNSSID, buf, IW_ESSID_MAX_SIZE+2, 1); if (err) return err; } err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNMACADDR, zd->dev->dev_addr, zd->dev->addr_len, 1); if (err) return err; err = zd1201_enable(zd); if (err) return err; msleep(100); return 0; } static int zd1201_net_open(struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); /* Start MAC with wildcard if no essid set */ if (!zd->mac_enabled) zd1201_join(zd, zd->essid, zd->essidlen); netif_start_queue(dev); return 0; } static int zd1201_net_stop(struct net_device *dev) { netif_stop_queue(dev); return 0; } /* RFC 1042 encapsulates Ethernet frames in 802.11 frames by prefixing them with 0xaa, 0xaa, 0x03) followed by a SNAP OID of 0 (0x00, 0x00, 0x00). Zd requires an additional padding, copy of ethernet addresses, length of the standard RFC 1042 packet and a command byte (which is nul for tx). tx frame (from Wlan NG): RFC 1042: llc 0xAA 0xAA 0x03 (802.2 LLC) snap 0x00 0x00 0x00 (Ethernet encapsulated) type 2 bytes, Ethernet type field payload (minus eth header) Zydas specific: padding 1B if (skb->len+8+1)%64==0 Eth MAC addr 12 bytes, Ethernet MAC addresses length 2 bytes, RFC 1042 packet length (llc+snap+type+payload) zd 1 null byte, zd1201 packet type */ static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); unsigned char *txbuf = zd->txdata; int txbuflen, pad = 0, err; struct urb *urb = zd->tx_urb; if (!zd->mac_enabled || zd->monitor) { dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } netif_stop_queue(dev); txbuflen = skb->len + 8 + 1; if (txbuflen%64 == 0) { pad = 1; txbuflen++; } txbuf[0] = 0xAA; txbuf[1] = 0xAA; txbuf[2] = 0x03; txbuf[3] = 0x00; /* rfc1042 */ txbuf[4] = 0x00; txbuf[5] = 0x00; skb_copy_from_linear_data_offset(skb, 12, txbuf + 6, skb->len - 12); if (pad) txbuf[skb->len-12+6]=0; skb_copy_from_linear_data(skb, txbuf + skb->len - 12 + 6 + pad, 12); 
*(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6); txbuf[txbuflen-1] = 0; usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out), txbuf, txbuflen, zd1201_usbtx, zd); err = usb_submit_urb(zd->tx_urb, GFP_ATOMIC); if (err) { dev->stats.tx_errors++; netif_start_queue(dev); } else { dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } kfree_skb(skb); return NETDEV_TX_OK; } static void zd1201_tx_timeout(struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); if (!zd) return; dev_warn(&zd->usb->dev, "%s: TX timeout, shooting down urb\n", dev->name); usb_unlink_urb(zd->tx_urb); dev->stats.tx_errors++; /* Restart the timeout to quiet the watchdog: */ dev->trans_start = jiffies; /* prevent tx timeout */ } static int zd1201_set_mac_address(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct zd1201 *zd = netdev_priv(dev); int err; if (!zd) return -ENODEV; err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNMACADDR, addr->sa_data, dev->addr_len, 1); if (err) return err; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); return zd1201_mac_reset(zd); } static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); return &zd->iwstats; } static void zd1201_set_multicast(struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); struct netdev_hw_addr *ha; unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI]; int i; if (netdev_mc_count(dev) > ZD1201_MAXMULTI) return; i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN); zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf, netdev_mc_count(dev) * ETH_ALEN, 0); } static int zd1201_config_commit(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *essid) { struct zd1201 *zd = netdev_priv(dev); return zd1201_mac_reset(zd); } static int zd1201_get_name(struct net_device *dev, struct iw_request_info *info, char *name, char *extra) { strcpy(name, "IEEE 802.11b"); 
return 0; } static int zd1201_set_freq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short channel = 0; int err; if (freq->e == 0) channel = freq->m; else { channel = ieee80211_freq_to_dsss_chan(freq->m); if (channel < 0) channel = 0; } err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel); if (err) return err; zd1201_mac_reset(zd); return 0; } static int zd1201_get_freq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short channel; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, &channel); if (err) return err; freq->e = 0; freq->m = channel; return 0; } static int zd1201_set_mode(struct net_device *dev, struct iw_request_info *info, __u32 *mode, char *extra) { struct zd1201 *zd = netdev_priv(dev); short porttype, monitor = 0; unsigned char buffer[IW_ESSID_MAX_SIZE+2]; int err; if (zd->ap) { if (*mode != IW_MODE_MASTER) return -EINVAL; return 0; } err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 0); if (err) return err; zd->dev->type = ARPHRD_ETHER; switch(*mode) { case IW_MODE_MONITOR: monitor = 1; zd->dev->type = ARPHRD_IEEE80211; /* Make sure we are no longer associated with by setting an 'impossible' essid. 
(otherwise we mess up firmware) */ zd1201_join(zd, "\0-*#\0", 5); /* Put port in pIBSS */ case 8: /* No pseudo-IBSS in wireless extensions (yet) */ porttype = ZD1201_PORTTYPE_PSEUDOIBSS; break; case IW_MODE_ADHOC: porttype = ZD1201_PORTTYPE_IBSS; break; case IW_MODE_INFRA: porttype = ZD1201_PORTTYPE_BSS; break; default: return -EINVAL; } err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype); if (err) return err; if (zd->monitor && !monitor) { zd1201_disable(zd); *(__le16 *)buffer = cpu_to_le16(zd->essidlen); memcpy(buffer+2, zd->essid, zd->essidlen); err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buffer, IW_ESSID_MAX_SIZE+2, 1); if (err) return err; } zd->monitor = monitor; /* If monitor mode is set we don't actually turn it on here since it * is done during mac reset anyway (see zd1201_mac_enable). */ zd1201_mac_reset(zd); return 0; } static int zd1201_get_mode(struct net_device *dev, struct iw_request_info *info, __u32 *mode, char *extra) { struct zd1201 *zd = netdev_priv(dev); short porttype; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFPORTTYPE, &porttype); if (err) return err; switch(porttype) { case ZD1201_PORTTYPE_IBSS: *mode = IW_MODE_ADHOC; break; case ZD1201_PORTTYPE_BSS: *mode = IW_MODE_INFRA; break; case ZD1201_PORTTYPE_WDS: *mode = IW_MODE_REPEAT; break; case ZD1201_PORTTYPE_PSEUDOIBSS: *mode = 8;/* No Pseudo-IBSS... 
*/ break; case ZD1201_PORTTYPE_AP: *mode = IW_MODE_MASTER; break; default: dev_dbg(&zd->usb->dev, "Unknown porttype: %d\n", porttype); *mode = IW_MODE_AUTO; } if (zd->monitor) *mode = IW_MODE_MONITOR; return 0; } static int zd1201_get_range(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct iw_range *range = (struct iw_range *)extra; wrq->length = sizeof(struct iw_range); memset(range, 0, sizeof(struct iw_range)); range->we_version_compiled = WIRELESS_EXT; range->we_version_source = WIRELESS_EXT; range->max_qual.qual = 128; range->max_qual.level = 128; range->max_qual.noise = 128; range->max_qual.updated = 7; range->encoding_size[0] = 5; range->encoding_size[1] = 13; range->num_encoding_sizes = 2; range->max_encoding_tokens = ZD1201_NUMKEYS; range->num_bitrates = 4; range->bitrate[0] = 1000000; range->bitrate[1] = 2000000; range->bitrate[2] = 5500000; range->bitrate[3] = 11000000; range->min_rts = 0; range->min_frag = ZD1201_FRAGMIN; range->max_rts = ZD1201_RTSMAX; range->min_frag = ZD1201_FRAGMAX; return 0; } /* Little bit of magic here: we only get the quality if we poll * for it, and we never get an actual request to trigger such * a poll. Therefore we 'assume' that the user will soon ask for * the stats after asking the bssid. */ static int zd1201_get_wap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct zd1201 *zd = netdev_priv(dev); unsigned char buffer[6]; if (!zd1201_getconfig(zd, ZD1201_RID_COMMSQUALITY, buffer, 6)) { /* Unfortunately the quality and noise reported is useless. 
they seem to be accumulators that increase until you read them, unless we poll on a fixed interval we can't use them */ /*zd->iwstats.qual.qual = le16_to_cpu(((__le16 *)buffer)[0]);*/ zd->iwstats.qual.level = le16_to_cpu(((__le16 *)buffer)[1]); /*zd->iwstats.qual.noise = le16_to_cpu(((__le16 *)buffer)[2]);*/ zd->iwstats.qual.updated = 2; } return zd1201_getconfig(zd, ZD1201_RID_CURRENTBSSID, ap_addr->sa_data, 6); } static int zd1201_set_scan(struct net_device *dev, struct iw_request_info *info, struct iw_point *srq, char *extra) { /* We do everything in get_scan */ return 0; } static int zd1201_get_scan(struct net_device *dev, struct iw_request_info *info, struct iw_point *srq, char *extra) { struct zd1201 *zd = netdev_priv(dev); int err, i, j, enabled_save; struct iw_event iwe; char *cev = extra; char *end_buf = extra + IW_SCAN_MAX_DATA; /* No scanning in AP mode */ if (zd->ap) return -EOPNOTSUPP; /* Scan doesn't seem to work if disabled */ enabled_save = zd->mac_enabled; zd1201_enable(zd); zd->rxdatas = 0; err = zd1201_docmd(zd, ZD1201_CMDCODE_INQUIRE, ZD1201_INQ_SCANRESULTS, 0, 0); if (err) return err; wait_event_interruptible(zd->rxdataq, zd->rxdatas); if (!zd->rxlen) return -EIO; if (le16_to_cpu(*(__le16*)&zd->rxdata[2]) != ZD1201_INQ_SCANRESULTS) return -EIO; for(i=8; i<zd->rxlen; i+=62) { iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, zd->rxdata+i+6, 6); cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_ADDR_LEN); iwe.cmd = SIOCGIWESSID; iwe.u.data.length = zd->rxdata[i+16]; iwe.u.data.flags = 1; cev = iwe_stream_add_point(info, cev, end_buf, &iwe, zd->rxdata+i+18); iwe.cmd = SIOCGIWMODE; if (zd->rxdata[i+14]&0x01) iwe.u.mode = IW_MODE_MASTER; else iwe.u.mode = IW_MODE_ADHOC; cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_UINT_LEN); iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = zd->rxdata[i+0]; iwe.u.freq.e = 0; cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_FREQ_LEN); iwe.cmd = 
SIOCGIWRATE; iwe.u.bitrate.fixed = 0; iwe.u.bitrate.disabled = 0; for (j=0; j<10; j++) if (zd->rxdata[i+50+j]) { iwe.u.bitrate.value = (zd->rxdata[i+50+j]&0x7f)*500000; cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_PARAM_LEN); } iwe.cmd = SIOCGIWENCODE; iwe.u.data.length = 0; if (zd->rxdata[i+14]&0x10) iwe.u.data.flags = IW_ENCODE_ENABLED; else iwe.u.data.flags = IW_ENCODE_DISABLED; cev = iwe_stream_add_point(info, cev, end_buf, &iwe, NULL); iwe.cmd = IWEVQUAL; iwe.u.qual.qual = zd->rxdata[i+4]; iwe.u.qual.noise= zd->rxdata[i+2]/10-100; iwe.u.qual.level = (256+zd->rxdata[i+4]*100)/255-100; iwe.u.qual.updated = 7; cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_QUAL_LEN); } if (!enabled_save) zd1201_disable(zd); srq->length = cev - extra; srq->flags = 0; return 0; } static int zd1201_set_essid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *essid) { struct zd1201 *zd = netdev_priv(dev); if (data->length > IW_ESSID_MAX_SIZE) return -EINVAL; if (data->length < 1) data->length = 1; zd->essidlen = data->length; memset(zd->essid, 0, IW_ESSID_MAX_SIZE+1); memcpy(zd->essid, essid, data->length); return zd1201_join(zd, zd->essid, zd->essidlen); } static int zd1201_get_essid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *essid) { struct zd1201 *zd = netdev_priv(dev); memcpy(essid, zd->essid, zd->essidlen); data->flags = 1; data->length = zd->essidlen; return 0; } static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *nick) { strcpy(nick, "zd1201"); data->flags = 1; data->length = strlen(nick); return 0; } static int zd1201_set_rate(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short rate; int err; switch (rrq->value) { case 1000000: rate = ZD1201_RATEB1; break; case 2000000: rate = ZD1201_RATEB2; break; case 5500000: rate = ZD1201_RATEB5; break; 
case 11000000: default: rate = ZD1201_RATEB11; break; } if (!rrq->fixed) { /* Also enable all lower bitrates */ rate |= rate-1; } err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL, rate); if (err) return err; return zd1201_mac_reset(zd); } static int zd1201_get_rate(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short rate; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CURRENTTXRATE, &rate); if (err) return err; switch(rate) { case 1: rrq->value = 1000000; break; case 2: rrq->value = 2000000; break; case 5: rrq->value = 5500000; break; case 11: rrq->value = 11000000; break; default: rrq->value = 0; } rrq->fixed = 0; rrq->disabled = 0; return 0; } static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info, struct iw_param *rts, char *extra) { struct zd1201 *zd = netdev_priv(dev); int err; short val = rts->value; if (rts->disabled || !rts->fixed) val = ZD1201_RTSMAX; if (val > ZD1201_RTSMAX) return -EINVAL; if (val < 0) return -EINVAL; err = zd1201_setconfig16(zd, ZD1201_RID_CNFRTSTHRESHOLD, val); if (err) return err; return zd1201_mac_reset(zd); } static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info, struct iw_param *rts, char *extra) { struct zd1201 *zd = netdev_priv(dev); short rtst; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFRTSTHRESHOLD, &rtst); if (err) return err; rts->value = rtst; rts->disabled = (rts->value == ZD1201_RTSMAX); rts->fixed = 1; return 0; } static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info, struct iw_param *frag, char *extra) { struct zd1201 *zd = netdev_priv(dev); int err; short val = frag->value; if (frag->disabled || !frag->fixed) val = ZD1201_FRAGMAX; if (val > ZD1201_FRAGMAX) return -EINVAL; if (val < ZD1201_FRAGMIN) return -EINVAL; if (val & 1) return -EINVAL; err = zd1201_setconfig16(zd, ZD1201_RID_CNFFRAGTHRESHOLD, val); if (err) return err; return 
zd1201_mac_reset(zd); } static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info, struct iw_param *frag, char *extra) { struct zd1201 *zd = netdev_priv(dev); short fragt; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFFRAGTHRESHOLD, &fragt); if (err) return err; frag->value = fragt; frag->disabled = (frag->value == ZD1201_FRAGMAX); frag->fixed = 1; return 0; } static int zd1201_set_retry(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { return 0; } static int zd1201_get_retry(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { return 0; } static int zd1201_set_encode(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *key) { struct zd1201 *zd = netdev_priv(dev); short i; int err, rid; if (erq->length > ZD1201_MAXKEYLEN) return -EINVAL; i = (erq->flags & IW_ENCODE_INDEX)-1; if (i == -1) { err = zd1201_getconfig16(zd,ZD1201_RID_CNFDEFAULTKEYID,&i); if (err) return err; } else { err = zd1201_setconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, i); if (err) return err; } if (i < 0 || i >= ZD1201_NUMKEYS) return -EINVAL; rid = ZD1201_RID_CNFDEFAULTKEY0 + i; err = zd1201_setconfig(zd, rid, key, erq->length, 1); if (err) return err; zd->encode_keylen[i] = erq->length; memcpy(zd->encode_keys[i], key, erq->length); i=0; if (!(erq->flags & IW_ENCODE_DISABLED & IW_ENCODE_MODE)) { i |= 0x01; zd->encode_enabled = 1; } else zd->encode_enabled = 0; if (erq->flags & IW_ENCODE_RESTRICTED & IW_ENCODE_MODE) { i |= 0x02; zd->encode_restricted = 1; } else zd->encode_restricted = 0; err = zd1201_setconfig16(zd, ZD1201_RID_CNFWEBFLAGS, i); if (err) return err; if (zd->encode_enabled) i = ZD1201_CNFAUTHENTICATION_SHAREDKEY; else i = ZD1201_CNFAUTHENTICATION_OPENSYSTEM; err = zd1201_setconfig16(zd, ZD1201_RID_CNFAUTHENTICATION, i); if (err) return err; return zd1201_mac_reset(zd); } static int zd1201_get_encode(struct net_device *dev, struct iw_request_info 
*info, struct iw_point *erq, char *key) { struct zd1201 *zd = netdev_priv(dev); short i; int err; if (zd->encode_enabled) erq->flags = IW_ENCODE_ENABLED; else erq->flags = IW_ENCODE_DISABLED; if (zd->encode_restricted) erq->flags |= IW_ENCODE_RESTRICTED; else erq->flags |= IW_ENCODE_OPEN; i = (erq->flags & IW_ENCODE_INDEX) -1; if (i == -1) { err = zd1201_getconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, &i); if (err) return err; } if (i<0 || i>= ZD1201_NUMKEYS) return -EINVAL; erq->flags |= i+1; erq->length = zd->encode_keylen[i]; memcpy(key, zd->encode_keys[i], erq->length); return 0; } static int zd1201_set_power(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short enabled, duration, level; int err; enabled = vwrq->disabled ? 0 : 1; if (enabled) { if (vwrq->flags & IW_POWER_PERIOD) { duration = vwrq->value; err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXSLEEPDURATION, duration); if (err) return err; goto out; } if (vwrq->flags & IW_POWER_TIMEOUT) { err = zd1201_getconfig16(zd, ZD1201_RID_CNFMAXSLEEPDURATION, &duration); if (err) return err; level = vwrq->value * 4 / duration; if (level > 4) level = 4; if (level < 0) level = 0; err = zd1201_setconfig16(zd, ZD1201_RID_CNFPMEPS, level); if (err) return err; goto out; } return -EINVAL; } out: return zd1201_setconfig16(zd, ZD1201_RID_CNFPMENABLED, enabled); } static int zd1201_get_power(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short enabled, level, duration; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFPMENABLED, &enabled); if (err) return err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFPMEPS, &level); if (err) return err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFMAXSLEEPDURATION, &duration); if (err) return err; vwrq->disabled = enabled ? 
0 : 1; if (vwrq->flags & IW_POWER_TYPE) { if (vwrq->flags & IW_POWER_PERIOD) { vwrq->value = duration; vwrq->flags = IW_POWER_PERIOD; } else { vwrq->value = duration * level / 4; vwrq->flags = IW_POWER_TIMEOUT; } } if (vwrq->flags & IW_POWER_MODE) { if (enabled && level) vwrq->flags = IW_POWER_UNICAST_R; else vwrq->flags = IW_POWER_ALL_R; } return 0; } static const iw_handler zd1201_iw_handler[] = { (iw_handler) zd1201_config_commit, /* SIOCSIWCOMMIT */ (iw_handler) zd1201_get_name, /* SIOCGIWNAME */ (iw_handler) NULL, /* SIOCSIWNWID */ (iw_handler) NULL, /* SIOCGIWNWID */ (iw_handler) zd1201_set_freq, /* SIOCSIWFREQ */ (iw_handler) zd1201_get_freq, /* SIOCGIWFREQ */ (iw_handler) zd1201_set_mode, /* SIOCSIWMODE */ (iw_handler) zd1201_get_mode, /* SIOCGIWMODE */ (iw_handler) NULL, /* SIOCSIWSENS */ (iw_handler) NULL, /* SIOCGIWSENS */ (iw_handler) NULL, /* SIOCSIWRANGE */ (iw_handler) zd1201_get_range, /* SIOCGIWRANGE */ (iw_handler) NULL, /* SIOCSIWPRIV */ (iw_handler) NULL, /* SIOCGIWPRIV */ (iw_handler) NULL, /* SIOCSIWSTATS */ (iw_handler) NULL, /* SIOCGIWSTATS */ (iw_handler) NULL, /* SIOCSIWSPY */ (iw_handler) NULL, /* SIOCGIWSPY */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL/*zd1201_set_wap*/, /* SIOCSIWAP */ (iw_handler) zd1201_get_wap, /* SIOCGIWAP */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* SIOCGIWAPLIST */ (iw_handler) zd1201_set_scan, /* SIOCSIWSCAN */ (iw_handler) zd1201_get_scan, /* SIOCGIWSCAN */ (iw_handler) zd1201_set_essid, /* SIOCSIWESSID */ (iw_handler) zd1201_get_essid, /* SIOCGIWESSID */ (iw_handler) NULL, /* SIOCSIWNICKN */ (iw_handler) zd1201_get_nick, /* SIOCGIWNICKN */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) zd1201_set_rate, /* SIOCSIWRATE */ (iw_handler) zd1201_get_rate, /* SIOCGIWRATE */ (iw_handler) zd1201_set_rts, /* SIOCSIWRTS */ (iw_handler) zd1201_get_rts, /* SIOCGIWRTS */ (iw_handler) zd1201_set_frag, /* SIOCSIWFRAG */ 
(iw_handler) zd1201_get_frag, /* SIOCGIWFRAG */ (iw_handler) NULL, /* SIOCSIWTXPOW */ (iw_handler) NULL, /* SIOCGIWTXPOW */ (iw_handler) zd1201_set_retry, /* SIOCSIWRETRY */ (iw_handler) zd1201_get_retry, /* SIOCGIWRETRY */ (iw_handler) zd1201_set_encode, /* SIOCSIWENCODE */ (iw_handler) zd1201_get_encode, /* SIOCGIWENCODE */ (iw_handler) zd1201_set_power, /* SIOCSIWPOWER */ (iw_handler) zd1201_get_power, /* SIOCGIWPOWER */ }; static int zd1201_set_hostauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); if (!zd->ap) return -EOPNOTSUPP; return zd1201_setconfig16(zd, ZD1201_RID_CNFHOSTAUTH, rrq->value); } static int zd1201_get_hostauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short hostauth; int err; if (!zd->ap) return -EOPNOTSUPP; err = zd1201_getconfig16(zd, ZD1201_RID_CNFHOSTAUTH, &hostauth); if (err) return err; rrq->value = hostauth; rrq->fixed = 1; return 0; } static int zd1201_auth_sta(struct net_device *dev, struct iw_request_info *info, struct sockaddr *sta, char *extra) { struct zd1201 *zd = netdev_priv(dev); unsigned char buffer[10]; if (!zd->ap) return -EOPNOTSUPP; memcpy(buffer, sta->sa_data, ETH_ALEN); *(short*)(buffer+6) = 0; /* 0==success, 1==failure */ *(short*)(buffer+8) = 0; return zd1201_setconfig(zd, ZD1201_RID_AUTHENTICATESTA, buffer, 10, 1); } static int zd1201_set_maxassoc(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); int err; if (!zd->ap) return -EOPNOTSUPP; err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value); if (err) return err; return 0; } static int zd1201_get_maxassoc(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short maxassoc; int err; if (!zd->ap) return -EOPNOTSUPP; err = 
zd1201_getconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, &maxassoc); if (err) return err; rrq->value = maxassoc; rrq->fixed = 1; return 0; } static const iw_handler zd1201_private_handler[] = { (iw_handler) zd1201_set_hostauth, /* ZD1201SIWHOSTAUTH */ (iw_handler) zd1201_get_hostauth, /* ZD1201GIWHOSTAUTH */ (iw_handler) zd1201_auth_sta, /* ZD1201SIWAUTHSTA */ (iw_handler) NULL, /* nothing to get */ (iw_handler) zd1201_set_maxassoc, /* ZD1201SIMAXASSOC */ (iw_handler) zd1201_get_maxassoc, /* ZD1201GIMAXASSOC */ }; static const struct iw_priv_args zd1201_private_args[] = { { ZD1201SIWHOSTAUTH, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE, "sethostauth" }, { ZD1201GIWHOSTAUTH, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostauth" }, { ZD1201SIWAUTHSTA, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE, "authstation" }, { ZD1201SIWMAXASSOC, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE, "setmaxassoc" }, { ZD1201GIWMAXASSOC, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmaxassoc" }, }; static const struct iw_handler_def zd1201_iw_handlers = { .num_standard = ARRAY_SIZE(zd1201_iw_handler), .num_private = ARRAY_SIZE(zd1201_private_handler), .num_private_args = ARRAY_SIZE(zd1201_private_args), .standard = (iw_handler *)zd1201_iw_handler, .private = (iw_handler *)zd1201_private_handler, .private_args = (struct iw_priv_args *) zd1201_private_args, .get_wireless_stats = zd1201_get_wireless_stats, }; static const struct net_device_ops zd1201_netdev_ops = { .ndo_open = zd1201_net_open, .ndo_stop = zd1201_net_stop, .ndo_start_xmit = zd1201_hard_start_xmit, .ndo_tx_timeout = zd1201_tx_timeout, .ndo_set_rx_mode = zd1201_set_multicast, .ndo_set_mac_address = zd1201_set_mac_address, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; static int zd1201_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct zd1201 *zd; struct net_device *dev; struct 
usb_device *usb; int err; short porttype; char buf[IW_ESSID_MAX_SIZE+2]; usb = interface_to_usbdev(interface); dev = alloc_etherdev(sizeof(*zd)); if (!dev) return -ENOMEM; zd = netdev_priv(dev); zd->dev = dev; zd->ap = ap; zd->usb = usb; zd->removed = 0; init_waitqueue_head(&zd->rxdataq); INIT_HLIST_HEAD(&zd->fraglist); err = zd1201_fw_upload(usb, zd->ap); if (err) { dev_err(&usb->dev, "zd1201 firmware upload failed: %d\n", err); goto err_zd; } zd->endp_in = 1; zd->endp_out = 1; zd->endp_out2 = 2; zd->rx_urb = usb_alloc_urb(0, GFP_KERNEL); zd->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!zd->rx_urb || !zd->tx_urb) goto err_zd; mdelay(100); err = zd1201_drvr_start(zd); if (err) goto err_zd; err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXDATALEN, 2312); if (err) goto err_start; err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL, ZD1201_RATEB1 | ZD1201_RATEB2 | ZD1201_RATEB5 | ZD1201_RATEB11); if (err) goto err_start; dev->netdev_ops = &zd1201_netdev_ops; dev->wireless_handlers = &zd1201_iw_handlers; dev->watchdog_timeo = ZD1201_TX_TIMEOUT; strcpy(dev->name, "wlan%d"); err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR, dev->dev_addr, dev->addr_len); if (err) goto err_start; /* Set wildcard essid to match zd->essid */ *(__le16 *)buf = cpu_to_le16(0); err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf, IW_ESSID_MAX_SIZE+2, 1); if (err) goto err_start; if (zd->ap) porttype = ZD1201_PORTTYPE_AP; else porttype = ZD1201_PORTTYPE_BSS; err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype); if (err) goto err_start; SET_NETDEV_DEV(dev, &usb->dev); err = register_netdev(dev); if (err) goto err_start; dev_info(&usb->dev, "%s: ZD1201 USB Wireless interface\n", dev->name); usb_set_intfdata(interface, zd); zd1201_enable(zd); /* zd1201 likes to startup enabled, */ zd1201_disable(zd); /* interfering with all the wifis in range */ return 0; err_start: /* Leave the device in reset state */ zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0); err_zd: usb_free_urb(zd->tx_urb); 
usb_free_urb(zd->rx_urb); free_netdev(dev); return err; } static void zd1201_disconnect(struct usb_interface *interface) { struct zd1201 *zd = usb_get_intfdata(interface); struct hlist_node *node, *node2; struct zd1201_frag *frag; if (!zd) return; usb_set_intfdata(interface, NULL); hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) { hlist_del_init(&frag->fnode); kfree_skb(frag->skb); kfree(frag); } if (zd->tx_urb) { usb_kill_urb(zd->tx_urb); usb_free_urb(zd->tx_urb); } if (zd->rx_urb) { usb_kill_urb(zd->rx_urb); usb_free_urb(zd->rx_urb); } if (zd->dev) { unregister_netdev(zd->dev); free_netdev(zd->dev); } } #ifdef CONFIG_PM static int zd1201_suspend(struct usb_interface *interface, pm_message_t message) { struct zd1201 *zd = usb_get_intfdata(interface); netif_device_detach(zd->dev); zd->was_enabled = zd->mac_enabled; if (zd->was_enabled) return zd1201_disable(zd); else return 0; } static int zd1201_resume(struct usb_interface *interface) { struct zd1201 *zd = usb_get_intfdata(interface); if (!zd || !zd->dev) return -ENODEV; netif_device_attach(zd->dev); if (zd->was_enabled) return zd1201_enable(zd); else return 0; } #else #define zd1201_suspend NULL #define zd1201_resume NULL #endif static struct usb_driver zd1201_usb = { .name = "zd1201", .probe = zd1201_probe, .disconnect = zd1201_disconnect, .id_table = zd1201_table, .suspend = zd1201_suspend, .resume = zd1201_resume, }; module_usb_driver(zd1201_usb);
gpl-2.0
felipesanches/linux-media
arch/mips/dec/wbflush.c
4268
2095
/*
 * Setup the right wbflush routine for the different DECstations.
 *
 * Created with information from:
 *	DECstation 3100 Desktop Workstation Functional Specification
 *	DECstation 5000/200 KN02 System Module Functional Specification
 *	mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 Harald Koerfgen
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#include <linux/init.h>

#include <asm/bootinfo.h>
#include <asm/wbflush.h>
#include <asm/barrier.h>

static void wbflush_kn01(void);
static void wbflush_kn210(void);
static void wbflush_mips(void);

/*
 * Dispatch pointer: set once at boot to the flush routine that matches
 * the detected machine type, then called via the wbflush() wrapper.
 */
void (*__wbflush) (void);

/*
 * Pick the write-back buffer flush implementation for the machine type
 * probed at boot (mips_machtype).  Falls back to the generic I/O ASIC
 * variant for unknown models.
 */
void __init wbflush_setup(void)
{
	switch (mips_machtype) {
	case MACH_DS23100:
	case MACH_DS5000_200:	/* DS5000 3max */
		__wbflush = wbflush_kn01;
		break;
	case MACH_DS5100:	/* DS5100 MIPSMATE */
		__wbflush = wbflush_kn210;
		break;
	case MACH_DS5000_1XX:	/* DS5000/100 3min */
	case MACH_DS5000_XX:	/* Personal DS5000/2x */
	case MACH_DS5000_2X0:	/* DS5000/240 3max+ */
	case MACH_DS5900:	/* DS5900 bigmax */
	default:
		__wbflush = wbflush_mips;
		break;
	}
}

/*
 * For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions
 * as part of Coprocessor 0.
 */
static void wbflush_kn01(void)
{
	/* Spin on the CP0 condition flag (bc0f) until the buffer drains. */
	asm(".set\tpush\n\t"
	    ".set\tnoreorder\n\t"
	    "1:\tbc0f\t1b\n\t"
	    "nop\n\t"
	    ".set\tpop");
}

/*
 * For the DS5100 the writeback buffer seems to be a part of Coprocessor 3.
 * But CP3 has to enabled first.
 */
static void wbflush_kn210(void)
{
	/*
	 * Save the status register ($12), set the CP3-usable bit (lui
	 * 0x8000), poll bc3f until the buffer drains, then restore the
	 * original status register.  $2/$3 are clobbered as scratch.
	 */
	asm(".set\tpush\n\t"
	    ".set\tnoreorder\n\t"
	    "mfc0\t$2,$12\n\t"
	    "lui\t$3,0x8000\n\t"
	    "or\t$3,$2,$3\n\t"
	    "mtc0\t$3,$12\n\t"
	    "nop\n"
	    "1:\tbc3f\t1b\n\t"
	    "nop\n\t"
	    "mtc0\t$2,$12\n\t"
	    "nop\n\t"
	    ".set\tpop"
	    : : : "$2", "$3");
}

/*
 * I/O ASIC systems use a standard writeback buffer that gets flushed
 * upon an uncached read.
 */
static void wbflush_mips(void)
{
	__fast_iob();
}

#include <linux/module.h>

EXPORT_SYMBOL(__wbflush);
gpl-2.0
TeamWin/android_kernel_motorola_msm8226
arch/s390/kernel/perf_cpum_cf.c
4268
17737
/*
 * Performance event support for s390x - CPU-measurement Counter Facility
 *
 * Copyright IBM Corp. 2012
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
#define KMSG_COMPONENT	"cpum_cf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>

/* CPU-measurement counter facility supports these CPU counter sets:
 * For CPU counter sets:
 *    Basic counter set:	      0-31
 *    Problem-state counter set:     32-63
 *    Crypto-activity counter set:   64-127
 *    Extented counter set:	    128-159
 */
enum cpumf_ctr_set {
	/* CPU counter sets */
	CPUMF_CTR_SET_BASIC  = 0,
	CPUMF_CTR_SET_USER   = 1,
	CPUMF_CTR_SET_CRYPTO = 2,
	CPUMF_CTR_SET_EXT    = 3,

	/* Maximum number of counter sets */
	CPUMF_CTR_SET_MAX,
};

/* The lcctl control word keeps the "enable" bits in the upper half
 * and the "activate" bits in the lower half; these shifts place a
 * counter-set mask into the respective half. */
#define CPUMF_LCCTL_ENABLE_SHIFT    16
#define CPUMF_LCCTL_ACTCTL_SHIFT     0

/* Per counter-set bit mask used in the lcctl control word. */
static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
	[CPUMF_CTR_SET_BASIC]	= 0x02,
	[CPUMF_CTR_SET_USER]	= 0x04,
	[CPUMF_CTR_SET_CRYPTO]	= 0x08,
	[CPUMF_CTR_SET_EXT]	= 0x01,
};

/* Set the enable bit for a counter set in the lcctl state word. */
static void ctr_set_enable(u64 *state, int ctr_set)
{
	*state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
}

/* Clear the enable bit for a counter set in the lcctl state word. */
static void ctr_set_disable(u64 *state, int ctr_set)
{
	*state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
}

/* Set the activation-control bit for a counter set. */
static void ctr_set_start(u64 *state, int ctr_set)
{
	*state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
}

/* Clear the activation-control bit for a counter set. */
static void ctr_set_stop(u64 *state, int ctr_set)
{
	*state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
}

/* Local CPUMF event structure */
struct cpu_hw_events {
	struct cpumf_ctr_info	info;		/* counter facility info (qctri) */
	atomic_t		ctr_set[CPUMF_CTR_SET_MAX]; /* per-set refcounts */
	u64			state, tx_state; /* lcctl word; txn snapshot */
	unsigned int		flags;		/* PMU_F_* / PERF_EVENT_TXN */
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.ctr_set = {
		[CPUMF_CTR_SET_BASIC]  = ATOMIC_INIT(0),
		[CPUMF_CTR_SET_USER]   = ATOMIC_INIT(0),
		[CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0),
		[CPUMF_CTR_SET_EXT]    = ATOMIC_INIT(0),
	},
	.state = 0,
	.flags = 0,
};

/* Map a counter number to its counter set; -1 if out of range (>= 160). */
static int get_counter_set(u64 event)
{
	int set = -1;

	if (event < 32)
		set = CPUMF_CTR_SET_BASIC;
	else if (event < 64)
		set = CPUMF_CTR_SET_USER;
	else if (event < 128)
		set = CPUMF_CTR_SET_CRYPTO;
	else if (event < 160)
		set = CPUMF_CTR_SET_EXT;

	return set;
}

/* Reject events that name a reserved counter or an unknown counter set. */
static int validate_event(const struct hw_perf_event *hwc)
{
	switch (hwc->config_base) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
	case CPUMF_CTR_SET_CRYPTO:
	case CPUMF_CTR_SET_EXT:
		/* check for reserved counters */
		if ((hwc->config >= 6 && hwc->config <= 31) ||
		    (hwc->config >= 38 && hwc->config <= 63) ||
		    (hwc->config >= 80 && hwc->config <= 127))
			return -EOPNOTSUPP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Verify the counter facility version supports the event's counter set. */
static int validate_ctr_version(const struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuhw;
	int err = 0;

	cpuhw = &get_cpu_var(cpu_hw_events);

	/* check required version for counter sets */
	switch (hwc->config_base) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
		if (cpuhw->info.cfvn < 1)
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_CRYPTO:
	case CPUMF_CTR_SET_EXT:
		if (cpuhw->info.csvn < 1)
			err = -EOPNOTSUPP;
		break;
	}

	put_cpu_var(cpu_hw_events);
	return err;
}

/* Check the LPAR authorization mask for the event's counter set. */
static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuhw;
	u64 ctrs_state;
	int err = 0;

	cpuhw = &get_cpu_var(cpu_hw_events);

	/* check authorization for cpu counter sets */
	ctrs_state = cpumf_state_ctl[hwc->config_base];
	if (!(ctrs_state & cpuhw->info.auth_ctl))
		err = -EPERM;

	put_cpu_var(cpu_hw_events);
	return err;
}

/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	int err;

	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	err = lcctl(cpuhw->state);
	if (err) {
		pr_err("Enabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags |= PMU_F_ENABLED;
}

/*
 * Change the CPUMF state to inactive.
 * Disable and enable (inactive) the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	int err;
	u64 inactive;

	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

	/* Keep the enable bits, clear all activation-control bits. */
	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	err = lcctl(inactive);
	if (err) {
		pr_err("Disabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags &= ~PMU_F_ENABLED;
}

/* Number of perf events counting hardware events */
static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/* CPU-measurement alerts for the counter facility */
static void cpumf_measurement_alert(struct ext_code ext_code,
				    unsigned int alert, unsigned long unused)
{
	struct cpu_hw_events *cpuhw;

	if (!(alert & CPU_MF_INT_CF_MASK))
		return;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
	cpuhw = &__get_cpu_var(cpu_hw_events);

	/* Measurement alerts are shared and might happen when the PMU
	 * is not reserved.  Ignore these alerts in this case. */
	if (!(cpuhw->flags & PMU_F_RESERVED))
		return;

	/* counter authorization change alert */
	if (alert & CPU_MF_INT_CF_CACA)
		qctri(&cpuhw->info);

	/* loss of counter data alert */
	if (alert & CPU_MF_INT_CF_LCDA)
		pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
}

#define PMC_INIT      0
#define PMC_RELEASE   1

/* Per-cpu init/teardown of the counter facility; runs on each CPU via
 * on_each_cpu()/smp_call_function_single(), so 'flags' is an int code. */
static void setup_pmc_cpu(void *flags)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	switch (*((int *) flags)) {
	case PMC_INIT:
		memset(&cpuhw->info, 0, sizeof(cpuhw->info));
		qctri(&cpuhw->info);
		cpuhw->flags |= PMU_F_RESERVED;
		break;
	case PMC_RELEASE:
		cpuhw->flags &= ~PMU_F_RESERVED;
		break;
	}

	/* Disable CPU counter sets */
	lcctl(0);
}

/* Initialize the CPU-measurement facility */
static int reserve_pmc_hardware(void)
{
	int flags = PMC_INIT;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	measurement_alert_subclass_register();

	return 0;
}

/* Release the CPU-measurement facility */
static void release_pmc_hardware(void)
{
	int flags = PMC_RELEASE;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	measurement_alert_subclass_unregister();
}

/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Fast path: decrement unless this would drop the count to zero;
	 * the zero transition is serialized under pmc_reserve_mutex. */
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
static const int cpumf_generic_events_basic[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = 0,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = 1,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]	    = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = -1,
	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
};
/* CPUMF <-> perf event mappings for userspace (problem-state set) */
static const int cpumf_generic_events_user[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = 32,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = 33,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]	    = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = -1,
	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
};

/* Translate a perf event attribute into a CPUMF counter number and
 * counter set, validate it, and reserve the facility on first use. */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int err;
	u64 ev;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		/* Raw events are used to access counters directly,
		 * hence do not permit excludes */
		if (attr->exclude_kernel || attr->exclude_user ||
		    attr->exclude_hv)
			return -EOPNOTSUPP;
		ev = attr->config;
		break;

	case PERF_TYPE_HARDWARE:
		ev = attr->config;
		/* Count user space (problem-state) only */
		if (!attr->exclude_user && attr->exclude_kernel) {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_user[ev];

		/* No support for kernel space counters only */
		} else if (!attr->exclude_kernel && attr->exclude_user) {
			return -EOPNOTSUPP;

		/* Count user and kernel space */
		} else {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_basic[ev];
		}
		break;

	default:
		return -ENOENT;
	}

	if (ev == -1)
		return -ENOENT;

	if (ev >= PERF_CPUM_CF_MAX_CTR)
		return -EINVAL;

	/* The CPU measurement counter facility does not have any interrupts
	 * to do sampling.  Sampling must be provided by external means,
	 * for example, by timers.
	 */
	if (hwc->sample_period)
		return -EINVAL;

	/* Use the hardware perf event structure to store the counter number
	 * in 'config' member and the counter set to which the counter belongs
	 * in the 'config_base'.  The counter set (config_base) is then used
	 * to enable/disable the counters.
	 */
	hwc->config = ev;
	hwc->config_base = get_counter_set(ev);

	/* Validate the counter that is assigned to this event.
	 * Because the counter facility can use numerous counters at the
	 * same time without constraints, it is not necessary to explicity
	 * validate event groups (event->group_leader != event).
	 */
	err = validate_event(hwc);
	if (err)
		return err;

	/* Initialize for using the CPU-measurement counter facility */
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	/* Finally, validate version and authorization of the counter set */
	err = validate_ctr_auth(hwc);
	if (!err)
		err = validate_ctr_version(hwc);

	return err;
}

/* perf core entry point: accept hardware/cache/raw events only. */
static int cpumf_pmu_event_init(struct perf_event *event)
{
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_RAW:
		err = __hw_perf_event_init(event);
		break;
	default:
		return -ENOENT;
	}

	/* On failure, undo the reservation taken in __hw_perf_event_init. */
	if (unlikely(err) && event->destroy)
		event->destroy(event);

	return err;
}

/* Resynchronize prev_count with the current hardware counter value
 * via a cmpxchg loop (may race with hw_perf_event_update on read). */
static int hw_perf_event_reset(struct perf_event *event)
{
	u64 prev, new;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err) {
			if (err != 3)
				break;
			/* The counter is not (yet) available.  This
			 * might happen if the counter set to which
			 * this counter belongs is in the disabled
			 * state.
			 */
			new = 0;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	return err;
}

/* Read the hardware counter and accumulate the delta since the last
 * read into event->count, handling 64-bit wrap-around. */
static int hw_perf_event_update(struct perf_event *event)
{
	u64 prev, new, delta;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err)
			goto out;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	/* overflow */
	local64_add(delta, &event->count);
out:
	return err;
}

static void cpumf_pmu_read(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return;

	hw_perf_event_update(event);
}

static void cpumf_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(hwc->config == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* (Re-)enable and activate the counter set */
	ctr_set_enable(&cpuhw->state, hwc->config_base);
	ctr_set_start(&cpuhw->state, hwc->config_base);

	/* The counter set to which this counter belongs can be already active.
	 * Because all counters in a set are active, the event->hw.prev_count
	 * needs to be synchronized.  At this point, the counter set can be in
	 * the inactive or disabled state.
	 */
	hw_perf_event_reset(event);

	/* increment refcount for this counter set */
	atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
}

static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* Decrement reference count for this counter set and if this
		 * is the last used counter in the set, clear activation
		 * control and set the counter set state to inactive.
		 */
		if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
			ctr_set_stop(&cpuhw->state, hwc->config_base);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		hw_perf_event_update(event);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static int cpumf_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	/* Check authorization for the counter set to which this
	 * counter belongs.
	 * For group events transaction, the authorization check is
	 * done in cpumf_pmu_commit_txn().
	 */
	if (!(cpuhw->flags & PERF_EVENT_TXN))
		if (validate_ctr_auth(&event->hw))
			return -EPERM;

	ctr_set_enable(&cpuhw->state, event->hw.config_base);
	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		cpumf_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

static void cpumf_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpumf_pmu_stop(event, PERF_EF_UPDATE);

	/* Check if any counter in the counter set is still used.  If not used,
	 * change the counter set to the disabled state.  This also clears the
	 * content of all counters in the set.
	 *
	 * When a new perf event has been added but not yet started, this can
	 * clear enable control and resets all counters in a set.  Therefore,
	 * cpumf_pmu_start() always has to reenable a counter set.
	 */
	if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
		ctr_set_disable(&cpuhw->state, event->hw.config_base);

	perf_event_update_userpage(event);
}

/*
 * Start group events scheduling transaction.
 * Set flags to perform a single test at commit time.
 */
static void cpumf_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->flags |= PERF_EVENT_TXN;
	cpuhw->tx_state = cpuhw->state;
}

/*
 * Stop and cancel a group events scheduling tranctions.
 * Assumes cpumf_pmu_del() is called for each successful added
 * cpumf_pmu_add() during the transaction.
 */
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	WARN_ON(cpuhw->tx_state != cpuhw->state);

	cpuhw->flags &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}

/*
 * Commit the group events scheduling transaction.  On success, the
 * transaction is closed.  On error, the transaction is kept open
 * until cpumf_pmu_cancel_txn() is called.
 */
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	u64 state;

	/* check if the updated state can be scheduled */
	state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	state >>= CPUMF_LCCTL_ENABLE_SHIFT;
	if ((state & cpuhw->info.auth_ctl) != state)
		return -EPERM;

	cpuhw->flags &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}

/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
	.pmu_enable   = cpumf_pmu_enable,
	.pmu_disable  = cpumf_pmu_disable,
	.event_init   = cpumf_pmu_event_init,
	.add	      = cpumf_pmu_add,
	.del	      = cpumf_pmu_del,
	.start	      = cpumf_pmu_start,
	.stop	      = cpumf_pmu_stop,
	.read	      = cpumf_pmu_read,
	.start_txn    = cpumf_pmu_start_txn,
	.commit_txn   = cpumf_pmu_commit_txn,
	.cancel_txn   = cpumf_pmu_cancel_txn,
};

/* CPU hotplug notifier: (re-)initialize or release the counter
 * facility on the coming/going CPU. */
static int __cpuinit cpumf_pmu_notifier(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (long) hcpu;
	int flags;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		flags = PMC_INIT;
		smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
		break;
	case CPU_DOWN_PREPARE:
		flags = PMC_RELEASE;
		smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

/* Register the PMU and the measurement-alert external interrupt. */
static int __init cpumf_pmu_init(void)
{
	int rc;

	if (!cpum_cf_avail())
		return -ENODEV;

	/* clear bit 15 of cr0 to unauthorize problem-state to
	 * extract measurement counters */
	ctl_clear_bit(0, 48);

	/* register handler for measurement-alert interruptions */
	rc = register_external_interrupt(0x1407, cpumf_measurement_alert);
	if (rc) {
		pr_err("Registering for CPU-measurement alerts "
		       "failed with rc=%i\n", rc);
		goto out;
	}

	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
	if (rc) {
		pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
		unregister_external_interrupt(0x1407,
					      cpumf_measurement_alert);
		goto out;
	}
	perf_cpu_notifier(cpumf_pmu_notifier);
out:
	return rc;
}
early_initcall(cpumf_pmu_init);
gpl-2.0
Constellation/linux-3.6.5
drivers/gpu/drm/radeon/radeon_mem.c
6060
7343
/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
/*
 * Copyright (C) The Weather Channel, Inc. 2002.  All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

/* Very simple allocator for GART memory, working on a static range
 * already mapped into each client's address space.
 */

/* Carve a free block 'p' so that exactly [start, start+size) remains;
 * any leading/trailing remainder stays free.  The returned block is
 * marked owned by 'file_priv'.  On kmalloc failure the split is
 * abandoned and the (larger) current block is handed out instead. */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock = kmalloc(sizeof(*newblock),
						     GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock = kmalloc(sizeof(*newblock),
						     GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}

/* First-fit search for a free block that can hold 'size' bytes at
 * 2^align2 alignment; splits and claims it on success. */
static struct mem_block *alloc_block(struct mem_block *heap, int size,
				     int align2, struct drm_file *file_priv)
{
	struct mem_block *p;
	int mask = (1 << align2) - 1;

	list_for_each(p, heap) {
		int start = (p->start + mask) & ~mask;
		if (p->file_priv == NULL && start + size <= p->start + p->size)
			return split_block(p, start, size, file_priv);
	}

	return NULL;
}

/* Look up the block whose region starts exactly at 'start'. */
static struct mem_block *find_block(struct mem_block *heap, int start)
{
	struct mem_block *p;

	list_for_each(p, heap)
		if (p->start == start)
			return p;

	return NULL;
}

/* Mark a block free and coalesce it with free neighbours. */
static void free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->file_priv == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		kfree(q);
	}

	if (p->prev->file_priv == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		kfree(p);
	}
}

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);

	if (!blocks)
		return -ENOMEM;

	*heap = kzalloc(sizeof(**heap), GFP_KERNEL);
	if (!*heap) {
		kfree(blocks);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;
	blocks->next = blocks->prev = *heap;

	/* Sentinel head node: the bogus non-NULL file_priv keeps it from
	 * ever being coalesced with a real free block. */
	(*heap)->file_priv = (struct drm_file *) - 1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}

/* Free all blocks associated with the releasing file.
 */
void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	list_for_each(p, heap) {
		if (p->file_priv == file_priv)
			p->file_priv = NULL;
	}

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	list_for_each(p, heap) {
		while (p->file_priv == NULL && p->next->file_priv == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			kfree(q);
		}
	}
}

/* Shutdown.
 */
void radeon_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		kfree(q);
	}

	kfree(*heap);
	*heap = NULL;
}

/* IOCTL HANDLERS */

/* Map an ioctl region id to the matching heap pointer, or NULL. */
static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
{
	switch (region) {
	case RADEON_MEM_REGION_GART:
		return &dev_priv->gart_heap;
	case RADEON_MEM_REGION_FB:
		return &dev_priv->fb_heap;
	default:
		return NULL;
	}
}

/* DRM_IOCTL_RADEON_ALLOC: allocate a region and copy its start offset
 * back to the user-supplied pointer. */
int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_alloc_t *alloc = data;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, alloc->region);
	if (!heap || !*heap)
		return -EFAULT;

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	if (alloc->alignment < 12)
		alloc->alignment = 12;

	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);

	if (!block)
		return -ENOMEM;

	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
			     sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* DRM_IOCTL_RADEON_FREE: free a region previously allocated by the
 * same file; ownership is checked before release. */
int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_free_t *memfree = data;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, memfree->region);
	if (!heap || !*heap)
		return -EFAULT;

	block = find_block(*heap, memfree->region_offset);
	if (!block)
		return -EFAULT;

	if (block->file_priv != file_priv)
		return -EPERM;

	free_block(block);
	return 0;
}

/* DRM_IOCTL_RADEON_INIT_HEAP: create the heap for a region; rejects
 * double initialization. */
int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_init_heap_t *initheap = data;
	struct mem_block **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, initheap->region);
	if (!heap)
		return -EFAULT;

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return -EFAULT;
	}

	return init_heap(heap, initheap->start, initheap->size);
}
gpl-2.0
pershoot/galaxy-31
arch/sh/kernel/sys_sh32.c
7852
2260
#include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/ipc.h> #include <asm/cacheflush.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <asm/syscalls.h> /* * sys_pipe() is the normal C calling standard for creating * a pipe. It's not the way Unix traditionally does this, though. */ asmlinkage int sys_sh_pipe(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs) { struct pt_regs *regs = RELOC_HIDE(&__regs, 0); int fd[2]; int error; error = do_pipe_flags(fd, 0); if (!error) { regs->regs[1] = fd[1]; return fd[0]; } return error; } asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf, size_t count, long dummy, loff_t pos) { return sys_pread64(fd, buf, count, pos); } asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf, size_t count, long dummy, loff_t pos) { return sys_pwrite64(fd, buf, count, pos); } asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1, u32 len0, u32 len1, int advice) { #ifdef __LITTLE_ENDIAN__ return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0, (u64)len1 << 32 | len0, advice); #else return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1, (u64)len0 << 32 | len1, advice); #endif } #if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A) #define SYSCALL_ARG3 "trapa #0x23" #else #define SYSCALL_ARG3 "trapa #0x13" #endif /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. 
*/ int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]) { register long __sc0 __asm__ ("r3") = __NR_execve; register long __sc4 __asm__ ("r4") = (long) filename; register long __sc5 __asm__ ("r5") = (long) argv; register long __sc6 __asm__ ("r6") = (long) envp; __asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0) : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) : "memory"); return __sc0; }
gpl-2.0
schqiushui/android_kk_kernel_htc_dlxj
arch/s390/kernel/ftrace.c
7852
5313
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>

/* Offset from the mcount call site back to the traced function's
 * return point; differs between the 64-bit and 31-bit code blocks. */
#ifdef CONFIG_64BIT
#define MCOUNT_OFFSET_RET 12
#else
#define MCOUNT_OFFSET_RET 22
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Labels into the asm template below; declared as functions so their
 * addresses can be taken as patch sources. */
void ftrace_disable_code(void);
void ftrace_enable_insn(void);

#ifdef CONFIG_64BIT
/*
 * The 64-bit mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	larl	%r1,<&counter>		# offset 6
 * >	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The middle two instructions of the mcount
 * block get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 64-bit enabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%15)		# offset 18
 * The return points of the mcount/ftrace function have the same offset 18.
 * The 64-bit disable ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	jg	.+18			# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
asm(
	" .align 4\n"
	"ftrace_disable_code:\n"
	" jg 0f\n"
	" lgr %r0,%r0\n"
	" basr %r14,%r1\n"
	"0:\n"
	" .align 4\n"
	"ftrace_enable_insn:\n"
	" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");

#define FTRACE_INSN_SIZE 6

#else /* CONFIG_64BIT */
/*
 * The 31-bit mcount code looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	bras	%r1,0f			# offset 4
 * >	.long	_mcount			# offset 8
 * >	.long	<&counter>		# offset 12
 * > 0:	l	%r14,0(%r1)		# offset 16
 * >	l	%r1,4(%r1)		# offset 20
 *	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * Total length is 30 bytes. The twenty bytes starting from offset 4
 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 31-bit enabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r14)		# offset 26
 * The return points of the mcount/ftrace function have the same offset 26.
 * The 31-bit disabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	j	.+26			# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r14)		# offset 26
 * The j instruction branches to offset 30 to skip as many instructions
 * as possible.
 */
asm(
	" .align 4\n"
	"ftrace_disable_code:\n"
	" j 1f\n"
	" j 0f\n"
	" .fill 12,1,0x07\n"
	"0: basr %r14,%r14\n"
	"1:\n"
	" .align 4\n"
	"ftrace_enable_insn:\n"
	" l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");

#define FTRACE_INSN_SIZE 4

#endif /* CONFIG_64BIT */

/* Patch the mcount call site to the "disabled" template (branch over). */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
			       MCOUNT_INSN_SIZE))
		return -EPERM;
	return 0;
}

/* Patch the mcount call site to load and call the ftrace function. */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
		return -EPERM;
	return 0;
}

/* Nothing to patch: the trace function is loaded indirectly through
 * the __LC_FTRACE_FUNC lowcore slot. */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *) data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at ftrace_graph_caller location. The instruction
 * there is branch relative and save to prepare_ftrace_return. To disable
 * the call to prepare_ftrace_return we patch the bras offset to point
 * directly after the instructions. To enable the call we calculate
 * the original offset to prepare_ftrace_return and put it back.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short offset;

	/* bras offsets are in halfwords, hence the division by 2. */
	offset = ((void *) prepare_ftrace_return -
		  (void *) ftrace_graph_caller) / 2;
	return probe_kernel_write(ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	/* 0x0002 halfwords == branch to the next instruction (no-op). */
	static unsigned short offset = 0x0002;

	return probe_kernel_write(ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
gpl-2.0
shark147/m7-kernel
drivers/scsi/sym53c8xx_2/sym_hipd.c
8620
147387
/* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr> * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <wolf@cologne.de> * Stefan Esser <se@mi.Uni-Koeln.de> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> * *----------------------------------------------------------------------------- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/slab.h> #include <asm/param.h> /* for timeouts in units of HZ */ #include "sym_glue.h" #include "sym_nvram.h" #if 0 #define SYM_DEBUG_GENERIC_SUPPORT #endif /* * Needed function prototypes. 
*/ static void sym_int_ma (struct sym_hcb *np); static void sym_int_sir(struct sym_hcb *); static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np); static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa); static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln); static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp); static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp); static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp); /* * Print a buffer in hexadecimal format with a ".\n" at end. */ static void sym_printl_hex(u_char *p, int n) { while (n-- > 0) printf (" %x", *p++); printf (".\n"); } static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) { sym_print_addr(cp->cmd, "%s: ", label); spi_print_msg(msg); printf("\n"); } static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg) { struct sym_tcb *tp = &np->target[target]; dev_info(&tp->starget->dev, "%s: ", label); spi_print_msg(msg); printf("\n"); } /* * Print something that tells about extended errors. */ void sym_print_xerr(struct scsi_cmnd *cmd, int x_status) { if (x_status & XE_PARITY_ERR) { sym_print_addr(cmd, "unrecovered SCSI parity error.\n"); } if (x_status & XE_EXTRA_DATA) { sym_print_addr(cmd, "extraneous data discarded.\n"); } if (x_status & XE_BAD_PHASE) { sym_print_addr(cmd, "illegal scsi phase (4/5).\n"); } if (x_status & XE_SODL_UNRUN) { sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n"); } if (x_status & XE_SWIDE_OVRUN) { sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n"); } } /* * Return a string for SCSI BUS mode. */ static char *sym_scsi_bus_mode(int mode) { switch(mode) { case SMODE_HVD: return "HVD"; case SMODE_SE: return "SE"; case SMODE_LVD: return "LVD"; } return "??"; } /* * Soft reset the chip. * * Raising SRST when the chip is running may cause * problems on dual function chips (see below). 
* On the other hand, LVD devices need some delay * to settle and report actual BUS mode in STEST4. */ static void sym_chip_reset (struct sym_hcb *np) { OUTB(np, nc_istat, SRST); INB(np, nc_mbox1); udelay(10); OUTB(np, nc_istat, 0); INB(np, nc_mbox1); udelay(2000); /* For BUS MODE to settle */ } /* * Really soft reset the chip.:) * * Some 896 and 876 chip revisions may hang-up if we set * the SRST (soft reset) bit at the wrong time when SCRIPTS * are running. * So, we need to abort the current operation prior to * soft resetting the chip. */ static void sym_soft_reset (struct sym_hcb *np) { u_char istat = 0; int i; if (!(np->features & FE_ISTAT1) || !(INB(np, nc_istat1) & SCRUN)) goto do_chip_reset; OUTB(np, nc_istat, CABRT); for (i = 100000 ; i ; --i) { istat = INB(np, nc_istat); if (istat & SIP) { INW(np, nc_sist); } else if (istat & DIP) { if (INB(np, nc_dstat) & ABRT) break; } udelay(5); } OUTB(np, nc_istat, 0); if (!i) printf("%s: unable to abort current chip operation, " "ISTAT=0x%02x.\n", sym_name(np), istat); do_chip_reset: sym_chip_reset(np); } /* * Start reset process. * * The interrupt handler will reinitialize the chip. */ static void sym_start_reset(struct sym_hcb *np) { sym_reset_scsi_bus(np, 1); } int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int) { u32 term; int retv = 0; sym_soft_reset(np); /* Soft reset the chip */ if (enab_int) OUTW(np, nc_sien, RST); /* * Enable Tolerant, reset IRQD if present and * properly set IRQ mode, prior to resetting the bus. */ OUTB(np, nc_stest3, TE); OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM)); OUTB(np, nc_scntl1, CRST); INB(np, nc_mbox1); udelay(200); if (!SYM_SETUP_SCSI_BUS_CHECK) goto out; /* * Check for no terminators or SCSI bus shorts to ground. * Read SCSI data bus, data parity bits and control signals. * We are expecting RESET to be TRUE and other signals to be * FALSE. 
*/ term = INB(np, nc_sstat0); term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ term |= ((INB(np, nc_sstat2) & 0x01) << 26) | /* sdp1 */ ((INW(np, nc_sbdl) & 0xff) << 9) | /* d7-0 */ ((INW(np, nc_sbdl) & 0xff00) << 10) | /* d15-8 */ INB(np, nc_sbcl); /* req ack bsy sel atn msg cd io */ if (!np->maxwide) term &= 0x3ffff; if (term != (2<<7)) { printf("%s: suspicious SCSI data while resetting the BUS.\n", sym_name(np)); printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " "0x%lx, expecting 0x%lx\n", sym_name(np), (np->features & FE_WIDE) ? "dp1,d15-8," : "", (u_long)term, (u_long)(2<<7)); if (SYM_SETUP_SCSI_BUS_CHECK == 1) retv = 1; } out: OUTB(np, nc_scntl1, 0); return retv; } /* * Select SCSI clock frequency */ static void sym_selectclock(struct sym_hcb *np, u_char scntl3) { /* * If multiplier not present or not selected, leave here. */ if (np->multiplier <= 1) { OUTB(np, nc_scntl3, scntl3); return; } if (sym_verbose >= 2) printf ("%s: enabling clock multiplier\n", sym_name(np)); OUTB(np, nc_stest1, DBLEN); /* Enable clock multiplier */ /* * Wait for the LCKFRQ bit to be set if supported by the chip. * Otherwise wait 50 micro-seconds (at least). */ if (np->features & FE_LCKFRQ) { int i = 20; while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0) udelay(20); if (!i) printf("%s: the chip cannot lock the frequency\n", sym_name(np)); } else { INB(np, nc_mbox1); udelay(50+10); } OUTB(np, nc_stest3, HSC); /* Halt the scsi clock */ OUTB(np, nc_scntl3, scntl3); OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(np, nc_stest3, 0x00); /* Restart scsi clock */ } /* * Determine the chip's clock frequency. * * This is essential for the negotiation of the synchronous * transfer rate. * * Note: we have to return the correct value. * THERE IS NO SAFE DEFAULT VALUE. * * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. * 53C860 and 53C875 rev. 
1 support fast20 transfers but * do not have a clock doubler and so are provided with a * 80 MHz clock. All other fast20 boards incorporate a doubler * and so should be delivered with a 40 MHz clock. * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base * clock and provide a clock quadrupler (160 Mhz). */ /* * calculate SCSI clock frequency (in KHz) */ static unsigned getfreq (struct sym_hcb *np, int gen) { unsigned int ms = 0; unsigned int f; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will * if multiple measurements are to be * performed trust the higher delay * (lower frequency returned). */ OUTW(np, nc_sien, 0); /* mask all scsi interrupts */ INW(np, nc_sist); /* clear pending scsi interrupt */ OUTB(np, nc_dien, 0); /* mask all dma interrupts */ INW(np, nc_sist); /* another one, just to be sure :) */ /* * The C1010-33 core does not report GEN in SIST, * if this interrupt is masked in SIEN. * I don't know yet if the C1010-66 behaves the same way. */ if (np->features & FE_C10) { OUTW(np, nc_sien, GEN); OUTB(np, nc_istat1, SIRQD); } OUTB(np, nc_scntl3, 4); /* set pre-scaler to divide by 3 */ OUTB(np, nc_stime1, 0); /* disable general purpose timer */ OUTB(np, nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */ while (!(INW(np, nc_sist) & GEN) && ms++ < 100000) udelay(1000/4); /* count in 1/4 of ms */ OUTB(np, nc_stime1, 0); /* disable general purpose timer */ /* * Undo C1010-33 specific settings. */ if (np->features & FE_C10) { OUTW(np, nc_sien, 0); OUTB(np, nc_istat1, 0); } /* * set prescaler to divide by whatever 0 means * 0 ought to choose divide by 2, but appears * to set divide by 3.5 mode in my 53c810 ... 
*/ OUTB(np, nc_scntl3, 0); /* * adjust for prescaler, and convert into KHz */ f = ms ? ((1 << gen) * (4340*4)) / ms : 0; /* * The C1010-33 result is biased by a factor * of 2/3 compared to earlier chips. */ if (np->features & FE_C10) f = (f * 2) / 3; if (sym_verbose >= 2) printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n", sym_name(np), gen, ms/4, f); return f; } static unsigned sym_getfreq (struct sym_hcb *np) { u_int f1, f2; int gen = 8; getfreq (np, gen); /* throw away first result */ f1 = getfreq (np, gen); f2 = getfreq (np, gen); if (f1 > f2) f1 = f2; /* trust lower result */ return f1; } /* * Get/probe chip SCSI clock frequency */ static void sym_getclock (struct sym_hcb *np, int mult) { unsigned char scntl3 = np->sv_scntl3; unsigned char stest1 = np->sv_stest1; unsigned f1; np->multiplier = 1; f1 = 40000; /* * True with 875/895/896/895A with clock multiplier selected */ if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) { if (sym_verbose >= 2) printf ("%s: clock multiplier found\n", sym_name(np)); np->multiplier = mult; } /* * If multiplier not found or scntl3 not 7,5,3, * reset chip and get frequency from general purpose timer. * Otherwise trust scntl3 BIOS setting. */ if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) { OUTB(np, nc_stest1, 0); /* make sure doubler is OFF */ f1 = sym_getfreq (np); if (sym_verbose) printf ("%s: chip clock is %uKHz\n", sym_name(np), f1); if (f1 < 45000) f1 = 40000; else if (f1 < 55000) f1 = 50000; else f1 = 80000; if (f1 < 80000 && mult > 1) { if (sym_verbose >= 2) printf ("%s: clock multiplier assumed\n", sym_name(np)); np->multiplier = mult; } } else { if ((scntl3 & 7) == 3) f1 = 40000; else if ((scntl3 & 7) == 5) f1 = 80000; else f1 = 160000; f1 /= np->multiplier; } /* * Compute controller synchronous parameters. 
*/ f1 *= np->multiplier; np->clock_khz = f1; } /* * Get/probe PCI clock frequency */ static int sym_getpciclock (struct sym_hcb *np) { int f = 0; /* * For now, we only need to know about the actual * PCI BUS clock frequency for C1010-66 chips. */ #if 1 if (np->features & FE_66MHZ) { #else if (1) { #endif OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */ f = sym_getfreq(np); OUTB(np, nc_stest1, 0); } np->pciclk_khz = f; return f; } /* * SYMBIOS chip clock divisor table. * * Divisors are multiplied by 10,000,000 in order to make * calculations more simple. */ #define _5M 5000000 static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /* * Get clock factor and sync divisor for a given * synchronous factor period. */ static int sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp) { u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */ int div = np->clock_divn; /* Number of divisors supported */ u32 fak; /* Sync factor in sxfer */ u32 per; /* Period in tenths of ns */ u32 kpc; /* (per * clk) */ int ret; /* * Compute the synchronous period in tenths of nano-seconds */ if (dt && sfac <= 9) per = 125; else if (sfac <= 10) per = 250; else if (sfac == 11) per = 303; else if (sfac == 12) per = 500; else per = 40 * sfac; ret = per; kpc = per * clk; if (dt) kpc <<= 1; /* * For earliest C10 revision 0, we cannot use extra * clocks for the setting of the SCSI clocking. * Note that this limits the lowest sync data transfer * to 5 Mega-transfers per second and may result in * using higher clock divisors. */ #if 1 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { /* * Look for the lowest clock divisor that allows an * output speed not faster than the period. */ while (div > 0) { --div; if (kpc > (div_10M[div] << 2)) { ++div; break; } } fak = 0; /* No extra clocks */ if (div == np->clock_divn) { /* Are we too fast ? 
*/ ret = -1; } *divp = div; *fakp = fak; return ret; } #endif /* * Look for the greatest clock divisor that allows an * input speed faster than the period. */ while (div-- > 0) if (kpc >= (div_10M[div] << 2)) break; /* * Calculate the lowest clock factor that allows an output * speed not faster than the period, and the max output speed. * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. */ if (dt) { fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ } else { fak = (kpc - 1) / div_10M[div] + 1 - 4; /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ } /* * Check against our hardware limits, or bugs :). */ if (fak > 2) { fak = 2; ret = -1; } /* * Compute and return sync parameters. */ *divp = div; *fakp = fak; return ret; } /* * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, * 128 transfers. All chips support at least 16 transfers * bursts. The 825A, 875 and 895 chips support bursts of up * to 128 transfers and the 895A and 896 support bursts of up * to 64 transfers. All other chips support up to 16 * transfers bursts. * * For PCI 32 bit data transfers each transfer is a DWORD. * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. * * We use log base 2 (burst length) as internal code, with * value 0 meaning "burst disabled". */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. */ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static inline void sym_init_burst(struct sym_hcb *np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /* * Save initial settings of some IO registers. 
* Assumed to have been set by BIOS. * We cannot reset the chip prior to reading the * IO registers, since informations will be lost. * Since the SCRIPTS processor may be running, this * is not safe on paper, but it seems to work quite * well. :) */ static void sym_save_initial_setting (struct sym_hcb *np) { np->sv_scntl0 = INB(np, nc_scntl0) & 0x0a; np->sv_scntl3 = INB(np, nc_scntl3) & 0x07; np->sv_dmode = INB(np, nc_dmode) & 0xce; np->sv_dcntl = INB(np, nc_dcntl) & 0xa8; np->sv_ctest3 = INB(np, nc_ctest3) & 0x01; np->sv_ctest4 = INB(np, nc_ctest4) & 0x80; np->sv_gpcntl = INB(np, nc_gpcntl); np->sv_stest1 = INB(np, nc_stest1); np->sv_stest2 = INB(np, nc_stest2) & 0x20; np->sv_stest4 = INB(np, nc_stest4); if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ np->sv_scntl4 = INB(np, nc_scntl4); np->sv_ctest5 = INB(np, nc_ctest5) & 0x04; } else np->sv_ctest5 = INB(np, nc_ctest5) & 0x24; } /* * Set SCSI BUS mode. * - LVD capable chips (895/895A/896/1010) report the current BUS mode * through the STEST4 IO register. * - For previous generation chips (825/825A/875), the user has to tell us * how to check against HVD, since a 100% safe algorithm is not possible. */ static void sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram) { if (np->scsi_mode) return; np->scsi_mode = SMODE_SE; if (np->features & (FE_ULTRA2|FE_ULTRA3)) np->scsi_mode = (np->sv_stest4 & SMODE); else if (np->features & FE_DIFF) { if (SYM_SETUP_SCSI_DIFF == 1) { if (np->sv_scntl3) { if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; } else if (nvram->type == SYM_SYMBIOS_NVRAM) { if (!(INB(np, nc_gpreg) & 0x08)) np->scsi_mode = SMODE_HVD; } } else if (SYM_SETUP_SCSI_DIFF == 2) np->scsi_mode = SMODE_HVD; } if (np->scsi_mode == SMODE_HVD) np->rv_stest2 |= 0x20; } /* * Prepare io register values used by sym_start_up() * according to selected and supported features. 
*/ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) { struct sym_data *sym_data = shost_priv(shost); struct pci_dev *pdev = sym_data->pdev; u_char burst_max; u32 period; int i; np->maxwide = (np->features & FE_WIDE) ? 1 : 0; /* * Guess the frequency of the chip's clock. */ if (np->features & (FE_ULTRA3 | FE_ULTRA2)) np->clock_khz = 160000; else if (np->features & FE_ULTRA) np->clock_khz = 80000; else np->clock_khz = 40000; /* * Get the clock multiplier factor. */ if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; /* * Measure SCSI clock frequency for chips * it may vary from assumed one. */ if (np->features & FE_VARCLK) sym_getclock(np, np->multiplier); /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * The C1010 uses hardwired divisors for async. * So, we just throw away, the async. divisor.:-) */ if (np->features & FE_C10) np->rv_scntl3 = 0; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. */ period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = (period + 40 - 1) / 40; /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & (FE_ULTRA2|FE_ULTRA3))) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * If chip is a C1010, guess the sync limits in DT mode. 
*/ if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { if (np->clock_khz == 160000) { np->minsync_dt = 9; np->maxsync_dt = 50; np->maxoffs_dt = nvram->type ? 62 : 31; } } /* * 64 bit addressing (895A/896/1010) ? */ if (np->features & FE_DAC) { if (!use_dac(np)) np->rv_ccntl1 |= (DDAC); else if (SYM_CONF_DMA_ADDRESSING_MODE == 1) np->rv_ccntl1 |= (XTIMOD | EXTIBMV); else if (SYM_CONF_DMA_ADDRESSING_MODE == 2) np->rv_ccntl1 |= (0 | EXTIBMV); } /* * Phase mismatch handled by SCRIPTS (895A/896/1010) ? */ if (np->features & FE_NOPM) np->rv_ccntl0 |= (ENPMJ); /* * C1010-33 Errata: Part Number:609-039638 (rev. 1) is fixed. * In dual channel mode, contention occurs if internal cycles * are used. Disable internal cycles. */ if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 && pdev->revision < 0x1) np->rv_ccntl0 |= DILS; /* * Select burst length (dwords) */ burst_max = SYM_SETUP_BURST_ORDER; if (burst_max == 255) burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5); if (burst_max > 7) burst_max = 7; if (burst_max > np->maxburst) burst_max = np->maxburst; /* * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. * This chip and the 860 Rev 1 may wrongly use PCI cache line * based transactions on LOAD/STORE instructions. So we have * to prevent these chips from using such PCI transactions in * this driver. The generic ncr driver that does not use * LOAD/STORE instructions does not need this work-around. */ if ((pdev->device == PCI_DEVICE_ID_NCR_53C810 && pdev->revision >= 0x10 && pdev->revision <= 0x11) || (pdev->device == PCI_DEVICE_ID_NCR_53C860 && pdev->revision <= 0x1)) np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); /* * Select all supported special features. * If we are using on-board RAM for scripts, prefetch (PFEN) * does not help, but burst op fetch (BOF) does. * Disabling PFEN makes sure BOF will be used. 
*/ if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ #if 1 if ((np->features & FE_PFEN) && !np->ram_ba) #else if (np->features & FE_PFEN) #endif np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ /* * Select some other */ np->rv_ctest4 |= MPEE; /* Master parity checking */ np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ /* * Get parity checking, host ID and verbose mode from NVRAM */ np->myaddr = 255; np->scsi_mode = 0; sym_nvram_setup_host(shost, np, nvram); /* * Get SCSI addr of host adapter (set by bios?). */ if (np->myaddr == 255) { np->myaddr = INB(np, nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SYM_SETUP_HOST_ID; } /* * Prepare initial io register bits for burst length */ sym_init_burst(np, burst_max); sym_set_bus_mode(np, nvram); /* * Set LED support from SCRIPTS. * Ignore this feature for boards known to use a * specific GPIO wiring and for the 895A, 896 * and 1010 that drive the LED directly. */ if ((SYM_SETUP_SCSI_LED || (nvram->type == SYM_SYMBIOS_NVRAM || (nvram->type == SYM_TEKRAM_NVRAM && pdev->device == PCI_DEVICE_ID_NCR_53C895))) && !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) np->features |= FE_LED0; /* * Set irq mode. */ switch(SYM_SETUP_IRQ_MODE & 3) { case 2: np->rv_dcntl |= IRQM; break; case 1: np->rv_dcntl |= (np->sv_dcntl & IRQM); break; default: break; } /* * Configure targets according to driver setup. * If NVRAM present get targets setup from NVRAM. 
*/ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { struct sym_tcb *tp = &np->target[i]; tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); tp->usrtags = SYM_SETUP_MAX_TAG; tp->usr_width = np->maxwide; tp->usr_period = 9; sym_nvram_setup_target(tp, i, nvram); if (!tp->usrtags) tp->usrflags &= ~SYM_TAGS_ENABLED; } /* * Let user know about the settings. */ printf("%s: %s, ID %d, Fast-%d, %s, %s\n", sym_name(np), sym_nvram_type(nvram), np->myaddr, (np->features & FE_ULTRA3) ? 80 : (np->features & FE_ULTRA2) ? 40 : (np->features & FE_ULTRA) ? 20 : 10, sym_scsi_bus_mode(np->scsi_mode), (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); /* * Tell him more on demand. */ if (sym_verbose) { printf("%s: %s IRQ line driver%s\n", sym_name(np), np->rv_dcntl & IRQM ? "totem pole" : "open drain", np->ram_ba ? ", using on-chip SRAM" : ""); printf("%s: using %s firmware.\n", sym_name(np), np->fw_name); if (np->features & FE_NOPM) printf("%s: handling phase mismatch from SCRIPTS.\n", sym_name(np)); } /* * And still more. */ if (sym_verbose >= 2) { printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } return 0; } /* * Test the pci bus snoop logic :-( * * Has to be called with interrupts disabled. */ #ifdef CONFIG_SCSI_SYM53C8XX_MMIO static int sym_regtest(struct sym_hcb *np) { register volatile u32 data; /* * chip registers may NOT be cached. * write 0xffffffff to a read only register area, * and try to read it back. 
*/ data = 0xffffffff; OUTL(np, nc_dstat, data); data = INL(np, nc_dstat); #if 1 if (data == 0xffffffff) { #else if ((data & 0xe2f0fffd) != 0x02000080) { #endif printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", (unsigned) data); return 0x10; } return 0; } #else static inline int sym_regtest(struct sym_hcb *np) { return 0; } #endif static int sym_snooptest(struct sym_hcb *np) { u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat; int i, err; err = sym_regtest(np); if (err) return err; restart_test: /* * Enable Master Parity Checking as we intend * to enable it for normal operations. */ OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE)); /* * init */ pc = SCRIPTZ_BA(np, snooptest); host_wr = 1; sym_wr = 2; /* * Set memory and register. */ np->scratch = cpu_to_scr(host_wr); OUTL(np, nc_temp, sym_wr); /* * Start script (exchange values) */ OUTL(np, nc_dsa, np->hcb_ba); OUTL_DSP(np, pc); /* * Wait 'til done (with timeout) */ for (i=0; i<SYM_SNOOP_TIMEOUT; i++) if (INB(np, nc_istat) & (INTF|SIP|DIP)) break; if (i>=SYM_SNOOP_TIMEOUT) { printf ("CACHE TEST FAILED: timeout.\n"); return (0x20); } /* * Check for fatal DMA errors. */ dstat = INB(np, nc_dstat); #if 1 /* Band aiding for broken hardwares that fail PCI parity */ if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) { printf ("%s: PCI DATA PARITY ERROR DETECTED - " "DISABLING MASTER DATA PARITY CHECKING.\n", sym_name(np)); np->rv_ctest4 &= ~MPEE; goto restart_test; } #endif if (dstat & (MDPE|BF|IID)) { printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat); return (0x80); } /* * Save termination position. */ pc = INL(np, nc_dsp); /* * Read memory and register. */ host_rd = scr_to_cpu(np->scratch); sym_rd = INL(np, nc_scratcha); sym_bk = INL(np, nc_temp); /* * Check termination position. 
*/ if (pc != SCRIPTZ_BA(np, snoopend)+8) { printf ("CACHE TEST FAILED: script execution failed.\n"); printf ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) SCRIPTZ_BA(np, snooptest), (u_long) pc, (u_long) SCRIPTZ_BA(np, snoopend) +8); return (0x40); } /* * Show results. */ if (host_wr != sym_rd) { printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n", (int) host_wr, (int) sym_rd); err |= 1; } if (host_rd != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n", (int) sym_wr, (int) host_rd); err |= 2; } if (sym_bk != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n", (int) sym_wr, (int) sym_bk); err |= 4; } return err; } /* * log message for real hard errors * * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sx/s3/s4) @ name (dsp:dbc). * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. * * exception register: * ds: dstat * si: sist * * SCSI bus lines: * so: control lines as driven by chip. * si: control lines as seen by chip. * sd: scsi data lines as seen by chip. * * wide/fastmode: * sx: sxfer (see the manual) * s3: scntl3 (see the manual) * s4: scntl4 (see the manual) * * current script command: * dsp: script address (relative to start of script). * dbc: first word of script command. 
* * First 24 register of the chip: * r0..rf */ static void sym_log_hard_error(struct Scsi_Host *shost, u_short sist, u_char dstat) { struct sym_hcb *np = sym_get_hcb(shost); u32 dsp; int script_ofs; int script_size; char *script_name; u_char *script_base; int i; dsp = INL(np, nc_dsp); if (dsp > np->scripta_ba && dsp <= np->scripta_ba + np->scripta_sz) { script_ofs = dsp - np->scripta_ba; script_size = np->scripta_sz; script_base = (u_char *) np->scripta0; script_name = "scripta"; } else if (np->scriptb_ba < dsp && dsp <= np->scriptb_ba + np->scriptb_sz) { script_ofs = dsp - np->scriptb_ba; script_size = np->scriptb_sz; script_base = (u_char *) np->scriptb0; script_name = "scriptb"; } else { script_ofs = dsp; script_size = 0; script_base = NULL; script_name = "mem"; } printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x/%x) @ (%s %x:%08x).\n", sym_name(np), (unsigned)INB(np, nc_sdid)&0x0f, dstat, sist, (unsigned)INB(np, nc_socl), (unsigned)INB(np, nc_sbcl), (unsigned)INB(np, nc_sbdl), (unsigned)INB(np, nc_sxfer), (unsigned)INB(np, nc_scntl3), (np->features & FE_C10) ? (unsigned)INB(np, nc_scntl4) : 0, script_name, script_ofs, (unsigned)INL(np, nc_dbc)); if (((script_ofs & 3) == 0) && (unsigned)script_ofs < script_size) { printf ("%s: script cmd = %08x\n", sym_name(np), scr_to_cpu((int) *(u32 *)(script_base + script_ofs))); } printf("%s: regdump:", sym_name(np)); for (i = 0; i < 24; i++) printf(" %02x", (unsigned)INB_OFF(np, i)); printf(".\n"); /* * PCI BUS error. 
 */
	if (dstat & (MDPE|BF))
		sym_log_bus_error(shost);
}

/*
 *  Read the interrupt status registers (SIST/DSTAT) and log
 *  them through sym_log_hard_error(). Debug/diagnostic helper.
 */
void sym_dump_registers(struct Scsi_Host *shost)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	u_short sist;
	u_char dstat;

	sist = INW(np, nc_sist);
	dstat = INB(np, nc_dstat);
	sym_log_hard_error(shost, sist, dstat);
}

/*
 *  Table of supported chip variants, keyed by PCI device id and a
 *  revision ceiling (entries with a lower revision_id come first so
 *  the lookup below picks the most specific match).
 *  NOTE(review): the numeric fields between the name and the FE_*
 *  feature mask appear to be per-chip limits (bursts, offsets,
 *  divisors, RAM) -- confirm against the struct sym_chip declaration.
 */
static struct sym_chip sym_dev_table[] = {
 {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, 64,
 FE_ERL}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1,
 FE_BOF}
 ,
#else
 {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1,
 FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
 ,
#endif
 {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, 64,
 FE_BOF|FE_ERL}
 ,
 {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 6, 8, 4, 64,
 FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
 ,
 {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, 2,
 FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
 ,
 {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, 1,
 FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
 ,
 {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
 {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
 {PCI_DEVICE_ID_NCR_53C875J, 0xff, "875J", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
 {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
 FE_RAM|FE_LCKFRQ}
 ,
#else
 {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_LCKFRQ}
 ,
#endif
 {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_DEVICE_ID_LSI_53C895A, 0xff, "895a", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_DEVICE_ID_LSI_53C875A, 0xff, "875a", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_DEVICE_ID_LSI_53C1010_33, 0x00, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10}
 ,
 {PCI_DEVICE_ID_LSI_53C1010_33, 0xff, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_DEVICE_ID_LSI_53C1510, 0xff, "1510d", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_IO256|FE_LEDC}
};

#define sym_num_devs	(ARRAY_SIZE(sym_dev_table))

/*
 *  Look up the chip table.
 *
 *  Return a pointer to the chip entry if found,
 *  NULL otherwise.
 */
struct sym_chip *
sym_lookup_chip_table (u_short device_id, u_char revision)
{
	struct	sym_chip *chip;
	int	i;

	for (i = 0; i < sym_num_devs; i++) {
		chip = &sym_dev_table[i];
		if (device_id != chip->device_id)
			continue;
		/*
		 *  Entries are ordered by ascending revision ceiling:
		 *  skip entries whose ceiling is below this revision.
		 */
		if (revision > chip->revision_id)
			continue;
		return chip;
	}

	return NULL;
}

#if SYM_CONF_DMA_ADDRESSING_MODE == 2
/*
 *  Lookup the 64 bit DMA segments map.
 *  This is only used if the direct mapping
 *  has been unsuccessful.
 */
int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s)
{
	int i;

	/* 64 bit (DAC) addressing must be enabled, otherwise this is a bug */
	if (!use_dac(np))
		goto weird;

	/* Look up existing mappings */
	for (i = SYM_DMAP_SIZE-1; i > 0; i--) {
		if (h == np->dmap_bah[i])
			return i;
	}
	/* If direct mapping is free, get it */
	if (!np->dmap_bah[s])
		goto new;
	/* Collision -> lookup free mappings */
	for (s = SYM_DMAP_SIZE-1; s > 0; s--) {
		if (!np->dmap_bah[s])
			goto new;
	}
weird:
	panic("sym: ran out of 64 bit DMA segment registers");
	return -1;
new:
	/*
	 *  Claim the slot and remember that the chip's scratch
	 *  segment registers must be rewritten before use
	 *  (see sym_update_dmap_regs below).
	 */
	np->dmap_bah[s] = h;
	np->dmap_dirty = 1;
	return s;
}

/*
 *  Update IO registers scratch C..R so they will be
 *  in sync. with queued CCB expectations.
 */
static void sym_update_dmap_regs(struct sym_hcb *np)
{
	int o, i;

	if (!np->dmap_dirty)
		return;
	/* Write each cached segment base into consecutive 32-bit scratch
	 * registers, starting at nc_scrx[0]. */
	o = offsetof(struct sym_reg, nc_scrx[0]);
	for (i = 0; i < SYM_DMAP_SIZE; i++) {
		OUTL_OFF(np, o, np->dmap_bah[i]);
		o += 4;
	}
	np->dmap_dirty = 0;
}
#endif

/* Enforce all the fiddly SPI rules and the chip limitations */
static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget,
		struct sym_trans *goal)
{
	if (!spi_support_wide(starget))
		goal->width = 0;

	/* No sync support: force fully asynchronous, narrow-compatible goal */
	if (!spi_support_sync(starget)) {
		goal->iu = 0;
		goal->dt = 0;
		goal->qas = 0;
		goal->offset = 0;
		return;
	}

	if (spi_support_dt(starget)) {
		if (spi_support_dt_only(starget))
			goal->dt = 1;

		/* DT is meaningless without a sync offset */
		if (goal->offset == 0)
			goal->dt = 0;
	} else {
		goal->dt = 0;
	}

	/* Some targets fail to properly negotiate DT in SE mode */
	if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN))
		goal->dt = 0;

	if (goal->dt) {
		/* all DT transfers must be wide */
		goal->width = 1;
		if (goal->offset > np->maxoffs_dt)
			goal->offset = np->maxoffs_dt;
		if (goal->period < np->minsync_dt)
			goal->period = np->minsync_dt;
		if (goal->period > np->maxsync_dt)
			goal->period = np->maxsync_dt;
	} else {
		/* IU and QAS are only valid together with DT */
		goal->iu = goal->qas = 0;
		if (goal->offset > np->maxoffs)
			goal->offset = np->maxoffs;
		if (goal->period < np->minsync)
			goal->period = np->minsync;
		if (goal->period > np->maxsync)
			goal->period = np->maxsync;
	}
}

/*
 *  Prepare the next negotiation message if
 *  needed.
 *
 *  Fill in the part of message buffer that contains the
 *  negotiation and the nego_status field of the CCB.
 *  Returns the size of the message in bytes.
 */
static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr)
{
	struct sym_tcb *tp = &np->target[cp->target];
	struct scsi_target *starget = tp->starget;
	struct sym_trans *goal = &tp->tgoal;
	int msglen = 0;
	int nego;

	/* Clamp the goal to what the target and the chip both support */
	sym_check_goals(np, starget, goal);

	/*
	 * Many devices implement PPR in a buggy way, so only use it if we
	 * really want to.
	 */
	if (goal->renego == NS_PPR || (goal->offset &&
	    (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)))) {
		nego = NS_PPR;
	} else if (goal->renego == NS_WIDE || goal->width) {
		nego = NS_WIDE;
	} else if (goal->renego == NS_SYNC || goal->offset) {
		nego = NS_SYNC;
	} else {
		goal->check_nego = 0;
		nego = 0;
	}

	switch (nego) {
	case NS_SYNC:
		msglen += spi_populate_sync_msg(msgptr + msglen, goal->period,
				goal->offset);
		break;
	case NS_WIDE:
		msglen += spi_populate_width_msg(msgptr + msglen, goal->width);
		break;
	case NS_PPR:
		msglen += spi_populate_ppr_msg(msgptr + msglen, goal->period,
				goal->offset, goal->width,
				(goal->iu ? PPR_OPT_IU : 0) |
				(goal->dt ? PPR_OPT_DT : 0) |
				(goal->qas ? PPR_OPT_QAS : 0));
		break;
	}

	cp->nego_status = nego;

	if (nego) {
		tp->nego_cp = cp; /* Keep track a nego will be performed */
		if (DEBUG_FLAGS & DEBUG_NEGO) {
			sym_print_nego_msg(np, cp->target,
					  nego == NS_SYNC ? "sync msgout" :
					  nego == NS_WIDE ? "wide msgout" :
					  "ppr msgout", msgptr);
		}
	}

	return msglen;
}

/*
 *  Insert a job into the start queue.
 */
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
{
	u_short	qidx;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 *  If the previously queued CCB is not yet done,
	 *  set the IARB hint. The SCRIPTS will go with IARB
	 *  for this job when starting the previous one.
	 *  We leave devices a chance to win arbitration by
	 *  not using more than 'iarb_max' consecutive
	 *  immediate arbitrations.
	 */
	if (np->last_cp && np->iarb_count < np->iarb_max) {
		np->last_cp->host_flags |= HF_HINT_IARB;
		++np->iarb_count;
	}
	else
		np->iarb_count = 0;
	np->last_cp = cp;
#endif

#if   SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 *  Make SCRIPTS aware of the 64 bit DMA
	 *  segment registers not being up-to-date.
	 */
	if (np->dmap_dirty)
		cp->host_xflags |= HX_DMAP_DIRTY;
#endif

	/*
	 *  Insert first the idle task and then our job.
	 *  The MBs should ensure proper ordering.
	 */
	qidx = np->squeueput + 2;
	if (qidx >= MAX_QUEUE*2) qidx = 0;

	/* Idle task first (barrier), then the real job: the SCRIPTS
	 * processor scans this queue concurrently, so the write order
	 * must be enforced. */
	np->squeue [qidx]	   = cpu_to_scr(np->idletask_ba);
	MEMORY_WRITE_BARRIER();
	np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);

	np->squeueput = qidx;

	if (DEBUG_FLAGS & DEBUG_QUEUE)
		scmd_printk(KERN_DEBUG, cp->cmd, "queuepos=%d\n",
							np->squeueput);

	/*
	 *  Script processor may be waiting for reselect.
	 *  Wake it up.
	 */
	MEMORY_WRITE_BARRIER();
	OUTB(np, nc_istat, SIGP|np->istat_sem);
}

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
/*
 *  Start next ready-to-start CCBs.
 */
void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;

	/*
	 *  Paranoia, as usual. :-)
	 */
	assert(!lp->started_tags || !lp->started_no_tag);

	/*
	 *  Try to start as many commands as asked by caller.
	 *  Prevent from having both tagged and untagged
	 *  commands queued to the device at the same time.
 */
	while (maxn--) {
		qp = sym_remque_head(&lp->waiting_ccbq);
		if (!qp)
			break;
		cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq);
		if (cp->tag != NO_TAG) {
			/*
			 *  Tagged command: never mix with an untagged one
			 *  and honour the per-device tag limit; otherwise
			 *  requeue it and stop.
			 */
			if (lp->started_no_tag ||
			    lp->started_tags >= lp->started_max) {
				sym_insque_head(qp, &lp->waiting_ccbq);
				break;
			}
			lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba);
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTA_BA(np, resel_tag));
			++lp->started_tags;
		} else {
			/*
			 *  Untagged command: only one at a time, and never
			 *  while tagged commands are outstanding.
			 */
			if (lp->started_no_tag || lp->started_tags) {
				sym_insque_head(qp, &lp->waiting_ccbq);
				break;
			}
			lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
			lp->head.resel_sa =
			      cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
			++lp->started_no_tag;
		}
		cp->started = 1;
		sym_insque_tail(qp, &lp->started_ccbq);
		sym_put_start_queue(np, cp);
	}
}
#endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */

/*
 *  The chip may have completed jobs. Look at the DONE QUEUE.
 *
 *  On paper, memory read barriers may be needed here to
 *  prevent out of order LOADs by the CPU from having
 *  prefetched stale data prior to DMA having occurred.
 */
static int sym_wakeup_done (struct sym_hcb *np)
{
	struct sym_ccb *cp;
	int i, n;
	u32 dsa;

	n = 0;
	i = np->dqueueget;

	/* MEMORY_READ_BARRIER(); */
	while (1) {
		/* A zero DSA slot marks the end of the completed entries */
		dsa = scr_to_cpu(np->dqueue[i]);
		if (!dsa)
			break;
		np->dqueue[i] = 0;
		if ((i = i+2) >= MAX_QUEUE*2)
			i = 0;

		cp = sym_ccb_from_dsa(np, dsa);
		if (cp) {
			MEMORY_READ_BARRIER();
			sym_complete_ok (np, cp);
			++n;
		}
		else
			printf ("%s: bad DSA (%x) in done queue.\n",
				sym_name(np), (u_int) dsa);
	}
	np->dqueueget = i;

	return n;
}

/*
 *  Complete all CCBs queued to the COMP queue.
 *
 *  These CCBs are assumed:
 *  - Not to be referenced either by devices or
 *    SCRIPTS-related queues and datas.
 *  - To have to be completed with an error condition
 *    or requeued.
 *
 *  The device queue freeze count is incremented
 *  for each CCB that does not prevent this.
 *  This function is called when all CCBs involved
 *  in error handling/recovery have been reaped.
 */
static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;

	while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
		struct scsi_cmnd *cmd;
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
		/* Leave quiet CCBs waiting for resources */
		if (cp->host_status == HS_WAIT)
			continue;
		cmd = cp->cmd;
		if (cam_status)
			sym_set_cam_status(cmd, cam_status);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
		if (sym_get_cam_status(cmd) == DID_SOFT_ERROR) {
			struct sym_tcb *tp = &np->target[cp->target];
			struct sym_lcb *lp = sym_lp(tp, cp->lun);
			if (lp) {
				/*
				 *  Soft error: requeue the command for a
				 *  later retry and undo its "started"
				 *  accounting.
				 */
				sym_remque(&cp->link2_ccbq);
				sym_insque_tail(&cp->link2_ccbq,
						&lp->waiting_ccbq);
				if (cp->started) {
					if (cp->tag != NO_TAG)
						--lp->started_tags;
					else
						--lp->started_no_tag;
				}
			}
			cp->started = 0;
			continue;
		}
#endif
		sym_free_ccb(np, cp);
		sym_xpt_done(np, cmd);
	}
}

/*
 *  Complete all active CCBs with error.
 *  Used on CHIP/SCSI RESET.
 */
static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status)
{
	/*
	 *  Move all active CCBs to the COMP queue
	 *  and flush this queue.
	 */
	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_flush_comp_queue(np, cam_status);
}

/*
 *  Start chip.
 *
 *  'reason' means:
 *     0: initialisation.
 *     1: SCSI BUS RESET delivered or received.
 *     2: SCSI BUS MODE changed.
 */
void sym_start_up(struct Scsi_Host *shost, int reason)
{
	struct sym_data *sym_data = shost_priv(shost);
	struct pci_dev *pdev = sym_data->pdev;
	struct sym_hcb *np = sym_data->ncb;
	int	i;
	u32	phys;

	/*
	 *  Reset chip if asked, otherwise just clear fifos.
	 */
	if (reason == 1)
		sym_soft_reset(np);
	else {
		OUTB(np, nc_stest3, TE|CSF);
		OUTONB(np, nc_ctest3, CLF);
	}

	/*
	 *  Clear Start Queue
	 */
	phys = np->squeue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->squeue[i]   = cpu_to_scr(np->idletask_ba);
		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	/* Last link wraps back to the head: the queue is circular */
	np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 *  Start at first entry.
	 */
	np->squeueput = 0;

	/*
	 *  Clear Done Queue
	 */
	phys = np->dqueue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->dqueue[i]   = 0;
		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 *  Start at first entry.
	 */
	np->dqueueget = 0;

	/*
	 *  Install patches in scripts.
	 *  This also let point to first position the start
	 *  and done queue pointers used from SCRIPTS.
	 */
	np->fw_patch(shost);

	/*
	 *  Wakeup all pending jobs.
	 */
	sym_flush_busy_queue(np, DID_RESET);

	/*
	 *  Init chip.
	 */
	OUTB(np, nc_istat,  0x00);			/*  Remove Reset, abort */
	INB(np, nc_mbox1);
	udelay(2000); /* The 895 needs time for the bus mode to settle */

	OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0);
					/*  full arb., ena parity, par->ATN  */
	OUTB(np, nc_scntl1, 0x00);		/*  odd parity, and remove CRST!! */

	sym_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */

	OUTB(np, nc_scid  , RRE|np->myaddr);	/* Adapter SCSI address */
	OUTW(np, nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
	OUTB(np, nc_istat , SIGP	);		/*  Signal Process */
	OUTB(np, nc_dmode , np->rv_dmode);		/* Burst length, dma mode */
	OUTB(np, nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */

	OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
	OUTB(np, nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
	OUTB(np, nc_ctest4, np->rv_ctest4);	/* Master parity checking */

	/* Extended Sreq/Sack filtering not supported on the C10 */
	if (np->features & FE_C10)
		OUTB(np, nc_stest2, np->rv_stest2);
	else
		OUTB(np, nc_stest2, EXT|np->rv_stest2);

	OUTB(np, nc_stest3, TE);			/* TolerANT enable */
	OUTB(np, nc_stime0, 0x0c);			/* HTH disabled  STO 0.25 sec */

	/*
	 *  For now, disable AIP generation on C1010-66.
	 */
	if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_66)
		OUTB(np, nc_aipcntl1, DISAIP);

	/*
	 *  C1010 rev. 0 errata.
	 *  Errant SGE's when in narrow. Write bits 4 & 5 of
	 *  STEST1 register to disable SGE. We probably should do
	 *  that from SCRIPTS for each selection/reselection, but
	 *  I just don't want.
	 *  :)
	 */
	if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 &&
	    pdev->revision < 1)
		OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30);

	/*
	 *  DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
	 *  Disable overlapped arbitration for some dual function devices,
	 *  regardless revision id (kind of post-chip-design feature. ;-))
	 */
	if (pdev->device == PCI_DEVICE_ID_NCR_53C875)
		OUTB(np, nc_ctest0, (1<<5));
	else if (pdev->device == PCI_DEVICE_ID_NCR_53C896)
		np->rv_ccntl0 |= DPR;

	/*
	 *  Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
	 *  and/or hardware phase mismatch, since only such chips
	 *  seem to support those IO registers.
	 */
	if (np->features & (FE_DAC|FE_NOPM)) {
		OUTB(np, nc_ccntl0, np->rv_ccntl0);
		OUTB(np, nc_ccntl1, np->rv_ccntl1);
	}

#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 *  Set up scratch C and DRS IO registers to map the 32 bit
	 *  DMA address range our data structures are located in.
	 */
	if (use_dac(np)) {
		np->dmap_bah[0] = 0;	/* ??? */
		OUTL(np, nc_scrx[0], np->dmap_bah[0]);
		OUTL(np, nc_drs, np->dmap_bah[0]);
	}
#endif

	/*
	 *  If phase mismatch handled by scripts (895A/896/1010),
	 *  set PM jump addresses.
	 */
	if (np->features & FE_NOPM) {
		OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle));
		OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle));
	}

	/*
	 *  Enable GPIO0 pin for writing if LED support from SCRIPTS.
	 *  Also set GPIO5 and clear GPIO6 if hardware LED control.
	 */
	if (np->features & FE_LED0)
		OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01);
	else if (np->features & FE_LEDC)
		OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20);

	/*
	 *      enable ints
	 */
	OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
	OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID);

	/*
	 *  For 895/6 enable SBMC interrupt and save current SCSI bus mode.
	 *  Try to eat the spurious SBMC interrupt that may occur when
	 *  we reset the chip but not the SCSI BUS (at initialization).
 */
	if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
		OUTONW(np, nc_sien, SBMC);
		if (reason == 0) {
			INB(np, nc_mbox1);
			mdelay(100);
			INW(np, nc_sist);
		}
		np->scsi_mode = INB(np, nc_stest4) & SMODE;
	}

	/*
	 *  Fill in target structure.
	 *  Reinitialize usrsync.
	 *  Reinitialize usrwide.
	 *  Prepare sync negotiation according to actual SCSI bus mode.
	 */
	for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
		struct sym_tcb *tp = &np->target[i];

		tp->to_reset  = 0;
		tp->head.sval = 0;
		tp->head.wval = np->rv_scntl3;
		tp->head.uval = 0;
		if (tp->lun0p)
			tp->lun0p->to_clear = 0;
		if (tp->lunmp) {
			int ln;

			for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
				if (tp->lunmp[ln])
					tp->lunmp[ln]->to_clear = 0;
		}
	}

	/*
	 *  Download SCSI SCRIPTS to on-chip RAM if present,
	 *  and start script processor.
	 *  We do the download preferentially from the CPU.
	 *  For platforms that may not support PCI memory mapping,
	 *  we use simple SCRIPTS that performs MEMORY MOVEs.
	 */
	phys = SCRIPTA_BA(np, init);
	if (np->ram_ba) {
		if (sym_verbose >= 2)
			printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np));
		memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz);
		if (np->features & FE_RAM8K) {
			memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz);
			phys = scr_to_cpu(np->scr_ram_seg);
			OUTL(np, nc_mmws, phys);
			OUTL(np, nc_mmrs, phys);
			OUTL(np, nc_sfs,  phys);
			phys = SCRIPTB_BA(np, start64);
		}
	}

	np->istat_sem = 0;

	OUTL(np, nc_dsa, np->hcb_ba);
	OUTL_DSP(np, phys);

	/*
	 *  Notify the XPT about the RESET condition.
	 */
	if (reason != 0)
		sym_xpt_async_bus_reset(np);
}

/*
 *  Switch trans mode for current job and its target.
 */
static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak)
{
	SYM_QUEHEAD *qp;
	u_char sval, wval, uval;
	struct sym_tcb *tp = &np->target[target];

	assert(target == (INB(np, nc_sdid) & 0x0f));

	/* Work on shadow copies; the real registers and the per-target
	 * cached values are only updated when something changed. */
	sval = tp->head.sval;
	wval = tp->head.wval;
	uval = tp->head.uval;

#if 0
	printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
		sval, wval, uval, np->rv_scntl3);
#endif
	/*
	 *  Set the offset.
	 */
	if (!(np->features & FE_C10))
		sval = (sval & ~0x1f) | ofs;
	else
		sval = (sval & ~0x3f) | ofs;

	/*
	 *  Set the sync divisor and extra clock factor.
	 */
	if (ofs != 0) {
		wval = (wval & ~0x70) | ((div+1) << 4);
		if (!(np->features & FE_C10))
			sval = (sval & ~0xe0) | (fak << 5);
		else {
			uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
			if (fak >= 1)
				uval |= (XCLKH_ST|XCLKH_DT);
			if (fak >= 2)
				uval |= (XCLKS_ST|XCLKS_DT);
		}
	}

	/*
	 *  Set the bus width.
	 */
	wval = wval & ~EWS;
	if (wide != 0)
		wval |= EWS;

	/*
	 *  Set misc. ultra enable bits.
	 */
	if (np->features & FE_C10) {
		uval = uval & ~(U3EN|AIPCKEN);
		if (opts)	{
			assert(np->features & FE_U3EN);
			uval |= U3EN;
		}
	} else {
		wval = wval & ~ULTRA;
		if (per <= 12)
			wval |= ULTRA;
	}

	/*
	 *  Stop there if sync parameters are unchanged.
	 */
	if (tp->head.sval == sval &&
	    tp->head.wval == wval &&
	    tp->head.uval == uval)
		return;
	tp->head.sval = sval;
	tp->head.wval = wval;
	tp->head.uval = uval;

	/*
	 *  Disable extended Sreq/Sack filtering if per < 50.
	 *  Not supported on the C1010.
	 */
	if (per < 50 && !(np->features & FE_C10))
		OUTOFFB(np, nc_stest2, EXT);

	/*
	 *  set actual value and sync_status
	 */
	OUTB(np, nc_sxfer,  tp->head.sval);
	OUTB(np, nc_scntl3, tp->head.wval);

	if (np->features & FE_C10) {
		OUTB(np, nc_scntl4, tp->head.uval);
	}

	/*
	 *  patch ALL busy ccbs of this target.
 */
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		struct sym_ccb *cp;
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->target != target)
			continue;
		cp->phys.select.sel_scntl3 = tp->head.wval;
		cp->phys.select.sel_sxfer  = tp->head.sval;
		if (np->features & FE_C10) {
			cp->phys.select.sel_scntl4 = tp->head.uval;
		}
	}
}

/*
 *  Print the negotiated transfer agreement for the target, but only
 *  when it differs from what was last printed (the tprint cache),
 *  so repeated renegotiations do not flood the log.
 */
static void sym_announce_transfer_rate(struct sym_tcb *tp)
{
	struct scsi_target *starget = tp->starget;

	if (tp->tprint.period != spi_period(starget) ||
	    tp->tprint.offset != spi_offset(starget) ||
	    tp->tprint.width != spi_width(starget) ||
	    tp->tprint.iu != spi_iu(starget) ||
	    tp->tprint.dt != spi_dt(starget) ||
	    tp->tprint.qas != spi_qas(starget) ||
	    !tp->tprint.check_nego) {
		tp->tprint.period = spi_period(starget);
		tp->tprint.offset = spi_offset(starget);
		tp->tprint.width = spi_width(starget);
		tp->tprint.iu = spi_iu(starget);
		tp->tprint.dt = spi_dt(starget);
		tp->tprint.qas = spi_qas(starget);
		tp->tprint.check_nego = 1;

		spi_display_xfer_agreement(starget);
	}
}

/*
 *  We received a WDTR.
 *  Let everything be aware of the changes.
 */
static void sym_setwide(struct sym_hcb *np, int target, u_char wide)
{
	struct sym_tcb *tp = &np->target[target];
	struct scsi_target *starget = tp->starget;

	sym_settrans(np, target, 0, 0, 0, wide, 0, 0);

	if (wide)
		tp->tgoal.renego = NS_WIDE;
	else
		tp->tgoal.renego = 0;
	tp->tgoal.check_nego = 0;
	tp->tgoal.width = wide;
	/* WDTR resets the synchronous agreement to asynchronous */
	spi_offset(starget) = 0;
	spi_period(starget) = 0;
	spi_width(starget) = wide;
	spi_iu(starget) = 0;
	spi_dt(starget) = 0;
	spi_qas(starget) = 0;

	if (sym_verbose >= 3)
		sym_announce_transfer_rate(tp);
}

/*
 *  We received a SDTR.
 *  Let everything be aware of the changes.
 */
static void
sym_setsync(struct sym_hcb *np, int target,
            u_char ofs, u_char per, u_char div, u_char fak)
{
	struct sym_tcb *tp = &np->target[target];
	struct scsi_target *starget = tp->starget;
	/* Keep the currently agreed bus width */
	u_char wide = (tp->head.wval & EWS) ?
BUS_16_BIT : BUS_8_BIT;

	sym_settrans(np, target, 0, ofs, per, wide, div, fak);

	if (wide)
		tp->tgoal.renego = NS_WIDE;
	else if (ofs)
		tp->tgoal.renego = NS_SYNC;
	else
		tp->tgoal.renego = 0;
	spi_period(starget) = per;
	spi_offset(starget) = ofs;
	spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0;

	/* Only adopt the agreement as the new goal when no PPR-only
	 * options are wanted (those would require a PPR negotiation). */
	if (!tp->tgoal.dt && !tp->tgoal.iu && !tp->tgoal.qas) {
		tp->tgoal.period = per;
		tp->tgoal.offset = ofs;
		tp->tgoal.check_nego = 0;
	}

	sym_announce_transfer_rate(tp);
}

/*
 *  We received a PPR.
 *  Let everything be aware of the changes.
 */
static void
sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs,
             u_char per, u_char wide, u_char div, u_char fak)
{
	struct sym_tcb *tp = &np->target[target];
	struct scsi_target *starget = tp->starget;

	sym_settrans(np, target, opts, ofs, per, wide, div, fak);

	if (wide || ofs)
		tp->tgoal.renego = NS_PPR;
	else
		tp->tgoal.renego = 0;
	spi_width(starget) = tp->tgoal.width = wide;
	spi_period(starget) = tp->tgoal.period = per;
	spi_offset(starget) = tp->tgoal.offset = ofs;
	spi_iu(starget) = tp->tgoal.iu = !!(opts & PPR_OPT_IU);
	spi_dt(starget) = tp->tgoal.dt = !!(opts & PPR_OPT_DT);
	spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS);
	tp->tgoal.check_nego = 0;

	sym_announce_transfer_rate(tp);
}

/*
 *  generic recovery from scsi interrupt
 *
 *  The doc says that when the chip gets an SCSI interrupt,
 *  it tries to stop in an orderly fashion, by completing
 *  an instruction fetch that had started or by flushing
 *  the DMA fifo for a write to memory that was executing.
 *  Such a fashion is not enough to know if the instruction
 *  that was just before the current DSP value has been
 *  executed or not.
 *
 *  There are some small SCRIPTS sections that deal with
 *  the start queue and the done queue that may break any
 *  assumption from the C code if we are interrupted
 *  inside, so we reset if this happens. Btw, since these
 *  SCRIPTS sections are executed while the SCRIPTS hasn't
 *  started SCSI operations, it is very unlikely to happen.
 *
 *  All the driver data structures are supposed to be
 *  allocated from the same 4 GB memory window, so there
 *  is a 1 to 1 relationship between DSA and driver data
 *  structures. Since we are careful :) to invalidate the
 *  DSA when we complete a command or when the SCRIPTS
 *  pushes a DSA into a queue, we can trust it when it
 *  points to a CCB.
 */
static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts)
{
	u32	dsp	= INL(np, nc_dsp);
	u32	dsa	= INL(np, nc_dsa);
	struct sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);

	/*
	 *  If we haven't been interrupted inside the SCRIPTS
	 *  critical pathes, we can safely restart the SCRIPTS
	 *  and trust the DSA value if it matches a CCB.
	 */
	if ((!(dsp > SCRIPTA_BA(np, getjob_begin) &&
	       dsp < SCRIPTA_BA(np, getjob_end) + 1)) &&
	    (!(dsp > SCRIPTA_BA(np, ungetjob) &&
	       dsp < SCRIPTA_BA(np, reselect) + 1)) &&
	    (!(dsp > SCRIPTB_BA(np, sel_for_abort) &&
	       dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) &&
	    (!(dsp > SCRIPTA_BA(np, done) &&
	       dsp < SCRIPTA_BA(np, done_end) + 1))) {
		OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo  */
		OUTB(np, nc_stest3, TE|CSF);		  /* clear scsi fifo */
		/*
		 *  If we have a CCB, let the SCRIPTS call us back for
		 *  the handling of the error with SCRATCHA filled with
		 *  STARTPOS. This way, we will be able to freeze the
		 *  device queue and requeue awaiting IOs.
		 */
		if (cp) {
			cp->host_status = hsts;
			OUTL_DSP(np, SCRIPTA_BA(np, complete_error));
		}
		/*
		 *  Otherwise just restart the SCRIPTS.
		 */
		else {
			OUTL(np, nc_dsa, 0xffffff);
			OUTL_DSP(np, SCRIPTA_BA(np, start));
		}
	}
	else
		goto reset_all;
	return;
reset_all:
	sym_start_reset(np);
}

/*
 *  chip exception handler for selection timeout
 */
static void sym_int_sto (struct sym_hcb *np)
{
	u32 dsp	= INL(np, nc_dsp);

	if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");

	/* Only the timeout raised right after wf_sel_done is recoverable */
	if (dsp == SCRIPTA_BA(np, wf_sel_done) + 8)
		sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
	else
		sym_start_reset(np);
}

/*
 *  chip exception handler for unexpected disconnect
 */
static void sym_int_udc (struct sym_hcb *np)
{
	printf ("%s: unexpected disconnect\n", sym_name(np));
	sym_recover_scsi_int(np, HS_UNEXPECTED);
}

/*
 *  chip exception handler for SCSI bus mode change
 *
 *  spi2-r12 11.2.3 says a transceiver mode change must
 *  generate a reset event and a device that detects a reset
 *  event shall initiate a hard reset. It says also that a
 *  device that detects a mode change shall set data transfer
 *  mode to eight bit asynchronous, etc...
 *  So, just reinitializing all except chip should be enough.
 */
static void sym_int_sbmc(struct Scsi_Host *shost)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	u_char scsi_mode = INB(np, nc_stest4) & SMODE;

	/*
	 *  Notify user.
	 */
	printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np),
		sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));

	/*
	 *  Should suspend command processing for a few seconds and
	 *  reinitialize all except the chip.
	 */
	sym_start_up(shost, 2);
}

/*
 *  chip exception handler for SCSI parity error.
 *
 *  When the chip detects a SCSI parity error and is
 *  currently executing a (CH)MOV instruction, it does
 *  not interrupt immediately, but tries to finish the
 *  transfer of the current scatter entry before
 *  interrupting. The following situations may occur:
 *
 *  - The complete scatter entry has been transferred
 *    without the device having changed phase.
 *    The chip will then interrupt with the DSP pointing
 *    to the instruction that follows the MOV.
 *
 *  - A phase mismatch occurs before the MOV finished
 *    and phase errors are to be handled by the C code.
 *    The chip will then interrupt with both PAR and MA
 *    conditions set.
 *
 *  - A phase mismatch occurs before the MOV finished and
 *    phase errors are to be handled by SCRIPTS.
 *    The chip will load the DSP with the phase mismatch
 *    JUMP address and interrupt the host processor.
 */
static void sym_int_par (struct sym_hcb *np, u_short sist)
{
	u_char	hsts	= INB(np, HS_PRT);
	u32	dsp	= INL(np, nc_dsp);
	u32	dbc	= INL(np, nc_dbc);
	u32	dsa	= INL(np, nc_dsa);
	u_char	sbcl	= INB(np, nc_sbcl);
	u_char	cmd	= dbc >> 24;
	int phase	= cmd & 7;
	struct sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);

	if (printk_ratelimit())
		printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
			sym_name(np), hsts, dbc, sbcl);

	/*
	 *  Check that the chip is connected to the SCSI BUS.
	 */
	if (!(INB(np, nc_scntl1) & ISCON)) {
		sym_recover_scsi_int(np, HS_UNEXPECTED);
		return;
	}

	/*
	 *  If the nexus is not clearly identified, reset the bus.
	 *  We will try to do better later.
	 */
	if (!cp)
		goto reset_all;

	/*
	 *  Check instruction was a MOV, direction was INPUT and
	 *  ATN is asserted.
	 */
	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
		goto reset_all;

	/*
	 *  Keep track of the parity error.
	 */
	OUTONB(np, HF_PRT, HF_EXT_ERR);
	cp->xerr_status |= XE_PARITY_ERR;

	/*
	 *  Prepare the message to send to the device.
	 */
	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;

	/*
	 *  If the old phase was DATA IN phase, we have to deal with
	 *  the 3 situations described above.
	 *  For other input phases (MSG IN and STATUS), the device
	 *  must resend the whole thing that failed parity checking
	 *  or signal error. So, jumping to dispatcher should be OK.
	 */
	if (phase == 1 || phase == 5) {
		/* Phase mismatch handled by SCRIPTS */
		if (dsp == SCRIPTB_BA(np, pm_handle))
			OUTL_DSP(np, dsp);
		/* Phase mismatch handled by the C code */
		else if (sist & MA)
			sym_int_ma (np);
		/* No phase mismatch occurred */
		else {
			sym_set_script_dp (np, cp, dsp);
			OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
		}
	}
	else if (phase == 7)	/* We definitely cannot handle parity errors */
#if 1				/* in message-in phase due to the reselection */
		goto reset_all; /* path and various message anticipations.   */
#else
		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
#endif
	else
		OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
	return;
reset_all:
	sym_start_reset(np);
	return;
}

/*
 *  chip exception handler for phase errors.
 *
 *  We have to construct a new transfer descriptor,
 *  to transfer the rest of the current block.
 */
static void sym_int_ma (struct sym_hcb *np)
{
	u32	dbc;
	u32	rest;
	u32	dsp;
	u32	dsa;
	u32	nxtdsp;
	u32	*vdsp;
	u32	oadr, olen;
	u32	*tblp;
	u32	newcmd;
	u_int	delta;
	u_char	cmd;
	u_char	hflags, hflags0;
	struct	sym_pmc *pm;
	struct	sym_ccb *cp;

	dsp	= INL(np, nc_dsp);
	dbc	= INL(np, nc_dbc);
	dsa	= INL(np, nc_dsa);

	cmd	= dbc >> 24;
	rest	= dbc & 0xffffff;
	delta	= 0;

	/*
	 *  locate matching cp if any.
	 */
	cp = sym_ccb_from_dsa(np, dsa);

	/*
	 *  Do not take into account dma fifo and various buffers in
	 *  INPUT phase since the chip flushes everything before
	 *  raising the MA interrupt for interrupted INPUT phases.
	 *  For DATA IN phase, we will check for the SWIDE later.
	 */
	if ((cmd & 7) != 1 && (cmd & 7) != 5) {
		u_char ss0, ss2;

		if (np->features & FE_DFBC)
			delta = INW(np, nc_dfbc);
		else {
			u32 dfifo;

			/*
			 *  Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
			 */
			dfifo = INL(np, nc_dfifo);

			/*
			 *  Calculate remaining bytes in DMA fifo.
			 *  (CTEST5 = dfifo >> 16)
			 */
			if (dfifo & (DFS << 16))
				delta = ((((dfifo >> 8) & 0x300) |
					  (dfifo & 0xff)) - rest) & 0x3ff;
			else
				delta = ((dfifo & 0xff) - rest) & 0x7f;
		}

		/*
		 *  The data in the dma fifo has not been transferred to
		 *  the target -> add the amount to the rest
		 *  and clear the data.
		 *  Check the sstat2 register in case of wide transfer.
		 */
		rest += delta;
		ss0  = INB(np, nc_sstat0);
		if (ss0 & OLF)
			rest++;
		if (!(np->features & FE_C10))
			if (ss0 & ORF)
				rest++;
		if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
			ss2 = INB(np, nc_sstat2);
			if (ss2 & OLF1)
				rest++;
			if (!(np->features & FE_C10))
				if (ss2 & ORF1)
					rest++;
		}

		/*
		 *  Clear fifos.
		 */
		OUTB(np, nc_ctest3, np->rv_ctest3 | CLF);	/* dma fifo  */
		OUTB(np, nc_stest3, TE|CSF);		/* scsi fifo */
	}

	/*
	 *  log the information
	 */
	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
		printf ("P%x%x RL=%d D=%d ", cmd&7, INB(np, nc_sbcl)&7,
			(unsigned) rest, (unsigned) delta);

	/*
	 *  try to find the interrupted script command,
	 *  and the address at which to continue.
	 */
	vdsp	= NULL;
	nxtdsp	= 0;
	if	(dsp >  np->scripta_ba &&
		 dsp <= np->scripta_ba + np->scripta_sz) {
		vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
		nxtdsp = dsp;
	}
	else if (dsp >  np->scriptb_ba &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
		nxtdsp = dsp;
	}

	/*
	 *  log the information
	 */
	if (DEBUG_FLAGS & DEBUG_PHASE) {
		printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
			cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
	}

	if (!vdsp) {
		printf ("%s: interrupted SCRIPT address not found.\n",
			sym_name (np));
		goto reset_all;
	}

	if (!cp) {
		printf ("%s: SCSI phase error fixup: CCB already dequeued.\n",
			sym_name (np));
		goto reset_all;
	}

	/*
	 *  get old startaddress and old length.
	 */
	oadr = scr_to_cpu(vdsp[1]);

	if (cmd & 0x10) {	/* Table indirect */
		tblp = (u32 *) ((char*) &cp->phys + oadr);
		olen = scr_to_cpu(tblp[0]);
		oadr = scr_to_cpu(tblp[1]);
	} else {
		tblp = (u32 *) 0;
		olen = scr_to_cpu(vdsp[0]) & 0xffffff;
	}

	if (DEBUG_FLAGS & DEBUG_PHASE) {
		printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
			(unsigned) (scr_to_cpu(vdsp[0]) >> 24),
			tblp,
			(unsigned) olen,
			(unsigned) oadr);
	}

	/*
	 *  check cmd against assumed interrupted script command.
	 *  If dt data phase, the MOVE instruction hasn't bit 4 of
	 *  the phase.
	 */
	if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
		sym_print_addr(cp->cmd,
			"internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
			cmd, scr_to_cpu(vdsp[0]) >> 24);

		goto reset_all;
	}

	/*
	 *  if old phase not dataphase, leave here.
	 */
	if (cmd & 2) {
		sym_print_addr(cp->cmd,
			"phase change %x-%x %d@%08x resid=%d.\n",
			cmd&7, INB(np, nc_sbcl)&7,
			(unsigned)olen, (unsigned)oadr, (unsigned)rest);
		goto unexpected_phase;
	}

	/*
	 *  Choose the correct PM save area.
	 *
	 *  Look at the PM_SAVE SCRIPT if you want to understand
	 *  this stuff. The equivalent code is implemented in
	 *  SCRIPTS for the 895A, 896 and 1010 that are able to
	 *  handle PM from the SCRIPTS processor.
	 */
	hflags0 = INB(np, HF_PRT);
	hflags = hflags0;

	if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
		if (hflags & HF_IN_PM0)
			nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
		else if	(hflags & HF_IN_PM1)
			nxtdsp = scr_to_cpu(cp->phys.pm1.ret);

		if (hflags & HF_DP_SAVED)
			hflags ^= HF_ACT_PM;
	}

	if (!(hflags & HF_ACT_PM)) {
		pm = &cp->phys.pm0;
		newcmd = SCRIPTA_BA(np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		newcmd = SCRIPTA_BA(np, pm1_data);
	}

	hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
	if (hflags != hflags0)
		OUTB(np, HF_PRT, hflags);

	/*
	 *  fillin the phase mismatch context
	 */
	pm->sg.addr = cpu_to_scr(oadr + olen - rest);
	pm->sg.size = cpu_to_scr(rest);
	pm->ret     = cpu_to_scr(nxtdsp);

	/*
	 *  If we have a SWIDE,
	 *  - prepare the address to write the SWIDE from SCRIPTS,
	 *  - compute the SCRIPTS address to restart from,
	 *  - move current data pointer context by one byte.
	 */
	nxtdsp = SCRIPTA_BA(np, dispatch);
	if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
	    (INB(np, nc_scntl2) & WSR)) {
		u32 tmp;

		/*
		 *  Set up the table indirect for the MOVE
		 *  of the residual byte and adjust the data
		 *  pointer context.
		 */
		tmp = scr_to_cpu(pm->sg.addr);
		cp->phys.wresid.addr = cpu_to_scr(tmp);
		pm->sg.addr = cpu_to_scr(tmp + 1);
		tmp = scr_to_cpu(pm->sg.size);
		cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
		pm->sg.size = cpu_to_scr(tmp - 1);

		/*
		 *  If only the residual byte is to be moved,
		 *  no PM context is needed.
		 */
		if ((tmp&0xffffff) == 1)
			newcmd = pm->ret;

		/*
		 *  Prepare the address of SCRIPTS that will
		 *  move the residual byte to memory.
		 */
		nxtdsp = SCRIPTB_BA(np, wsr_ma_helper);
	}

	if (DEBUG_FLAGS & DEBUG_PHASE) {
		sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n",
			hflags0, hflags, newcmd,
			(unsigned)scr_to_cpu(pm->sg.addr),
			(unsigned)scr_to_cpu(pm->sg.size),
			(unsigned)scr_to_cpu(pm->ret));
	}

	/*
	 *  Restart the SCRIPTS processor.
	 */
	sym_set_script_dp (np, cp, newcmd);
	OUTL_DSP(np, nxtdsp);
	return;

	/*
	 *  Unexpected phase changes that occurs when the current phase
	 *  is not a DATA IN or DATA OUT phase are due to error conditions.
	 *  Such event may only happen when the SCRIPTS is using a
	 *  multibyte SCSI MOVE.
	 *
	 *  Phase change		Some possible cause
	 *
	 *  COMMAND  --> MSG IN	SCSI parity error detected by target.
	 *  COMMAND  --> STATUS	Bad command or refused by target.
	 *  MSG OUT  --> MSG IN     Message rejected by target.
	 *  MSG OUT  --> COMMAND    Bogus target that discards extended
	 *  			negotiation messages.
	 *
	 *  The code below does not care of the new phase and so
	 *  trusts the target. Why to annoy it ?
	 *  If the interrupted phase is COMMAND phase, we restart at
	 *  dispatcher.
	 *  If a target does not get all the messages after selection,
	 *  the code assumes blindly that the target discards extended
	 *  messages and clears the negotiation status.
	 *  If the target does not want all our response to negotiation,
	 *  we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
	 *  bloat for such a should_not_happen situation).
	 *  In all other situation, we reset the BUS.
	 *  Are these assumptions reasonable ? (Wait and see ...)
	 */
unexpected_phase:
	dsp -= 8;
	nxtdsp = 0;

	switch (cmd & 7) {
	case 2:	/* COMMAND phase */
		nxtdsp = SCRIPTA_BA(np, dispatch);
		break;
#if 0
	case 3:	/* STATUS  phase */
		nxtdsp = SCRIPTA_BA(np, dispatch);
		break;
#endif
	case 6:	/* MSG OUT phase */
		/*
		 *  If the device may want to use untagged when we want
		 *  tagged, we prepare an IDENTIFY without disc. granted,
		 *  since we will not be able to handle reselect.
		 *  Otherwise, we just don't care.
		 */
		if	(dsp == SCRIPTA_BA(np, send_ident)) {
			if (cp->tag != NO_TAG && olen - rest <= 3) {
				cp->host_status = HS_BUSY;
				np->msgout[0] = IDENTIFY(0, cp->lun);
				nxtdsp = SCRIPTB_BA(np, ident_break_atn);
			}
			else
				nxtdsp = SCRIPTB_BA(np, ident_break);
		}
		else if	(dsp == SCRIPTB_BA(np, send_wdtr) ||
			 dsp == SCRIPTB_BA(np, send_sdtr) ||
			 dsp == SCRIPTB_BA(np, send_ppr)) {
			nxtdsp = SCRIPTB_BA(np, nego_bad_phase);
			if (dsp == SCRIPTB_BA(np, send_ppr)) {
				struct scsi_device *dev = cp->cmd->device;
				dev->ppr = 0;
			}
		}
		break;
#if 0
	case 7:	/* MSG IN  phase */
		nxtdsp = SCRIPTA_BA(np, clrack);
		break;
#endif
	}

	if (nxtdsp) {
		OUTL_DSP(np, nxtdsp);
		return;
	}

reset_all:
	sym_start_reset(np);
}

/*
 *  chip interrupt handler
 *
 *  In normal situations, interrupt conditions occur one at
 *  a time. But when something bad happens on the SCSI BUS,
 *  the chip may raise several interrupt flags before
 *  stopping and interrupting the CPU. The additional
 *  interrupt flags are stacked in some extra registers
 *  after the SIP and/or DIP flag has been raised in the
 *  ISTAT. After the CPU has read the interrupt condition
 *  flag from SIST or DSTAT, the chip unstacks the other
 *  interrupt flags and sets the corresponding bits in
 *  SIST or DSTAT. Since the chip starts stacking once the
 *  SIP or DIP flag is set, there is a small window of time
 *  where the stacking does not occur.
 *
 *  Typically, multiple interrupt conditions may happen in
 *  the following situations:
 *
 *  - SCSI parity error + Phase mismatch  (PAR|MA)
 *    When an parity error is detected in input phase
 *    and the device switches to msg-in phase inside a
 *    block MOV.
 *  - SCSI parity error + Unexpected disconnect  (PAR|UDC)
 *    When a stupid device does not want to handle the
 *    recovery of an SCSI parity error.
 *  - Some combinations of STO, PAR, UDC, ...
 *    When using non compliant SCSI stuff, when user is
 *    doing non compliant hot tampering on the BUS, when
 *    something really bad happens to a device, etc ...
 *
 * The heuristic suggested by SYMBIOS to handle
 * multiple interrupts is to try unstacking all
 * interrupts conditions and to handle them on some
 * priority based on error severity.
 * This will work when the unstacking has been
 * successful, but we cannot be 100 % sure of that,
 * since the CPU may have been faster to unstack than
 * the chip is able to stack. Hmmm ... But it seems that
 * such a situation is very unlikely to happen.
 *
 * If this happen, for example STO caught by the CPU
 * then UDC happenning before the CPU have restarted
 * the SCRIPTS, the driver may wrongly complete the
 * same command on UDC, since the SCRIPTS didn't restart
 * and the DSA still points to the same command.
 * We avoid this situation by setting the DSA to an
 * invalid value when the CCB is completed and before
 * restarting the SCRIPTS.
 *
 * Another issue is that we need some section of our
 * recovery procedures to be somehow uninterruptible but
 * the SCRIPTS processor does not provides such a
 * feature. For this reason, we handle recovery preferently
 * from the C code and check against some SCRIPTS critical
 * sections from the C code.
 *
 * Hopefully, the interrupt handling of the driver is now
 * able to resist to weird BUS error conditions, but donnot
 * ask me for any guarantee that it will never fail. :-)
 * Use at your own decision and risk.
 */

/*
 * Returns IRQ_HANDLED when at least one interrupt condition was
 * serviced, IRQ_NONE otherwise (so a shared IRQ line is handled
 * correctly).
 */
irqreturn_t sym_interrupt(struct Scsi_Host *shost)
{
	struct sym_data *sym_data = shost_priv(shost);
	struct sym_hcb *np = sym_data->ncb;
	struct pci_dev *pdev = sym_data->pdev;
	u_char	istat, istatc;
	u_char	dstat;
	u_short	sist;

	/*
	 * interrupt on the fly ?
	 * (SCRIPTS may still be running)
	 *
	 * A `dummy read' is needed to ensure that the
	 * clear of the INTF flag reaches the device
	 * and that posted writes are flushed to memory
	 * before the scanning of the DONE queue.
	 * Note that SCRIPTS also (dummy) read to memory
	 * prior to deliver the INTF interrupt condition.
	 */
	istat = INB(np, nc_istat);
	if (istat & INTF) {
		/*
		 * Acknowledge INTF while keeping SIGP and our semaphore
		 * bit (istat_sem) asserted.  NOTE(review): this presumes
		 * INTF acks by writing it back — confirm against the chip
		 * manual.
		 */
		OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem);
		istat |= INB(np, nc_istat);		/* DUMMY READ */
		if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
		sym_wakeup_done(np);
	}

	if (!(istat & (SIP|DIP)))
		return (istat & INTF) ? IRQ_HANDLED : IRQ_NONE;

#if 0	/* We should never get this one */
	if (istat & CABRT)
		OUTB(np, nc_istat, CABRT);
#endif

	/*
	 * PAR and MA interrupts may occur at the same time,
	 * and we need to know of both in order to handle
	 * this situation properly. We try to unstack SCSI
	 * interrupts for that reason. BTW, I dislike a LOT
	 * such a loop inside the interrupt routine.
	 * Even if DMA interrupt stacking is very unlikely to
	 * happen, we also try unstacking these ones, since
	 * this has no performance impact.
	 */
	sist	= 0;
	dstat	= 0;
	istatc	= istat;
	do {
		/* Reading SIST/DSTAT unstacks the next pending condition. */
		if (istatc & SIP)
			sist  |= INW(np, nc_sist);
		if (istatc & DIP)
			dstat |= INB(np, nc_dstat);
		istatc = INB(np, nc_istat);
		istat |= istatc;

		/* Prevent deadlock waiting on a condition that may
		 * never clear. */
		if (unlikely(sist == 0xffff && dstat == 0xff)) {
			/* All-ones reads suggest the device fell off the
			 * bus (e.g. surprise removal / PCI error). */
			if (pci_channel_offline(pdev))
				return IRQ_NONE;
		}
	} while (istatc & (SIP|DIP));

	if (DEBUG_FLAGS & DEBUG_TINY)
		printf ("<%d|%x:%x|%x:%x>",
			(int)INB(np, nc_scr0),
			dstat,sist,
			(unsigned)INL(np, nc_dsp),
			(unsigned)INL(np, nc_dbc));
	/*
	 * On paper, a memory read barrier may be needed here to
	 * prevent out of order LOADs by the CPU from having
	 * prefetched stale data prior to DMA having occurred.
	 * And since we are paranoid ... :)
	 */
	MEMORY_READ_BARRIER();

	/*
	 * First, interrupts we want to service cleanly.
	 *
	 * Phase mismatch (MA) is the most frequent interrupt
	 * for chip earlier than the 896 and so we have to service
	 * it as quickly as possible.
	 * A SCSI parity error (PAR) may be combined with a phase
	 * mismatch condition (MA).
	 * Programmed interrupts (SIR) are used to call the C code
	 * from SCRIPTS.
	 * The single step interrupt (SSI) is not used in this
	 * driver.
	 */
	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & PAR)	sym_int_par (np, sist);
		else if (sist & MA)	sym_int_ma (np);
		else if (dstat & SIR)	sym_int_sir(np);
		else if (dstat & SSI)	OUTONB_STD();
		else			goto unknown_int;
		return IRQ_HANDLED;
	}

	/*
	 * Now, interrupts that donnot happen in normal
	 * situations and that we may need to recover from.
	 *
	 * On SCSI RESET (RST), we reset everything.
	 * On SCSI BUS MODE CHANGE (SBMC), we complete all
	 * active CCBs with RESET status, prepare all devices
	 * for negotiating again and restart the SCRIPTS.
	 * On STO and UDC, we complete the CCB with the corres-
	 * ponding status and restart the SCRIPTS.
	 */
	if (sist & RST) {
		printf("%s: SCSI BUS reset detected.\n", sym_name(np));
		sym_start_up(shost, 1);
		return IRQ_HANDLED;
	}

	OUTB(np, nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
	OUTB(np, nc_stest3, TE|CSF);			/* clear scsi fifo */

	if (!(sist  & (GEN|HTH|SGE)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & SBMC)	sym_int_sbmc(shost);
		else if	(sist & STO)	sym_int_sto (np);
		else if	(sist & UDC)	sym_int_udc (np);
		else			goto unknown_int;
		return IRQ_HANDLED;
	}

	/*
	 * Now, interrupts we are not able to recover cleanly.
	 *
	 * Log message for hard errors.
	 * Reset everything.
	 */
	sym_log_hard_error(shost, sist, dstat);

	if ((sist & (GEN|HTH|SGE)) ||
	    (dstat & (MDPE|BF|ABRT|IID))) {
		sym_start_reset(np);
		return IRQ_HANDLED;
	}

unknown_int:
	/*
	 * We just miss the cause of the interrupt. :(
	 * Print a message. The timeout will do the real work.
	 */
	printf(	"%s: unknown interrupt(s) ignored, "
		"ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
		sym_name(np), istat, dstat, sist);
	return IRQ_NONE;
}

/*
 * Dequeue from the START queue all CCBs that match
 * a given target/lun/task condition (-1 means all),
 * and move them from the BUSY queue to the COMP queue
 * with DID_SOFT_ERROR status condition.
 * This function is used during error handling/recovery.
 * It is called with SCRIPTS not running.
 */
/*
 * @i:      starting index into np->squeue (the next slot the SCRIPTS
 *          would start), as computed by the caller from SCRATCHA.
 * @target/@lun/@task: match criteria, -1 acts as a wildcard.
 * Returns the number of dequeued CCBs.
 */
static int
sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task)
{
	int j;
	struct sym_ccb *cp;

	/*
	 * Make sure the starting index is within range.
	 */
	assert((i >= 0) && (i < 2*MAX_QUEUE));

	/*
	 * Walk until end of START queue and dequeue every job
	 * that matches the target/lun/task condition.
	 */
	j = i;
	while (i != np->squeueput) {
		cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
		assert(cp);
#ifdef SYM_CONF_IARB_SUPPORT
		/* Forget hints for IARB, they may be no longer relevant */
		cp->host_flags &= ~HF_HINT_IARB;
#endif
		if ((target == -1 || cp->target == target) &&
		    (lun    == -1 || cp->lun    == lun)    &&
		    (task   == -1 || cp->tag    == task)) {
			/* Matching job: pull it off the BUSY queue and
			 * complete it with a soft error. */
			sym_set_cam_status(cp->cmd, DID_SOFT_ERROR);
			sym_remque(&cp->link_ccbq);
			sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
		}
		else {
			/* Non-matching job: compact it toward slot j so the
			 * surviving entries stay contiguous for the SCRIPTS.
			 * Indices advance by 2 — each queue slot appears to
			 * occupy two 32-bit entries; confirm against the
			 * squeue layout. */
			if (i != j)
				np->squeue[j] = np->squeue[i];
			if ((j += 2) >= MAX_QUEUE*2) j = 0;
		}
		if ((i += 2) >= MAX_QUEUE*2) i = 0;
	}
	if (i != j)		/* Copy back the idle task if needed */
		np->squeue[j] = np->squeue[i];
	np->squeueput = j;	/* Update our current start queue pointer */

	/*
	 * NOTE(review): i and j wrap independently at MAX_QUEUE*2, so
	 * after a wrap (i - j) can be numerically negative even though
	 * jobs were dequeued — confirm callers tolerate this.
	 */
	return (i - j) / 2;
}

/*
 * chip handler for bad SCSI status condition
 *
 * In case of bad SCSI status, we unqueue all the tasks
 * currently queued to the controller but not yet started
 * and then restart the SCRIPTS processor immediately.
 *
 * QUEUE FULL and BUSY conditions are handled the same way.
 * Basically all the not yet started tasks are requeued in
 * device queue and the queue is frozen until a completion.
 *
 * For CHECK CONDITION and COMMAND TERMINATED status, we use
 * the CCB of the failed command to prepare a REQUEST SENSE
 * SCSI command and queue it to the controller queue.
 *
 * SCRATCHA is assumed to have been loaded with STARTPOS
 * before the SCRIPTS called the C code.
*/ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp) { u32 startp; u_char s_status = cp->ssss_status; u_char h_flags = cp->host_flags; int msglen; int i; /* * Compute the index of the next job to start from SCRIPTS. */ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; /* * The last CCB queued used for IARB hint may be * no longer relevant. Forget it. */ #ifdef SYM_CONF_IARB_SUPPORT if (np->last_cp) np->last_cp = 0; #endif /* * Now deal with the SCSI status. */ switch(s_status) { case S_BUSY: case S_QUEUE_FULL: if (sym_verbose >= 2) { sym_print_addr(cp->cmd, "%s\n", s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); } default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ sym_complete_error (np, cp); break; case S_TERMINATED: case S_CHECK_COND: /* * If we get an SCSI error when requesting sense, give up. */ if (h_flags & HF_SENSE) { sym_complete_error (np, cp); break; } /* * Dequeue all queued CCBs for that device not yet started, * and restart the SCRIPTS processor immediately. */ sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); OUTL_DSP(np, SCRIPTA_BA(np, start)); /* * Save some info of the actual IO. * Compute the data residual. */ cp->sv_scsi_status = cp->ssss_status; cp->sv_xerr_status = cp->xerr_status; cp->sv_resid = sym_compute_residual(np, cp); /* * Prepare all needed data structures for * requesting sense data. */ cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun); msglen = 1; /* * If we are currently using anything different from * async. 8 bit data transfers with that target, * start a negotiation, since the device may want * to report us a UNIT ATTENTION condition due to * a cause we currently ignore, and we donnot want * to be stuck with WIDE and/or SYNC data transfer. * * cp->nego_status is filled by sym_prepare_nego(). */ cp->nego_status = 0; msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]); /* * Message table indirect structure. 
*/ cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg2); cp->phys.smsg.size = cpu_to_scr(msglen); /* * sense command */ cp->phys.cmd.addr = CCB_BA(cp, sensecmd); cp->phys.cmd.size = cpu_to_scr(6); /* * patch requested size into sense command */ cp->sensecmd[0] = REQUEST_SENSE; cp->sensecmd[1] = 0; if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7) cp->sensecmd[1] = cp->lun << 5; cp->sensecmd[4] = SYM_SNS_BBUF_LEN; cp->data_len = SYM_SNS_BBUF_LEN; /* * sense data */ memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN); cp->phys.sense.addr = CCB_BA(cp, sns_bbuf); cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); /* * requeue the command. */ startp = SCRIPTB_BA(np, sdata_in); cp->phys.head.savep = cpu_to_scr(startp); cp->phys.head.lastp = cpu_to_scr(startp); cp->startp = cpu_to_scr(startp); cp->goalp = cpu_to_scr(startp + 16); cp->host_xflags = 0; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->host_flags = (HF_SENSE|HF_DATA_IN); cp->xerr_status = 0; cp->extra_bytes = 0; cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); /* * Requeue the command. */ sym_put_start_queue(np, cp); /* * Give back to upper layer everything we have dequeued. */ sym_flush_comp_queue(np, 0); break; } } /* * After a device has accepted some management message * as BUS DEVICE RESET, ABORT TASK, etc ..., or when * a device signals a UNIT ATTENTION condition, some * tasks are thrown away by the device. We are required * to reflect that on our tasks list since the device * will never complete these tasks. * * This function move from the BUSY queue to the COMP * queue all disconnected CCBs for a given target that * match the following criteria: * - lun=-1 means any logical UNIT otherwise a given one. * - task=-1 means any task, otherwise a given one. */ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task) { SYM_QUEHEAD qtmp, *qp; int i = 0; struct sym_ccb *cp; /* * Move the entire BUSY queue to our temporary queue. 
*/ sym_que_init(&qtmp); sym_que_splice(&np->busy_ccbq, &qtmp); sym_que_init(&np->busy_ccbq); /* * Put all CCBs that matches our criteria into * the COMP queue and put back other ones into * the BUSY queue. */ while ((qp = sym_remque_head(&qtmp)) != NULL) { struct scsi_cmnd *cmd; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); cmd = cp->cmd; if (cp->host_status != HS_DISCONNECT || cp->target != target || (lun != -1 && cp->lun != lun) || (task != -1 && (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); continue; } sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); /* Preserve the software timeout condition */ if (sym_get_cam_status(cmd) != DID_TIME_OUT) sym_set_cam_status(cmd, cam_status); ++i; #if 0 printf("XXXX TASK @%p CLEARED\n", cp); #endif } return i; } /* * chip handler for TASKS recovery * * We cannot safely abort a command, while the SCRIPTS * processor is running, since we just would be in race * with it. * * As long as we have tasks to abort, we keep the SEM * bit set in the ISTAT. When this bit is set, the * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) * each time it enters the scheduler. * * If we have to reset a target, clear tasks of a unit, * or to perform the abort of a disconnected job, we * restart the SCRIPTS for selecting the target. Once * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). * If it loses arbitration, the SCRIPTS will interrupt again * the next time it will enter its scheduler, and so on ... * * On SIR_TARGET_SELECTED, we scan for the more * appropriate thing to do: * * - If nothing, we just sent a M_ABORT message to the * target to get rid of the useless SCSI bus ownership. * According to the specs, no tasks shall be affected. * - If the target is to be reset, we send it a M_RESET * message. * - If a logical UNIT is to be cleared , we send the * IDENTIFY(lun) + M_ABORT. * - If an untagged task is to be aborted, we send the * IDENTIFY(lun) + M_ABORT. 
 * - If a tagged task is to be aborted, we send the
 *   IDENTIFY(lun) + task attributes + M_ABORT_TAG.
 *
 * Once our 'kiss of death' :) message has been accepted
 * by the target, the SCRIPTS interrupts again
 * (SIR_ABORT_SENT). On this interrupt, we complete
 * all the CCBs that should have been aborted by the
 * target according to our message.
 */
/*
 * @num: the SIR interrupt code dispatched below (SIR_SCRIPT_STOPPED,
 *       SIR_TARGET_SELECTED or SIR_ABORT_SENT).
 */
static void sym_sir_task_recovery(struct sym_hcb *np, int num)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;
	struct sym_tcb *tp = NULL; /* gcc isn't quite smart enough yet */
	struct scsi_target *starget;
	int target=-1, lun=-1, task;
	int i, k;

	switch(num) {
	/*
	 * The SCRIPTS processor stopped before starting
	 * the next command in order to allow us to perform
	 * some task recovery.
	 */
	case SIR_SCRIPT_STOPPED:
		/*
		 * Do we have any target to reset or unit to clear ?
		 */
		for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
			tp = &np->target[i];
			if (tp->to_reset ||
			    (tp->lun0p && tp->lun0p->to_clear)) {
				target = i;
				break;
			}
			if (!tp->lunmp)
				continue;
			/* LUN 0 is in lun0p; lunmp[] starts at LUN 1. */
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					target	= i;
					break;
				}
			}
			if (target != -1)
				break;
		}

		/*
		 * If not, walk the busy queue for any
		 * disconnected CCB to be aborted.
		 */
		if (target == -1) {
			FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
				cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
				if (cp->host_status != HS_DISCONNECT)
					continue;
				if (cp->to_abort) {
					target = cp->target;
					break;
				}
			}
		}

		/*
		 * If some target is to be selected,
		 * prepare and start the selection.
		 */
		if (target != -1) {
			tp = &np->target[target];
			np->abrt_sel.sel_id	= target;
			np->abrt_sel.sel_scntl3 = tp->head.wval;
			np->abrt_sel.sel_sxfer  = tp->head.sval;
			OUTL(np, nc_dsa, np->hcb_ba);
			OUTL_DSP(np, SCRIPTB_BA(np, sel_for_abort));
			return;
		}

		/*
		 * Now look for a CCB to abort that haven't started yet.
		 * Btw, the SCRIPTS processor is still stopped, so
		 * we are not in race.
		 */
		i = 0;
		cp = NULL;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_BUSY &&
			    cp->host_status != HS_NEGOTIATE)
				continue;
			if (!cp->to_abort)
				continue;
#ifdef SYM_CONF_IARB_SUPPORT
			/*
			 * If we are using IMMEDIATE ARBITRATION, we donnot
			 * want to cancel the last queued CCB, since the
			 * SCRIPTS may have anticipated the selection.
			 */
			if (cp == np->last_cp) {
				cp->to_abort = 0;
				continue;
			}
#endif
			i = 1;	/* Means we have found some */
			break;
		}
		if (!i) {
			/*
			 * We are done, so we donnot need
			 * to synchronize with the SCRIPTS anylonger.
			 * Remove the SEM flag from the ISTAT.
			 */
			np->istat_sem = 0;
			OUTB(np, nc_istat, SIGP);
			break;
		}
		/*
		 * Compute index of next position in the start
		 * queue the SCRIPTS intends to start and dequeue
		 * all CCBs for that device that haven't been started.
		 */
		i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
		i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

		/*
		 * Make sure at least our IO to abort has been dequeued.
		 */
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
		assert(i && sym_get_cam_status(cp->cmd) == DID_SOFT_ERROR);
#else
		sym_remque(&cp->link_ccbq);
		sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
#endif
		/*
		 * Keep track in cam status of the reason of the abort.
		 * to_abort == 2 marks an abort that originated from a
		 * timeout (see the matching test under SIR_TARGET_SELECTED).
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cmd, DID_TIME_OUT);
		else
			sym_set_cam_status(cp->cmd, DID_ABORT);

		/*
		 * Complete with error everything that we have dequeued.
		 */
		sym_flush_comp_queue(np, 0);
		break;
	/*
	 * The SCRIPTS processor has selected a target
	 * we may have some manual recovery to perform for.
	 */
	case SIR_TARGET_SELECTED:
		target = INB(np, nc_sdid) & 0xf;
		tp = &np->target[target];

		np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));

		/*
		 * If the target is to be reset, prepare a
		 * M_RESET message and clear the to_reset flag
		 * since we donnot expect this operation to fail.
		 */
		if (tp->to_reset) {
			np->abrt_msg[0] = M_RESET;
			np->abrt_tbl.size = 1;
			tp->to_reset = 0;
			break;
		}

		/*
		 * Otherwise, look for some logical unit to be cleared.
		 */
		if (tp->lun0p && tp->lun0p->to_clear)
			lun = 0;
		else if (tp->lunmp) {
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					lun = k;
					break;
				}
			}
		}

		/*
		 * If a logical unit is to be cleared, prepare
		 * an IDENTIFY(lun) + ABORT MESSAGE.
		 */
		if (lun != -1) {
			struct sym_lcb *lp = sym_lp(tp, lun);
			lp->to_clear = 0; /* We don't expect to fail here */
			np->abrt_msg[0] = IDENTIFY(0, lun);
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
			break;
		}

		/*
		 * Otherwise, look for some disconnected job to
		 * abort for this target.
		 */
		i = 0;
		cp = NULL;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_DISCONNECT)
				continue;
			if (cp->target != target)
				continue;
			if (!cp->to_abort)
				continue;
			i = 1;	/* Means we have some */
			break;
		}

		/*
		 * If we have none, probably since the device has
		 * completed the command before we won abitration,
		 * send a M_ABORT message without IDENTIFY.
		 * According to the specs, the device must just
		 * disconnect the BUS and not abort any task.
		 */
		if (!i) {
			np->abrt_msg[0] = M_ABORT;
			np->abrt_tbl.size = 1;
			break;
		}

		/*
		 * We have some task to abort.
		 * Set the IDENTIFY(lun)
		 */
		np->abrt_msg[0] = IDENTIFY(0, cp->lun);

		/*
		 * If we want to abort an untagged command, we
		 * will send a IDENTIFY + M_ABORT.
		 * Otherwise (tagged command), we will send
		 * a IDENTITFY + task attributes + ABORT TAG.
		 */
		if (cp->tag == NO_TAG) {
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
		} else {
			/* scsi_smsg[1]/[2] hold the queue-tag message bytes
			 * originally sent for this command. */
			np->abrt_msg[1] = cp->scsi_smsg[1];
			np->abrt_msg[2] = cp->scsi_smsg[2];
			np->abrt_msg[3] = M_ABORT_TAG;
			np->abrt_tbl.size = 4;
		}
		/*
		 * Keep track of software timeout condition, since the
		 * peripheral driver may not count retries on abort
		 * conditions not due to timeout.
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cmd, DID_TIME_OUT);
		cp->to_abort = 0; /* We donnot expect to fail here */
		break;

	/*
	 * The target has accepted our message and switched
	 * to BUS FREE phase as we expected.
	 */
	case SIR_ABORT_SENT:
		target = INB(np, nc_sdid) & 0xf;
		tp = &np->target[target];
		starget = tp->starget;

		/*
		** If we didn't abort anything, leave here.
		*/
		if (np->abrt_msg[0] == M_ABORT)
			break;

		/*
		 * If we sent a M_RESET, then a hardware reset has
		 * been performed by the target.
		 * - Reset everything to async 8 bit
		 * - Tell ourself to negotiate next time :-)
		 * - Prepare to clear all disconnected CCBs for
		 *   this target from our task list (lun=task=-1)
		 */
		lun = -1;
		task = -1;
		if (np->abrt_msg[0] == M_RESET) {
			tp->head.sval = 0;
			tp->head.wval = np->rv_scntl3;
			tp->head.uval = 0;
			spi_period(starget) = 0;
			spi_offset(starget) = 0;
			spi_width(starget) = 0;
			spi_iu(starget) = 0;
			spi_dt(starget) = 0;
			spi_qas(starget) = 0;
			tp->tgoal.check_nego = 1;
			tp->tgoal.renego = 0;
		}

		/*
		 * Otherwise, check for the LUN and TASK(s)
		 * concerned by the cancelation.
		 * If it is not ABORT_TAG then it is CLEAR_QUEUE
		 * or an ABORT message :-)
		 */
		else {
			/* abrt_msg[0] is the IDENTIFY byte; low 6 bits
			 * carry the LUN. */
			lun = np->abrt_msg[0] & 0x3f;
			if (np->abrt_msg[1] == M_ABORT_TAG)
				task = np->abrt_msg[2];
		}

		/*
		 * Complete all the CCBs the device should have
		 * aborted due to our 'kiss of death' message.
		 */
		i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
		sym_dequeue_from_squeue(np, i, target, lun, -1);
		sym_clear_tasks(np, DID_ABORT, target, lun, task);
		sym_flush_comp_queue(np, 0);

 		/*
		 * If we sent a BDR, make upper layer aware of that.
 		 */
		if (np->abrt_msg[0] == M_RESET)
			starget_printk(KERN_NOTICE, starget,
							"has been reset\n");
		break;
	}

	/*
	 * Print to the log the message we intend to send.
	 */
	if (num == SIR_TARGET_SELECTED) {
		dev_info(&tp->starget->dev, "control msgout:");
		sym_printl_hex(np->abrt_msg, np->abrt_tbl.size);
		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
	}

	/*
	 * Let the SCRIPTS processor continue.
	 */
	OUTONB_STD();
}

/*
 * Gerard's alchemy:) that deals with with the data
 * pointer for both MDP and the residual calculation.
 *
 * I didn't want to bloat the code by more than 200
 * lines for the handling of both MDP and the residual.
 * This has been achieved by using a data pointer
 * representation consisting in an index in the data
 * array (dp_sg) and a negative offset (dp_ofs) that
 * have the following meaning:
 *
 * - dp_sg = SYM_CONF_MAX_SG
 *   we are at the end of the data script.
 * - dp_sg < SYM_CONF_MAX_SG
 *   dp_sg points to the next entry of the scatter array
 *   we want to transfer.
 * - dp_ofs < 0
 *   dp_ofs represents the residual of bytes of the
 *   previous entry scatter entry we will send first.
 * - dp_ofs = 0
 *   no residual to send first.
 *
 * The function sym_evaluate_dp() accepts an arbitray
 * offset (basically from the MDP message) and returns
 * the corresponding values of dp_sg and dp_ofs.
 */
static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs)
{
	u32	dp_scr;
	int	dp_ofs, dp_sg, dp_sgmin;
	int	tmp;
	struct sym_pmc *pm;

	/*
	 * Compute the resulted data pointer in term of a script
	 * address within some DATA script and a signed byte offset.
	 */
	dp_scr = scr;
	dp_ofs = *ofs;
	if (dp_scr == SCRIPTA_BA(np, pm0_data))
		pm = &cp->phys.pm0;
	else if (dp_scr == SCRIPTA_BA(np, pm1_data))
		pm = &cp->phys.pm1;
	else
		pm = NULL;

	if (pm) {
		/* Unwind the phase-mismatch context: resume address and
		 * the bytes still owed from the interrupted move. */
		dp_scr  = scr_to_cpu(pm->ret);
		dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff;
	}

	/*
	 * If we are auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		*ofs = dp_ofs;
		return 0;
	}

	/*
	 * Deduce the index of the sg entry.
	 * Keep track of the index of the first valid entry.
	 * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
	 * end of the data.
	 */
	tmp = scr_to_cpu(cp->goalp);
	dp_sg = SYM_CONF_MAX_SG;
	if (dp_scr != tmp)
		dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;

	/*
	 * Move to the sg entry the data pointer belongs to.
	 *
	 * If we are inside the data area, we expect result to be:
	 *
	 * Either,
	 *     dp_ofs = 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to (or the end of the data)
	 * Or,
	 *     dp_ofs < 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to + 1.
	 */
	if (dp_ofs < 0) {
		/* Walk backwards, crediting whole sg entries until the
		 * remaining negative offset fits inside one entry. */
		int n;
		while (dp_sg > dp_sgmin) {
			--dp_sg;
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			n = dp_ofs + (tmp & 0xffffff);
			if (n > 0) {
				++dp_sg;
				break;
			}
			dp_ofs = n;
		}
	}
	else if (dp_ofs > 0) {
		/* Walk forwards, consuming sg entries until the positive
		 * offset has been absorbed. */
		while (dp_sg < SYM_CONF_MAX_SG) {
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			dp_ofs -= (tmp & 0xffffff);
			++dp_sg;
			if (dp_ofs <= 0)
				break;
		}
	}

	/*
	 * Make sure the data pointer is inside the data area.
	 * If not, return some error.
	 */
	if	(dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
		goto out_err;
	else if	(dp_sg > SYM_CONF_MAX_SG ||
		 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
		goto out_err;

	/*
	 * Save the extreme pointer if needed.
	 */
	if (dp_sg > cp->ext_sg ||
            (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
		cp->ext_sg  = dp_sg;
		cp->ext_ofs = dp_ofs;
	}

	/*
	 * Return data.
	 */
	*ofs = dp_ofs;
	return dp_sg;

out_err:
	return -1;
}

/*
 * chip handler for MODIFY DATA POINTER MESSAGE
 *
 * We also call this function on IGNORE WIDE RESIDUE
 * messages that do not match a SWIDE full condition.
 * Btw, we assume in that situation that such a message
 * is equivalent to a MODIFY DATA POINTER (offset=-1).
 */
static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs)
{
	int dp_ofs	= ofs;
	u32	dp_scr	= sym_get_script_dp (np, cp);
	u32	dp_ret;
	u32	tmp;
	u_char	hflags;
	int	dp_sg;
	struct	sym_pmc *pm;

	/*
	 * Not supported for auto-sense.
	 */
	if (cp->host_flags & HF_SENSE)
		goto out_reject;

	/*
	 * Apply our alchemy:) (see comments in sym_evaluate_dp()),
	 * to the resulted data pointer.
	 */
	dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
	if (dp_sg < 0)
		goto out_reject;

	/*
	 * And our alchemy:) allows to easily calculate the data
	 * script address we want to return for the next data phase.
	 */
	dp_ret = cpu_to_scr(cp->goalp);
	/* Each sg entry costs 2 script words (2*4 bytes) before the goal. */
	dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);

	/*
	 * If offset / scatter entry is zero we donnot need
	 * a context for the new current data pointer.
	 */
	if (dp_ofs == 0) {
		dp_scr = dp_ret;
		goto out_ok;
	}

	/*
	 * Get a context for the new current data pointer.
	 */
	hflags = INB(np, HF_PRT);

	/* HF_DP_SAVED means the current PM context still holds saved
	 * pointers; toggle to the other PM area so we don't clobber it. */
	if (hflags & HF_DP_SAVED)
		hflags ^= HF_ACT_PM;

	if (!(hflags & HF_ACT_PM)) {
		pm  = &cp->phys.pm0;
		dp_scr = SCRIPTA_BA(np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		dp_scr = SCRIPTA_BA(np, pm1_data);
	}

	hflags &= ~(HF_DP_SAVED);

	OUTB(np, HF_PRT, hflags);

	/*
	 * Set up the new current data pointer.
	 * ofs < 0 there, and for the next data phase, we
	 * want to transfer part of the data of the sg entry
	 * corresponding to index dp_sg-1 prior to returning
	 * to the main data script.
	 */
	pm->ret = cpu_to_scr(dp_ret);
	tmp  = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
	tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
	pm->sg.addr = cpu_to_scr(tmp);
	pm->sg.size = cpu_to_scr(-dp_ofs);

out_ok:
	sym_set_script_dp (np, cp, dp_scr);
	OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;

out_reject:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 * chip calculation of the data residual.
 *
 * As I used to say, the requirement of data residual
 * in SCSI is broken, useless and cannot be achieved
 * without huge complexity.
 * But most OSes and even the official CAM require it.
 * When stupidity happens to be so widely spread inside
 * a community, it gets hard to convince.
 *
 * Anyway, I don't care, since I am not going to use
 * any software that considers this data residual as
 * a relevant information. :)
 */
int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
{
	int dp_sg, dp_sgmin, resid = 0;
	int dp_ofs = 0;

	/*
	 * Check for some data lost or just thrown away.
	 * We are not required to be quite accurate in this
	 * situation. Btw, if we are odd for output and the
	 * device claims some more data, it may well happen
	 * than our residual be zero.
	 :-)
	 */
	if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
		/* Note: resid can go negative here when the device moved
		 * extra bytes (XE_EXTRA_DATA) — that is intentional. */
		if (cp->xerr_status & XE_EXTRA_DATA)
			resid -= cp->extra_bytes;
		if (cp->xerr_status & XE_SODL_UNRUN)
			++resid;
		if (cp->xerr_status & XE_SWIDE_OVRUN)
			--resid;
	}

	/*
	 * If all data has been transferred,
	 * there is no residual.
	 */
	if (cp->phys.head.lastp == cp->goalp)
		return resid;

	/*
	 * If no data transfer occurs, or if the data
	 * pointer is weird, return full residual.
	 */
	if (cp->startp == cp->phys.head.lastp ||
	    sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
			    &dp_ofs) < 0) {
		return cp->data_len - cp->odd_byte_adjustment;
	}

	/*
	 * If we were auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		return -dp_ofs;
	}

	/*
	 * We are now full comfortable in the computation
	 * of the data residual (2's complement).
	 */
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
	resid = -cp->ext_ofs;
	for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
		u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
		resid += (tmp & 0xffffff);
	}

	resid -= cp->odd_byte_adjustment;

	/*
	 * Hopefully, the result is not too wrong.
	 */
	return resid;
}

/*
 * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
 *
 * When we try to negotiate, we append the negotiation message
 * to the identify and (maybe) simple tag message.
 * The host status field is set to HS_NEGOTIATE to mark this
 * situation.
 *
 * If the target doesn't answer this message immediately
 * (as required by the standard), the SIR_NEGO_FAILED interrupt
 * will be raised eventually.
 * The handler removes the HS_NEGOTIATE status, and sets the
 * negotiated value to the default (async / nowide).
 *
 * If we receive a matching answer immediately, we check it
 * for validity, and set the values.
 *
 * If we receive a Reject message immediately, we assume the
 * negotiation has failed, and fall back to standard values.
 *
 * If we receive a negotiation message while not in HS_NEGOTIATE
 * state, it's a target initiated negotiation.
 * We prepare a
 * (hopefully) valid answer, set our parameters, and send back
 * this answer to the target.
 *
 * If the target doesn't fetch the answer (no message out phase),
 * we assume the negotiation has failed, and fall back to default
 * settings (SIR_NEGO_PROTO interrupt).
 *
 * When we set the values, we adjust them in all ccbs belonging
 * to this target, in the controller's register, and in the "phys"
 * field of the controller's struct sym_hcb.
 */

/*
 * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
 *
 * 'req' != 0 means the message is a target request; 0 means it is the
 * target's answer to our own request. Returns 0 on success, -1 when
 * the message must be rejected.
 */
static int
sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
{
	int target = cp->target;
	u_char chg, ofs, per, fak, div;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "sync msgin", np->msgin);
	}

	/*
	 * Get requested values.
	 * msgin[3] carries the transfer period factor,
	 * msgin[4] the REQ/ACK offset (as read below).
	 */
	chg = 0;
	per = np->msgin[3];
	ofs = np->msgin[4];

	/*
	 * Check values against our limits.
	 * 'chg' records that we had to clamp a value.
	 */
	if (ofs) {
		if (ofs > np->maxoffs)
			{chg = 1; ofs = np->maxoffs;}
	}

	if (ofs) {
		if (per < np->minsync)
			{chg = 1; per = np->minsync;}
	}

	/*
	 * Get new chip synchronous parameters value.
	 */
	div = fak = 0;
	if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
		goto reject_it;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_addr(cp->cmd,
				"sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
				ofs, per, div, fak, chg);
	}

	/*
	 * If it was an answer we want to change,
	 * then it isn't acceptable. Reject it.
	 */
	if (!req && chg)
		goto reject_it;

	/*
	 * Apply new values.
	 */
	sym_setsync (np, target, ofs, per, div, fak);

	/*
	 * It was an answer. We are done.
	 */
	if (!req)
		return 0;

	/*
	 * It was a request. Prepare an answer message.
	 */
	spi_populate_sync_msg(np->msgout, per, ofs);

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "sync msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	return 0;

reject_it:
	/* Fall back to asynchronous transfers for this target. */
	sym_setsync (np, target, 0, 0, 0, 0);
	return -1;
}

/*
 * Handle an incoming SDTR message: decide whether it is a target
 * request or an answer to ours, validate it, and resume the
 * SCRIPTS processor on the appropriate path.
 */
static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
{
	int req = 1;
	int result;

	/*
	 * Request or answer ?
	 */
	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
		OUTB(np, HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_SYNC)
			goto reject_it;
		req = 0;
	}

	/*
	 * Check and apply new values.
	 */
	result = sym_sync_nego_check(np, req, cp);
	if (result)	/* Not acceptable, reject it */
		goto reject_it;
	if (req) {	/* Was a request, send response. */
		cp->nego_status = NS_SYNC;
		OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
	}
	else		/* Was a response, we are done. */
		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;

reject_it:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
 *
 * Same request/answer convention as sym_sync_nego_check().
 * The PPR message carries period (msgin[3]), offset (msgin[5]),
 * width (msgin[6]) and protocol options (msgin[7]) as read below.
 */
static int
sym_ppr_nego_check(struct sym_hcb *np, int req, int target)
{
	struct sym_tcb *tp = &np->target[target];
	unsigned char fak, div;
	int dt, chg = 0;
	unsigned char per = np->msgin[3];
	unsigned char ofs = np->msgin[5];
	unsigned char wide = np->msgin[6];
	unsigned char opts = np->msgin[7] & PPR_OPT_MASK;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "ppr msgin", np->msgin);
	}

	/*
	 * Check values against our limits.
	 * DT options require wide transfers and chip support (FE_U3EN).
	 */
	if (wide > np->maxwide) {
		chg = 1;
		wide = np->maxwide;
	}
	if (!wide || !(np->features & FE_U3EN))
		opts = 0;

	if (opts != (np->msgin[7] & PPR_OPT_MASK))
		chg = 1;

	dt = opts & PPR_OPT_DT;

	/* Offset and period limits differ between DT and ST modes. */
	if (ofs) {
		unsigned char maxoffs = dt ? np->maxoffs_dt : np->maxoffs;
		if (ofs > maxoffs) {
			chg = 1;
			ofs = maxoffs;
		}
	}

	if (ofs) {
		unsigned char minsync = dt ? np->minsync_dt : np->minsync;
		if (per < minsync) {
			chg = 1;
			per = minsync;
		}
	}

	/*
	 * Get new chip synchronous parameters value.
	 */
	div = fak = 0;
	if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
		goto reject_it;

	/*
	 * If it was an answer we want to change,
	 * then it isn't acceptable. Reject it.
	 */
	if (!req && chg)
		goto reject_it;

	/*
	 * Apply new values.
	 */
	sym_setpprot(np, target, opts, ofs, per, wide, div, fak);

	/*
	 * It was an answer. We are done.
	 */
	if (!req)
		return 0;

	/*
	 * It was a request. Prepare an answer message.
	 */
	spi_populate_ppr_msg(np->msgout, per, ofs, wide, opts);

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "ppr msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	return 0;

reject_it:
	sym_setpprot (np, target, 0, 0, 0, 0, 0, 0);
	/*
	 * If it is a device response that should result in
	 * ST, we may want to try a legacy negotiation later.
	 */
	if (!req && !opts) {
		tp->tgoal.period = per;
		tp->tgoal.offset = ofs;
		tp->tgoal.width = wide;
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
		tp->tgoal.check_nego = 1;
	}
	return -1;
}

/*
 * Handle an incoming PPR message: request-or-answer dispatch,
 * validation, and SCRIPTS resumption (same shape as sym_sync_nego()).
 */
static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
{
	int req = 1;
	int result;

	/*
	 * Request or answer ?
	 */
	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
		OUTB(np, HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_PPR)
			goto reject_it;
		req = 0;
	}

	/*
	 * Check and apply new values.
	 */
	result = sym_ppr_nego_check(np, req, cp->target);
	if (result)	/* Not acceptable, reject it */
		goto reject_it;
	if (req) {	/* Was a request, send response. */
		cp->nego_status = NS_PPR;
		OUTL_DSP(np, SCRIPTB_BA(np, ppr_resp));
	}
	else		/* Was a response, we are done. */
		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;

reject_it:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
 *
 * Same request/answer convention as sym_sync_nego_check().
 * msgin[3] carries the requested transfer width exponent.
 */
static int
sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
{
	int target = cp->target;
	u_char chg, wide;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "wide msgin", np->msgin);
	}

	/*
	 * Get requested values.
	 */
	chg  = 0;
	wide = np->msgin[3];

	/*
	 * Check values against our limits.
	 */
	if (wide > np->maxwide) {
		chg = 1;
		wide = np->maxwide;
	}

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n",
				wide, chg);
	}

	/*
	 * If it was an answer we want to change,
	 * then it isn't acceptable. Reject it.
	 */
	if (!req && chg)
		goto reject_it;

	/*
	 * Apply new values.
	 */
	sym_setwide (np, target, wide);

	/*
	 * It was an answer. We are done.
	 */
	if (!req)
		return 0;

	/*
	 * It was a request. Prepare an answer message.
	 */
	spi_populate_width_msg(np->msgout, wide);

	np->msgin [0] = M_NOOP;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "wide msgout", np->msgout);
	}

	return 0;

reject_it:
	return -1;
}

/*
 * Handle an incoming WDTR message. On a successful answer from the
 * target we may immediately chain into a SYNC negotiation (see below).
 */
static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
{
	int req = 1;
	int result;

	/*
	 * Request or answer ?
	 */
	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
		OUTB(np, HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_WIDE)
			goto reject_it;
		req = 0;
	}

	/*
	 * Check and apply new values.
	 */
	result = sym_wide_nego_check(np, req, cp);
	if (result)	/* Not acceptable, reject it */
		goto reject_it;
	if (req) {	/* Was a request, send response. */
		cp->nego_status = NS_WIDE;
		OUTL_DSP(np, SCRIPTB_BA(np, wdtr_resp));
	} else {	/* Was a response. */
		/*
		 * Negotiate for SYNC immediately after WIDE response.
		 * This allows to negotiate for both WIDE and SYNC on
		 * a single SCSI command (Suggested by Justin Gibbs).
		 */
		if (tp->tgoal.offset) {
			spi_populate_sync_msg(np->msgout, tp->tgoal.period,
					tp->tgoal.offset);

			if (DEBUG_FLAGS & DEBUG_NEGO) {
				sym_print_nego_msg(np, cp->target,
				                   "sync msgout", np->msgout);
			}

			cp->nego_status = NS_SYNC;
			OUTB(np, HS_PRT, HS_NEGOTIATE);
			OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
			return;
		} else
			OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	}

	return;

reject_it:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 * Reset DT, SYNC or WIDE to default settings.
 *
 * Called when a negotiation does not succeed either
 * on rejection or on protocol error.
 *
 * A target that understands a PPR message should never
 * reject it, and messing with it is very unlikely.
 * So, if a PPR makes problems, we may just want to
 * try a legacy negotiation later.
 */
static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
{
	switch (cp->nego_status) {
	case NS_PPR:
#if 0
		sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0);
#else
		/* Clamp goals and retry a legacy (non-PPR) negotiation later. */
		if (tp->tgoal.period < np->minsync)
			tp->tgoal.period = np->minsync;
		if (tp->tgoal.offset > np->maxoffs)
			tp->tgoal.offset = np->maxoffs;
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
		tp->tgoal.check_nego = 1;
#endif
		break;
	case NS_SYNC:
		sym_setsync (np, cp->target, 0, 0, 0, 0);
		break;
	case NS_WIDE:
		sym_setwide (np, cp->target, 0);
		break;
	}
	np->msgin [0] = M_NOOP;
	np->msgout[0] = M_NOOP;
	cp->nego_status = 0;
}

/*
 * chip handler for MESSAGE REJECT received in response to
 * PPR, WIDE or SYNCHRONOUS negotiation.
 */
static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
{
	sym_nego_default(np, tp, cp);
	OUTB(np, HS_PRT, HS_BUSY);
}

/*
 * chip exception handler for programmed interrupts.
 *
 * Dispatches on the interrupt code read from the DSPS register;
 * 'cp' is the CCB matching the current DSA (may be NULL).
 */
static void sym_int_sir(struct sym_hcb *np)
{
	u_char	num	= INB(np, nc_dsps);
	u32	dsa	= INL(np, nc_dsa);
	struct	sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);
	u_char	target	= INB(np, nc_sdid) & 0x0f;
	struct	sym_tcb *tp	= &np->target[target];
	int	tmp;

	if (DEBUG_FLAGS & DEBUG_TINY)
		printf ("I#%d", num);

	switch (num) {
#if   SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 * SCRIPTS tell us that we may have to update
	 * 64 bit DMA segment registers.
	 */
	case SIR_DMAP_DIRTY:
		sym_update_dmap_regs(np);
		goto out;
#endif
	/*
	 * Command has been completed with error condition
	 * or has been auto-sensed.
	 */
	case SIR_COMPLETE_ERROR:
		sym_complete_error(np, cp);
		return;
	/*
	 * The C code is currently trying to recover from something.
	 * Typically, user want to abort some command.
	 */
	case SIR_SCRIPT_STOPPED:
	case SIR_TARGET_SELECTED:
	case SIR_ABORT_SENT:
		sym_sir_task_recovery(np, num);
		return;
	/*
	 * The device didn't go to MSG OUT phase after having
	 * been selected with ATN. We do not want to handle that.
	 */
	case SIR_SEL_ATN_NO_MSG_OUT:
		scmd_printk(KERN_WARNING, cp->cmd,
				"No MSG OUT phase after selection with ATN\n");
		goto out_stuck;
	/*
	 * The device didn't switch to MSG IN phase after
	 * having reselected the initiator.
	 */
	case SIR_RESEL_NO_MSG_IN:
		scmd_printk(KERN_WARNING, cp->cmd,
				"No MSG IN phase after reselection\n");
		goto out_stuck;
	/*
	 * After reselection, the device sent a message that wasn't
	 * an IDENTIFY.
	 */
	case SIR_RESEL_NO_IDENTIFY:
		scmd_printk(KERN_WARNING, cp->cmd,
				"No IDENTIFY after reselection\n");
		goto out_stuck;
	/*
	 * The device reselected a LUN we do not know about.
	 */
	case SIR_RESEL_BAD_LUN:
		np->msgout[0] = M_RESET;
		goto out;
	/*
	 * The device reselected for an untagged nexus and we
	 * haven't any.
	 */
	case SIR_RESEL_BAD_I_T_L:
		np->msgout[0] = M_ABORT;
		goto out;
	/*
	 * The device reselected for a tagged nexus that we do not have.
	 */
	case SIR_RESEL_BAD_I_T_L_Q:
		np->msgout[0] = M_ABORT_TAG;
		goto out;
	/*
	 * The SCRIPTS let us know that the device has grabbed
	 * our message and will abort the job.
	 */
	case SIR_RESEL_ABORTED:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		scmd_printk(KERN_WARNING, cp->cmd,
			"message %x sent on bad reselection\n", np->lastmsg);
		goto out;
	/*
	 * The SCRIPTS let us know that a message has been
	 * successfully sent to the device.
	 */
	case SIR_MSG_OUT_DONE:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		/* Should we really care of that */
		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
			if (cp) {
				cp->xerr_status &= ~XE_PARITY_ERR;
				if (!cp->xerr_status)
					OUTOFFB(np, HF_PRT, HF_EXT_ERR);
			}
		}
		goto out;
	/*
	 * The device didn't send a GOOD SCSI status.
	 * We may have some work to do prior to allow
	 * the SCRIPTS processor to continue.
	 */
	case SIR_BAD_SCSI_STATUS:
		if (!cp)
			goto out;
		sym_sir_bad_scsi_status(np, num, cp);
		return;
	/*
	 * We are asked by the SCRIPTS to prepare a
	 * REJECT message.
	 */
	case SIR_REJECT_TO_SEND:
		sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
		np->msgout[0] = M_REJECT;
		goto out;
	/*
	 * We have been ODD at the end of a DATA IN
	 * transfer and the device didn't send a
	 * IGNORE WIDE RESIDUE message.
	 * It is a data overrun condition.
	 */
	case SIR_SWIDE_OVERRUN:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SWIDE_OVRUN;
		}
		goto out;
	/*
	 * We have been ODD at the end of a DATA OUT
	 * transfer.
	 * It is a data underrun condition.
	 */
	case SIR_SODL_UNDERRUN:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SODL_UNRUN;
		}
		goto out;
	/*
	 * The device wants us to tranfer more data than
	 * expected or in the wrong direction.
	 * The number of extra bytes is in scratcha.
	 * It is a data overrun condition.
	 */
	case SIR_DATA_OVERRUN:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_EXTRA_DATA;
			cp->extra_bytes += INL(np, nc_scratcha);
		}
		goto out;
	/*
	 * The device switched to an illegal phase (4/5).
	 */
	case SIR_BAD_PHASE:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_BAD_PHASE;
		}
		goto out;
	/*
	 * We received a message.
	 */
	case SIR_MSG_RECEIVED:
		if (!cp)
			goto out_stuck;
		switch (np->msgin [0]) {
		/*
		 * We received an extended message.
		 * We handle MODIFY DATA POINTER, SDTR, WDTR
		 * and reject all other extended messages.
		 */
		case M_EXTENDED:
			switch (np->msgin [2]) {
			case M_X_MODIFY_DP:
				if (DEBUG_FLAGS & DEBUG_POINTER)
					sym_print_msg(cp, "extended msg ",
						      np->msgin);
				/* 32-bit big-endian offset from msgin[3..6] */
				tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
				      (np->msgin[5]<<8)  + (np->msgin[6]);
				sym_modify_dp(np, tp, cp, tmp);
				return;
			case M_X_SYNC_REQ:
				sym_sync_nego(np, tp, cp);
				return;
			case M_X_PPR_REQ:
				sym_ppr_nego(np, tp, cp);
				return;
			case M_X_WIDE_REQ:
				sym_wide_nego(np, tp, cp);
				return;
			default:
				goto out_reject;
			}
			break;
		/*
		 * We received a 1/2 byte message not handled from SCRIPTS.
		 * We are only expecting MESSAGE REJECT and IGNORE WIDE
		 * RESIDUE messages that haven't been anticipated by
		 * SCRIPTS on SWIDE full condition. Unanticipated IGNORE
		 * WIDE RESIDUE messages are aliased as MODIFY DP (-1).
		 */
		case M_IGN_RESIDUE:
			if (DEBUG_FLAGS & DEBUG_POINTER)
				sym_print_msg(cp, "1 or 2 byte ", np->msgin);
			if (cp->host_flags & HF_SENSE)
				OUTL_DSP(np, SCRIPTA_BA(np, clrack));
			else
				sym_modify_dp(np, tp, cp, -1);
			return;
		case M_REJECT:
			if (INB(np, HS_PRT) == HS_NEGOTIATE)
				sym_nego_rejected(np, tp, cp);
			else {
				sym_print_addr(cp->cmd,
					"M_REJECT received (%x:%x).\n",
					scr_to_cpu(np->lastmsg), np->msgout[0]);
			}
			goto out_clrack;
			break;
		default:
			goto out_reject;
		}
		break;
	/*
	 * We received an unknown message.
	 * Ignore all MSG IN phases and reject it.
	 */
	case SIR_MSG_WEIRD:
		sym_print_msg(cp, "WEIRD message received", np->msgin);
		OUTL_DSP(np, SCRIPTB_BA(np, msg_weird));
		return;
	/*
	 * Negotiation failed.
	 * Target does not send us the reply.
	 * Remove the HS_NEGOTIATE status.
	 */
	case SIR_NEGO_FAILED:
		OUTB(np, HS_PRT, HS_BUSY);
		/* fall through */
	/*
	 * Negotiation failed.
	 * Target does not want answer message.
	 */
	case SIR_NEGO_PROTO:
		sym_nego_default(np, tp, cp);
		goto out;
	}

out:
	OUTONB_STD();
	return;
out_reject:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
	return;
out_clrack:
	OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;
out_stuck:
	return;
}

/*
 * Acquire a control block
 *
 * Returns a free CCB set up for the (target, lun, tag) nexus of
 * 'cmd', or NULL when none is available or the nexus would overlap
 * an existing one.
 */
struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order)
{
	u_char tn = cmd->device->id;
	u_char ln = cmd->device->lun;
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);
	u_short tag = NO_TAG;
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp = NULL;

	/*
	 * Look for a free CCB
	 */
	if (sym_que_empty(&np->free_ccbq))
		sym_alloc_ccb(np);
	qp = sym_remque_head(&np->free_ccbq);
	if (!qp)
		goto out;
	cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);

	{
		/*
		 * If we have been asked for a tagged command.
		 */
		if (tag_order) {
			/*
			 * Debugging purpose.
			 */
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
			if (lp->busy_itl != 0)
				goto out_free;
#endif
			/*
			 * Allocate resources for tags if not yet.
			 */
			if (!lp->cb_tags) {
				sym_alloc_lcb_tags(np, tn, ln);
				if (!lp->cb_tags)
					goto out_free;
			}
			/*
			 * Get a tag for this SCSI IO and set up
			 * the CCB bus address for reselection,
			 * and count it for this LUN.
			 * Toggle reselect path to tagged.
			 * Tags come from a circular buffer indexed
			 * by ia_tag (allocate) / if_tag (free).
			 */
			if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
				tag = lp->cb_tags[lp->ia_tag];
				if (++lp->ia_tag == SYM_CONF_MAX_TASK)
					lp->ia_tag = 0;
				++lp->busy_itlq;
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
				lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
				lp->head.resel_sa =
					cpu_to_scr(SCRIPTA_BA(np, resel_tag));
#endif
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
				cp->tags_si = lp->tags_si;
				++lp->tags_sum[cp->tags_si];
				++lp->tags_since;
#endif
			}
			else
				goto out_free;
		}
		/*
		 * This command will not be tagged.
		 * If we already have either a tagged or untagged
		 * one, refuse to overlap this untagged one.
		 */
		else {
			/*
			 * Debugging purpose.
			 */
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
			if (lp->busy_itl != 0 || lp->busy_itlq != 0)
				goto out_free;
#endif
			/*
			 * Count this nexus for this LUN.
			 * Set up the CCB bus address for reselection.
			 * Toggle reselect path to untagged.
			 */
			++lp->busy_itl;
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
			if (lp->busy_itl == 1) {
				lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
				lp->head.resel_sa =
				      cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
			}
			else
				goto out_free;
#endif
		}
	}
	/*
	 * Put the CCB into the busy queue.
	 */
	sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	if (lp) {
		sym_remque(&cp->link2_ccbq);
		sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq);
	}
#endif
	cp->to_abort = 0;
	cp->odd_byte_adjustment = 0;
	cp->tag	   = tag;
	cp->order  = tag_order;
	cp->target = tn;
	cp->lun    = ln;

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag);
	}

out:
	return cp;
out_free:
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
	return NULL;
}

/*
 * Release one control block
 *
 * Returns the tag (if any) to the LUN's circular tag buffer,
 * invalidates the reselection path and puts the CCB back on the
 * free queue.
 */
void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp)
{
	struct sym_tcb *tp = &np->target[cp->target];
	struct sym_lcb *lp = sym_lp(tp, cp->lun);

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n",
				cp, cp->tag);
	}

	/*
	 * If LCB available,
	 */
	if (lp) {
		/*
		 * If tagged, release the tag, set the reselect path.
		 */
		if (cp->tag != NO_TAG) {
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
			--lp->tags_sum[cp->tags_si];
#endif
			/*
			 * Free the tag value.
			 */
			lp->cb_tags[lp->if_tag] = cp->tag;
			if (++lp->if_tag == SYM_CONF_MAX_TASK)
				lp->if_tag = 0;
			/*
			 * Make the reselect path invalid,
			 * and uncount this CCB.
			 */
			lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
			--lp->busy_itlq;
		} else {	/* Untagged */
			/*
			 * Make the reselect path invalid,
			 * and uncount this CCB.
			 */
			lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
			--lp->busy_itl;
		}
		/*
		 * If no JOB active, make the LUN reselect path invalid.
		 */
		if (lp->busy_itlq == 0 && lp->busy_itl == 0)
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
	}

	/*
	 * We do not queue more than 1 ccb per target
	 * with negotiation at any time. If this ccb was
	 * used for negotiation, clear this info in the tcb.
	 */
	if (cp == tp->nego_cp)
		tp->nego_cp = NULL;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If we just complete the last queued CCB,
	 * clear this info that is no longer relevant.
	 */
	if (cp == np->last_cp)
		np->last_cp = 0;
#endif

	/*
	 * Make this CCB available.
	 */
	cp->cmd = NULL;
	cp->host_status = HS_IDLE;
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	if (lp) {
		sym_remque(&cp->link2_ccbq);
		sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq);
		if (cp->started) {
			if (cp->tag != NO_TAG)
				--lp->started_tags;
			else
				--lp->started_no_tag;
		}
	}
	cp->started = 0;
#endif
}

/*
 * Allocate a CCB from memory and initialize its fixed part.
 *
 * The CCB is DMA-coherent memory, hashed by its bus address so it
 * can be found again from a DSA value (see sym_ccb_from_dsa()).
 * Returns NULL when the per-controller limit is reached or
 * allocation fails.
 */
static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np)
{
	struct sym_ccb *cp = NULL;
	int hcode;

	/*
	 * Prevent from allocating more CCBs than we can
	 * queue to the controller.
	 */
	if (np->actccbs >= SYM_CONF_MAX_START)
		return NULL;

	/*
	 * Allocate memory for this CCB.
	 */
	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
	if (!cp)
		goto out_free;

	/*
	 * Count it.
	 */
	np->actccbs++;

	/*
	 * Compute the bus address of this ccb.
	 */
	cp->ccb_ba = vtobus(cp);

	/*
	 * Insert this ccb into the hashed list.
	 */
	hcode = CCB_HASH_CODE(cp->ccb_ba);
	cp->link_ccbh = np->ccbh[hcode];
	np->ccbh[hcode] = cp;

	/*
	 * Initialize the start and restart actions.
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA(np, idle));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));

	/*
	 * Initialize some other fields.
	 */
	cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));

	/*
	 * Chain into free ccb queue.
	 */
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

	/*
	 * Chain into optional lists.
	 */
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq);
#endif
	return cp;
out_free:
	if (cp)
		sym_mfree_dma(cp, sizeof(*cp), "CCB");
	return NULL;
}

/*
 * Look up a CCB from a DSA value.
 */
static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa)
{
	int hcode;
	struct sym_ccb *cp;

	hcode = CCB_HASH_CODE(dsa);
	cp = np->ccbh[hcode];
	while (cp) {
		if (cp->ccb_ba == dsa)
			break;
		cp = cp->link_ccbh;
	}

	return cp;
}

/*
 * Target control block initialisation.
 * Nothing important to do at the moment.
 */
static void sym_init_tcb (struct sym_hcb *np, u_char tn)
{
#if 0	/*  Hmmm... this checking looks paranoid. */
	/*
	 *  Check some alignments required by the chip.
	 */
	assert (((offsetof(struct sym_reg, nc_sxfer) ^
		offsetof(struct sym_tcb, head.sval)) &3) == 0);
	assert (((offsetof(struct sym_reg, nc_scntl3) ^
		offsetof(struct sym_tcb, head.wval)) &3) == 0);
#endif
}

/*
 * Lun control block allocation and initialization.
 *
 * Returns the (possibly newly allocated) LCB for (tn, ln), or NULL
 * on allocation failure. LUNs > 0 also require a per-target LUN
 * table and pointer array, allocated lazily here.
 */
struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = NULL;

	/*
	 * Initialize the target control block if not yet.
	 */
	sym_init_tcb (np, tn);

	/*
	 * Allocate the LCB bus address array.
	 * Compute the bus address of this table.
	 * All entries start out pointing at the "bad lun" handler.
	 */
	if (ln && !tp->luntbl) {
		int i;
		tp->luntbl = sym_calloc_dma(256, "LUNTBL");
		if (!tp->luntbl)
			goto fail;
		for (i = 0 ; i < 64 ; i++)
			tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
		tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
	}

	/*
	 * Allocate the table of pointers for LUN(s) > 0, if needed.
	 */
	if (ln && !tp->lunmp) {
		tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
				GFP_ATOMIC);
		if (!tp->lunmp)
			goto fail;
	}

	/*
	 * Allocate the lcb.
	 * Make it available to the chip.
	 */
	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
	if (!lp)
		goto fail;
	if (ln) {
		tp->lunmp[ln] = lp;
		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
	}
	else {
		tp->lun0p = lp;
		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
	}
	tp->nlcb++;

	/*
	 * Let the itl task point to error handling.
	 */
	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

	/*
	 * Set the reselect pattern to our default. :)
	 */
	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));

	/*
	 * Set user capabilities.
	 */
	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 * Initialize device queueing.
	 */
	sym_que_init(&lp->waiting_ccbq);
	sym_que_init(&lp->started_ccbq);
	lp->started_max   = SYM_CONF_MAX_TASK;
	lp->started_limit = SYM_CONF_MAX_TASK;
#endif

fail:
	return lp;
}

/*
 * Allocate LCB resources for tagged command queuing.
 */
static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);
	int i;

	/*
	 * Allocate the task table and the tag allocation
	 * circular buffer. We want both or none.
	 */
	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
	if (!lp->itlq_tbl)
		goto fail;
	lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_ATOMIC);
	if (!lp->cb_tags) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		lp->itlq_tbl = NULL;
		goto fail;
	}

	/*
	 * Initialize the task table with invalid entries.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

	/*
	 * Fill up the tag buffer with tag numbers.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->cb_tags[i] = i;

	/*
	 * Make the task table available to SCRIPTS,
	 * And accept tagged commands now.
	 */
	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));

	return;
fail:
	return;
}

/*
 * Lun control block deallocation. Returns the number of valid remaining LCBs
 * for the target.
 */
int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);

	tp->nlcb--;

	if (ln) {
		if (!tp->nlcb) {
			/* Last LCB for this target: drop the LUN table too. */
			kfree(tp->lunmp);
			sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
			tp->lunmp = NULL;
			tp->luntbl = NULL;
			tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
		} else {
			tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
			tp->lunmp[ln] = NULL;
		}
	} else {
		tp->lun0p = NULL;
		tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
	}

	if (lp->itlq_tbl) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		kfree(lp->cb_tags);
	}

	sym_mfree_dma(lp, sizeof(*lp), "LCB");

	return tp->nlcb;
}

/*
 * Queue a SCSI IO to the controller.
*/ int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) { struct scsi_device *sdev = cmd->device; struct sym_tcb *tp; struct sym_lcb *lp; u_char *msgptr; u_int msglen; int can_disconnect; /* * Keep track of the IO in our CCB. */ cp->cmd = cmd; /* * Retrieve the target descriptor. */ tp = &np->target[cp->target]; /* * Retrieve the lun descriptor. */ lp = sym_lp(tp, sdev->lun); can_disconnect = (cp->tag != NO_TAG) || (lp && (lp->curr_flags & SYM_DISC_ENABLED)); msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = IDENTIFY(can_disconnect, sdev->lun); /* * Build the tag message if present. */ if (cp->tag != NO_TAG) { u_char order = cp->order; switch(order) { case M_ORDERED_TAG: break; case M_HEAD_TAG: break; default: order = M_SIMPLE_TAG; } #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING /* * Avoid too much reordering of SCSI commands. * The algorithm tries to prevent completion of any * tagged command from being delayed against more * than 3 times the max number of queued commands. */ if (lp && lp->tags_since > 3*SYM_CONF_MAX_TAG) { lp->tags_si = !(lp->tags_si); if (lp->tags_sum[lp->tags_si]) { order = M_ORDERED_TAG; if ((DEBUG_FLAGS & DEBUG_TAGS)||sym_verbose>1) { sym_print_addr(cmd, "ordered tag forced.\n"); } } lp->tags_since = 0; } #endif msgptr[msglen++] = order; /* * For less than 128 tags, actual tags are numbered * 1,3,5,..2*MAXTAGS+1,since we may have to deal * with devices that have problems with #TAG 0 or too * great #TAG numbers. For more tags (up to 256), * we use directly our tag number. */ #if SYM_CONF_MAX_TASK > (512/4) msgptr[msglen++] = cp->tag; #else msgptr[msglen++] = (cp->tag << 1) + 1; #endif } /* * Build a negotiation message if needed. * (nego_status is filled by sym_prepare_nego()) * * Always negotiate on INQUIRY and REQUEST SENSE. 
* */ cp->nego_status = 0; if ((tp->tgoal.check_nego || cmd->cmnd[0] == INQUIRY || cmd->cmnd[0] == REQUEST_SENSE) && !tp->nego_cp && lp) { msglen += sym_prepare_nego(np, cp, msgptr + msglen); } /* * Startqueue */ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa)); /* * select */ cp->phys.select.sel_id = cp->target; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; cp->phys.select.sel_scntl4 = tp->head.uval; /* * message */ cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg); cp->phys.smsg.size = cpu_to_scr(msglen); /* * status */ cp->host_xflags = 0; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->xerr_status = 0; cp->host_flags = 0; cp->extra_bytes = 0; /* * extreme data pointer. * shall be positive, so -1 is lower than lowest.:) */ cp->ext_sg = -1; cp->ext_ofs = 0; /* * Build the CDB and DATA descriptor block * and start the IO. */ return sym_setup_data_and_start(np, cmd, cp); } /* * Reset a SCSI target (all LUNs of this target). */ int sym_reset_scsi_target(struct sym_hcb *np, int target) { struct sym_tcb *tp; if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET) return -1; tp = &np->target[target]; tp->to_reset = 1; np->istat_sem = SEM; OUTB(np, nc_istat, SIGP|SEM); return 0; } /* * Abort a SCSI IO. */ static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out) { /* * Check that the IO is active. */ if (!cp || !cp->host_status || cp->host_status == HS_WAIT) return -1; /* * If a previous abort didn't succeed in time, * perform a BUS reset. */ if (cp->to_abort) { sym_reset_scsi_bus(np, 1); return 0; } /* * Mark the CCB for abort and allow time for. */ cp->to_abort = timed_out ? 2 : 1; /* * Tell the SCRIPTS processor to stop and synchronize with us. 
*/ np->istat_sem = SEM; OUTB(np, nc_istat, SIGP|SEM); return 0; } int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out) { struct sym_ccb *cp; SYM_QUEHEAD *qp; /* * Look up our CCB control block. */ cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp2->cmd == cmd) { cp = cp2; break; } } return sym_abort_ccb(np, cp, timed_out); } /* * Complete execution of a SCSI command with extended * error, SCSI status error, or having been auto-sensed. * * The SCRIPTS processor is not running there, so we * can safely access IO registers and remove JOBs from * the START queue. * SCRATCHA is assumed to have been loaded with STARTPOS * before the SCRIPTS called the C code. */ void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp) { struct scsi_device *sdev; struct scsi_cmnd *cmd; struct sym_tcb *tp; struct sym_lcb *lp; int resid; int i; /* * Paranoid check. :) */ if (!cp || !cp->cmd) return; cmd = cp->cmd; sdev = cmd->device; if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) { dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp, cp->host_status, cp->ssss_status, cp->host_flags); } /* * Get target and lun pointers. */ tp = &np->target[cp->target]; lp = sym_lp(tp, sdev->lun); /* * Check for extended errors. */ if (cp->xerr_status) { if (sym_verbose) sym_print_xerr(cmd, cp->xerr_status); if (cp->host_status == HS_COMPLETE) cp->host_status = HS_COMP_ERR; } /* * Calculate the residual. */ resid = sym_compute_residual(np, cp); if (!SYM_SETUP_RESIDUAL_SUPPORT) {/* If user does not want residuals */ resid = 0; /* throw them away. :) */ cp->sv_resid = 0; } #ifdef DEBUG_2_0_X if (resid) printf("XXXX RESID= %d - 0x%x\n", resid, resid); #endif /* * Dequeue all queued CCBs for that device * not yet started by SCRIPTS. */ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1); /* * Restart the SCRIPTS processor. 
*/ OUTL_DSP(np, SCRIPTA_BA(np, start)); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING if (cp->host_status == HS_COMPLETE && cp->ssss_status == S_QUEUE_FULL) { if (!lp || lp->started_tags - i < 2) goto weirdness; /* * Decrease queue depth as needed. */ lp->started_max = lp->started_tags - i - 1; lp->num_sgood = 0; if (sym_verbose >= 2) { sym_print_addr(cmd, " queue depth is now %d\n", lp->started_max); } /* * Repair the CCB. */ cp->host_status = HS_BUSY; cp->ssss_status = S_ILLEGAL; /* * Let's requeue it to device. */ sym_set_cam_status(cmd, DID_SOFT_ERROR); goto finish; } weirdness: #endif /* * Build result in CAM ccb. */ sym_set_cam_result_error(np, cp, resid); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING finish: #endif /* * Add this one to the COMP queue. */ sym_remque(&cp->link_ccbq); sym_insque_head(&cp->link_ccbq, &np->comp_ccbq); /* * Complete all those commands with either error * or requeue condition. */ sym_flush_comp_queue(np, 0); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * Donnot start more than 1 command after an error. */ sym_start_next_ccbs(np, lp, 1); #endif } /* * Complete execution of a successful SCSI command. * * Only successful commands go to the DONE queue, * since we need to have the SCRIPTS processor * stopped on any error condition. * The SCRIPTS processor is running while we are * completing successful commands. */ void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp) { struct sym_tcb *tp; struct sym_lcb *lp; struct scsi_cmnd *cmd; int resid; /* * Paranoid check. :) */ if (!cp || !cp->cmd) return; assert (cp->host_status == HS_COMPLETE); /* * Get user command. */ cmd = cp->cmd; /* * Get target and lun pointers. */ tp = &np->target[cp->target]; lp = sym_lp(tp, cp->lun); /* * If all data have been transferred, given than no * extended error did occur, there is no residual. */ resid = 0; if (cp->phys.head.lastp != cp->goalp) resid = sym_compute_residual(np, cp); /* * Wrong transfer residuals may be worse than just always * returning zero. 
User can disable this feature in * sym53c8xx.h. Residual support is enabled by default. */ if (!SYM_SETUP_RESIDUAL_SUPPORT) resid = 0; #ifdef DEBUG_2_0_X if (resid) printf("XXXX RESID= %d - 0x%x\n", resid, resid); #endif /* * Build result in CAM ccb. */ sym_set_cam_result_ok(cp, cmd, resid); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * If max number of started ccbs had been reduced, * increase it if 200 good status received. */ if (lp && lp->started_max < lp->started_limit) { ++lp->num_sgood; if (lp->num_sgood >= 200) { lp->num_sgood = 0; ++lp->started_max; if (sym_verbose >= 2) { sym_print_addr(cmd, " queue depth is now %d\n", lp->started_max); } } } #endif /* * Free our CCB. */ sym_free_ccb (np, cp); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * Requeue a couple of awaiting scsi commands. */ if (!sym_que_empty(&lp->waiting_ccbq)) sym_start_next_ccbs(np, lp, 2); #endif /* * Complete the command. */ sym_xpt_done(np, cmd); } /* * Soft-attach the controller. */ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram) { struct sym_hcb *np = sym_get_hcb(shost); int i; /* * Get some info about the firmware. */ np->scripta_sz = fw->a_size; np->scriptb_sz = fw->b_size; np->scriptz_sz = fw->z_size; np->fw_setup = fw->setup; np->fw_patch = fw->patch; np->fw_name = fw->name; /* * Save setting of some IO registers, so we will * be able to probe specific implementations. */ sym_save_initial_setting (np); /* * Reset the chip now, since it has been reported * that SCSI clock calibration may not work properly * if the chip is currently active. */ sym_chip_reset(np); /* * Prepare controller and devices settings, according * to chip features, user set-up and driver set-up. */ sym_prepare_setting(shost, np, nvram); /* * Check the PCI clock frequency. * Must be performed after prepare_setting since it destroys * STEST1 that is used to probe for the clock doubler. 
*/ i = sym_getpciclock(np); if (i > 37000 && !(np->features & FE_66MHZ)) printf("%s: PCI BUS clock seems too high: %u KHz.\n", sym_name(np), i); /* * Allocate the start queue. */ np->squeue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE"); if (!np->squeue) goto attach_failed; np->squeue_ba = vtobus(np->squeue); /* * Allocate the done queue. */ np->dqueue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE"); if (!np->dqueue) goto attach_failed; np->dqueue_ba = vtobus(np->dqueue); /* * Allocate the target bus address array. */ np->targtbl = sym_calloc_dma(256, "TARGTBL"); if (!np->targtbl) goto attach_failed; np->targtbl_ba = vtobus(np->targtbl); /* * Allocate SCRIPTS areas. */ np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0"); np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0"); np->scriptz0 = sym_calloc_dma(np->scriptz_sz, "SCRIPTZ0"); if (!np->scripta0 || !np->scriptb0 || !np->scriptz0) goto attach_failed; /* * Allocate the array of lists of CCBs hashed by DSA. */ np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb **), GFP_KERNEL); if (!np->ccbh) goto attach_failed; /* * Initialyze the CCB free and busy queues. */ sym_que_init(&np->free_ccbq); sym_que_init(&np->busy_ccbq); sym_que_init(&np->comp_ccbq); /* * Initialization for optional handling * of device queueing. */ #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING sym_que_init(&np->dummy_ccbq); #endif /* * Allocate some CCB. We need at least ONE. */ if (!sym_alloc_ccb(np)) goto attach_failed; /* * Calculate BUS addresses where we are going * to load the SCRIPTS. */ np->scripta_ba = vtobus(np->scripta0); np->scriptb_ba = vtobus(np->scriptb0); np->scriptz_ba = vtobus(np->scriptz0); if (np->ram_ba) { np->scripta_ba = np->ram_ba; if (np->features & FE_RAM8K) { np->scriptb_ba = np->scripta_ba + 4096; #if 0 /* May get useful for 64 BIT PCI addressing */ np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); #endif } } /* * Copy scripts to controller instance. 
*/ memcpy(np->scripta0, fw->a_base, np->scripta_sz); memcpy(np->scriptb0, fw->b_base, np->scriptb_sz); memcpy(np->scriptz0, fw->z_base, np->scriptz_sz); /* * Setup variable parts in scripts and compute * scripts bus addresses used from the C code. */ np->fw_setup(np, fw); /* * Bind SCRIPTS with physical addresses usable by the * SCRIPTS processor (as seen from the BUS = BUS addresses). */ sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz); sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz); sym_fw_bind_script(np, (u32 *) np->scriptz0, np->scriptz_sz); #ifdef SYM_CONF_IARB_SUPPORT /* * If user wants IARB to be set when we win arbitration * and have other jobs, compute the max number of consecutive * settings of IARB hints before we leave devices a chance to * arbitrate for reselection. */ #ifdef SYM_SETUP_IARB_MAX np->iarb_max = SYM_SETUP_IARB_MAX; #else np->iarb_max = 4; #endif #endif /* * Prepare the idle and invalid task actions. */ np->idletask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->idletask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); np->idletask_ba = vtobus(&np->idletask); np->notask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->notask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); np->notask_ba = vtobus(&np->notask); np->bad_itl.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); np->bad_itl_ba = vtobus(&np->bad_itl); np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA(np,bad_i_t_l_q)); np->bad_itlq_ba = vtobus(&np->bad_itlq); /* * Allocate and prepare the lun JUMP table that is used * for a target prior the probing of devices (bad lun table). * A private table will be allocated for the target on the * first INQUIRY response received. 
*/ np->badluntbl = sym_calloc_dma(256, "BADLUNTBL"); if (!np->badluntbl) goto attach_failed; np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); /* * Prepare the bus address array that contains the bus * address of each target control block. * For now, assume all logical units are wrong. :) */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); np->target[i].head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); np->target[i].head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); } /* * Now check the cache handling of the pci chipset. */ if (sym_snooptest (np)) { printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np)); goto attach_failed; } /* * Sigh! we are done. */ return 0; attach_failed: return -ENXIO; } /* * Free everything that has been allocated for this device. */ void sym_hcb_free(struct sym_hcb *np) { SYM_QUEHEAD *qp; struct sym_ccb *cp; struct sym_tcb *tp; int target; if (np->scriptz0) sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0"); if (np->scriptb0) sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0"); if (np->scripta0) sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0"); if (np->squeue) sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); if (np->dqueue) sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); if (np->actccbs) { while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_mfree_dma(cp, sizeof(*cp), "CCB"); } } kfree(np->ccbh); if (np->badluntbl) sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL"); for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { tp = &np->target[target]; if (tp->luntbl) sym_mfree_dma(tp->luntbl, 256, "LUNTBL"); #if SYM_CONF_MAX_LUN > 1 kfree(tp->lunmp); #endif } if (np->targtbl) sym_mfree_dma(np->targtbl, 256, "TARGTBL"); }
gpl-2.0
sakuraba001/android_kernel_samsung_hlte
drivers/scsi/sym53c8xx_2/sym_hipd.c
8620
147387
/* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr> * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <wolf@cologne.de> * Stefan Esser <se@mi.Uni-Koeln.de> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> * *----------------------------------------------------------------------------- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/slab.h> #include <asm/param.h> /* for timeouts in units of HZ */ #include "sym_glue.h" #include "sym_nvram.h" #if 0 #define SYM_DEBUG_GENERIC_SUPPORT #endif /* * Needed function prototypes. 
*/ static void sym_int_ma (struct sym_hcb *np); static void sym_int_sir(struct sym_hcb *); static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np); static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa); static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln); static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp); static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp); static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp); /* * Print a buffer in hexadecimal format with a ".\n" at end. */ static void sym_printl_hex(u_char *p, int n) { while (n-- > 0) printf (" %x", *p++); printf (".\n"); } static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) { sym_print_addr(cp->cmd, "%s: ", label); spi_print_msg(msg); printf("\n"); } static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg) { struct sym_tcb *tp = &np->target[target]; dev_info(&tp->starget->dev, "%s: ", label); spi_print_msg(msg); printf("\n"); } /* * Print something that tells about extended errors. */ void sym_print_xerr(struct scsi_cmnd *cmd, int x_status) { if (x_status & XE_PARITY_ERR) { sym_print_addr(cmd, "unrecovered SCSI parity error.\n"); } if (x_status & XE_EXTRA_DATA) { sym_print_addr(cmd, "extraneous data discarded.\n"); } if (x_status & XE_BAD_PHASE) { sym_print_addr(cmd, "illegal scsi phase (4/5).\n"); } if (x_status & XE_SODL_UNRUN) { sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n"); } if (x_status & XE_SWIDE_OVRUN) { sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n"); } } /* * Return a string for SCSI BUS mode. */ static char *sym_scsi_bus_mode(int mode) { switch(mode) { case SMODE_HVD: return "HVD"; case SMODE_SE: return "SE"; case SMODE_LVD: return "LVD"; } return "??"; } /* * Soft reset the chip. * * Raising SRST when the chip is running may cause * problems on dual function chips (see below). 
* On the other hand, LVD devices need some delay * to settle and report actual BUS mode in STEST4. */ static void sym_chip_reset (struct sym_hcb *np) { OUTB(np, nc_istat, SRST); INB(np, nc_mbox1); udelay(10); OUTB(np, nc_istat, 0); INB(np, nc_mbox1); udelay(2000); /* For BUS MODE to settle */ } /* * Really soft reset the chip.:) * * Some 896 and 876 chip revisions may hang-up if we set * the SRST (soft reset) bit at the wrong time when SCRIPTS * are running. * So, we need to abort the current operation prior to * soft resetting the chip. */ static void sym_soft_reset (struct sym_hcb *np) { u_char istat = 0; int i; if (!(np->features & FE_ISTAT1) || !(INB(np, nc_istat1) & SCRUN)) goto do_chip_reset; OUTB(np, nc_istat, CABRT); for (i = 100000 ; i ; --i) { istat = INB(np, nc_istat); if (istat & SIP) { INW(np, nc_sist); } else if (istat & DIP) { if (INB(np, nc_dstat) & ABRT) break; } udelay(5); } OUTB(np, nc_istat, 0); if (!i) printf("%s: unable to abort current chip operation, " "ISTAT=0x%02x.\n", sym_name(np), istat); do_chip_reset: sym_chip_reset(np); } /* * Start reset process. * * The interrupt handler will reinitialize the chip. */ static void sym_start_reset(struct sym_hcb *np) { sym_reset_scsi_bus(np, 1); } int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int) { u32 term; int retv = 0; sym_soft_reset(np); /* Soft reset the chip */ if (enab_int) OUTW(np, nc_sien, RST); /* * Enable Tolerant, reset IRQD if present and * properly set IRQ mode, prior to resetting the bus. */ OUTB(np, nc_stest3, TE); OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM)); OUTB(np, nc_scntl1, CRST); INB(np, nc_mbox1); udelay(200); if (!SYM_SETUP_SCSI_BUS_CHECK) goto out; /* * Check for no terminators or SCSI bus shorts to ground. * Read SCSI data bus, data parity bits and control signals. * We are expecting RESET to be TRUE and other signals to be * FALSE. 
*/ term = INB(np, nc_sstat0); term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ term |= ((INB(np, nc_sstat2) & 0x01) << 26) | /* sdp1 */ ((INW(np, nc_sbdl) & 0xff) << 9) | /* d7-0 */ ((INW(np, nc_sbdl) & 0xff00) << 10) | /* d15-8 */ INB(np, nc_sbcl); /* req ack bsy sel atn msg cd io */ if (!np->maxwide) term &= 0x3ffff; if (term != (2<<7)) { printf("%s: suspicious SCSI data while resetting the BUS.\n", sym_name(np)); printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " "0x%lx, expecting 0x%lx\n", sym_name(np), (np->features & FE_WIDE) ? "dp1,d15-8," : "", (u_long)term, (u_long)(2<<7)); if (SYM_SETUP_SCSI_BUS_CHECK == 1) retv = 1; } out: OUTB(np, nc_scntl1, 0); return retv; } /* * Select SCSI clock frequency */ static void sym_selectclock(struct sym_hcb *np, u_char scntl3) { /* * If multiplier not present or not selected, leave here. */ if (np->multiplier <= 1) { OUTB(np, nc_scntl3, scntl3); return; } if (sym_verbose >= 2) printf ("%s: enabling clock multiplier\n", sym_name(np)); OUTB(np, nc_stest1, DBLEN); /* Enable clock multiplier */ /* * Wait for the LCKFRQ bit to be set if supported by the chip. * Otherwise wait 50 micro-seconds (at least). */ if (np->features & FE_LCKFRQ) { int i = 20; while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0) udelay(20); if (!i) printf("%s: the chip cannot lock the frequency\n", sym_name(np)); } else { INB(np, nc_mbox1); udelay(50+10); } OUTB(np, nc_stest3, HSC); /* Halt the scsi clock */ OUTB(np, nc_scntl3, scntl3); OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(np, nc_stest3, 0x00); /* Restart scsi clock */ } /* * Determine the chip's clock frequency. * * This is essential for the negotiation of the synchronous * transfer rate. * * Note: we have to return the correct value. * THERE IS NO SAFE DEFAULT VALUE. * * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. * 53C860 and 53C875 rev. 
1 support fast20 transfers but * do not have a clock doubler and so are provided with a * 80 MHz clock. All other fast20 boards incorporate a doubler * and so should be delivered with a 40 MHz clock. * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base * clock and provide a clock quadrupler (160 Mhz). */ /* * calculate SCSI clock frequency (in KHz) */ static unsigned getfreq (struct sym_hcb *np, int gen) { unsigned int ms = 0; unsigned int f; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will * if multiple measurements are to be * performed trust the higher delay * (lower frequency returned). */ OUTW(np, nc_sien, 0); /* mask all scsi interrupts */ INW(np, nc_sist); /* clear pending scsi interrupt */ OUTB(np, nc_dien, 0); /* mask all dma interrupts */ INW(np, nc_sist); /* another one, just to be sure :) */ /* * The C1010-33 core does not report GEN in SIST, * if this interrupt is masked in SIEN. * I don't know yet if the C1010-66 behaves the same way. */ if (np->features & FE_C10) { OUTW(np, nc_sien, GEN); OUTB(np, nc_istat1, SIRQD); } OUTB(np, nc_scntl3, 4); /* set pre-scaler to divide by 3 */ OUTB(np, nc_stime1, 0); /* disable general purpose timer */ OUTB(np, nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */ while (!(INW(np, nc_sist) & GEN) && ms++ < 100000) udelay(1000/4); /* count in 1/4 of ms */ OUTB(np, nc_stime1, 0); /* disable general purpose timer */ /* * Undo C1010-33 specific settings. */ if (np->features & FE_C10) { OUTW(np, nc_sien, 0); OUTB(np, nc_istat1, 0); } /* * set prescaler to divide by whatever 0 means * 0 ought to choose divide by 2, but appears * to set divide by 3.5 mode in my 53c810 ... 
*/ OUTB(np, nc_scntl3, 0); /* * adjust for prescaler, and convert into KHz */ f = ms ? ((1 << gen) * (4340*4)) / ms : 0; /* * The C1010-33 result is biased by a factor * of 2/3 compared to earlier chips. */ if (np->features & FE_C10) f = (f * 2) / 3; if (sym_verbose >= 2) printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n", sym_name(np), gen, ms/4, f); return f; } static unsigned sym_getfreq (struct sym_hcb *np) { u_int f1, f2; int gen = 8; getfreq (np, gen); /* throw away first result */ f1 = getfreq (np, gen); f2 = getfreq (np, gen); if (f1 > f2) f1 = f2; /* trust lower result */ return f1; } /* * Get/probe chip SCSI clock frequency */ static void sym_getclock (struct sym_hcb *np, int mult) { unsigned char scntl3 = np->sv_scntl3; unsigned char stest1 = np->sv_stest1; unsigned f1; np->multiplier = 1; f1 = 40000; /* * True with 875/895/896/895A with clock multiplier selected */ if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) { if (sym_verbose >= 2) printf ("%s: clock multiplier found\n", sym_name(np)); np->multiplier = mult; } /* * If multiplier not found or scntl3 not 7,5,3, * reset chip and get frequency from general purpose timer. * Otherwise trust scntl3 BIOS setting. */ if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) { OUTB(np, nc_stest1, 0); /* make sure doubler is OFF */ f1 = sym_getfreq (np); if (sym_verbose) printf ("%s: chip clock is %uKHz\n", sym_name(np), f1); if (f1 < 45000) f1 = 40000; else if (f1 < 55000) f1 = 50000; else f1 = 80000; if (f1 < 80000 && mult > 1) { if (sym_verbose >= 2) printf ("%s: clock multiplier assumed\n", sym_name(np)); np->multiplier = mult; } } else { if ((scntl3 & 7) == 3) f1 = 40000; else if ((scntl3 & 7) == 5) f1 = 80000; else f1 = 160000; f1 /= np->multiplier; } /* * Compute controller synchronous parameters. 
*/ f1 *= np->multiplier; np->clock_khz = f1; } /* * Get/probe PCI clock frequency */ static int sym_getpciclock (struct sym_hcb *np) { int f = 0; /* * For now, we only need to know about the actual * PCI BUS clock frequency for C1010-66 chips. */ #if 1 if (np->features & FE_66MHZ) { #else if (1) { #endif OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */ f = sym_getfreq(np); OUTB(np, nc_stest1, 0); } np->pciclk_khz = f; return f; } /* * SYMBIOS chip clock divisor table. * * Divisors are multiplied by 10,000,000 in order to make * calculations more simple. */ #define _5M 5000000 static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /* * Get clock factor and sync divisor for a given * synchronous factor period. */ static int sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp) { u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */ int div = np->clock_divn; /* Number of divisors supported */ u32 fak; /* Sync factor in sxfer */ u32 per; /* Period in tenths of ns */ u32 kpc; /* (per * clk) */ int ret; /* * Compute the synchronous period in tenths of nano-seconds */ if (dt && sfac <= 9) per = 125; else if (sfac <= 10) per = 250; else if (sfac == 11) per = 303; else if (sfac == 12) per = 500; else per = 40 * sfac; ret = per; kpc = per * clk; if (dt) kpc <<= 1; /* * For earliest C10 revision 0, we cannot use extra * clocks for the setting of the SCSI clocking. * Note that this limits the lowest sync data transfer * to 5 Mega-transfers per second and may result in * using higher clock divisors. */ #if 1 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { /* * Look for the lowest clock divisor that allows an * output speed not faster than the period. */ while (div > 0) { --div; if (kpc > (div_10M[div] << 2)) { ++div; break; } } fak = 0; /* No extra clocks */ if (div == np->clock_divn) { /* Are we too fast ? 
*/ ret = -1; } *divp = div; *fakp = fak; return ret; } #endif /* * Look for the greatest clock divisor that allows an * input speed faster than the period. */ while (div-- > 0) if (kpc >= (div_10M[div] << 2)) break; /* * Calculate the lowest clock factor that allows an output * speed not faster than the period, and the max output speed. * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. */ if (dt) { fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ } else { fak = (kpc - 1) / div_10M[div] + 1 - 4; /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ } /* * Check against our hardware limits, or bugs :). */ if (fak > 2) { fak = 2; ret = -1; } /* * Compute and return sync parameters. */ *divp = div; *fakp = fak; return ret; } /* * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, * 128 transfers. All chips support at least 16 transfers * bursts. The 825A, 875 and 895 chips support bursts of up * to 128 transfers and the 895A and 896 support bursts of up * to 64 transfers. All other chips support up to 16 * transfers bursts. * * For PCI 32 bit data transfers each transfer is a DWORD. * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. * * We use log base 2 (burst length) as internal code, with * value 0 meaning "burst disabled". */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. */ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static inline void sym_init_burst(struct sym_hcb *np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /* * Save initial settings of some IO registers. 
* Assumed to have been set by BIOS. * We cannot reset the chip prior to reading the * IO registers, since informations will be lost. * Since the SCRIPTS processor may be running, this * is not safe on paper, but it seems to work quite * well. :) */ static void sym_save_initial_setting (struct sym_hcb *np) { np->sv_scntl0 = INB(np, nc_scntl0) & 0x0a; np->sv_scntl3 = INB(np, nc_scntl3) & 0x07; np->sv_dmode = INB(np, nc_dmode) & 0xce; np->sv_dcntl = INB(np, nc_dcntl) & 0xa8; np->sv_ctest3 = INB(np, nc_ctest3) & 0x01; np->sv_ctest4 = INB(np, nc_ctest4) & 0x80; np->sv_gpcntl = INB(np, nc_gpcntl); np->sv_stest1 = INB(np, nc_stest1); np->sv_stest2 = INB(np, nc_stest2) & 0x20; np->sv_stest4 = INB(np, nc_stest4); if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ np->sv_scntl4 = INB(np, nc_scntl4); np->sv_ctest5 = INB(np, nc_ctest5) & 0x04; } else np->sv_ctest5 = INB(np, nc_ctest5) & 0x24; } /* * Set SCSI BUS mode. * - LVD capable chips (895/895A/896/1010) report the current BUS mode * through the STEST4 IO register. * - For previous generation chips (825/825A/875), the user has to tell us * how to check against HVD, since a 100% safe algorithm is not possible. */ static void sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram) { if (np->scsi_mode) return; np->scsi_mode = SMODE_SE; if (np->features & (FE_ULTRA2|FE_ULTRA3)) np->scsi_mode = (np->sv_stest4 & SMODE); else if (np->features & FE_DIFF) { if (SYM_SETUP_SCSI_DIFF == 1) { if (np->sv_scntl3) { if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; } else if (nvram->type == SYM_SYMBIOS_NVRAM) { if (!(INB(np, nc_gpreg) & 0x08)) np->scsi_mode = SMODE_HVD; } } else if (SYM_SETUP_SCSI_DIFF == 2) np->scsi_mode = SMODE_HVD; } if (np->scsi_mode == SMODE_HVD) np->rv_stest2 |= 0x20; } /* * Prepare io register values used by sym_start_up() * according to selected and supported features. 
*/ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) { struct sym_data *sym_data = shost_priv(shost); struct pci_dev *pdev = sym_data->pdev; u_char burst_max; u32 period; int i; np->maxwide = (np->features & FE_WIDE) ? 1 : 0; /* * Guess the frequency of the chip's clock. */ if (np->features & (FE_ULTRA3 | FE_ULTRA2)) np->clock_khz = 160000; else if (np->features & FE_ULTRA) np->clock_khz = 80000; else np->clock_khz = 40000; /* * Get the clock multiplier factor. */ if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; /* * Measure SCSI clock frequency for chips * it may vary from assumed one. */ if (np->features & FE_VARCLK) sym_getclock(np, np->multiplier); /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * The C1010 uses hardwired divisors for async. * So, we just throw away, the async. divisor.:-) */ if (np->features & FE_C10) np->rv_scntl3 = 0; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. */ period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = (period + 40 - 1) / 40; /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & (FE_ULTRA2|FE_ULTRA3))) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * If chip is a C1010, guess the sync limits in DT mode. 
*/ if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { if (np->clock_khz == 160000) { np->minsync_dt = 9; np->maxsync_dt = 50; np->maxoffs_dt = nvram->type ? 62 : 31; } } /* * 64 bit addressing (895A/896/1010) ? */ if (np->features & FE_DAC) { if (!use_dac(np)) np->rv_ccntl1 |= (DDAC); else if (SYM_CONF_DMA_ADDRESSING_MODE == 1) np->rv_ccntl1 |= (XTIMOD | EXTIBMV); else if (SYM_CONF_DMA_ADDRESSING_MODE == 2) np->rv_ccntl1 |= (0 | EXTIBMV); } /* * Phase mismatch handled by SCRIPTS (895A/896/1010) ? */ if (np->features & FE_NOPM) np->rv_ccntl0 |= (ENPMJ); /* * C1010-33 Errata: Part Number:609-039638 (rev. 1) is fixed. * In dual channel mode, contention occurs if internal cycles * are used. Disable internal cycles. */ if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 && pdev->revision < 0x1) np->rv_ccntl0 |= DILS; /* * Select burst length (dwords) */ burst_max = SYM_SETUP_BURST_ORDER; if (burst_max == 255) burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5); if (burst_max > 7) burst_max = 7; if (burst_max > np->maxburst) burst_max = np->maxburst; /* * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. * This chip and the 860 Rev 1 may wrongly use PCI cache line * based transactions on LOAD/STORE instructions. So we have * to prevent these chips from using such PCI transactions in * this driver. The generic ncr driver that does not use * LOAD/STORE instructions does not need this work-around. */ if ((pdev->device == PCI_DEVICE_ID_NCR_53C810 && pdev->revision >= 0x10 && pdev->revision <= 0x11) || (pdev->device == PCI_DEVICE_ID_NCR_53C860 && pdev->revision <= 0x1)) np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); /* * Select all supported special features. * If we are using on-board RAM for scripts, prefetch (PFEN) * does not help, but burst op fetch (BOF) does. * Disabling PFEN makes sure BOF will be used. 
*/ if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ #if 1 if ((np->features & FE_PFEN) && !np->ram_ba) #else if (np->features & FE_PFEN) #endif np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ /* * Select some other */ np->rv_ctest4 |= MPEE; /* Master parity checking */ np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ /* * Get parity checking, host ID and verbose mode from NVRAM */ np->myaddr = 255; np->scsi_mode = 0; sym_nvram_setup_host(shost, np, nvram); /* * Get SCSI addr of host adapter (set by bios?). */ if (np->myaddr == 255) { np->myaddr = INB(np, nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SYM_SETUP_HOST_ID; } /* * Prepare initial io register bits for burst length */ sym_init_burst(np, burst_max); sym_set_bus_mode(np, nvram); /* * Set LED support from SCRIPTS. * Ignore this feature for boards known to use a * specific GPIO wiring and for the 895A, 896 * and 1010 that drive the LED directly. */ if ((SYM_SETUP_SCSI_LED || (nvram->type == SYM_SYMBIOS_NVRAM || (nvram->type == SYM_TEKRAM_NVRAM && pdev->device == PCI_DEVICE_ID_NCR_53C895))) && !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) np->features |= FE_LED0; /* * Set irq mode. */ switch(SYM_SETUP_IRQ_MODE & 3) { case 2: np->rv_dcntl |= IRQM; break; case 1: np->rv_dcntl |= (np->sv_dcntl & IRQM); break; default: break; } /* * Configure targets according to driver setup. * If NVRAM present get targets setup from NVRAM. 
*/ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { struct sym_tcb *tp = &np->target[i]; tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); tp->usrtags = SYM_SETUP_MAX_TAG; tp->usr_width = np->maxwide; tp->usr_period = 9; sym_nvram_setup_target(tp, i, nvram); if (!tp->usrtags) tp->usrflags &= ~SYM_TAGS_ENABLED; } /* * Let user know about the settings. */ printf("%s: %s, ID %d, Fast-%d, %s, %s\n", sym_name(np), sym_nvram_type(nvram), np->myaddr, (np->features & FE_ULTRA3) ? 80 : (np->features & FE_ULTRA2) ? 40 : (np->features & FE_ULTRA) ? 20 : 10, sym_scsi_bus_mode(np->scsi_mode), (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); /* * Tell him more on demand. */ if (sym_verbose) { printf("%s: %s IRQ line driver%s\n", sym_name(np), np->rv_dcntl & IRQM ? "totem pole" : "open drain", np->ram_ba ? ", using on-chip SRAM" : ""); printf("%s: using %s firmware.\n", sym_name(np), np->fw_name); if (np->features & FE_NOPM) printf("%s: handling phase mismatch from SCRIPTS.\n", sym_name(np)); } /* * And still more. */ if (sym_verbose >= 2) { printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } return 0; } /* * Test the pci bus snoop logic :-( * * Has to be called with interrupts disabled. */ #ifdef CONFIG_SCSI_SYM53C8XX_MMIO static int sym_regtest(struct sym_hcb *np) { register volatile u32 data; /* * chip registers may NOT be cached. * write 0xffffffff to a read only register area, * and try to read it back. 
*/ data = 0xffffffff; OUTL(np, nc_dstat, data); data = INL(np, nc_dstat); #if 1 if (data == 0xffffffff) { #else if ((data & 0xe2f0fffd) != 0x02000080) { #endif printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", (unsigned) data); return 0x10; } return 0; } #else static inline int sym_regtest(struct sym_hcb *np) { return 0; } #endif static int sym_snooptest(struct sym_hcb *np) { u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat; int i, err; err = sym_regtest(np); if (err) return err; restart_test: /* * Enable Master Parity Checking as we intend * to enable it for normal operations. */ OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE)); /* * init */ pc = SCRIPTZ_BA(np, snooptest); host_wr = 1; sym_wr = 2; /* * Set memory and register. */ np->scratch = cpu_to_scr(host_wr); OUTL(np, nc_temp, sym_wr); /* * Start script (exchange values) */ OUTL(np, nc_dsa, np->hcb_ba); OUTL_DSP(np, pc); /* * Wait 'til done (with timeout) */ for (i=0; i<SYM_SNOOP_TIMEOUT; i++) if (INB(np, nc_istat) & (INTF|SIP|DIP)) break; if (i>=SYM_SNOOP_TIMEOUT) { printf ("CACHE TEST FAILED: timeout.\n"); return (0x20); } /* * Check for fatal DMA errors. */ dstat = INB(np, nc_dstat); #if 1 /* Band aiding for broken hardwares that fail PCI parity */ if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) { printf ("%s: PCI DATA PARITY ERROR DETECTED - " "DISABLING MASTER DATA PARITY CHECKING.\n", sym_name(np)); np->rv_ctest4 &= ~MPEE; goto restart_test; } #endif if (dstat & (MDPE|BF|IID)) { printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat); return (0x80); } /* * Save termination position. */ pc = INL(np, nc_dsp); /* * Read memory and register. */ host_rd = scr_to_cpu(np->scratch); sym_rd = INL(np, nc_scratcha); sym_bk = INL(np, nc_temp); /* * Check termination position. 
*/ if (pc != SCRIPTZ_BA(np, snoopend)+8) { printf ("CACHE TEST FAILED: script execution failed.\n"); printf ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) SCRIPTZ_BA(np, snooptest), (u_long) pc, (u_long) SCRIPTZ_BA(np, snoopend) +8); return (0x40); } /* * Show results. */ if (host_wr != sym_rd) { printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n", (int) host_wr, (int) sym_rd); err |= 1; } if (host_rd != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n", (int) sym_wr, (int) host_rd); err |= 2; } if (sym_bk != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n", (int) sym_wr, (int) sym_bk); err |= 4; } return err; } /* * log message for real hard errors * * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sx/s3/s4) @ name (dsp:dbc). * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. * * exception register: * ds: dstat * si: sist * * SCSI bus lines: * so: control lines as driven by chip. * si: control lines as seen by chip. * sd: scsi data lines as seen by chip. * * wide/fastmode: * sx: sxfer (see the manual) * s3: scntl3 (see the manual) * s4: scntl4 (see the manual) * * current script command: * dsp: script address (relative to start of script). * dbc: first word of script command. 
 *
 * First 24 register of the chip:
 * 	r0..rf
 */
static void sym_log_hard_error(struct Scsi_Host *shost, u_short sist, u_char dstat)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	u32	dsp;
	int	script_ofs;
	int	script_size;
	char	*script_name;
	u_char	*script_base;
	int	i;

	dsp	= INL(np, nc_dsp);

	/* Work out which SCRIPTS area (a, b or plain memory) DSP points to. */
	if (dsp > np->scripta_ba &&
	    dsp <= np->scripta_ba + np->scripta_sz) {
		script_ofs	= dsp - np->scripta_ba;
		script_size	= np->scripta_sz;
		script_base	= (u_char *) np->scripta0;
		script_name	= "scripta";
	}
	else if (np->scriptb_ba < dsp &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		script_ofs	= dsp - np->scriptb_ba;
		script_size	= np->scriptb_sz;
		script_base	= (u_char *) np->scriptb0;
		script_name	= "scriptb";
	} else {
		script_ofs	= dsp;
		script_size	= 0;
		script_base	= NULL;
		script_name	= "mem";
	}

	printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x/%x) @ (%s %x:%08x).\n",
		sym_name(np), (unsigned)INB(np, nc_sdid)&0x0f, dstat, sist,
		(unsigned)INB(np, nc_socl), (unsigned)INB(np, nc_sbcl),
		(unsigned)INB(np, nc_sbdl), (unsigned)INB(np, nc_sxfer),
		(unsigned)INB(np, nc_scntl3),
		(np->features & FE_C10) ? (unsigned)INB(np, nc_scntl4) : 0,
		script_name, script_ofs, (unsigned)INL(np, nc_dbc));

	/* Dump the faulting script word only for aligned, in-range offsets. */
	if (((script_ofs & 3) == 0) &&
	    (unsigned)script_ofs < script_size) {
		printf ("%s: script cmd = %08x\n", sym_name(np),
			scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
	}

	printf("%s: regdump:", sym_name(np));
	for (i = 0; i < 24; i++)
		printf(" %02x", (unsigned)INB_OFF(np, i));
	printf(".\n");

	/*
	 * PCI BUS error.
	 */
	if (dstat & (MDPE|BF))
		sym_log_bus_error(shost);
}

/*
 * Snapshot SIST/DSTAT and emit the full hard-error report above.
 * Note: reading nc_sist/nc_dstat clears the pending interrupt conditions.
 */
void sym_dump_registers(struct Scsi_Host *shost)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	u_short sist;
	u_char dstat;

	sist = INW(np, nc_sist);
	dstat = INB(np, nc_dstat);
	sym_log_hard_error(shost, sist, dstat);
}

/*
 * Static table of all supported 53C8xx chip variants:
 * PCI device id, max revision matched, name, burst/offset/clock
 * parameters and a feature flag mask (FE_*).
 */
static struct sym_chip sym_dev_table[] = {
 {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, 64,
 FE_ERL}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4,  8, 4, 1,
 FE_BOF}
 ,
#else
 {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4,  8, 4, 1,
 FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
 ,
#endif
 {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4,  8, 4, 64,
 FE_BOF|FE_ERL}
 ,
 {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 6,  8, 4, 64,
 FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
 ,
 {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6,  8, 4, 2,
 FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
 ,
 {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4,  8, 5, 1,
 FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
 ,
 {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
 {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
 {PCI_DEVICE_ID_NCR_53C875J, 0xff, "875J", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
 {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
 FE_RAM|FE_LCKFRQ}
 ,
#else
 {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_LCKFRQ}
 ,
#endif
 {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_DEVICE_ID_LSI_53C895A, 0xff, "895a", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_DEVICE_ID_LSI_53C875A, 0xff, "875a", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_DEVICE_ID_LSI_53C1010_33, 0x00, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10}
 ,
 {PCI_DEVICE_ID_LSI_53C1010_33, 0xff, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_DEVICE_ID_LSI_53C1510, 0xff, "1510d", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_IO256|FE_LEDC}
};

#define sym_num_devs	(ARRAY_SIZE(sym_dev_table))

/*
 * Look up the chip table.
 *
 * Return a pointer to the chip entry if found,
 * zero otherwise.
 *
 * Entries are ordered so that the first entry whose device id matches
 * and whose revision_id is >= the probed revision wins; this is how a
 * single device id maps to different feature sets per stepping.
 */
struct sym_chip *
sym_lookup_chip_table (u_short device_id, u_char revision)
{
	struct	sym_chip *chip;
	int	i;

	for (i = 0; i < sym_num_devs; i++) {
		chip = &sym_dev_table[i];
		if (device_id != chip->device_id)
			continue;
		if (revision > chip->revision_id)
			continue;
		return chip;
	}

	return NULL;
}

#if SYM_CONF_DMA_ADDRESSING_MODE == 2
/*
 * Lookup the 64 bit DMA segments map.
 * This is only used if the direct mapping
 * has been unsuccessful.
 */
int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s)
{
	int i;

	if (!use_dac(np))
		goto weird;

	/* Look up existing mappings */
	/* Index 0 is intentionally skipped: it is reserved (see sym_start_up). */
	for (i = SYM_DMAP_SIZE-1; i > 0; i--) {
		if (h == np->dmap_bah[i])
			return i;
	}
	/* If direct mapping is free, get it */
	if (!np->dmap_bah[s])
		goto new;
	/* Collision -> lookup free mappings */
	for (s = SYM_DMAP_SIZE-1; s > 0; s--) {
		if (!np->dmap_bah[s])
			goto new;
	}
weird:
	/* No free segment register left: unrecoverable for this design. */
	panic("sym: ran out of 64 bit DMA segment registers");
	return -1;
new:
	np->dmap_bah[s] = h;
	np->dmap_dirty = 1;	/* IO registers now stale; see sym_update_dmap_regs() */
	return s;
}

/*
 * Update IO registers scratch C..R so they will be
 * in sync. with queued CCB expectations.
 */
static void sym_update_dmap_regs(struct sym_hcb *np)
{
	int o, i;

	if (!np->dmap_dirty)
		return;
	o = offsetof(struct sym_reg, nc_scrx[0]);
	for (i = 0; i < SYM_DMAP_SIZE; i++) {
		OUTL_OFF(np, o, np->dmap_bah[i]);
		o += 4;
	}
	np->dmap_dirty = 0;
}
#endif

/* Enforce all the fiddly SPI rules and the chip limitations */
static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget,
		struct sym_trans *goal)
{
	if (!spi_support_wide(starget))
		goal->width = 0;

	if (!spi_support_sync(starget)) {
		/* Async only: every synchronous-transfer option is cleared. */
		goal->iu = 0;
		goal->dt = 0;
		goal->qas = 0;
		goal->offset = 0;
		return;
	}

	if (spi_support_dt(starget)) {
		if (spi_support_dt_only(starget))
			goal->dt = 1;

		/* DT is meaningless without a sync offset. */
		if (goal->offset == 0)
			goal->dt = 0;
	} else {
		goal->dt = 0;
	}

	/* Some targets fail to properly negotiate DT in SE mode */
	if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN))
		goal->dt = 0;

	if (goal->dt) {
		/* all DT transfers must be wide */
		goal->width = 1;
		if (goal->offset > np->maxoffs_dt)
			goal->offset = np->maxoffs_dt;
		if (goal->period < np->minsync_dt)
			goal->period = np->minsync_dt;
		if (goal->period > np->maxsync_dt)
			goal->period = np->maxsync_dt;
	} else {
		/* IU and QAS require DT; clamp ST sync parameters to chip limits. */
		goal->iu = goal->qas = 0;
		if (goal->offset > np->maxoffs)
			goal->offset = np->maxoffs;
		if (goal->period < np->minsync)
			goal->period = np->minsync;
		if (goal->period > np->maxsync)
			goal->period = np->maxsync;
	}
}

/*
 * Prepare the next negotiation message if
 * needed.
 *
 * Fill in the part of message buffer that contains the
 * negotiation and the nego_status field of the CCB.
 * Returns the size of the message in bytes.
 */
static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr)
{
	struct sym_tcb *tp = &np->target[cp->target];
	struct scsi_target *starget = tp->starget;
	struct sym_trans *goal = &tp->tgoal;
	int msglen = 0;
	int nego;

	sym_check_goals(np, starget, goal);

	/*
	 * Many devices implement PPR in a buggy way, so only use it if we
	 * really want to.
	 */
	if (goal->renego == NS_PPR || (goal->offset &&
	    (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)))) {
		nego = NS_PPR;
	} else if (goal->renego == NS_WIDE || goal->width) {
		nego = NS_WIDE;
	} else if (goal->renego == NS_SYNC || goal->offset) {
		nego = NS_SYNC;
	} else {
		/* Nothing to negotiate for this target. */
		goal->check_nego = 0;
		nego = 0;
	}

	/* Build the extended message via the SPI transport helpers. */
	switch (nego) {
	case NS_SYNC:
		msglen += spi_populate_sync_msg(msgptr + msglen, goal->period,
				goal->offset);
		break;
	case NS_WIDE:
		msglen += spi_populate_width_msg(msgptr + msglen, goal->width);
		break;
	case NS_PPR:
		msglen += spi_populate_ppr_msg(msgptr + msglen, goal->period,
				goal->offset, goal->width,
				(goal->iu ? PPR_OPT_IU : 0) |
				(goal->dt ? PPR_OPT_DT : 0) |
				(goal->qas ? PPR_OPT_QAS : 0));
		break;
	}

	cp->nego_status = nego;

	if (nego) {
		tp->nego_cp = cp; /* Keep track a nego will be performed */
		if (DEBUG_FLAGS & DEBUG_NEGO) {
			sym_print_nego_msg(np, cp->target,
					  nego == NS_SYNC ? "sync msgout" :
					  nego == NS_WIDE ? "wide msgout" :
					  "ppr msgout", msgptr);
		}
	}

	return msglen;
}

/*
 * Insert a job into the start queue.
 */
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
{
	u_short	qidx;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If the previously queued CCB is not yet done,
	 * set the IARB hint. The SCRIPTS will go with IARB
	 * for this job when starting the previous one.
	 * We leave devices a chance to win arbitration by
	 * not using more than 'iarb_max' consecutive
	 * immediate arbitrations.
	 */
	if (np->last_cp && np->iarb_count < np->iarb_max) {
		np->last_cp->host_flags |= HF_HINT_IARB;
		++np->iarb_count;
	}
	else
		np->iarb_count = 0;
	np->last_cp = cp;
#endif

#if SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 * Make SCRIPTS aware of the 64 bit DMA
	 * segment registers not being up-to-date.
	 */
	if (np->dmap_dirty)
		cp->host_xflags |= HX_DMAP_DIRTY;
#endif

	/*
	 * Insert first the idle task and then our job.
	 * The MBs should ensure proper ordering.
	 */
	qidx = np->squeueput + 2;
	if (qidx >= MAX_QUEUE*2)
		qidx = 0;

	/* The idle task terminates the queue; the barrier makes sure the
	 * chip never sees the new CCB address before the new terminator. */
	np->squeue [qidx]	   = cpu_to_scr(np->idletask_ba);
	MEMORY_WRITE_BARRIER();
	np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);

	np->squeueput = qidx;

	if (DEBUG_FLAGS & DEBUG_QUEUE)
		scmd_printk(KERN_DEBUG, cp->cmd, "queuepos=%d\n",
							np->squeueput);

	/*
	 * Script processor may be waiting for reselect.
	 * Wake it up.
	 */
	MEMORY_WRITE_BARRIER();
	OUTB(np, nc_istat, SIGP|np->istat_sem);
}

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
/*
 * Start next ready-to-start CCBs.
 */
void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;

	/*
	 * Paranoia, as usual. :-)
	 */
	assert(!lp->started_tags || !lp->started_no_tag);

	/*
	 * Try to start as many commands as asked by caller.
	 * Prevent from having both tagged and untagged
	 * commands queued to the device at the same time.
	 */
	while (maxn--) {
		qp = sym_remque_head(&lp->waiting_ccbq);
		if (!qp)
			break;
		cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq);
		if (cp->tag != NO_TAG) {
			/* Tagged command: refuse if untagged in flight or
			 * the tag window is already full; requeue and stop. */
			if (lp->started_no_tag ||
			    lp->started_tags >= lp->started_max) {
				sym_insque_head(qp, &lp->waiting_ccbq);
				break;
			}
			lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba);
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTA_BA(np, resel_tag));
			++lp->started_tags;
		} else {
			/* Untagged command: only one at a time, and never
			 * while tagged commands are outstanding. */
			if (lp->started_no_tag || lp->started_tags) {
				sym_insque_head(qp, &lp->waiting_ccbq);
				break;
			}
			lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
			lp->head.resel_sa =
			      cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
			++lp->started_no_tag;
		}
		cp->started = 1;
		sym_insque_tail(qp, &lp->started_ccbq);
		sym_put_start_queue(np, cp);
	}
}
#endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */

/*
 * The chip may have completed jobs. Look at the DONE QUEUE.
 *
 * On paper, memory read barriers may be needed here to
 * prevent out of order LOADs by the CPU from having
 * prefetched stale data prior to DMA having occurred.
 */
static int sym_wakeup_done (struct sym_hcb *np)
{
	struct sym_ccb *cp;
	int i, n;
	u32 dsa;

	n = 0;
	i = np->dqueueget;

	/* MEMORY_READ_BARRIER(); */
	while (1) {
		dsa = scr_to_cpu(np->dqueue[i]);
		if (!dsa)
			break;
		np->dqueue[i] = 0;
		if ((i = i+2) >= MAX_QUEUE*2)
			i = 0;

		cp = sym_ccb_from_dsa(np, dsa);
		if (cp) {
			MEMORY_READ_BARRIER();
			sym_complete_ok (np, cp);
			++n;
		}
		else
			printf ("%s: bad DSA (%x) in done queue.\n",
				sym_name(np), (u_int) dsa);
	}
	np->dqueueget = i;

	/* Number of CCBs completed on this pass. */
	return n;
}

/*
 * Complete all CCBs queued to the COMP queue.
 *
 * These CCBs are assumed:
 * - Not to be referenced either by devices or
 *   SCRIPTS-related queues and datas.
 * - To have to be completed with an error condition
 *   or requeued.
 *
 * The device queue freeze count is incremented
 * for each CCB that does not prevent this.
 * This function is called when all CCBs involved
 * in error handling/recovery have been reaped.
 */
static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;

	while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
		struct scsi_cmnd *cmd;
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
		/* Leave quiet CCBs waiting for resources */
		if (cp->host_status == HS_WAIT)
			continue;
		cmd = cp->cmd;
		if (cam_status)
			sym_set_cam_status(cmd, cam_status);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
		/* Soft errors are requeued to the per-LUN waiting queue
		 * instead of being completed to the mid-layer. */
		if (sym_get_cam_status(cmd) == DID_SOFT_ERROR) {
			struct sym_tcb *tp = &np->target[cp->target];
			struct sym_lcb *lp = sym_lp(tp, cp->lun);
			if (lp) {
				sym_remque(&cp->link2_ccbq);
				sym_insque_tail(&cp->link2_ccbq,
				                &lp->waiting_ccbq);
				if (cp->started) {
					if (cp->tag != NO_TAG)
						--lp->started_tags;
					else
						--lp->started_no_tag;
				}
			}
			cp->started = 0;
			continue;
		}
#endif
		sym_free_ccb(np, cp);
		sym_xpt_done(np, cmd);
	}
}

/*
 * Complete all active CCBs with error.
 * Used on CHIP/SCSI RESET.
 */
static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status)
{
	/*
	 * Move all active CCBs to the COMP queue
	 * and flush this queue.
	 */
	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_flush_comp_queue(np, cam_status);
}

/*
 * Start chip.
 *
 * 'reason' means:
 *   0: initialisation.
 *   1: SCSI BUS RESET delivered or received.
 *   2: SCSI BUS MODE changed.
 */
void sym_start_up(struct Scsi_Host *shost, int reason)
{
	struct sym_data *sym_data = shost_priv(shost);
	struct pci_dev *pdev = sym_data->pdev;
	struct sym_hcb *np = sym_data->ncb;
	int	i;
	u32	phys;

	/*
	 * Reset chip if asked, otherwise just clear fifos.
	 */
	if (reason == 1)
		sym_soft_reset(np);
	else {
		OUTB(np, nc_stest3, TE|CSF);
		OUTONB(np, nc_ctest3, CLF);
	}

	/*
	 * Clear Start Queue
	 */
	phys = np->squeue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->squeue[i]   = cpu_to_scr(np->idletask_ba);
		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	/* Close the queue into a ring: last link points back to the head. */
	np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 * Start at first entry.
	 */
	np->squeueput = 0;

	/*
	 * Clear Done Queue
	 */
	phys = np->dqueue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->dqueue[i]   = 0;
		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 * Start at first entry.
	 */
	np->dqueueget = 0;

	/*
	 * Install patches in scripts.
	 * This also let point to first position the start
	 * and done queue pointers used from SCRIPTS.
	 */
	np->fw_patch(shost);

	/*
	 * Wakeup all pending jobs.
	 */
	sym_flush_busy_queue(np, DID_RESET);

	/*
	 * Init chip.
	 */
	OUTB(np, nc_istat,  0x00);		/* Remove Reset, abort */
	INB(np, nc_mbox1);
	udelay(2000); /* The 895 needs time for the bus mode to settle */

	OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0);
					/* full arb., ena parity, par->ATN */
	OUTB(np, nc_scntl1, 0x00);		/* odd parity, and remove CRST!! */

	sym_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */

	OUTB(np, nc_scid  , RRE|np->myaddr);	/* Adapter SCSI address */
	OUTW(np, nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
	OUTB(np, nc_istat , SIGP	);	/* Signal Process */
	OUTB(np, nc_dmode , np->rv_dmode);	/* Burst length, dma mode */
	OUTB(np, nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */

	OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
	OUTB(np, nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
	OUTB(np, nc_ctest4, np->rv_ctest4);	/* Master parity checking */

	/* Extended Sreq/Sack filtering not supported on the C10 */
	if (np->features & FE_C10)
		OUTB(np, nc_stest2, np->rv_stest2);
	else
		OUTB(np, nc_stest2, EXT|np->rv_stest2);

	OUTB(np, nc_stest3, TE);		/* TolerANT enable */
	OUTB(np, nc_stime0, 0x0c);		/* HTH disabled  STO 0.25 sec */

	/*
	 * For now, disable AIP generation on C1010-66.
	 */
	if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_66)
		OUTB(np, nc_aipcntl1, DISAIP);

	/*
	 * C10101 rev. 0 errata.
	 * Errant SGE's when in narrow. Write bits 4 & 5 of
	 * STEST1 register to disable SGE. We probably should do
	 * that from SCRIPTS for each selection/reselection, but
	 * I just don't want. :)
	 */
	if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 &&
	    pdev->revision < 1)
		OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30);

	/*
	 * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
	 * Disable overlapped arbitration for some dual function devices,
	 * regardless revision id (kind of post-chip-design feature. ;-))
	 */
	if (pdev->device == PCI_DEVICE_ID_NCR_53C875)
		OUTB(np, nc_ctest0, (1<<5));
	else if (pdev->device == PCI_DEVICE_ID_NCR_53C896)
		np->rv_ccntl0 |= DPR;

	/*
	 * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
	 * and/or hardware phase mismatch, since only such chips
	 * seem to support those IO registers.
	 */
	if (np->features & (FE_DAC|FE_NOPM)) {
		OUTB(np, nc_ccntl0, np->rv_ccntl0);
		OUTB(np, nc_ccntl1, np->rv_ccntl1);
	}

#if SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 * Set up scratch C and DRS IO registers to map the 32 bit
	 * DMA address range our data structures are located in.
	 */
	if (use_dac(np)) {
		np->dmap_bah[0] = 0;	/* ??? */
		OUTL(np, nc_scrx[0], np->dmap_bah[0]);
		OUTL(np, nc_drs, np->dmap_bah[0]);
	}
#endif

	/*
	 * If phase mismatch handled by scripts (895A/896/1010),
	 * set PM jump addresses.
	 */
	if (np->features & FE_NOPM) {
		OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle));
		OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle));
	}

	/*
	 * Enable GPIO0 pin for writing if LED support from SCRIPTS.
	 * Also set GPIO5 and clear GPIO6 if hardware LED control.
	 */
	if (np->features & FE_LED0)
		OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01);
	else if (np->features & FE_LEDC)
		OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20);

	/*
	 * enable ints
	 */
	OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
	OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID);

	/*
	 * For 895/6 enable SBMC interrupt and save current SCSI bus mode.
	 * Try to eat the spurious SBMC interrupt that may occur when
	 * we reset the chip but not the SCSI BUS (at initialization).
	 */
	if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
		OUTONW(np, nc_sien, SBMC);
		if (reason == 0) {
			INB(np, nc_mbox1);
			mdelay(100);
			INW(np, nc_sist);
		}
		np->scsi_mode = INB(np, nc_stest4) & SMODE;
	}

	/*
	 * Fill in target structure.
	 * Reinitialize usrsync.
	 * Reinitialize usrwide.
	 * Prepare sync negotiation according to actual SCSI bus mode.
	 */
	for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
		struct sym_tcb *tp = &np->target[i];

		tp->to_reset  = 0;
		tp->head.sval = 0;
		tp->head.wval = np->rv_scntl3;
		tp->head.uval = 0;
		if (tp->lun0p)
			tp->lun0p->to_clear = 0;
		if (tp->lunmp) {
			int ln;

			for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
				if (tp->lunmp[ln])
					tp->lunmp[ln]->to_clear = 0;
		}
	}

	/*
	 * Download SCSI SCRIPTS to on-chip RAM if present,
	 * and start script processor.
	 * We do the download preferently from the CPU.
	 * For platforms that may not support PCI memory mapping,
	 * we use simple SCRIPTS that performs MEMORY MOVEs.
	 */
	phys = SCRIPTA_BA(np, init);
	if (np->ram_ba) {
		if (sym_verbose >= 2)
			printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np));
		memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz);
		if (np->features & FE_RAM8K) {
			memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz);
			phys = scr_to_cpu(np->scr_ram_seg);
			OUTL(np, nc_mmws, phys);
			OUTL(np, nc_mmrs, phys);
			OUTL(np, nc_sfs, phys);
			phys = SCRIPTB_BA(np, start64);
		}
	}

	np->istat_sem = 0;

	OUTL(np, nc_dsa, np->hcb_ba);
	OUTL_DSP(np, phys);

	/*
	 * Notify the XPT about the RESET condition.
	 */
	if (reason != 0)
		sym_xpt_async_bus_reset(np);
}

/*
 * Switch trans mode for current job and its target.
 */
static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak)
{
	SYM_QUEHEAD *qp;
	u_char sval, wval, uval;
	struct sym_tcb *tp = &np->target[target];

	assert(target == (INB(np, nc_sdid) & 0x0f));

	sval = tp->head.sval;
	wval = tp->head.wval;
	uval = tp->head.uval;

#if 0
	printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
		sval, wval, uval, np->rv_scntl3);
#endif
	/*
	 * Set the offset.
	 */
	/* C10 chips have a 6-bit offset field, older chips 5 bits. */
	if (!(np->features & FE_C10))
		sval = (sval & ~0x1f) | ofs;
	else
		sval = (sval & ~0x3f) | ofs;

	/*
	 * Set the sync divisor and extra clock factor.
	 */
	if (ofs != 0) {
		wval = (wval & ~0x70) | ((div+1) << 4);
		if (!(np->features & FE_C10))
			sval = (sval & ~0xe0) | (fak << 5);
		else {
			uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
			if (fak >= 1)
				uval |= (XCLKH_ST|XCLKH_DT);
			if (fak >= 2)
				uval |= (XCLKS_ST|XCLKS_DT);
		}
	}

	/*
	 * Set the bus width.
	 */
	wval = wval & ~EWS;
	if (wide != 0)
		wval |= EWS;

	/*
	 * Set misc. ultra enable bits.
	 */
	if (np->features & FE_C10) {
		uval = uval & ~(U3EN|AIPCKEN);
		if (opts)	{
			assert(np->features & FE_U3EN);
			uval |= U3EN;
		}
	} else {
		wval = wval & ~ULTRA;
		if (per <= 12)
			wval |= ULTRA;
	}

	/*
	 * Stop there if sync parameters are unchanged.
	 */
	if (tp->head.sval == sval &&
	    tp->head.wval == wval &&
	    tp->head.uval == uval)
		return;
	tp->head.sval = sval;
	tp->head.wval = wval;
	tp->head.uval = uval;

	/*
	 * Disable extended Sreq/Sack filtering if per < 50.
	 * Not supported on the C1010.
	 */
	if (per < 50 && !(np->features & FE_C10))
		OUTOFFB(np, nc_stest2, EXT);

	/*
	 * set actual value and sync_status
	 */
	OUTB(np, nc_sxfer,  tp->head.sval);
	OUTB(np, nc_scntl3, tp->head.wval);

	if (np->features & FE_C10) {
		OUTB(np, nc_scntl4, tp->head.uval);
	}

	/*
	 * patch ALL busy ccbs of this target.
	 */
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		struct sym_ccb *cp;
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->target != target)
			continue;
		cp->phys.select.sel_scntl3 = tp->head.wval;
		cp->phys.select.sel_sxfer  = tp->head.sval;
		if (np->features & FE_C10) {
			cp->phys.select.sel_scntl4 = tp->head.uval;
		}
	}
}

/*
 * Print the negotiated transfer agreement via the SPI transport
 * class, but only when it differs from what was last announced
 * (cached in tp->tprint) to avoid repeating identical messages.
 */
static void sym_announce_transfer_rate(struct sym_tcb *tp)
{
	struct scsi_target *starget = tp->starget;

	if (tp->tprint.period != spi_period(starget) ||
	    tp->tprint.offset != spi_offset(starget) ||
	    tp->tprint.width != spi_width(starget) ||
	    tp->tprint.iu != spi_iu(starget) ||
	    tp->tprint.dt != spi_dt(starget) ||
	    tp->tprint.qas != spi_qas(starget) ||
	    !tp->tprint.check_nego) {
		tp->tprint.period = spi_period(starget);
		tp->tprint.offset = spi_offset(starget);
		tp->tprint.width = spi_width(starget);
		tp->tprint.iu = spi_iu(starget);
		tp->tprint.dt = spi_dt(starget);
		tp->tprint.qas = spi_qas(starget);
		tp->tprint.check_nego = 1;

		spi_display_xfer_agreement(starget);
	}
}

/*
 * We received a WDTR.
 * Let everything be aware of the changes.
 */
static void sym_setwide(struct sym_hcb *np, int target, u_char wide)
{
	struct sym_tcb *tp = &np->target[target];
	struct scsi_target *starget = tp->starget;

	sym_settrans(np, target, 0, 0, 0, wide, 0, 0);

	if (wide)
		tp->tgoal.renego = NS_WIDE;
	else
		tp->tgoal.renego = 0;
	tp->tgoal.check_nego = 0;
	tp->tgoal.width = wide;
	/* WDTR resets the synchronous agreement back to async. */
	spi_offset(starget) = 0;
	spi_period(starget) = 0;
	spi_width(starget) = wide;
	spi_iu(starget) = 0;
	spi_dt(starget) = 0;
	spi_qas(starget) = 0;

	if (sym_verbose >= 3)
		sym_announce_transfer_rate(tp);
}

/*
 * We received a SDTR.
 * Let everything be aware of the changes.
 */
static void
sym_setsync(struct sym_hcb *np, int target,
            u_char ofs, u_char per, u_char div, u_char fak)
{
	struct sym_tcb *tp = &np->target[target];
	struct scsi_target *starget = tp->starget;
	/* Keep the currently negotiated width; SDTR does not change it. */
	u_char wide = (tp->head.wval & EWS) ? BUS_16_BIT : BUS_8_BIT;

	sym_settrans(np, target, 0, ofs, per, wide, div, fak);

	if (wide)
		tp->tgoal.renego = NS_WIDE;
	else if (ofs)
		tp->tgoal.renego = NS_SYNC;
	else
		tp->tgoal.renego = 0;
	spi_period(starget) = per;
	spi_offset(starget) = ofs;
	spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0;

	if (!tp->tgoal.dt && !tp->tgoal.iu && !tp->tgoal.qas) {
		tp->tgoal.period = per;
		tp->tgoal.offset = ofs;
		tp->tgoal.check_nego = 0;
	}

	sym_announce_transfer_rate(tp);
}

/*
 * We received a PPR.
 * Let everything be aware of the changes.
 */
static void
sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs,
             u_char per, u_char wide, u_char div, u_char fak)
{
	struct sym_tcb *tp = &np->target[target];
	struct scsi_target *starget = tp->starget;

	sym_settrans(np, target, opts, ofs, per, wide, div, fak);

	if (wide || ofs)
		tp->tgoal.renego = NS_PPR;
	else
		tp->tgoal.renego = 0;
	spi_width(starget) = tp->tgoal.width = wide;
	spi_period(starget) = tp->tgoal.period = per;
	spi_offset(starget) = tp->tgoal.offset = ofs;
	spi_iu(starget) = tp->tgoal.iu = !!(opts & PPR_OPT_IU);
	spi_dt(starget) = tp->tgoal.dt = !!(opts & PPR_OPT_DT);
	spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS);
	tp->tgoal.check_nego = 0;

	sym_announce_transfer_rate(tp);
}

/*
 * generic recovery from scsi interrupt
 *
 * The doc says that when the chip gets an SCSI interrupt,
 * it tries to stop in an orderly fashion, by completing
 * an instruction fetch that had started or by flushing
 * the DMA fifo for a write to memory that was executing.
 * Such a fashion is not enough to know if the instruction
 * that was just before the current DSP value has been
 * executed or not.
 *
 * There are some small SCRIPTS sections that deal with
 * the start queue and the done queue that may break any
 * assomption from the C code if we are interrupted
 * inside, so we reset if this happens. Btw, since these
 * SCRIPTS sections are executed while the SCRIPTS hasn't
 * started SCSI operations, it is very unlikely to happen.
 *
 * All the driver data structures are supposed to be
 * allocated from the same 4 GB memory window, so there
 * is a 1 to 1 relationship between DSA and driver data
 * structures. Since we are careful :) to invalidate the
 * DSA when we complete a command or when the SCRIPTS
 * pushes a DSA into a queue, we can trust it when it
 * points to a CCB.
 */
static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts)
{
	u32	dsp	= INL(np, nc_dsp);
	u32	dsa	= INL(np, nc_dsa);
	struct sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);

	/*
	 * If we haven't been interrupted inside the SCRIPTS
	 * critical pathes, we can safely restart the SCRIPTS
	 * and trust the DSA value if it matches a CCB.
	 */
	if ((!(dsp > SCRIPTA_BA(np, getjob_begin) &&
	       dsp < SCRIPTA_BA(np, getjob_end) + 1)) &&
	    (!(dsp > SCRIPTA_BA(np, ungetjob) &&
	       dsp < SCRIPTA_BA(np, reselect) + 1)) &&
	    (!(dsp > SCRIPTB_BA(np, sel_for_abort) &&
	       dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) &&
	    (!(dsp > SCRIPTA_BA(np, done) &&
	       dsp < SCRIPTA_BA(np, done_end) + 1))) {
		OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo  */
		OUTB(np, nc_stest3, TE|CSF);		/* clear scsi fifo */
		/*
		 * If we have a CCB, let the SCRIPTS call us back for
		 * the handling of the error with SCRATCHA filled with
		 * STARTPOS. This way, we will be able to freeze the
		 * device queue and requeue awaiting IOs.
		 */
		if (cp) {
			cp->host_status = hsts;
			OUTL_DSP(np, SCRIPTA_BA(np, complete_error));
		}
		/*
		 * Otherwise just restart the SCRIPTS.
		 */
		else {
			OUTL(np, nc_dsa, 0xffffff);
			OUTL_DSP(np, SCRIPTA_BA(np, start));
		}
	}
	else
		goto reset_all;

	return;

reset_all:
	sym_start_reset(np);
}

/*
 * chip exception handler for selection timeout
 */
static void sym_int_sto (struct sym_hcb *np)
{
	u32 dsp	= INL(np, nc_dsp);

	if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");

	/* Only recoverable when the timeout hit right after wf_sel_done;
	 * any other position means the chip state is unknown -> reset. */
	if (dsp == SCRIPTA_BA(np, wf_sel_done) + 8)
		sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
	else
		sym_start_reset(np);
}

/*
 * chip exception handler for unexpected disconnect
 */
static void sym_int_udc (struct sym_hcb *np)
{
	printf ("%s: unexpected disconnect\n", sym_name(np));
	sym_recover_scsi_int(np, HS_UNEXPECTED);
}

/*
 * chip exception handler for SCSI bus mode change
 *
 * spi2-r12 11.2.3 says a transceiver mode change must
 * generate a reset event and a device that detects a reset
 * event shall initiate a hard reset. It says also that a
 * device that detects a mode change shall set data transfer
 * mode to eight bit asynchronous, etc...
 * So, just reinitializing all except chip should be enough.
 */
static void sym_int_sbmc(struct Scsi_Host *shost)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	u_char scsi_mode = INB(np, nc_stest4) & SMODE;

	/*
	 * Notify user.
	 */
	printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np),
		sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));

	/*
	 * Should suspend command processing for a few seconds and
	 * reinitialize all except the chip.
	 */
	sym_start_up(shost, 2);
}

/*
 * chip exception handler for SCSI parity error.
 *
 * When the chip detects a SCSI parity error and is
 * currently executing a (CH)MOV instruction, it does
 * not interrupt immediately, but tries to finish the
 * transfer of the current scatter entry before
 * interrupting. The following situations may occur:
 *
 * - The complete scatter entry has been transferred
 *   without the device having changed phase.
 *   The chip will then interrupt with the DSP pointing
 *   to the instruction that follows the MOV.
* * - A phase mismatch occurs before the MOV finished * and phase errors are to be handled by the C code. * The chip will then interrupt with both PAR and MA * conditions set. * * - A phase mismatch occurs before the MOV finished and * phase errors are to be handled by SCRIPTS. * The chip will load the DSP with the phase mismatch * JUMP address and interrupt the host processor. */ static void sym_int_par (struct sym_hcb *np, u_short sist) { u_char hsts = INB(np, HS_PRT); u32 dsp = INL(np, nc_dsp); u32 dbc = INL(np, nc_dbc); u32 dsa = INL(np, nc_dsa); u_char sbcl = INB(np, nc_sbcl); u_char cmd = dbc >> 24; int phase = cmd & 7; struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); if (printk_ratelimit()) printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", sym_name(np), hsts, dbc, sbcl); /* * Check that the chip is connected to the SCSI BUS. */ if (!(INB(np, nc_scntl1) & ISCON)) { sym_recover_scsi_int(np, HS_UNEXPECTED); return; } /* * If the nexus is not clearly identified, reset the bus. * We will try to do better later. */ if (!cp) goto reset_all; /* * Check instruction was a MOV, direction was INPUT and * ATN is asserted. */ if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8)) goto reset_all; /* * Keep track of the parity error. */ OUTONB(np, HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_PARITY_ERR; /* * Prepare the message to send to the device. */ np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR; /* * If the old phase was DATA IN phase, we have to deal with * the 3 situations described above. * For other input phases (MSG IN and STATUS), the device * must resend the whole thing that failed parity checking * or signal error. So, jumping to dispatcher should be OK. 
*/ if (phase == 1 || phase == 5) { /* Phase mismatch handled by SCRIPTS */ if (dsp == SCRIPTB_BA(np, pm_handle)) OUTL_DSP(np, dsp); /* Phase mismatch handled by the C code */ else if (sist & MA) sym_int_ma (np); /* No phase mismatch occurred */ else { sym_set_script_dp (np, cp, dsp); OUTL_DSP(np, SCRIPTA_BA(np, dispatch)); } } else if (phase == 7) /* We definitely cannot handle parity errors */ #if 1 /* in message-in phase due to the relection */ goto reset_all; /* path and various message anticipations. */ #else OUTL_DSP(np, SCRIPTA_BA(np, clrack)); #endif else OUTL_DSP(np, SCRIPTA_BA(np, dispatch)); return; reset_all: sym_start_reset(np); return; } /* * chip exception handler for phase errors. * * We have to construct a new transfer descriptor, * to transfer the rest of the current block. */ static void sym_int_ma (struct sym_hcb *np) { u32 dbc; u32 rest; u32 dsp; u32 dsa; u32 nxtdsp; u32 *vdsp; u32 oadr, olen; u32 *tblp; u32 newcmd; u_int delta; u_char cmd; u_char hflags, hflags0; struct sym_pmc *pm; struct sym_ccb *cp; dsp = INL(np, nc_dsp); dbc = INL(np, nc_dbc); dsa = INL(np, nc_dsa); cmd = dbc >> 24; rest = dbc & 0xffffff; delta = 0; /* * locate matching cp if any. */ cp = sym_ccb_from_dsa(np, dsa); /* * Donnot take into account dma fifo and various buffers in * INPUT phase since the chip flushes everything before * raising the MA interrupt for interrupted INPUT phases. * For DATA IN phase, we will check for the SWIDE later. */ if ((cmd & 7) != 1 && (cmd & 7) != 5) { u_char ss0, ss2; if (np->features & FE_DFBC) delta = INW(np, nc_dfbc); else { u32 dfifo; /* * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership. */ dfifo = INL(np, nc_dfifo); /* * Calculate remaining bytes in DMA fifo. 
			 *  (CTEST5 = dfifo >> 16)
			 */
			if (dfifo & (DFS << 16))
				delta = ((((dfifo >> 8) & 0x300) |
				          (dfifo & 0xff)) - rest) & 0x3ff;
			else
				delta = ((dfifo & 0xff) - rest) & 0x7f;
		}

		/*
		 *  The data in the dma fifo has not been transferred to
		 *  the target -> add the amount to the rest
		 *  and clear the data.
		 *  Check the sstat2 register in case of wide transfer.
		 */
		rest += delta;
		ss0  = INB(np, nc_sstat0);
		if (ss0 & OLF) rest++;
		if (!(np->features & FE_C10))
			if (ss0 & ORF) rest++;
		if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
			ss2 = INB(np, nc_sstat2);
			if (ss2 & OLF1) rest++;
			if (!(np->features & FE_C10))
				if (ss2 & ORF1) rest++;
		}

		/*
		 *  Clear fifos.
		 */
		OUTB(np, nc_ctest3, np->rv_ctest3 | CLF);	/* dma fifo  */
		OUTB(np, nc_stest3, TE|CSF);			/* scsi fifo */
	}

	/*
	 *  log the information
	 */
	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
		printf ("P%x%x RL=%d D=%d ", cmd&7, INB(np, nc_sbcl)&7,
			(unsigned) rest, (unsigned) delta);

	/*
	 *  try to find the interrupted script command,
	 *  and the address at which to continue.
	 *  The -8 rewinds DSP to the start of the interrupted
	 *  (2-dword) SCRIPTS instruction.
	 */
	vdsp	= NULL;
	nxtdsp	= 0;
	if	(dsp >  np->scripta_ba &&
		 dsp <= np->scripta_ba + np->scripta_sz) {
		vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
		nxtdsp = dsp;
	}
	else if	(dsp >  np->scriptb_ba &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
		nxtdsp = dsp;
	}

	/*
	 *  log the information
	 */
	if (DEBUG_FLAGS & DEBUG_PHASE) {
		printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
			cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
	}

	if (!vdsp) {
		printf ("%s: interrupted SCRIPT address not found.\n",
			sym_name (np));
		goto reset_all;
	}

	if (!cp) {
		printf ("%s: SCSI phase error fixup: CCB already dequeued.\n",
			sym_name (np));
		goto reset_all;
	}

	/*
	 *  get old startaddress and old length.
	 */
	oadr = scr_to_cpu(vdsp[1]);

	if (cmd & 0x10) {	/* Table indirect */
		tblp = (u32 *) ((char*) &cp->phys + oadr);
		olen = scr_to_cpu(tblp[0]);
		oadr = scr_to_cpu(tblp[1]);
	} else {
		tblp = (u32 *) 0;
		olen = scr_to_cpu(vdsp[0]) & 0xffffff;
	}

	if (DEBUG_FLAGS & DEBUG_PHASE) {
		printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
			(unsigned) (scr_to_cpu(vdsp[0]) >> 24),
			tblp,
			(unsigned) olen,
			(unsigned) oadr);
	}

	/*
	 *  check cmd against assumed interrupted script command.
	 *  If dt data phase, the MOVE instruction hasn't bit 4 of
	 *  the phase.
	 */
	if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
		sym_print_addr(cp->cmd,
			"internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
			cmd, scr_to_cpu(vdsp[0]) >> 24);

		goto reset_all;
	}

	/*
	 *  if old phase not dataphase, leave here.
	 */
	if (cmd & 2) {
		sym_print_addr(cp->cmd,
			"phase change %x-%x %d@%08x resid=%d.\n",
			cmd&7, INB(np, nc_sbcl)&7, (unsigned)olen,
			(unsigned)oadr, (unsigned)rest);
		goto unexpected_phase;
	}

	/*
	 *  Choose the correct PM save area.
	 *
	 *  Look at the PM_SAVE SCRIPT if you want to understand
	 *  this stuff. The equivalent code is implemented in
	 *  SCRIPTS for the 895A, 896 and 1010 that are able to
	 *  handle PM from the SCRIPTS processor.
	 */
	hflags0 = INB(np, HF_PRT);
	hflags = hflags0;

	if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
		if (hflags & HF_IN_PM0)
			nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
		else if	(hflags & HF_IN_PM1)
			nxtdsp = scr_to_cpu(cp->phys.pm1.ret);

		if (hflags & HF_DP_SAVED)
			hflags ^= HF_ACT_PM;
	}

	if (!(hflags & HF_ACT_PM)) {
		pm = &cp->phys.pm0;
		newcmd = SCRIPTA_BA(np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		newcmd = SCRIPTA_BA(np, pm1_data);
	}

	hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
	if (hflags != hflags0)
		OUTB(np, HF_PRT, hflags);

	/*
	 *  fill in the phase mismatch context
	 */
	pm->sg.addr = cpu_to_scr(oadr + olen - rest);
	pm->sg.size = cpu_to_scr(rest);
	pm->ret     = cpu_to_scr(nxtdsp);

	/*
	 *  If we have a SWIDE (a stale byte latched in the wide
	 *  residue register),
	 *  - prepare the address to write the SWIDE from SCRIPTS,
	 *  - compute the SCRIPTS address to restart from,
	 *  - move current data pointer context by one byte.
	 */
	nxtdsp = SCRIPTA_BA(np, dispatch);
	if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
	    (INB(np, nc_scntl2) & WSR)) {
		u32 tmp;

		/*
		 *  Set up the table indirect for the MOVE
		 *  of the residual byte and adjust the data
		 *  pointer context.
		 */
		tmp = scr_to_cpu(pm->sg.addr);
		cp->phys.wresid.addr = cpu_to_scr(tmp);
		pm->sg.addr = cpu_to_scr(tmp + 1);
		tmp = scr_to_cpu(pm->sg.size);
		cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
		pm->sg.size = cpu_to_scr(tmp - 1);

		/*
		 *  If only the residual byte is to be moved,
		 *  no PM context is needed.
		 */
		if ((tmp&0xffffff) == 1)
			newcmd = pm->ret;

		/*
		 *  Prepare the address of SCRIPTS that will
		 *  move the residual byte to memory.
		 */
		nxtdsp = SCRIPTB_BA(np, wsr_ma_helper);
	}

	if (DEBUG_FLAGS & DEBUG_PHASE) {
		sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n",
			hflags0, hflags, newcmd,
			(unsigned)scr_to_cpu(pm->sg.addr),
			(unsigned)scr_to_cpu(pm->sg.size),
			(unsigned)scr_to_cpu(pm->ret));
	}

	/*
	 *  Restart the SCRIPTS processor.
	 */
	sym_set_script_dp (np, cp, newcmd);
	OUTL_DSP(np, nxtdsp);
	return;

	/*
	 *  Unexpected phase changes that occur when the current phase
	 *  is not a DATA IN or DATA OUT phase are due to error conditions.
	 *  Such event may only happen when the SCRIPTS is using a
	 *  multibyte SCSI MOVE.
	 *
	 *  Phase change		Some possible cause
	 *
	 *  COMMAND  --> MSG IN	SCSI parity error detected by target.
	 *  COMMAND  --> STATUS	Bad command or refused by target.
	 *  MSG OUT  --> MSG IN     Message rejected by target.
	 *  MSG OUT  --> COMMAND    Bogus target that discards extended
	 *  			negotiation messages.
	 *
	 *  The code below does not care of the new phase and so
	 *  trusts the target. Why to annoy it ?
	 *  If the interrupted phase is COMMAND phase, we restart at
	 *  dispatcher.
	 *  If a target does not get all the messages after selection,
	 *  the code assumes blindly that the target discards extended
	 *  messages and clears the negotiation status.
	 *  If the target does not want all our response to negotiation,
	 *  we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
	 *  bloat for such a should_not_happen situation).
	 *  In all other situation, we reset the BUS.
	 *  Are these assumptions reasonable ? (Wait and see ...)
	 */
unexpected_phase:
	dsp -= 8;
	nxtdsp = 0;

	switch (cmd & 7) {
	case 2:	/* COMMAND phase */
		nxtdsp = SCRIPTA_BA(np, dispatch);
		break;
#if 0
	case 3:	/* STATUS  phase */
		nxtdsp = SCRIPTA_BA(np, dispatch);
		break;
#endif
	case 6:	/* MSG OUT phase */
		/*
		 *  If the device may want to use untagged when we want
		 *  tagged, we prepare an IDENTIFY without disc. granted,
		 *  since we will not be able to handle reselect.
		 *  Otherwise, we just don't care.
		 */
		if (dsp == SCRIPTA_BA(np, send_ident)) {
			if (cp->tag != NO_TAG && olen - rest <= 3) {
				cp->host_status = HS_BUSY;
				np->msgout[0] = IDENTIFY(0, cp->lun);
				nxtdsp = SCRIPTB_BA(np, ident_break_atn);
			}
			else
				nxtdsp = SCRIPTB_BA(np, ident_break);
		}
		else if	(dsp == SCRIPTB_BA(np, send_wdtr) ||
			 dsp == SCRIPTB_BA(np, send_sdtr) ||
			 dsp == SCRIPTB_BA(np, send_ppr)) {
			nxtdsp = SCRIPTB_BA(np, nego_bad_phase);
			if (dsp == SCRIPTB_BA(np, send_ppr)) {
				struct scsi_device *dev = cp->cmd->device;
				dev->ppr = 0;
			}
		}
		break;
#if 0
	case 7:	/* MSG IN  phase */
		nxtdsp = SCRIPTA_BA(np, clrack);
		break;
#endif
	}

	if (nxtdsp) {
		OUTL_DSP(np, nxtdsp);
		return;
	}

reset_all:
	sym_start_reset(np);
}

/*
 *  chip interrupt handler
 *
 *  In normal situations, interrupt conditions occur one at
 *  a time. But when something bad happens on the SCSI BUS,
 *  the chip may raise several interrupt flags before
 *  stopping and interrupting the CPU. The additional
 *  interrupt flags are stacked in some extra registers
 *  after the SIP and/or DIP flag has been raised in the
 *  ISTAT. After the CPU has read the interrupt condition
 *  flag from SIST or DSTAT, the chip unstacks the other
 *  interrupt flags and sets the corresponding bits in
 *  SIST or DSTAT. Since the chip starts stacking once the
 *  SIP or DIP flag is set, there is a small window of time
 *  where the stacking does not occur.
 *
 *  Typically, multiple interrupt conditions may happen in
 *  the following situations:
 *
 *  - SCSI parity error + Phase mismatch  (PAR|MA)
 *    When a parity error is detected in input phase
 *    and the device switches to msg-in phase inside a
 *    block MOV.
 *  - SCSI parity error + Unexpected disconnect  (PAR|UDC)
 *    When a stupid device does not want to handle the
 *    recovery of an SCSI parity error.
 *  - Some combinations of STO, PAR, UDC, ...
 *    When using non compliant SCSI stuff, when user is
 *    doing non compliant hot tampering on the BUS, when
 *    something really bad happens to a device, etc ...
 *
 *  The heuristic suggested by SYMBIOS to handle
 *  multiple interrupts is to try unstacking all
 *  interrupts conditions and to handle them on some
 *  priority based on error severity.
 *  This will work when the unstacking has been
 *  successful, but we cannot be 100 % sure of that,
 *  since the CPU may have been faster to unstack than
 *  the chip is able to stack. Hmmm ... But it seems that
 *  such a situation is very unlikely to happen.
 *
 *  If this happen, for example STO caught by the CPU
 *  then UDC happening before the CPU have restarted
 *  the SCRIPTS, the driver may wrongly complete the
 *  same command on UDC, since the SCRIPTS didn't restart
 *  and the DSA still points to the same command.
 *  We avoid this situation by setting the DSA to an
 *  invalid value when the CCB is completed and before
 *  restarting the SCRIPTS.
 *
 *  Another issue is that we need some section of our
 *  recovery procedures to be somehow uninterruptible but
 *  the SCRIPTS processor does not provides such a
 *  feature. For this reason, we handle recovery preferently
 *  from the C code and check against some SCRIPTS critical
 *  sections from the C code.
 *
 *  Hopefully, the interrupt handling of the driver is now
 *  able to resist to weird BUS error conditions, but do not
 *  ask me for any guarantee that it will never fail. :-)
 *  Use at your own decision and risk.
 */

irqreturn_t sym_interrupt(struct Scsi_Host *shost)
{
	struct sym_data *sym_data = shost_priv(shost);
	struct sym_hcb *np = sym_data->ncb;
	struct pci_dev *pdev = sym_data->pdev;
	u_char	istat, istatc;
	u_char	dstat;
	u_short	sist;

	/*
	 *  interrupt on the fly ?
	 *  (SCRIPTS may still be running)
	 *
	 *  A `dummy read' is needed to ensure that the
	 *  clear of the INTF flag reaches the device
	 *  and that posted writes are flushed to memory
	 *  before the scanning of the DONE queue.
	 *  Note that SCRIPTS also (dummy) read to memory
	 *  prior to deliver the INTF interrupt condition.
	 */
	istat = INB(np, nc_istat);
	if (istat & INTF) {
		OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem);
		istat |= INB(np, nc_istat);		/* DUMMY READ */
		if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
		sym_wakeup_done(np);
	}

	if (!(istat & (SIP|DIP)))
		return (istat & INTF) ? IRQ_HANDLED : IRQ_NONE;

#if 0	/* We should never get this one */
	if (istat & CABRT)
		OUTB(np, nc_istat, CABRT);
#endif

	/*
	 *  PAR and MA interrupts may occur at the same time,
	 *  and we need to know of both in order to handle
	 *  this situation properly. We try to unstack SCSI
	 *  interrupts for that reason. BTW, I dislike a LOT
	 *  such a loop inside the interrupt routine.
	 *  Even if DMA interrupt stacking is very unlikely to
	 *  happen, we also try unstacking these ones, since
	 *  this has no performance impact.
	 */
	sist	= 0;
	dstat	= 0;
	istatc	= istat;
	do {
		if (istatc & SIP)
			sist  |= INW(np, nc_sist);
		if (istatc & DIP)
			dstat |= INB(np, nc_dstat);
		istatc = INB(np, nc_istat);
		istat |= istatc;

		/* Prevent deadlock waiting on a condition that may
		 * never clear (all-ones reads indicate the device has
		 * dropped off the PCI bus). */
		if (unlikely(sist == 0xffff && dstat == 0xff)) {
			if (pci_channel_offline(pdev))
				return IRQ_NONE;
		}
	} while (istatc & (SIP|DIP));

	if (DEBUG_FLAGS & DEBUG_TINY)
		printf ("<%d|%x:%x|%x:%x>",
			(int)INB(np, nc_scr0),
			dstat,sist,
			(unsigned)INL(np, nc_dsp),
			(unsigned)INL(np, nc_dbc));
	/*
	 *  On paper, a memory read barrier may be needed here to
	 *  prevent out of order LOADs by the CPU from having
	 *  prefetched stale data prior to DMA having occurred.
	 *  And since we are paranoid ... :)
	 */
	MEMORY_READ_BARRIER();

	/*
	 *  First, interrupts we want to service cleanly.
	 *
	 *  Phase mismatch (MA) is the most frequent interrupt
	 *  for chip earlier than the 896 and so we have to service
	 *  it as quickly as possible.
	 *  A SCSI parity error (PAR) may be combined with a phase
	 *  mismatch condition (MA).
	 *  Programmed interrupts (SIR) are used to call the C code
	 *  from SCRIPTS.
	 *  The single step interrupt (SSI) is not used in this
	 *  driver.
	 */
	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & PAR)	sym_int_par (np, sist);
		else if (sist & MA)	sym_int_ma (np);
		else if (dstat & SIR)	sym_int_sir(np);
		else if (dstat & SSI)	OUTONB_STD();
		else			goto unknown_int;
		return IRQ_HANDLED;
	}

	/*
	 *  Now, interrupts that do not happen in normal
	 *  situations and that we may need to recover from.
	 *
	 *  On SCSI RESET (RST), we reset everything.
	 *  On SCSI BUS MODE CHANGE (SBMC), we complete all
	 *  active CCBs with RESET status, prepare all devices
	 *  for negotiating again and restart the SCRIPTS.
	 *  On STO and UDC, we complete the CCB with the corres-
	 *  ponding status and restart the SCRIPTS.
	 */
	if (sist & RST) {
		printf("%s: SCSI BUS reset detected.\n", sym_name(np));
		sym_start_up(shost, 1);
		return IRQ_HANDLED;
	}

	OUTB(np, nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
	OUTB(np, nc_stest3, TE|CSF);			/* clear scsi fifo */

	if (!(sist  & (GEN|HTH|SGE)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & SBMC)	sym_int_sbmc(shost);
		else if (sist & STO)	sym_int_sto (np);
		else if (sist & UDC)	sym_int_udc (np);
		else			goto unknown_int;
		return IRQ_HANDLED;
	}

	/*
	 *  Now, interrupts we are not able to recover cleanly.
	 *
	 *  Log message for hard errors.
	 *  Reset everything.
	 */
	sym_log_hard_error(shost, sist, dstat);

	if ((sist & (GEN|HTH|SGE)) ||
		(dstat & (MDPE|BF|ABRT|IID))) {
		sym_start_reset(np);
		return IRQ_HANDLED;
	}

unknown_int:
	/*
	 *  We just miss the cause of the interrupt. :(
	 *  Print a message. The timeout will do the real work.
	 */
	printf(	"%s: unknown interrupt(s) ignored, "
		"ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
		sym_name(np), istat, dstat, sist);
	return IRQ_NONE;
}

/*
 *  Dequeue from the START queue all CCBs that match
 *  a given target/lun/task condition (-1 means all),
 *  and move them from the BUSY queue to the COMP queue
 *  with DID_SOFT_ERROR status condition.
 *  This function is used during error handling/recovery.
 *  It is called with SCRIPTS not running.
*/ static int sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task) { int j; struct sym_ccb *cp; /* * Make sure the starting index is within range. */ assert((i >= 0) && (i < 2*MAX_QUEUE)); /* * Walk until end of START queue and dequeue every job * that matches the target/lun/task condition. */ j = i; while (i != np->squeueput) { cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i])); assert(cp); #ifdef SYM_CONF_IARB_SUPPORT /* Forget hints for IARB, they may be no longer relevant */ cp->host_flags &= ~HF_HINT_IARB; #endif if ((target == -1 || cp->target == target) && (lun == -1 || cp->lun == lun) && (task == -1 || cp->tag == task)) { sym_set_cam_status(cp->cmd, DID_SOFT_ERROR); sym_remque(&cp->link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); } else { if (i != j) np->squeue[j] = np->squeue[i]; if ((j += 2) >= MAX_QUEUE*2) j = 0; } if ((i += 2) >= MAX_QUEUE*2) i = 0; } if (i != j) /* Copy back the idle task if needed */ np->squeue[j] = np->squeue[i]; np->squeueput = j; /* Update our current start queue pointer */ return (i - j) / 2; } /* * chip handler for bad SCSI status condition * * In case of bad SCSI status, we unqueue all the tasks * currently queued to the controller but not yet started * and then restart the SCRIPTS processor immediately. * * QUEUE FULL and BUSY conditions are handled the same way. * Basically all the not yet started tasks are requeued in * device queue and the queue is frozen until a completion. * * For CHECK CONDITION and COMMAND TERMINATED status, we use * the CCB of the failed command to prepare a REQUEST SENSE * SCSI command and queue it to the controller queue. * * SCRATCHA is assumed to have been loaded with STARTPOS * before the SCRIPTS called the C code. 
*/ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp) { u32 startp; u_char s_status = cp->ssss_status; u_char h_flags = cp->host_flags; int msglen; int i; /* * Compute the index of the next job to start from SCRIPTS. */ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; /* * The last CCB queued used for IARB hint may be * no longer relevant. Forget it. */ #ifdef SYM_CONF_IARB_SUPPORT if (np->last_cp) np->last_cp = 0; #endif /* * Now deal with the SCSI status. */ switch(s_status) { case S_BUSY: case S_QUEUE_FULL: if (sym_verbose >= 2) { sym_print_addr(cp->cmd, "%s\n", s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); } default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ sym_complete_error (np, cp); break; case S_TERMINATED: case S_CHECK_COND: /* * If we get an SCSI error when requesting sense, give up. */ if (h_flags & HF_SENSE) { sym_complete_error (np, cp); break; } /* * Dequeue all queued CCBs for that device not yet started, * and restart the SCRIPTS processor immediately. */ sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); OUTL_DSP(np, SCRIPTA_BA(np, start)); /* * Save some info of the actual IO. * Compute the data residual. */ cp->sv_scsi_status = cp->ssss_status; cp->sv_xerr_status = cp->xerr_status; cp->sv_resid = sym_compute_residual(np, cp); /* * Prepare all needed data structures for * requesting sense data. */ cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun); msglen = 1; /* * If we are currently using anything different from * async. 8 bit data transfers with that target, * start a negotiation, since the device may want * to report us a UNIT ATTENTION condition due to * a cause we currently ignore, and we donnot want * to be stuck with WIDE and/or SYNC data transfer. * * cp->nego_status is filled by sym_prepare_nego(). */ cp->nego_status = 0; msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]); /* * Message table indirect structure. 
*/ cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg2); cp->phys.smsg.size = cpu_to_scr(msglen); /* * sense command */ cp->phys.cmd.addr = CCB_BA(cp, sensecmd); cp->phys.cmd.size = cpu_to_scr(6); /* * patch requested size into sense command */ cp->sensecmd[0] = REQUEST_SENSE; cp->sensecmd[1] = 0; if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7) cp->sensecmd[1] = cp->lun << 5; cp->sensecmd[4] = SYM_SNS_BBUF_LEN; cp->data_len = SYM_SNS_BBUF_LEN; /* * sense data */ memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN); cp->phys.sense.addr = CCB_BA(cp, sns_bbuf); cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); /* * requeue the command. */ startp = SCRIPTB_BA(np, sdata_in); cp->phys.head.savep = cpu_to_scr(startp); cp->phys.head.lastp = cpu_to_scr(startp); cp->startp = cpu_to_scr(startp); cp->goalp = cpu_to_scr(startp + 16); cp->host_xflags = 0; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->host_flags = (HF_SENSE|HF_DATA_IN); cp->xerr_status = 0; cp->extra_bytes = 0; cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); /* * Requeue the command. */ sym_put_start_queue(np, cp); /* * Give back to upper layer everything we have dequeued. */ sym_flush_comp_queue(np, 0); break; } } /* * After a device has accepted some management message * as BUS DEVICE RESET, ABORT TASK, etc ..., or when * a device signals a UNIT ATTENTION condition, some * tasks are thrown away by the device. We are required * to reflect that on our tasks list since the device * will never complete these tasks. * * This function move from the BUSY queue to the COMP * queue all disconnected CCBs for a given target that * match the following criteria: * - lun=-1 means any logical UNIT otherwise a given one. * - task=-1 means any task, otherwise a given one. */ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task) { SYM_QUEHEAD qtmp, *qp; int i = 0; struct sym_ccb *cp; /* * Move the entire BUSY queue to our temporary queue. 
*/ sym_que_init(&qtmp); sym_que_splice(&np->busy_ccbq, &qtmp); sym_que_init(&np->busy_ccbq); /* * Put all CCBs that matches our criteria into * the COMP queue and put back other ones into * the BUSY queue. */ while ((qp = sym_remque_head(&qtmp)) != NULL) { struct scsi_cmnd *cmd; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); cmd = cp->cmd; if (cp->host_status != HS_DISCONNECT || cp->target != target || (lun != -1 && cp->lun != lun) || (task != -1 && (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); continue; } sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); /* Preserve the software timeout condition */ if (sym_get_cam_status(cmd) != DID_TIME_OUT) sym_set_cam_status(cmd, cam_status); ++i; #if 0 printf("XXXX TASK @%p CLEARED\n", cp); #endif } return i; } /* * chip handler for TASKS recovery * * We cannot safely abort a command, while the SCRIPTS * processor is running, since we just would be in race * with it. * * As long as we have tasks to abort, we keep the SEM * bit set in the ISTAT. When this bit is set, the * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) * each time it enters the scheduler. * * If we have to reset a target, clear tasks of a unit, * or to perform the abort of a disconnected job, we * restart the SCRIPTS for selecting the target. Once * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). * If it loses arbitration, the SCRIPTS will interrupt again * the next time it will enter its scheduler, and so on ... * * On SIR_TARGET_SELECTED, we scan for the more * appropriate thing to do: * * - If nothing, we just sent a M_ABORT message to the * target to get rid of the useless SCSI bus ownership. * According to the specs, no tasks shall be affected. * - If the target is to be reset, we send it a M_RESET * message. * - If a logical UNIT is to be cleared , we send the * IDENTIFY(lun) + M_ABORT. * - If an untagged task is to be aborted, we send the * IDENTIFY(lun) + M_ABORT. 
 *  - If a tagged task is to be aborted, we send the
 *    IDENTIFY(lun) + task attributes + M_ABORT_TAG.
 *
 *  Once our 'kiss of death' :) message has been accepted
 *  by the target, the SCRIPTS interrupts again
 *  (SIR_ABORT_SENT). On this interrupt, we complete
 *  all the CCBs that should have been aborted by the
 *  target according to our message.
 */
static void sym_sir_task_recovery(struct sym_hcb *np, int num)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;
	struct sym_tcb *tp = NULL; /* gcc isn't quite smart enough yet */
	struct scsi_target *starget;
	int target=-1, lun=-1, task;
	int i, k;

	switch(num) {
	/*
	 *  The SCRIPTS processor stopped before starting
	 *  the next command in order to allow us to perform
	 *  some task recovery.
	 */
	case SIR_SCRIPT_STOPPED:
		/*
		 *  Do we have any target to reset or unit to clear ?
		 */
		for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
			tp = &np->target[i];
			if (tp->to_reset || 
			    (tp->lun0p && tp->lun0p->to_clear)) {
				target = i;
				break;
			}
			if (!tp->lunmp)
				continue;
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					target	= i;
					break;
				}
			}
			if (target != -1)
				break;
		}

		/*
		 *  If not, walk the busy queue for any
		 *  disconnected CCB to be aborted.
		 */
		if (target == -1) {
			FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
				cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
				if (cp->host_status != HS_DISCONNECT)
					continue;
				if (cp->to_abort) {
					target = cp->target;
					break;
				}
			}
		}

		/*
		 *  If some target is to be selected,
		 *  prepare and start the selection.
		 */
		if (target != -1) {
			tp = &np->target[target];
			np->abrt_sel.sel_id	= target;
			np->abrt_sel.sel_scntl3 = tp->head.wval;
			np->abrt_sel.sel_sxfer  = tp->head.sval;
			OUTL(np, nc_dsa, np->hcb_ba);
			OUTL_DSP(np, SCRIPTB_BA(np, sel_for_abort));
			return;
		}

		/*
		 *  Now look for a CCB to abort that haven't started yet.
		 *  Btw, the SCRIPTS processor is still stopped, so
		 *  we are not in race.
		 */
		i = 0;
		cp = NULL;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_BUSY &&
			    cp->host_status != HS_NEGOTIATE)
				continue;
			if (!cp->to_abort)
				continue;
#ifdef SYM_CONF_IARB_SUPPORT
			/*
			 *    If we are using IMMEDIATE ARBITRATION, we do not
			 *    want to cancel the last queued CCB, since the
			 *    SCRIPTS may have anticipated the selection.
			 */
			if (cp == np->last_cp) {
				cp->to_abort = 0;
				continue;
			}
#endif
			i = 1;	/* Means we have found some */
			break;
		}
		if (!i) {
			/*
			 *  We are done, so we do not need
			 *  to synchronize with the SCRIPTS any longer.
			 *  Remove the SEM flag from the ISTAT.
			 */
			np->istat_sem = 0;
			OUTB(np, nc_istat, SIGP);
			break;
		}
		/*
		 *  Compute index of next position in the start
		 *  queue the SCRIPTS intends to start and dequeue
		 *  all CCBs for that device that haven't been started.
		 */
		i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
		i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

		/*
		 *  Make sure at least our IO to abort has been dequeued.
		 */
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
		assert(i && sym_get_cam_status(cp->cmd) == DID_SOFT_ERROR);
#else
		sym_remque(&cp->link_ccbq);
		sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
#endif
		/*
		 *  Keep track in cam status of the reason of the abort.
		 *  to_abort == 2 marks an abort requested due to timeout.
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cmd, DID_TIME_OUT);
		else
			sym_set_cam_status(cp->cmd, DID_ABORT);

		/*
		 *  Complete with error everything that we have dequeued.
		 */
		sym_flush_comp_queue(np, 0);
		break;
	/*
	 *  The SCRIPTS processor has selected a target
	 *  we may have some manual recovery to perform for.
	 */
	case SIR_TARGET_SELECTED:
		target = INB(np, nc_sdid) & 0xf;
		tp = &np->target[target];

		np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));

		/*
		 *  If the target is to be reset, prepare a
		 *  M_RESET message and clear the to_reset flag
		 *  since we do not expect this operation to fail.
		 */
		if (tp->to_reset) {
			np->abrt_msg[0] = M_RESET;
			np->abrt_tbl.size = 1;
			tp->to_reset = 0;
			break;
		}

		/*
		 *  Otherwise, look for some logical unit to be cleared.
		 */
		if (tp->lun0p && tp->lun0p->to_clear)
			lun = 0;
		else if (tp->lunmp) {
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					lun = k;
					break;
				}
			}
		}

		/*
		 *  If a logical unit is to be cleared, prepare
		 *  an IDENTIFY(lun) + ABORT MESSAGE.
		 */
		if (lun != -1) {
			struct sym_lcb *lp = sym_lp(tp, lun);
			lp->to_clear = 0; /* We don't expect to fail here */
			np->abrt_msg[0] = IDENTIFY(0, lun);
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
			break;
		}

		/*
		 *  Otherwise, look for some disconnected job to
		 *  abort for this target.
		 */
		i = 0;
		cp = NULL;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_DISCONNECT)
				continue;
			if (cp->target != target)
				continue;
			if (!cp->to_abort)
				continue;
			i = 1;	/* Means we have some */
			break;
		}

		/*
		 *  If we have none, probably since the device has
		 *  completed the command before we won arbitration,
		 *  send a M_ABORT message without IDENTIFY.
		 *  According to the specs, the device must just
		 *  disconnect the BUS and not abort any task.
		 */
		if (!i) {
			np->abrt_msg[0] = M_ABORT;
			np->abrt_tbl.size = 1;
			break;
		}

		/*
		 *  We have some task to abort.
		 *  Set the IDENTIFY(lun)
		 */
		np->abrt_msg[0] = IDENTIFY(0, cp->lun);

		/*
		 *  If we want to abort an untagged command, we
		 *  will send a IDENTIFY + M_ABORT.
		 *  Otherwise (tagged command), we will send
		 *  a IDENTIFY + task attributes + ABORT TAG.
		 */
		if (cp->tag == NO_TAG) {
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
		} else {
			np->abrt_msg[1] = cp->scsi_smsg[1];
			np->abrt_msg[2] = cp->scsi_smsg[2];
			np->abrt_msg[3] = M_ABORT_TAG;
			np->abrt_tbl.size = 4;
		}
		/*
		 *  Keep track of software timeout condition, since the
		 *  peripheral driver may not count retries on abort
		 *  conditions not due to timeout.
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cmd, DID_TIME_OUT);
		cp->to_abort = 0; /* We do not expect to fail here */
		break;

	/*
	 *  The target has accepted our message and switched
	 *  to BUS FREE phase as we expected.
	 */
	case SIR_ABORT_SENT:
		target = INB(np, nc_sdid) & 0xf;
		tp = &np->target[target];
		starget = tp->starget;
		
		/*
		**  If we didn't abort anything, leave here.
		*/
		if (np->abrt_msg[0] == M_ABORT)
			break;

		/*
		 *  If we sent a M_RESET, then a hardware reset has
		 *  been performed by the target.
		 *  - Reset everything to async 8 bit
		 *  - Tell ourself to negotiate next time :-)
		 *  - Prepare to clear all disconnected CCBs for
		 *    this target from our task list (lun=task=-1)
		 */
		lun = -1;
		task = -1;
		if (np->abrt_msg[0] == M_RESET) {
			tp->head.sval = 0;
			tp->head.wval = np->rv_scntl3;
			tp->head.uval = 0;
			spi_period(starget) = 0;
			spi_offset(starget) = 0;
			spi_width(starget) = 0;
			spi_iu(starget) = 0;
			spi_dt(starget) = 0;
			spi_qas(starget) = 0;
			tp->tgoal.check_nego = 1;
			tp->tgoal.renego = 0;
		}

		/*
		 *  Otherwise, check for the LUN and TASK(s)
		 *  concerned by the cancelation.
		 *  If it is not ABORT_TAG then it is CLEAR_QUEUE
		 *  or an ABORT message :-)
		 */
		else {
			lun = np->abrt_msg[0] & 0x3f;
			if (np->abrt_msg[1] == M_ABORT_TAG)
				task = np->abrt_msg[2];
		}

		/*
		 *  Complete all the CCBs the device should have
		 *  aborted due to our 'kiss of death' message.
		 */
		i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
		sym_dequeue_from_squeue(np, i, target, lun, -1);
		sym_clear_tasks(np, DID_ABORT, target, lun, task);
		sym_flush_comp_queue(np, 0);

 		/*
		 *  If we sent a BDR, make upper layer aware of that.
 		 */
		if (np->abrt_msg[0] == M_RESET)
			starget_printk(KERN_NOTICE, starget,
							"has been reset\n");
		break;
	}

	/*
	 *  Print to the log the message we intend to send.
	 */
	if (num == SIR_TARGET_SELECTED) {
		dev_info(&tp->starget->dev, "control msgout:");
		sym_printl_hex(np->abrt_msg, np->abrt_tbl.size);
		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
	}

	/*
	 *  Let the SCRIPTS processor continue.
	 */
	OUTONB_STD();
}

/*
 *  Gerard's alchemy:) that deals with with the data
 *  pointer for both MDP and the residual calculation.
 *
 *  I didn't want to bloat the code by more than 200
 *  lines for the handling of both MDP and the residual.
 *  This has been achieved by using a data pointer
 *  representation consisting in an index in the data
 *  array (dp_sg) and a negative offset (dp_ofs) that
 *  have the following meaning:
 *
 *  - dp_sg = SYM_CONF_MAX_SG
 *    we are at the end of the data script.
 *  - dp_sg < SYM_CONF_MAX_SG
 *    dp_sg points to the next entry of the scatter array
 *    we want to transfer.
 *  - dp_ofs < 0
 *    dp_ofs represents the residual of bytes of the
 *    previous entry scatter entry we will send first.
 *  - dp_ofs = 0
 *    no residual to send first.
 *
 *  The function sym_evaluate_dp() accepts an arbitray
 *  offset (basically from the MDP message) and returns
 *  the corresponding values of dp_sg and dp_ofs.
 */
static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs)
{
	u32	dp_scr;
	int	dp_ofs, dp_sg, dp_sgmin;
	int	tmp;
	struct sym_pmc *pm;

	/*
	 *  Compute the resulted data pointer in term of a script
	 *  address within some DATA script and a signed byte offset.
	 */
	dp_scr = scr;
	dp_ofs = *ofs;
	if (dp_scr == SCRIPTA_BA(np, pm0_data))
		pm = &cp->phys.pm0;
	else if (dp_scr == SCRIPTA_BA(np, pm1_data))
		pm = &cp->phys.pm1;
	else
		pm = NULL;

	if (pm) {
		dp_scr  = scr_to_cpu(pm->ret);
		dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff;
	}

	/*
	 *  If we are auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		*ofs = dp_ofs;
		return 0;
	}

	/*
	 *  Deduce the index of the sg entry.
	 *  Keep track of the index of the first valid entry.
	 *  If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
	 *  end of the data.
	 *  (Each sg entry takes 2 dwords of SCRIPTS, hence 2*4.)
	 */
	tmp = scr_to_cpu(cp->goalp);
	dp_sg = SYM_CONF_MAX_SG;
	if (dp_scr != tmp)
		dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;

	/*
	 *  Move to the sg entry the data pointer belongs to.
	 *
	 *  If we are inside the data area, we expect result to be:
	 *
	 *  Either,
	 *      dp_ofs = 0 and dp_sg is the index of the sg entry
	 *      the data pointer belongs to (or the end of the data)
	 *  Or,
	 *      dp_ofs < 0 and dp_sg is the index of the sg entry
	 *      the data pointer belongs to + 1.
	 */
	if (dp_ofs < 0) {
		int n;
		while (dp_sg > dp_sgmin) {
			--dp_sg;
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			n = dp_ofs + (tmp & 0xffffff);
			if (n > 0) {
				++dp_sg;
				break;
			}
			dp_ofs = n;
		}
	}
	else if (dp_ofs > 0) {
		while (dp_sg < SYM_CONF_MAX_SG) {
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			dp_ofs -= (tmp & 0xffffff);
			++dp_sg;
			if (dp_ofs <= 0)
				break;
		}
	}

	/*
	 *  Make sure the data pointer is inside the data area.
	 *  If not, return some error.
	 */
	if	(dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
		goto out_err;
	else if	(dp_sg > SYM_CONF_MAX_SG ||
		 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
		goto out_err;

	/*
	 *  Save the extreme pointer if needed.
	 */
	if	(dp_sg > cp->ext_sg ||
                (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
		cp->ext_sg  = dp_sg;
		cp->ext_ofs = dp_ofs;
	}

	/*
	 *  Return data.
	 */
	*ofs = dp_ofs;
	return dp_sg;

out_err:
	return -1;
}

/*
 *  chip handler for MODIFY DATA POINTER MESSAGE
 *
 *  We also call this function on IGNORE WIDE RESIDUE
 *  messages that do not match a SWIDE full condition.
 *  Btw, we assume in that situation that such a message
 *  is equivalent to a MODIFY DATA POINTER (offset=-1).
 */
static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs)
{
	int dp_ofs	= ofs;
	u32	dp_scr	= sym_get_script_dp (np, cp);
	u32	dp_ret;
	u32	tmp;
	u_char	hflags;
	int	dp_sg;
	struct	sym_pmc *pm;

	/*
	 *  Not supported for auto-sense.
	 */
	if (cp->host_flags & HF_SENSE)
		goto out_reject;

	/*
	 *  Apply our alchemy:) (see comments in sym_evaluate_dp()),
	 *  to the resulted data pointer.
	 */
	dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
	if (dp_sg < 0)
		goto out_reject;

	/*
	 *  And our alchemy:) allows to easily calculate the data
	 *  script address we want to return for the next data phase.
	 */
	dp_ret = cpu_to_scr(cp->goalp);
	dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);

	/*
	 *  If offset / scatter entry is zero we do not need
	 *  a context for the new current data pointer.
	 */
	if (dp_ofs == 0) {
		dp_scr = dp_ret;
		goto out_ok;
	}

	/*
	 *  Get a context for the new current data pointer.
	 */
	hflags = INB(np, HF_PRT);

	if (hflags & HF_DP_SAVED)
		hflags ^= HF_ACT_PM;

	if (!(hflags & HF_ACT_PM)) {
		pm  = &cp->phys.pm0;
		dp_scr = SCRIPTA_BA(np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		dp_scr = SCRIPTA_BA(np, pm1_data);
	}

	hflags &= ~(HF_DP_SAVED);

	OUTB(np, HF_PRT, hflags);

	/*
	 *  Set up the new current data pointer.
	 *  ofs < 0 there, and for the next data phase, we
	 *  want to transfer part of the data of the sg entry
	 *  corresponding to index dp_sg-1 prior to returning
	 *  to the main data script.
	 */
	pm->ret = cpu_to_scr(dp_ret);
	tmp  = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
	tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
	pm->sg.addr = cpu_to_scr(tmp);
	pm->sg.size = cpu_to_scr(-dp_ofs);

out_ok:
	sym_set_script_dp (np, cp, dp_scr);
	OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;

out_reject:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}


/*
 *  chip calculation of the data residual.
 *
 *  As I used to say, the requirement of data residual
 *  in SCSI is broken, useless and cannot be achieved
 *  without huge complexity.
 *  But most OSes and even the official CAM require it.
 *  When stupidity happens to be so widely spread inside
 *  a community, it gets hard to convince.
 *
 *  Anyway, I don't care, since I am not going to use
 *  any software that considers this data residual as
 *  a relevant information. :)
 */
int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
{
	int dp_sg, dp_sgmin, resid = 0;
	int dp_ofs = 0;

	/*
	 *  Check for some data lost or just thrown away.
	 *  We are not required to be quite accurate in this
	 *  situation. Btw, if we are odd for output and the
	 *  device claims some more data, it may well happen
	 *  than our residual be zero. :-)
	 */
	if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
		if (cp->xerr_status & XE_EXTRA_DATA)
			resid -= cp->extra_bytes;
		if (cp->xerr_status & XE_SODL_UNRUN)
			++resid;
		if (cp->xerr_status & XE_SWIDE_OVRUN)
			--resid;
	}

	/*
	 *  If all data has been transferred,
	 *  there is no residual.
	 */
	if (cp->phys.head.lastp == cp->goalp)
		return resid;

	/*
	 *  If no data transfer occurs, or if the data
	 *  pointer is weird, return full residual.
	 */
	if (cp->startp == cp->phys.head.lastp ||
	    sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
			    &dp_ofs) < 0) {
		return cp->data_len - cp->odd_byte_adjustment;
	}

	/*
	 *  If we were auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		return -dp_ofs;
	}

	/*
	 *  We are now full comfortable in the computation
	 *  of the data residual (2's complement).
	 */
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
	resid = -cp->ext_ofs;
	for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
		u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
		resid += (tmp & 0xffffff);
	}

	resid -= cp->odd_byte_adjustment;

	/*
	 *  Hopefully, the result is not too wrong.
	 */
	return resid;
}

/*
 *  Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
 *
 *  When we try to negotiate, we append the negotiation message
 *  to the identify and (maybe) simple tag message.
 *  The host status field is set to HS_NEGOTIATE to mark this
 *  situation.
 *
 *  If the target doesn't answer this message immediately
 *  (as required by the standard), the SIR_NEGO_FAILED interrupt
 *  will be raised eventually.
 *  The handler removes the HS_NEGOTIATE status, and sets the
 *  negotiated value to the default (async / nowide).
 *
 *  If we receive a matching answer immediately, we check it
 *  for validity, and set the values.
 *
 *  If we receive a Reject message immediately, we assume the
 *  negotiation has failed, and fall back to standard values.
 *
 *  If we receive a negotiation message while not in HS_NEGOTIATE
 *  state, it's a target initiated negotiation.
We prepare a
 * (hopefully) valid answer, set our parameters, and send back
 * this answer to the target.
 *
 * If the target doesn't fetch the answer (no message out phase),
 * we assume the negotiation has failed, and fall back to default
 * settings (SIR_NEGO_PROTO interrupt).
 *
 * When we set the values, we adjust them in all ccbs belonging
 * to this target, in the controller's register, and in the "phys"
 * field of the controller's struct sym_hcb.
 */

/*
 * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
 *
 * 'req' is non-zero for a target-initiated request (we must answer),
 * zero when the message is the target's answer to our own request.
 * Returns 0 on success, -1 if the message must be rejected.
 */
static int
sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
{
	int target = cp->target;
	u_char	chg, ofs, per, fak, div;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "sync msgin", np->msgin);
	}

	/*
	 * Get requested values.
	 */
	chg = 0;
	per = np->msgin[3];
	ofs = np->msgin[4];

	/*
	 * Check values against our limits.
	 * 'chg' records that we had to lower the target's proposal.
	 */
	if (ofs) {
		if (ofs > np->maxoffs)
			{chg = 1; ofs = np->maxoffs;}
	}

	if (ofs) {
		if (per < np->minsync)
			{chg = 1; per = np->minsync;}
	}

	/*
	 * Get new chip synchronous parameters value.
	 */
	div = fak = 0;
	if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
		goto reject_it;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_addr(cp->cmd,
				"sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
				ofs, per, div, fak, chg);
	}

	/*
	 * If it was an answer we want to change,
	 * then it isn't acceptable. Reject it.
	 */
	if (!req && chg)
		goto reject_it;

	/*
	 * Apply new values.
	 */
	sym_setsync (np, target, ofs, per, div, fak);

	/*
	 * It was an answer. We are done.
	 */
	if (!req)
		return 0;

	/*
	 * It was a request. Prepare an answer message.
	 */
	spi_populate_sync_msg(np->msgout, per, ofs);

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "sync msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	return 0;

reject_it:
	sym_setsync (np, target, 0, 0, 0, 0);
	return -1;
}

/*
 * Dispatch an incoming SDTR: decide whether it is the answer to our
 * own negotiation or a target-initiated request, then check/apply it
 * and resume the SCRIPTS processor on the appropriate entry.
 */
static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp,
	struct sym_ccb *cp)
{
	int req = 1;
	int result;

	/*
	 * Request or answer ?
	 */
	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
		OUTB(np, HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_SYNC)
			goto reject_it;
		req = 0;
	}

	/*
	 * Check and apply new values.
	 */
	result = sym_sync_nego_check(np, req, cp);
	if (result)	/* Not acceptable, reject it */
		goto reject_it;
	if (req) {	/* Was a request, send response. */
		cp->nego_status = NS_SYNC;
		OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
	}
	else		/* Was a response, we are done. */
		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;

reject_it:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
 *
 * Same contract as sym_sync_nego_check(), but also negotiates
 * width and protocol options (DT, IU, QAS).
 */
static int
sym_ppr_nego_check(struct sym_hcb *np, int req, int target)
{
	struct sym_tcb *tp = &np->target[target];
	unsigned char fak, div;
	int dt, chg = 0;
	unsigned char per = np->msgin[3];
	unsigned char ofs = np->msgin[5];
	unsigned char wide = np->msgin[6];
	unsigned char opts = np->msgin[7] & PPR_OPT_MASK;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "ppr msgin", np->msgin);
	}

	/*
	 * Check values against our limits.
	 */
	if (wide > np->maxwide) {
		chg = 1;
		wide = np->maxwide;
	}
	/* DT and friends require a wide bus and Ultra3 support. */
	if (!wide || !(np->features & FE_U3EN))
		opts = 0;

	if (opts != (np->msgin[7] & PPR_OPT_MASK))
		chg = 1;

	dt = opts & PPR_OPT_DT;

	if (ofs) {
		unsigned char maxoffs = dt ? np->maxoffs_dt : np->maxoffs;
		if (ofs > maxoffs) {
			chg = 1;
			ofs = maxoffs;
		}
	}

	if (ofs) {
		unsigned char minsync = dt ? np->minsync_dt : np->minsync;
		if (per < minsync) {
			chg = 1;
			per = minsync;
		}
	}

	/*
	 * Get new chip synchronous parameters value.
	 */
	div = fak = 0;
	if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
		goto reject_it;

	/*
	 * If it was an answer we want to change,
	 * then it isn't acceptable. Reject it.
	 */
	if (!req && chg)
		goto reject_it;

	/*
	 * Apply new values.
	 */
	sym_setpprot(np, target, opts, ofs, per, wide, div, fak);

	/*
	 * It was an answer. We are done.
	 */
	if (!req)
		return 0;

	/*
	 * It was a request. Prepare an answer message.
	 */
	spi_populate_ppr_msg(np->msgout, per, ofs, wide, opts);

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "ppr msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	return 0;

reject_it:
	sym_setpprot (np, target, 0, 0, 0, 0, 0, 0);
	/*
	 * If it is a device response that should result in
	 * ST, we may want to try a legacy negotiation later.
	 */
	if (!req && !opts) {
		tp->tgoal.period = per;
		tp->tgoal.offset = ofs;
		tp->tgoal.width = wide;
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
		tp->tgoal.check_nego = 1;
	}
	return -1;
}

/*
 * Dispatch an incoming PPR (request vs. answer), mirror of
 * sym_sync_nego() for the PPR negotiation state.
 */
static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp,
	struct sym_ccb *cp)
{
	int req = 1;
	int result;

	/*
	 * Request or answer ?
	 */
	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
		OUTB(np, HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_PPR)
			goto reject_it;
		req = 0;
	}

	/*
	 * Check and apply new values.
	 */
	result = sym_ppr_nego_check(np, req, cp->target);
	if (result)	/* Not acceptable, reject it */
		goto reject_it;
	if (req) {	/* Was a request, send response. */
		cp->nego_status = NS_PPR;
		OUTL_DSP(np, SCRIPTB_BA(np, ppr_resp));
	}
	else		/* Was a response, we are done. */
		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;

reject_it:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
 *
 * Same contract as sym_sync_nego_check(), for the bus width only.
 */
static int
sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
{
	int target = cp->target;
	u_char	chg, wide;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "wide msgin", np->msgin);
	}

	/*
	 * Get requested values.
	 */
	chg  = 0;
	wide = np->msgin[3];

	/*
	 * Check values against our limits.
	 */
	if (wide > np->maxwide) {
		chg = 1;
		wide = np->maxwide;
	}

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n",
				wide, chg);
	}

	/*
	 * If it was an answer we want to change,
	 * then it isn't acceptable. Reject it.
	 */
	if (!req && chg)
		goto reject_it;

	/*
	 * Apply new values.
	 */
	sym_setwide (np, target, wide);

	/*
	 * It was an answer. We are done.
	 */
	if (!req)
		return 0;

	/*
	 * It was a request. Prepare an answer message.
	 */
	spi_populate_width_msg(np->msgout, wide);

	np->msgin [0] = M_NOOP;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "wide msgout", np->msgout);
	}

	return 0;

reject_it:
	return -1;
}

/*
 * Dispatch an incoming WDTR (request vs. answer). On a successful
 * answer, chains directly into a SYNC negotiation if one is wanted.
 */
static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp,
	struct sym_ccb *cp)
{
	int req = 1;
	int result;

	/*
	 * Request or answer ?
	 */
	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
		OUTB(np, HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_WIDE)
			goto reject_it;
		req = 0;
	}

	/*
	 * Check and apply new values.
	 */
	result = sym_wide_nego_check(np, req, cp);
	if (result)	/* Not acceptable, reject it */
		goto reject_it;
	if (req) {	/* Was a request, send response. */
		cp->nego_status = NS_WIDE;
		OUTL_DSP(np, SCRIPTB_BA(np, wdtr_resp));
	} else {	/* Was a response. */
		/*
		 * Negotiate for SYNC immediately after WIDE response.
		 * This allows to negotiate for both WIDE and SYNC on
		 * a single SCSI command (Suggested by Justin Gibbs).
		 */
		if (tp->tgoal.offset) {
			spi_populate_sync_msg(np->msgout, tp->tgoal.period,
					tp->tgoal.offset);

			if (DEBUG_FLAGS & DEBUG_NEGO) {
				sym_print_nego_msg(np, cp->target,
				                   "sync msgout", np->msgout);
			}

			cp->nego_status = NS_SYNC;
			OUTB(np, HS_PRT, HS_NEGOTIATE);
			OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
			return;
		} else
			OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	}

	return;

reject_it:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 * Reset DT, SYNC or WIDE to default settings.
 *
 * Called when a negotiation does not succeed either
 * on rejection or on protocol error.
 *
 * A target that understands a PPR message should never
 * reject it, and messing with it is very unlikely.
 * So, if a PPR makes problems, we may just want to
 * try a legacy negotiation later.
 */
static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp,
	struct sym_ccb *cp)
{
	switch (cp->nego_status) {
	case NS_PPR:
#if 0
		sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0);
#else
		/* Fall back to a legacy (non-PPR) goal instead of
		 * clearing everything, so WDTR/SDTR may be retried. */
		if (tp->tgoal.period < np->minsync)
			tp->tgoal.period = np->minsync;
		if (tp->tgoal.offset > np->maxoffs)
			tp->tgoal.offset = np->maxoffs;
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
		tp->tgoal.check_nego = 1;
#endif
		break;
	case NS_SYNC:
		sym_setsync (np, cp->target, 0, 0, 0, 0);
		break;
	case NS_WIDE:
		sym_setwide (np, cp->target, 0);
		break;
	}
	np->msgin [0] = M_NOOP;
	np->msgout[0] = M_NOOP;
	cp->nego_status = 0;
}

/*
 * chip handler for MESSAGE REJECT received in response to
 * PPR, WIDE or SYNCHRONOUS negotiation.
 */
static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp,
	struct sym_ccb *cp)
{
	sym_nego_default(np, tp, cp);
	OUTB(np, HS_PRT, HS_BUSY);
}

/*
 * chip exception handler for programmed interrupts.
 *
 * Dispatches on the SIR code left in the DSPS register by the
 * SCRIPTS processor. Handlers either resume SCRIPTS ('goto out' /
 * out_reject / out_clrack), leave it stopped (out_stuck), or
 * return after restarting it themselves.
 */
static void sym_int_sir(struct sym_hcb *np)
{
	u_char	num	= INB(np, nc_dsps);
	u32	dsa	= INL(np, nc_dsa);
	struct	sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);
	u_char	target	= INB(np, nc_sdid) & 0x0f;
	struct	sym_tcb *tp	= &np->target[target];
	int	tmp;

	if (DEBUG_FLAGS & DEBUG_TINY)
		printf ("I#%d", num);

	switch (num) {
#if   SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 * SCRIPTS tell us that we may have to update
	 * 64 bit DMA segment registers.
	 */
	case SIR_DMAP_DIRTY:
		sym_update_dmap_regs(np);
		goto out;
#endif
	/*
	 * Command has been completed with error condition
	 * or has been auto-sensed.
	 */
	case SIR_COMPLETE_ERROR:
		sym_complete_error(np, cp);
		return;
	/*
	 * The C code is currently trying to recover from something.
	 * Typically, user want to abort some command.
	 */
	case SIR_SCRIPT_STOPPED:
	case SIR_TARGET_SELECTED:
	case SIR_ABORT_SENT:
		sym_sir_task_recovery(np, num);
		return;
	/*
	 * The device didn't go to MSG OUT phase after having
	 * been selected with ATN. We do not want to handle that.
	 */
	case SIR_SEL_ATN_NO_MSG_OUT:
		scmd_printk(KERN_WARNING, cp->cmd,
				"No MSG OUT phase after selection with ATN\n");
		goto out_stuck;
	/*
	 * The device didn't switch to MSG IN phase after
	 * having reselected the initiator.
	 */
	case SIR_RESEL_NO_MSG_IN:
		scmd_printk(KERN_WARNING, cp->cmd,
				"No MSG IN phase after reselection\n");
		goto out_stuck;
	/*
	 * After reselection, the device sent a message that wasn't
	 * an IDENTIFY.
	 */
	case SIR_RESEL_NO_IDENTIFY:
		scmd_printk(KERN_WARNING, cp->cmd,
				"No IDENTIFY after reselection\n");
		goto out_stuck;
	/*
	 * The device reselected a LUN we do not know about.
	 */
	case SIR_RESEL_BAD_LUN:
		np->msgout[0] = M_RESET;
		goto out;
	/*
	 * The device reselected for an untagged nexus and we
	 * haven't any.
	 */
	case SIR_RESEL_BAD_I_T_L:
		np->msgout[0] = M_ABORT;
		goto out;
	/*
	 * The device reselected for a tagged nexus that we do not have.
	 */
	case SIR_RESEL_BAD_I_T_L_Q:
		np->msgout[0] = M_ABORT_TAG;
		goto out;
	/*
	 * The SCRIPTS let us know that the device has grabbed
	 * our message and will abort the job.
	 */
	case SIR_RESEL_ABORTED:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		scmd_printk(KERN_WARNING, cp->cmd,
			"message %x sent on bad reselection\n", np->lastmsg);
		goto out;
	/*
	 * The SCRIPTS let us know that a message has been
	 * successfully sent to the device.
	 */
	case SIR_MSG_OUT_DONE:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		/* Should we really care of that */
		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
			if (cp) {
				cp->xerr_status &= ~XE_PARITY_ERR;
				if (!cp->xerr_status)
					OUTOFFB(np, HF_PRT, HF_EXT_ERR);
			}
		}
		goto out;
	/*
	 * The device didn't send a GOOD SCSI status.
	 * We may have some work to do prior to allow
	 * the SCRIPTS processor to continue.
	 */
	case SIR_BAD_SCSI_STATUS:
		if (!cp)
			goto out;
		sym_sir_bad_scsi_status(np, num, cp);
		return;
	/*
	 * We are asked by the SCRIPTS to prepare a
	 * REJECT message.
	 */
	case SIR_REJECT_TO_SEND:
		sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
		np->msgout[0] = M_REJECT;
		goto out;
	/*
	 * We have been ODD at the end of a DATA IN
	 * transfer and the device didn't send a
	 * IGNORE WIDE RESIDUE message.
	 * It is a data overrun condition.
	 */
	case SIR_SWIDE_OVERRUN:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SWIDE_OVRUN;
		}
		goto out;
	/*
	 * We have been ODD at the end of a DATA OUT
	 * transfer.
	 * It is a data underrun condition.
	 */
	case SIR_SODL_UNDERRUN:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SODL_UNRUN;
		}
		goto out;
	/*
	 * The device wants us to transfer more data than
	 * expected or in the wrong direction.
	 * The number of extra bytes is in scratcha.
	 * It is a data overrun condition.
	 */
	case SIR_DATA_OVERRUN:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_EXTRA_DATA;
			cp->extra_bytes += INL(np, nc_scratcha);
		}
		goto out;
	/*
	 * The device switched to an illegal phase (4/5).
	 */
	case SIR_BAD_PHASE:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_BAD_PHASE;
		}
		goto out;
	/*
	 * We received a message.
	 */
	case SIR_MSG_RECEIVED:
		if (!cp)
			goto out_stuck;
		switch (np->msgin [0]) {
		/*
		 * We received an extended message.
		 * We handle MODIFY DATA POINTER, SDTR, WDTR
		 * and reject all other extended messages.
		 */
		case M_EXTENDED:
			switch (np->msgin [2]) {
			case M_X_MODIFY_DP:
				if (DEBUG_FLAGS & DEBUG_POINTER)
					sym_print_msg(cp, "extended msg ",
						      np->msgin);
				tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
				      (np->msgin[5]<<8)  + (np->msgin[6]);
				sym_modify_dp(np, tp, cp, tmp);
				return;
			case M_X_SYNC_REQ:
				sym_sync_nego(np, tp, cp);
				return;
			case M_X_PPR_REQ:
				sym_ppr_nego(np, tp, cp);
				return;
			case M_X_WIDE_REQ:
				sym_wide_nego(np, tp, cp);
				return;
			default:
				goto out_reject;
			}
			break;
		/*
		 * We received a 1/2 byte message not handled from SCRIPTS.
		 * We are only expecting MESSAGE REJECT and IGNORE WIDE
		 * RESIDUE messages that haven't been anticipated by
		 * SCRIPTS on SWIDE full condition. Unanticipated IGNORE
		 * WIDE RESIDUE messages are aliased as MODIFY DP (-1).
		 */
		case M_IGN_RESIDUE:
			if (DEBUG_FLAGS & DEBUG_POINTER)
				sym_print_msg(cp, "1 or 2 byte ", np->msgin);
			if (cp->host_flags & HF_SENSE)
				OUTL_DSP(np, SCRIPTA_BA(np, clrack));
			else
				sym_modify_dp(np, tp, cp, -1);
			return;
		case M_REJECT:
			if (INB(np, HS_PRT) == HS_NEGOTIATE)
				sym_nego_rejected(np, tp, cp);
			else {
				sym_print_addr(cp->cmd,
					"M_REJECT received (%x:%x).\n",
					scr_to_cpu(np->lastmsg), np->msgout[0]);
			}
			goto out_clrack;
			break;
		default:
			goto out_reject;
		}
		break;
	/*
	 * We received an unknown message.
	 * Ignore all MSG IN phases and reject it.
	 */
	case SIR_MSG_WEIRD:
		sym_print_msg(cp, "WEIRD message received", np->msgin);
		OUTL_DSP(np, SCRIPTB_BA(np, msg_weird));
		return;
	/*
	 * Negotiation failed.
	 * Target does not send us the reply.
	 * Remove the HS_NEGOTIATE status.
	 */
	case SIR_NEGO_FAILED:
		OUTB(np, HS_PRT, HS_BUSY);
		/* fall through */
	/*
	 * Negotiation failed.
	 * Target does not want answer message.
	 */
	case SIR_NEGO_PROTO:
		sym_nego_default(np, tp, cp);
		goto out;
	}

out:
	OUTONB_STD();
	return;
out_reject:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
	return;
out_clrack:
	OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;
out_stuck:
	return;
}

/*
 * Acquire a control block
 *
 * Takes a CCB from the free queue (allocating a new one if the queue
 * is empty), assigns a tag when 'tag_order' requests one, and links
 * the CCB on the busy queue. Returns NULL when no CCB or tag is
 * available or when an untagged command would overlap another.
 */
struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd,
	u_char tag_order)
{
	u_char tn = cmd->device->id;
	u_char ln = cmd->device->lun;
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);
	u_short tag = NO_TAG;
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp = NULL;

	/*
	 * Look for a free CCB
	 */
	if (sym_que_empty(&np->free_ccbq))
		sym_alloc_ccb(np);
	qp = sym_remque_head(&np->free_ccbq);
	if (!qp)
		goto out;
	cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);

	{
		/*
		 * If we have been asked for a tagged command.
		 */
		if (tag_order) {
			/*
			 * Debugging purpose.
			 */
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
			if (lp->busy_itl != 0)
				goto out_free;
#endif
			/*
			 * Allocate resources for tags if not yet.
			 */
			if (!lp->cb_tags) {
				sym_alloc_lcb_tags(np, tn, ln);
				if (!lp->cb_tags)
					goto out_free;
			}
			/*
			 * Get a tag for this SCSI IO and set up
			 * the CCB bus address for reselection,
			 * and count it for this LUN.
			 * Toggle reselect path to tagged.
			 */
			if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
				tag = lp->cb_tags[lp->ia_tag];
				if (++lp->ia_tag == SYM_CONF_MAX_TASK)
					lp->ia_tag = 0;
				++lp->busy_itlq;
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
				lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
				lp->head.resel_sa =
					cpu_to_scr(SCRIPTA_BA(np, resel_tag));
#endif
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
				cp->tags_si = lp->tags_si;
				++lp->tags_sum[cp->tags_si];
				++lp->tags_since;
#endif
			}
			else
				goto out_free;
		}
		/*
		 * This command will not be tagged.
		 * If we already have either a tagged or untagged
		 * one, refuse to overlap this untagged one.
		 */
		else {
			/*
			 * Debugging purpose.
			 */
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
			if (lp->busy_itl != 0 || lp->busy_itlq != 0)
				goto out_free;
#endif
			/*
			 * Count this nexus for this LUN.
			 * Set up the CCB bus address for reselection.
			 * Toggle reselect path to untagged.
			 */
			++lp->busy_itl;
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
			if (lp->busy_itl == 1) {
				lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
				lp->head.resel_sa =
				      cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
			}
			else
				goto out_free;
#endif
		}
	}
	/*
	 * Put the CCB into the busy queue.
	 */
	sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	if (lp) {
		sym_remque(&cp->link2_ccbq);
		sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq);
	}
#endif
	cp->to_abort = 0;
	cp->odd_byte_adjustment = 0;
	cp->tag	   = tag;
	cp->order  = tag_order;
	cp->target = tn;
	cp->lun    = ln;

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag);
	}

out:
	return cp;
out_free:
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
	return NULL;
}

/*
 * Release one control block
 */
void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp)
{
	struct sym_tcb *tp = &np->target[cp->target];
	struct sym_lcb *lp = sym_lp(tp, cp->lun);

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n",
				cp, cp->tag);
	}

	/*
	 * If LCB available,
	 */
	if (lp) {
		/*
		 * If tagged, release the tag, set the reselect path.
		 */
		if (cp->tag != NO_TAG) {
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
			--lp->tags_sum[cp->tags_si];
#endif
			/*
			 * Free the tag value.
			 */
			lp->cb_tags[lp->if_tag] = cp->tag;
			if (++lp->if_tag == SYM_CONF_MAX_TASK)
				lp->if_tag = 0;
			/*
			 * Make the reselect path invalid,
			 * and uncount this CCB.
			 */
			lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
			--lp->busy_itlq;
		} else {	/* Untagged */
			/*
			 * Make the reselect path invalid,
			 * and uncount this CCB.
			 */
			lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
			--lp->busy_itl;
		}
		/*
		 * If no JOB active, make the LUN reselect path invalid.
		 */
		if (lp->busy_itlq == 0 && lp->busy_itl == 0)
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
	}

	/*
	 * We do not queue more than 1 ccb per target
	 * with negotiation at any time. If this ccb was
	 * used for negotiation, clear this info in the tcb.
	 */
	if (cp == tp->nego_cp)
		tp->nego_cp = NULL;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If we just complete the last queued CCB,
	 * clear this info that is no longer relevant.
	 */
	if (cp == np->last_cp)
		np->last_cp = 0;
#endif

	/*
	 * Make this CCB available.
	 */
	cp->cmd = NULL;
	cp->host_status = HS_IDLE;
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	if (lp) {
		sym_remque(&cp->link2_ccbq);
		sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq);
		if (cp->started) {
			if (cp->tag != NO_TAG)
				--lp->started_tags;
			else
				--lp->started_no_tag;
		}
	}
	cp->started = 0;
#endif
}

/*
 * Allocate a CCB from memory and initialize its fixed part.
 *
 * Returns NULL when the per-controller CCB limit is reached or
 * DMA-able memory is exhausted.
 */
static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np)
{
	struct sym_ccb *cp = NULL;
	int hcode;

	/*
	 * Prevent from allocating more CCBs than we can
	 * queue to the controller.
	 */
	if (np->actccbs >= SYM_CONF_MAX_START)
		return NULL;

	/*
	 * Allocate memory for this CCB.
	 */
	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
	if (!cp)
		goto out_free;

	/*
	 * Count it.
	 */
	np->actccbs++;

	/*
	 * Compute the bus address of this ccb.
	 */
	cp->ccb_ba = vtobus(cp);

	/*
	 * Insert this ccb into the hashed list.
	 */
	hcode = CCB_HASH_CODE(cp->ccb_ba);
	cp->link_ccbh = np->ccbh[hcode];
	np->ccbh[hcode] = cp;

	/*
	 * Initialize the start and restart actions.
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA(np, idle));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));

	/*
	 * Initialize some other fields.
	 */
	cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));

	/*
	 * Chain into free ccb queue.
	 */
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

	/*
	 * Chain into optional lists.
	 */
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq);
#endif
	return cp;
out_free:
	if (cp)
		sym_mfree_dma(cp, sizeof(*cp), "CCB");
	return NULL;
}

/*
 * Look up a CCB from a DSA value.
 */
static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa)
{
	int hcode;
	struct sym_ccb *cp;

	hcode = CCB_HASH_CODE(dsa);
	cp = np->ccbh[hcode];
	while (cp) {
		if (cp->ccb_ba == dsa)
			break;
		cp = cp->link_ccbh;
	}

	return cp;
}

/*
 * Target control block initialisation.
 * Nothing important to do at the moment.
 */
static void sym_init_tcb (struct sym_hcb *np, u_char tn)
{
#if 0	/*  Hmmm... this checking looks paranoid. */
	/*
	 *  Check some alignments required by the chip.
	 */
	assert (((offsetof(struct sym_reg, nc_sxfer) ^
		offsetof(struct sym_tcb, head.sval)) &3) == 0);
	assert (((offsetof(struct sym_reg, nc_scntl3) ^
		offsetof(struct sym_tcb, head.wval)) &3) == 0);
#endif
}

/*
 * Lun control block allocation and initialization.
 *
 * Returns the new (or NULL on allocation failure) LCB; also wires
 * it into the per-target LUN tables so the chip can reselect it.
 */
struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = NULL;

	/*
	 * Initialize the target control block if not yet.
	 */
	sym_init_tcb (np, tn);

	/*
	 * Allocate the LCB bus address array.
	 * Compute the bus address of this table.
	 * NOTE(review): 256 bytes = 64 entries of 4 bytes, matching
	 * the 64-iteration fill loop below.
	 */
	if (ln && !tp->luntbl) {
		int i;

		tp->luntbl = sym_calloc_dma(256, "LUNTBL");
		if (!tp->luntbl)
			goto fail;
		for (i = 0 ; i < 64 ; i++)
			tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
		tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
	}

	/*
	 * Allocate the table of pointers for LUN(s) > 0, if needed.
	 */
	if (ln && !tp->lunmp) {
		tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
				GFP_ATOMIC);
		if (!tp->lunmp)
			goto fail;
	}

	/*
	 * Allocate the lcb.
	 * Make it available to the chip.
	 */
	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
	if (!lp)
		goto fail;
	if (ln) {
		tp->lunmp[ln] = lp;
		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
	}
	else {
		tp->lun0p = lp;
		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
	}
	tp->nlcb++;

	/*
	 * Let the itl task point to error handling.
	 */
	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

	/*
	 * Set the reselect pattern to our default. :)
	 */
	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));

	/*
	 * Set user capabilities.
	 */
	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 * Initialize device queueing.
	 */
	sym_que_init(&lp->waiting_ccbq);
	sym_que_init(&lp->started_ccbq);
	lp->started_max   = SYM_CONF_MAX_TASK;
	lp->started_limit = SYM_CONF_MAX_TASK;
#endif

fail:
	return lp;
}

/*
 * Allocate LCB resources for tagged command queuing.
 */
static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);
	int i;

	/*
	 * Allocate the task table and the tag allocation
	 * circular buffer. We want both or none.
	 */
	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
	if (!lp->itlq_tbl)
		goto fail;
	lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_ATOMIC);
	if (!lp->cb_tags) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		lp->itlq_tbl = NULL;
		goto fail;
	}

	/*
	 * Initialize the task table with invalid entries.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

	/*
	 * Fill up the tag buffer with tag numbers.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->cb_tags[i] = i;

	/*
	 * Make the task table available to SCRIPTS,
	 * And accept tagged commands now.
	 */
	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));

	return;
fail:
	return;
}

/*
 * Lun control block deallocation. Returns the number of valid remaining LCBs
 * for the target.
 */
int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);

	tp->nlcb--;

	if (ln) {
		if (!tp->nlcb) {
			/* Last LUN gone: tear down the per-target tables. */
			kfree(tp->lunmp);
			sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
			tp->lunmp = NULL;
			tp->luntbl = NULL;
			tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
		} else {
			tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
			tp->lunmp[ln] = NULL;
		}
	} else {
		tp->lun0p = NULL;
		tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
	}

	if (lp->itlq_tbl) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		kfree(lp->cb_tags);
	}

	sym_mfree_dma(lp, sizeof(*lp), "LCB");

	return tp->nlcb;
}

/*
 * Queue a SCSI IO to the controller.
*/ int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) { struct scsi_device *sdev = cmd->device; struct sym_tcb *tp; struct sym_lcb *lp; u_char *msgptr; u_int msglen; int can_disconnect; /* * Keep track of the IO in our CCB. */ cp->cmd = cmd; /* * Retrieve the target descriptor. */ tp = &np->target[cp->target]; /* * Retrieve the lun descriptor. */ lp = sym_lp(tp, sdev->lun); can_disconnect = (cp->tag != NO_TAG) || (lp && (lp->curr_flags & SYM_DISC_ENABLED)); msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = IDENTIFY(can_disconnect, sdev->lun); /* * Build the tag message if present. */ if (cp->tag != NO_TAG) { u_char order = cp->order; switch(order) { case M_ORDERED_TAG: break; case M_HEAD_TAG: break; default: order = M_SIMPLE_TAG; } #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING /* * Avoid too much reordering of SCSI commands. * The algorithm tries to prevent completion of any * tagged command from being delayed against more * than 3 times the max number of queued commands. */ if (lp && lp->tags_since > 3*SYM_CONF_MAX_TAG) { lp->tags_si = !(lp->tags_si); if (lp->tags_sum[lp->tags_si]) { order = M_ORDERED_TAG; if ((DEBUG_FLAGS & DEBUG_TAGS)||sym_verbose>1) { sym_print_addr(cmd, "ordered tag forced.\n"); } } lp->tags_since = 0; } #endif msgptr[msglen++] = order; /* * For less than 128 tags, actual tags are numbered * 1,3,5,..2*MAXTAGS+1,since we may have to deal * with devices that have problems with #TAG 0 or too * great #TAG numbers. For more tags (up to 256), * we use directly our tag number. */ #if SYM_CONF_MAX_TASK > (512/4) msgptr[msglen++] = cp->tag; #else msgptr[msglen++] = (cp->tag << 1) + 1; #endif } /* * Build a negotiation message if needed. * (nego_status is filled by sym_prepare_nego()) * * Always negotiate on INQUIRY and REQUEST SENSE. 
* */ cp->nego_status = 0; if ((tp->tgoal.check_nego || cmd->cmnd[0] == INQUIRY || cmd->cmnd[0] == REQUEST_SENSE) && !tp->nego_cp && lp) { msglen += sym_prepare_nego(np, cp, msgptr + msglen); } /* * Startqueue */ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa)); /* * select */ cp->phys.select.sel_id = cp->target; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; cp->phys.select.sel_scntl4 = tp->head.uval; /* * message */ cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg); cp->phys.smsg.size = cpu_to_scr(msglen); /* * status */ cp->host_xflags = 0; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->xerr_status = 0; cp->host_flags = 0; cp->extra_bytes = 0; /* * extreme data pointer. * shall be positive, so -1 is lower than lowest.:) */ cp->ext_sg = -1; cp->ext_ofs = 0; /* * Build the CDB and DATA descriptor block * and start the IO. */ return sym_setup_data_and_start(np, cmd, cp); } /* * Reset a SCSI target (all LUNs of this target). */ int sym_reset_scsi_target(struct sym_hcb *np, int target) { struct sym_tcb *tp; if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET) return -1; tp = &np->target[target]; tp->to_reset = 1; np->istat_sem = SEM; OUTB(np, nc_istat, SIGP|SEM); return 0; } /* * Abort a SCSI IO. */ static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out) { /* * Check that the IO is active. */ if (!cp || !cp->host_status || cp->host_status == HS_WAIT) return -1; /* * If a previous abort didn't succeed in time, * perform a BUS reset. */ if (cp->to_abort) { sym_reset_scsi_bus(np, 1); return 0; } /* * Mark the CCB for abort and allow time for. */ cp->to_abort = timed_out ? 2 : 1; /* * Tell the SCRIPTS processor to stop and synchronize with us. 
*/ np->istat_sem = SEM; OUTB(np, nc_istat, SIGP|SEM); return 0; } int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out) { struct sym_ccb *cp; SYM_QUEHEAD *qp; /* * Look up our CCB control block. */ cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp2->cmd == cmd) { cp = cp2; break; } } return sym_abort_ccb(np, cp, timed_out); } /* * Complete execution of a SCSI command with extended * error, SCSI status error, or having been auto-sensed. * * The SCRIPTS processor is not running there, so we * can safely access IO registers and remove JOBs from * the START queue. * SCRATCHA is assumed to have been loaded with STARTPOS * before the SCRIPTS called the C code. */ void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp) { struct scsi_device *sdev; struct scsi_cmnd *cmd; struct sym_tcb *tp; struct sym_lcb *lp; int resid; int i; /* * Paranoid check. :) */ if (!cp || !cp->cmd) return; cmd = cp->cmd; sdev = cmd->device; if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) { dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp, cp->host_status, cp->ssss_status, cp->host_flags); } /* * Get target and lun pointers. */ tp = &np->target[cp->target]; lp = sym_lp(tp, sdev->lun); /* * Check for extended errors. */ if (cp->xerr_status) { if (sym_verbose) sym_print_xerr(cmd, cp->xerr_status); if (cp->host_status == HS_COMPLETE) cp->host_status = HS_COMP_ERR; } /* * Calculate the residual. */ resid = sym_compute_residual(np, cp); if (!SYM_SETUP_RESIDUAL_SUPPORT) {/* If user does not want residuals */ resid = 0; /* throw them away. :) */ cp->sv_resid = 0; } #ifdef DEBUG_2_0_X if (resid) printf("XXXX RESID= %d - 0x%x\n", resid, resid); #endif /* * Dequeue all queued CCBs for that device * not yet started by SCRIPTS. */ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1); /* * Restart the SCRIPTS processor. 
*/ OUTL_DSP(np, SCRIPTA_BA(np, start)); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING if (cp->host_status == HS_COMPLETE && cp->ssss_status == S_QUEUE_FULL) { if (!lp || lp->started_tags - i < 2) goto weirdness; /* * Decrease queue depth as needed. */ lp->started_max = lp->started_tags - i - 1; lp->num_sgood = 0; if (sym_verbose >= 2) { sym_print_addr(cmd, " queue depth is now %d\n", lp->started_max); } /* * Repair the CCB. */ cp->host_status = HS_BUSY; cp->ssss_status = S_ILLEGAL; /* * Let's requeue it to device. */ sym_set_cam_status(cmd, DID_SOFT_ERROR); goto finish; } weirdness: #endif /* * Build result in CAM ccb. */ sym_set_cam_result_error(np, cp, resid); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING finish: #endif /* * Add this one to the COMP queue. */ sym_remque(&cp->link_ccbq); sym_insque_head(&cp->link_ccbq, &np->comp_ccbq); /* * Complete all those commands with either error * or requeue condition. */ sym_flush_comp_queue(np, 0); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * Donnot start more than 1 command after an error. */ sym_start_next_ccbs(np, lp, 1); #endif } /* * Complete execution of a successful SCSI command. * * Only successful commands go to the DONE queue, * since we need to have the SCRIPTS processor * stopped on any error condition. * The SCRIPTS processor is running while we are * completing successful commands. */ void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp) { struct sym_tcb *tp; struct sym_lcb *lp; struct scsi_cmnd *cmd; int resid; /* * Paranoid check. :) */ if (!cp || !cp->cmd) return; assert (cp->host_status == HS_COMPLETE); /* * Get user command. */ cmd = cp->cmd; /* * Get target and lun pointers. */ tp = &np->target[cp->target]; lp = sym_lp(tp, cp->lun); /* * If all data have been transferred, given than no * extended error did occur, there is no residual. */ resid = 0; if (cp->phys.head.lastp != cp->goalp) resid = sym_compute_residual(np, cp); /* * Wrong transfer residuals may be worse than just always * returning zero. 
User can disable this feature in * sym53c8xx.h. Residual support is enabled by default. */ if (!SYM_SETUP_RESIDUAL_SUPPORT) resid = 0; #ifdef DEBUG_2_0_X if (resid) printf("XXXX RESID= %d - 0x%x\n", resid, resid); #endif /* * Build result in CAM ccb. */ sym_set_cam_result_ok(cp, cmd, resid); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * If max number of started ccbs had been reduced, * increase it if 200 good status received. */ if (lp && lp->started_max < lp->started_limit) { ++lp->num_sgood; if (lp->num_sgood >= 200) { lp->num_sgood = 0; ++lp->started_max; if (sym_verbose >= 2) { sym_print_addr(cmd, " queue depth is now %d\n", lp->started_max); } } } #endif /* * Free our CCB. */ sym_free_ccb (np, cp); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * Requeue a couple of awaiting scsi commands. */ if (!sym_que_empty(&lp->waiting_ccbq)) sym_start_next_ccbs(np, lp, 2); #endif /* * Complete the command. */ sym_xpt_done(np, cmd); } /* * Soft-attach the controller. */ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram) { struct sym_hcb *np = sym_get_hcb(shost); int i; /* * Get some info about the firmware. */ np->scripta_sz = fw->a_size; np->scriptb_sz = fw->b_size; np->scriptz_sz = fw->z_size; np->fw_setup = fw->setup; np->fw_patch = fw->patch; np->fw_name = fw->name; /* * Save setting of some IO registers, so we will * be able to probe specific implementations. */ sym_save_initial_setting (np); /* * Reset the chip now, since it has been reported * that SCSI clock calibration may not work properly * if the chip is currently active. */ sym_chip_reset(np); /* * Prepare controller and devices settings, according * to chip features, user set-up and driver set-up. */ sym_prepare_setting(shost, np, nvram); /* * Check the PCI clock frequency. * Must be performed after prepare_setting since it destroys * STEST1 that is used to probe for the clock doubler. 
*/ i = sym_getpciclock(np); if (i > 37000 && !(np->features & FE_66MHZ)) printf("%s: PCI BUS clock seems too high: %u KHz.\n", sym_name(np), i); /* * Allocate the start queue. */ np->squeue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE"); if (!np->squeue) goto attach_failed; np->squeue_ba = vtobus(np->squeue); /* * Allocate the done queue. */ np->dqueue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE"); if (!np->dqueue) goto attach_failed; np->dqueue_ba = vtobus(np->dqueue); /* * Allocate the target bus address array. */ np->targtbl = sym_calloc_dma(256, "TARGTBL"); if (!np->targtbl) goto attach_failed; np->targtbl_ba = vtobus(np->targtbl); /* * Allocate SCRIPTS areas. */ np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0"); np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0"); np->scriptz0 = sym_calloc_dma(np->scriptz_sz, "SCRIPTZ0"); if (!np->scripta0 || !np->scriptb0 || !np->scriptz0) goto attach_failed; /* * Allocate the array of lists of CCBs hashed by DSA. */ np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb **), GFP_KERNEL); if (!np->ccbh) goto attach_failed; /* * Initialyze the CCB free and busy queues. */ sym_que_init(&np->free_ccbq); sym_que_init(&np->busy_ccbq); sym_que_init(&np->comp_ccbq); /* * Initialization for optional handling * of device queueing. */ #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING sym_que_init(&np->dummy_ccbq); #endif /* * Allocate some CCB. We need at least ONE. */ if (!sym_alloc_ccb(np)) goto attach_failed; /* * Calculate BUS addresses where we are going * to load the SCRIPTS. */ np->scripta_ba = vtobus(np->scripta0); np->scriptb_ba = vtobus(np->scriptb0); np->scriptz_ba = vtobus(np->scriptz0); if (np->ram_ba) { np->scripta_ba = np->ram_ba; if (np->features & FE_RAM8K) { np->scriptb_ba = np->scripta_ba + 4096; #if 0 /* May get useful for 64 BIT PCI addressing */ np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); #endif } } /* * Copy scripts to controller instance. 
*/ memcpy(np->scripta0, fw->a_base, np->scripta_sz); memcpy(np->scriptb0, fw->b_base, np->scriptb_sz); memcpy(np->scriptz0, fw->z_base, np->scriptz_sz); /* * Setup variable parts in scripts and compute * scripts bus addresses used from the C code. */ np->fw_setup(np, fw); /* * Bind SCRIPTS with physical addresses usable by the * SCRIPTS processor (as seen from the BUS = BUS addresses). */ sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz); sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz); sym_fw_bind_script(np, (u32 *) np->scriptz0, np->scriptz_sz); #ifdef SYM_CONF_IARB_SUPPORT /* * If user wants IARB to be set when we win arbitration * and have other jobs, compute the max number of consecutive * settings of IARB hints before we leave devices a chance to * arbitrate for reselection. */ #ifdef SYM_SETUP_IARB_MAX np->iarb_max = SYM_SETUP_IARB_MAX; #else np->iarb_max = 4; #endif #endif /* * Prepare the idle and invalid task actions. */ np->idletask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->idletask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); np->idletask_ba = vtobus(&np->idletask); np->notask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->notask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); np->notask_ba = vtobus(&np->notask); np->bad_itl.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); np->bad_itl_ba = vtobus(&np->bad_itl); np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA(np,bad_i_t_l_q)); np->bad_itlq_ba = vtobus(&np->bad_itlq); /* * Allocate and prepare the lun JUMP table that is used * for a target prior the probing of devices (bad lun table). * A private table will be allocated for the target on the * first INQUIRY response received. 
 */
	np->badluntbl = sym_calloc_dma(256, "BADLUNTBL");
	if (!np->badluntbl)
		goto attach_failed;
	np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
	for (i = 0 ; i < 64 ; i++)	/* 64 luns/target, no less */
		np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));

	/*
	 * Prepare the bus address array that contains the bus
	 * address of each target control block.
	 * For now, assume all logical units are wrong. :)
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
		np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i]));
		np->target[i].head.luntbl_sa =
				cpu_to_scr(vtobus(np->badluntbl));
		np->target[i].head.lun0_sa =
				cpu_to_scr(vtobus(&np->badlun_sa));
	}

	/*
	 * Now check the cache handling of the pci chipset.
	 */
	if (sym_snooptest (np)) {
		printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np));
		goto attach_failed;
	}

	/*
	 * Sigh! we are done.
	 */
	return 0;

attach_failed:
	/*
	 * Partial allocations are released later by sym_hcb_free();
	 * just report the failure here.
	 */
	return -ENXIO;
}

/*
 * Free everything that has been allocated for this device.
 *
 * Safe to call after a failed sym_hcb_attach(): every pointer is
 * checked before being released, so partially-initialized HCBs are
 * handled correctly.
 */
void sym_hcb_free(struct sym_hcb *np)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;
	struct sym_tcb *tp;
	int target;

	if (np->scriptz0)
		sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0");
	if (np->scriptb0)
		sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0");
	if (np->scripta0)
		sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0");
	if (np->squeue)
		sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
	if (np->dqueue)
		sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");

	/*
	 * Release CCBs taken from the free queue.
	 * NOTE(review): only CCBs currently on free_ccbq are freed here;
	 * presumably all CCBs have been returned to it by this point —
	 * confirm against the driver's shutdown path.
	 */
	if (np->actccbs) {
		while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			sym_mfree_dma(cp, sizeof(*cp), "CCB");
		}
	}
	kfree(np->ccbh);

	if (np->badluntbl)
		sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");

	for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
		tp = &np->target[target];
		if (tp->luntbl)
			sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
#if SYM_CONF_MAX_LUN > 1
		kfree(tp->lunmp);
#endif
	}
	if (np->targtbl)
		sym_mfree_dma(np->targtbl, 256, "TARGTBL");
}
gpl-2.0
gamerlulea/linux-3.5-rc
kernel/debug/kdb/kdb_io.c
429
20305
/* * Kernel Debugger Architecture Independent Console I/O handler * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved. */ #include <linux/module.h> #include <linux/types.h> #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/kdev_t.h> #include <linux/console.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/nmi.h> #include <linux/delay.h> #include <linux/kgdb.h> #include <linux/kdb.h> #include <linux/kallsyms.h> #include "kdb_private.h" #define CMD_BUFLEN 256 char kdb_prompt_str[CMD_BUFLEN]; int kdb_trap_printk; static int kgdb_transition_check(char *buffer) { if (buffer[0] != '+' && buffer[0] != '$') { KDB_STATE_SET(KGDB_TRANS); kdb_printf("%s", buffer); } else { int slen = strlen(buffer); if (slen > 3 && buffer[slen - 3] == '#') { kdb_gdb_state_pass(buffer); strcpy(buffer, "kgdb"); KDB_STATE_SET(DOING_KGDB); return 1; } } return 0; } static int kdb_read_get_key(char *buffer, size_t bufsize) { #define ESCAPE_UDELAY 1000 #define ESCAPE_DELAY (2*1000000/ESCAPE_UDELAY) /* 2 seconds worth of udelays */ char escape_data[5]; /* longest vt100 escape sequence is 4 bytes */ char *ped = escape_data; int escape_delay = 0; get_char_func *f, *f_escape = NULL; int key; for (f = &kdb_poll_funcs[0]; ; ++f) { if (*f == NULL) { /* Reset NMI watchdog once per poll loop */ touch_nmi_watchdog(); f = &kdb_poll_funcs[0]; } if (escape_delay == 2) { *ped = '\0'; ped = escape_data; --escape_delay; } if (escape_delay == 1) { key = *ped++; if (!*ped) --escape_delay; break; } key = (*f)(); if (key == -1) { if (escape_delay) { udelay(ESCAPE_UDELAY); --escape_delay; } continue; } if (bufsize <= 2) { if (key == '\r') key = '\n'; *buffer++ = key; *buffer = '\0'; 
/* single-key mode: key already stored in the caller's buffer */
			return -1;
		}
		if (escape_delay == 0 && key == '\e') {
			/* Possible start of a vt100 escape sequence:
			 * arm the 2 second timeout. */
			escape_delay = ESCAPE_DELAY;
			ped = escape_data;
			f_escape = f;
		}
		if (escape_delay) {
			*ped++ = key;
			if (f_escape != f) {
				/* Bytes from a different input source
				 * cannot belong to this sequence. */
				escape_delay = 2;
				continue;
			}

			if (ped - escape_data == 1) {
				/* \e */
				continue;
			} else if (ped - escape_data == 2) {
				/* \e<something> */
				if (key != '[')
					escape_delay = 2;
				continue;
			} else if (ped - escape_data == 3) {
				/* \e[<something> */
				int mapkey = 0;
				switch (key) {
				case 'A': /* \e[A, up arrow */
					mapkey = 16;
					break;
				case 'B': /* \e[B, down arrow */
					mapkey = 14;
					break;
				case 'C': /* \e[C, right arrow */
					mapkey = 6;
					break;
				case 'D': /* \e[D, left arrow */
					mapkey = 2;
					break;
				case '1': /* dropthrough */
				case '3': /* dropthrough */
				/* \e[<1,3,4>], may be home, del, end */
				case '4':
					mapkey = -1;
					break;
				}
				if (mapkey != -1) {
					if (mapkey > 0) {
						escape_data[0] = mapkey;
						escape_data[1] = '\0';
					}
					escape_delay = 2;
				}
				continue;
			} else if (ped - escape_data == 4) {
				/* \e[<1,3,4><something> */
				int mapkey = 0;
				if (key == '~') {
					switch (escape_data[2]) {
					case '1': /* \e[1~, home */
						mapkey = 1;
						break;
					case '3': /* \e[3~, del */
						mapkey = 4;
						break;
					case '4': /* \e[4~, end */
						mapkey = 5;
						break;
					}
				}
				if (mapkey > 0) {
					escape_data[0] = mapkey;
					escape_data[1] = '\0';
				}
				escape_delay = 2;
				continue;
			}
		}
		break;	/* A key to process */
	}
	return key;
}

/*
 * kdb_read
 *
 *	This function reads a string of characters, terminated by
 *	a newline, or by reaching the end of the supplied buffer,
 *	from the current kernel debugger console device.
 * Parameters:
 *	buffer	- Address of character buffer to receive input characters.
 *	bufsize - size, in bytes, of the character buffer
 * Returns:
 *	Returns a pointer to the buffer containing the received
 *	character string.  This string will be terminated by a
 *	newline character.
 * Locking:
 *	No locks are required to be held upon entry to this
 *	function.  It is not reentrant - it relies on the fact
 *	that while kdb is running on only one "master debug" cpu.
* Remarks: * * The buffer size must be >= 2. A buffer size of 2 means that the caller only * wants a single key. * * An escape key could be the start of a vt100 control sequence such as \e[D * (left arrow) or it could be a character in its own right. The standard * method for detecting the difference is to wait for 2 seconds to see if there * are any other characters. kdb is complicated by the lack of a timer service * (interrupts are off), by multiple input sources and by the need to sometimes * return after just one key. Escape sequence processing has to be done as * states in the polling loop. */ static char *kdb_read(char *buffer, size_t bufsize) { char *cp = buffer; char *bufend = buffer+bufsize-2; /* Reserve space for newline * and null byte */ char *lastchar; char *p_tmp; char tmp; static char tmpbuffer[CMD_BUFLEN]; int len = strlen(buffer); int len_tmp; int tab = 0; int count; int i; int diag, dtab_count; int key; diag = kdbgetintenv("DTABCOUNT", &dtab_count); if (diag) dtab_count = 30; if (len > 0) { cp += len; if (*(buffer+len-1) == '\n') cp--; } lastchar = cp; *cp = '\0'; kdb_printf("%s", buffer); poll_again: key = kdb_read_get_key(buffer, bufsize); if (key == -1) return buffer; if (key != 9) tab = 0; switch (key) { case 8: /* backspace */ if (cp > buffer) { if (cp < lastchar) { memcpy(tmpbuffer, cp, lastchar - cp); memcpy(cp-1, tmpbuffer, lastchar - cp); } *(--lastchar) = '\0'; --cp; kdb_printf("\b%s \r", cp); tmp = *cp; *cp = '\0'; kdb_printf(kdb_prompt_str); kdb_printf("%s", buffer); *cp = tmp; } break; case 13: /* enter */ *lastchar++ = '\n'; *lastchar++ = '\0'; if (!KDB_STATE(KGDB_TRANS)) { KDB_STATE_SET(KGDB_TRANS); kdb_printf("%s", buffer); } kdb_printf("\n"); return buffer; case 4: /* Del */ if (cp < lastchar) { memcpy(tmpbuffer, cp+1, lastchar - cp - 1); memcpy(cp, tmpbuffer, lastchar - cp - 1); *(--lastchar) = '\0'; kdb_printf("%s \r", cp); tmp = *cp; *cp = '\0'; kdb_printf(kdb_prompt_str); kdb_printf("%s", buffer); *cp = tmp; } break; case 1: 
/* Home */ if (cp > buffer) { kdb_printf("\r"); kdb_printf(kdb_prompt_str); cp = buffer; } break; case 5: /* End */ if (cp < lastchar) { kdb_printf("%s", cp); cp = lastchar; } break; case 2: /* Left */ if (cp > buffer) { kdb_printf("\b"); --cp; } break; case 14: /* Down */ memset(tmpbuffer, ' ', strlen(kdb_prompt_str) + (lastchar-buffer)); *(tmpbuffer+strlen(kdb_prompt_str) + (lastchar-buffer)) = '\0'; kdb_printf("\r%s\r", tmpbuffer); *lastchar = (char)key; *(lastchar+1) = '\0'; return lastchar; case 6: /* Right */ if (cp < lastchar) { kdb_printf("%c", *cp); ++cp; } break; case 16: /* Up */ memset(tmpbuffer, ' ', strlen(kdb_prompt_str) + (lastchar-buffer)); *(tmpbuffer+strlen(kdb_prompt_str) + (lastchar-buffer)) = '\0'; kdb_printf("\r%s\r", tmpbuffer); *lastchar = (char)key; *(lastchar+1) = '\0'; return lastchar; case 9: /* Tab */ if (tab < 2) ++tab; p_tmp = buffer; while (*p_tmp == ' ') p_tmp++; if (p_tmp > cp) break; memcpy(tmpbuffer, p_tmp, cp-p_tmp); *(tmpbuffer + (cp-p_tmp)) = '\0'; p_tmp = strrchr(tmpbuffer, ' '); if (p_tmp) ++p_tmp; else p_tmp = tmpbuffer; len = strlen(p_tmp); count = kallsyms_symbol_complete(p_tmp, sizeof(tmpbuffer) - (p_tmp - tmpbuffer)); if (tab == 2 && count > 0) { kdb_printf("\n%d symbols are found.", count); if (count > dtab_count) { count = dtab_count; kdb_printf(" But only first %d symbols will" " be printed.\nYou can change the" " environment variable DTABCOUNT.", count); } kdb_printf("\n"); for (i = 0; i < count; i++) { if (kallsyms_symbol_next(p_tmp, i) < 0) break; kdb_printf("%s ", p_tmp); *(p_tmp + len) = '\0'; } if (i >= dtab_count) kdb_printf("..."); kdb_printf("\n"); kdb_printf(kdb_prompt_str); kdb_printf("%s", buffer); } else if (tab != 2 && count > 0) { len_tmp = strlen(p_tmp); strncpy(p_tmp+len_tmp, cp, lastchar-cp+1); len_tmp = strlen(p_tmp); strncpy(cp, p_tmp+len, len_tmp-len + 1); len = len_tmp - len; kdb_printf("%s", cp); cp += len; lastchar += len; } kdb_nextline = 1; /* reset output line number */ break; default: if 
(key >= 32 && lastchar < bufend) { if (cp < lastchar) { memcpy(tmpbuffer, cp, lastchar - cp); memcpy(cp+1, tmpbuffer, lastchar - cp); *++lastchar = '\0'; *cp = key; kdb_printf("%s\r", cp); ++cp; tmp = *cp; *cp = '\0'; kdb_printf(kdb_prompt_str); kdb_printf("%s", buffer); *cp = tmp; } else { *++lastchar = '\0'; *cp++ = key; /* The kgdb transition check will hide * printed characters if we think that * kgdb is connecting, until the check * fails */ if (!KDB_STATE(KGDB_TRANS)) { if (kgdb_transition_check(buffer)) return buffer; } else { kdb_printf("%c", key); } } /* Special escape to kgdb */ if (lastchar - buffer >= 5 && strcmp(lastchar - 5, "$?#3f") == 0) { kdb_gdb_state_pass(lastchar - 5); strcpy(buffer, "kgdb"); KDB_STATE_SET(DOING_KGDB); return buffer; } if (lastchar - buffer >= 11 && strcmp(lastchar - 11, "$qSupported") == 0) { kdb_gdb_state_pass(lastchar - 11); strcpy(buffer, "kgdb"); KDB_STATE_SET(DOING_KGDB); return buffer; } } break; } goto poll_again; } /* * kdb_getstr * * Print the prompt string and read a command from the * input device. * * Parameters: * buffer Address of buffer to receive command * bufsize Size of buffer in bytes * prompt Pointer to string to use as prompt string * Returns: * Pointer to command buffer. * Locking: * None. * Remarks: * For SMP kernels, the processor number will be * substituted for %d, %x or %o in the prompt. */ char *kdb_getstr(char *buffer, size_t bufsize, char *prompt) { if (prompt && kdb_prompt_str != prompt) strncpy(kdb_prompt_str, prompt, CMD_BUFLEN); kdb_printf(kdb_prompt_str); kdb_nextline = 1; /* Prompt and input resets line number */ return kdb_read(buffer, bufsize); } /* * kdb_input_flush * * Get rid of any buffered console input. * * Parameters: * none * Returns: * nothing * Locking: * none * Remarks: * Call this function whenever you want to flush input. If there is any * outstanding input, it ignores all characters until there has been no * data for approximately 1ms. 
 */
static void kdb_input_flush(void)
{
	get_char_func *f;
	int res;
	int flush_delay = 1;

	while (flush_delay) {
		flush_delay--;
empty:
		touch_nmi_watchdog();
		/* Poll every input routine; any pending byte re-arms
		 * the flush loop via the goto. */
		for (f = &kdb_poll_funcs[0]; *f; ++f) {
			res = (*f)();
			if (res != -1) {
				flush_delay = 1;
				goto empty;
			}
		}
		if (flush_delay)
			mdelay(1);
	}
}

/*
 * kdb_printf
 *
 *	Print a string to the output device(s).
 *
 * Parameters:
 *	printf-like format and optional args.
 * Returns:
 *	0
 * Locking:
 *	None.
 * Remarks:
 *	use 'kdbcons->write()' to avoid polluting 'log_buf' with
 *	kdb output.
 *
 *	If the user is doing a cmd args | grep srch
 *	then kdb_grepping_flag is set.
 *	In that case we need to accumulate full lines (ending in \n) before
 *	searching for the pattern.
 */

static char kdb_buffer[256];	/* A bit too big to go on stack */
static char *next_avail = kdb_buffer;	/* where vsnprintf appends */
static int  size_avail;			/* room left in kdb_buffer */
static int  suspend_grep;		/* print unconditionally once */

/*
 * search arg1 to see if it contains arg2
 * (kdmain.c provides flags for ^pat and pat$)
 *
 * return 1 for found, 0 for not found
 */
static int kdb_search_string(char *searched, char *searchfor)
{
	char firstchar, *cp;
	int len1, len2;

	/* not counting the newline at the end of "searched" */
	len1 = strlen(searched)-1;
	len2 = strlen(searchfor);
	if (len1 < len2)
		return 0;
	if (kdb_grep_leading && kdb_grep_trailing && len1 != len2)
		return 0;
	if (kdb_grep_leading) {
		/* ^pattern: must match at the start of the line */
		if (!strncmp(searched, searchfor, len2))
			return 1;
	} else if (kdb_grep_trailing) {
		/* pattern$: must match at the end of the line */
		if (!strncmp(searched+len1-len2, searchfor, len2))
			return 1;
	} else {
		/* substring search anchored on the first character */
		firstchar = *searchfor;
		cp = searched;
		while ((cp = strchr(cp, firstchar))) {
			if (!strncmp(cp, searchfor, len2))
				return 1;
			cp++;
		}
	}
	return 0;
}

int vkdb_printf(const char *fmt, va_list ap)
{
	int diag;
	int linecount;
	int logging, saved_loglevel = 0;
	int saved_trap_printk;
	int got_printf_lock = 0;
	int retlen = 0;
	int fnd, len;
	char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
	char *moreprompt = "more> ";
	struct console *c = console_drivers;
	static DEFINE_SPINLOCK(kdb_printf_lock);
	unsigned long uninitialized_var(flags);
preempt_disable(); saved_trap_printk = kdb_trap_printk; kdb_trap_printk = 0; /* Serialize kdb_printf if multiple cpus try to write at once. * But if any cpu goes recursive in kdb, just print the output, * even if it is interleaved with any other text. */ if (!KDB_STATE(PRINTF_LOCK)) { KDB_STATE_SET(PRINTF_LOCK); spin_lock_irqsave(&kdb_printf_lock, flags); got_printf_lock = 1; atomic_inc(&kdb_event); } else { __acquire(kdb_printf_lock); } diag = kdbgetintenv("LINES", &linecount); if (diag || linecount <= 1) linecount = 24; diag = kdbgetintenv("LOGGING", &logging); if (diag) logging = 0; if (!kdb_grepping_flag || suspend_grep) { /* normally, every vsnprintf starts a new buffer */ next_avail = kdb_buffer; size_avail = sizeof(kdb_buffer); } vsnprintf(next_avail, size_avail, fmt, ap); /* * If kdb_parse() found that the command was cmd xxx | grep yyy * then kdb_grepping_flag is set, and kdb_grep_string contains yyy * * Accumulate the print data up to a newline before searching it. * (vsnprintf does null-terminate the string that it generates) */ /* skip the search if prints are temporarily unconditional */ if (!suspend_grep && kdb_grepping_flag) { cp = strchr(kdb_buffer, '\n'); if (!cp) { /* * Special cases that don't end with newlines * but should be written without one: * The "[nn]kdb> " prompt should * appear at the front of the buffer. * * The "[nn]more " prompt should also be * (MOREPROMPT -> moreprompt) * written * but we print that ourselves, * we set the suspend_grep flag to make * it unconditional. * */ if (next_avail == kdb_buffer) { /* * these should occur after a newline, * so they will be at the front of the * buffer */ cp2 = kdb_buffer; len = strlen(kdb_prompt_str); if (!strncmp(cp2, kdb_prompt_str, len)) { /* * We're about to start a new * command, so we can go back * to normal mode. 
*/ kdb_grepping_flag = 0; goto kdb_printit; } } /* no newline; don't search/write the buffer until one is there */ len = strlen(kdb_buffer); next_avail = kdb_buffer + len; size_avail = sizeof(kdb_buffer) - len; goto kdb_print_out; } /* * The newline is present; print through it or discard * it, depending on the results of the search. */ cp++; /* to byte after the newline */ replaced_byte = *cp; /* remember what/where it was */ cphold = cp; *cp = '\0'; /* end the string for our search */ /* * We now have a newline at the end of the string * Only continue with this output if it contains the * search string. */ fnd = kdb_search_string(kdb_buffer, kdb_grep_string); if (!fnd) { /* * At this point the complete line at the start * of kdb_buffer can be discarded, as it does * not contain what the user is looking for. * Shift the buffer left. */ *cphold = replaced_byte; strcpy(kdb_buffer, cphold); len = strlen(kdb_buffer); next_avail = kdb_buffer + len; size_avail = sizeof(kdb_buffer) - len; goto kdb_print_out; } /* * at this point the string is a full line and * should be printed, up to the null. */ } kdb_printit: /* * Write to all consoles. */ retlen = strlen(kdb_buffer); if (!dbg_kdb_mode && kgdb_connected) { gdbstub_msg_write(kdb_buffer, retlen); } else { if (dbg_io_ops && !dbg_io_ops->is_console) { len = strlen(kdb_buffer); cp = kdb_buffer; while (len--) { dbg_io_ops->write_char(*cp); cp++; } } while (c) { c->write(c, kdb_buffer, retlen); touch_nmi_watchdog(); c = c->next; } } if (logging) { saved_loglevel = console_loglevel; console_loglevel = 0; printk(KERN_INFO "%s", kdb_buffer); } if (KDB_STATE(PAGER) && strchr(kdb_buffer, '\n')) kdb_nextline++; /* check for having reached the LINES number of printed lines */ if (kdb_nextline == linecount) { char buf1[16] = ""; #if defined(CONFIG_SMP) char buf2[32]; #endif /* Watch out for recursion here. Any routine that calls * kdb_printf will come back through here. And kdb_read * uses kdb_printf to echo on serial consoles ... 
*/ kdb_nextline = 1; /* In case of recursion */ /* * Pause until cr. */ moreprompt = kdbgetenv("MOREPROMPT"); if (moreprompt == NULL) moreprompt = "more> "; #if defined(CONFIG_SMP) if (strchr(moreprompt, '%')) { sprintf(buf2, moreprompt, get_cpu()); put_cpu(); moreprompt = buf2; } #endif kdb_input_flush(); c = console_drivers; if (dbg_io_ops && !dbg_io_ops->is_console) { len = strlen(moreprompt); cp = moreprompt; while (len--) { dbg_io_ops->write_char(*cp); cp++; } } while (c) { c->write(c, moreprompt, strlen(moreprompt)); touch_nmi_watchdog(); c = c->next; } if (logging) printk("%s", moreprompt); kdb_read(buf1, 2); /* '2' indicates to return * immediately after getting one key. */ kdb_nextline = 1; /* Really set output line 1 */ /* empty and reset the buffer: */ kdb_buffer[0] = '\0'; next_avail = kdb_buffer; size_avail = sizeof(kdb_buffer); if ((buf1[0] == 'q') || (buf1[0] == 'Q')) { /* user hit q or Q */ KDB_FLAG_SET(CMD_INTERRUPT); /* command interrupted */ KDB_STATE_CLEAR(PAGER); /* end of command output; back to normal mode */ kdb_grepping_flag = 0; kdb_printf("\n"); } else if (buf1[0] == ' ') { kdb_printf("\n"); suspend_grep = 1; /* for this recursion */ } else if (buf1[0] == '\n') { kdb_nextline = linecount - 1; kdb_printf("\r"); suspend_grep = 1; /* for this recursion */ } else if (buf1[0] && buf1[0] != '\n') { /* user hit something other than enter */ suspend_grep = 1; /* for this recursion */ kdb_printf("\nOnly 'q' or 'Q' are processed at more " "prompt, input ignored\n"); } else if (kdb_grepping_flag) { /* user hit enter */ suspend_grep = 1; /* for this recursion */ kdb_printf("\n"); } kdb_input_flush(); } /* * For grep searches, shift the printed string left. * replaced_byte contains the character that was overwritten with * the terminating null, and cphold points to the null. * Then adjust the notion of available space in the buffer. 
*/ if (kdb_grepping_flag && !suspend_grep) { *cphold = replaced_byte; strcpy(kdb_buffer, cphold); len = strlen(kdb_buffer); next_avail = kdb_buffer + len; size_avail = sizeof(kdb_buffer) - len; } kdb_print_out: suspend_grep = 0; /* end of what may have been a recursive call */ if (logging) console_loglevel = saved_loglevel; if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) { got_printf_lock = 0; spin_unlock_irqrestore(&kdb_printf_lock, flags); KDB_STATE_CLEAR(PRINTF_LOCK); atomic_dec(&kdb_event); } else { __release(kdb_printf_lock); } kdb_trap_printk = saved_trap_printk; preempt_enable(); return retlen; } int kdb_printf(const char *fmt, ...) { va_list ap; int r; va_start(ap, fmt); r = vkdb_printf(fmt, ap); va_end(ap); return r; } EXPORT_SYMBOL_GPL(kdb_printf);
gpl-2.0
pengdonglin137/linux-3-14-y
drivers/video/au1100fb.c
429
17748
/* * BRIEF MODULE DESCRIPTION * Au1100 LCD Driver. * * Rewritten for 2.6 by Embedded Alley Solutions * <source@embeddedalley.com>, based on submissions by * Karl Lessard <klessard@sunrisetelecom.com> * <c.pellegrin@exadron.com> * * PM support added by Rodolfo Giometti <giometti@linux.it> * Cursor enable/disable by Rodolfo Giometti <giometti@linux.it> * * Copyright 2002 MontaVista Software * Author: MontaVista Software, Inc. * ppopov@mvista.com or source@mvista.com * * Copyright 2002 Alchemy Semiconductor * Author: Alchemy Semiconductor * * Based on: * linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device * Created 28 Dec 1997 by Geert Uytterhoeven * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ctype.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/mach-au1x00/au1000.h> #define DEBUG 0 #include "au1100fb.h" #define DRIVER_NAME "au1100fb" #define DRIVER_DESC "LCD controller driver for AU1100 processors" #define to_au1100fb_device(_info) \ (_info ? container_of(_info, struct au1100fb_device, info) : NULL); /* Bitfields format supported by the controller. Note that the order of formats * SHOULD be the same as in the LCD_CONTROL_SBPPF field, so we can retrieve the * right pixel format by doing rgb_bitfields[LCD_CONTROL_SBPPF_XXX >> LCD_CONTROL_SBPPF] */ struct fb_bitfield rgb_bitfields[][4] = { /* Red, Green, Blue, Transp */ { { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } }, { { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } }, { { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } }, { { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } }, { { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } }, /* The last is used to describe 12bpp format */ { { 8, 4, 0 }, { 4, 4, 0 }, { 0, 4, 0 }, { 0, 0, 0 } }, }; static struct fb_fix_screeninfo au1100fb_fix = { .id = "AU1100 FB", .xpanstep = 1, .ypanstep = 1, .type = FB_TYPE_PACKED_PIXELS, .accel = FB_ACCEL_NONE, }; static struct fb_var_screeninfo au1100fb_var = { .activate = FB_ACTIVATE_NOW, .height = -1, .width = -1, .vmode = FB_VMODE_NONINTERLACED, }; /* fb_blank * Blank the screen. 
Depending on the mode, the screen will be
 * activated with the backlight color, or deactivated
 */
static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
{
	struct au1100fb_device *fbdev = to_au1100fb_device(fbi);

	print_dbg("fb_blank %d %p", blank_mode, fbi);

	switch (blank_mode) {

	case VESA_NO_BLANKING:
		/* Turn on panel */
		fbdev->regs->lcd_control |= LCD_CONTROL_GO;
		au_sync();
		break;

	case VESA_VSYNC_SUSPEND:
	case VESA_HSYNC_SUSPEND:
	case VESA_POWERDOWN:
		/* Turn off panel */
		fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
		au_sync();
		break;
	default:
		break;

	}
	return 0;
}

/*
 * Set hardware with var settings. This will enable the controller with a specific
 * mode, normally validated with the fb_check_var method
 */
int au1100fb_setmode(struct au1100fb_device *fbdev)
{
	struct fb_info *info = &fbdev->info;
	u32 words;
	int index;

	/*
	 * NOTE(review): fbdev is dereferenced (&fbdev->info) before this
	 * NULL check, so the check can never help — if fbdev could be
	 * NULL the function would already have computed an invalid
	 * pointer.  The check should precede the info assignment.
	 */
	if (!fbdev)
		return -EINVAL;

	/* Update var-dependent FB info */
	if (panel_is_active(fbdev->panel) || panel_is_color(fbdev->panel)) {
		if (info->var.bits_per_pixel <= 8) {
			/* palettized */
			info->var.red.offset = 0;
			info->var.red.length = info->var.bits_per_pixel;
			info->var.red.msb_right = 0;

			info->var.green.offset = 0;
			info->var.green.length = info->var.bits_per_pixel;
			info->var.green.msb_right = 0;

			info->var.blue.offset = 0;
			info->var.blue.length = info->var.bits_per_pixel;
			info->var.blue.msb_right = 0;

			info->var.transp.offset = 0;
			info->var.transp.length = 0;
			info->var.transp.msb_right = 0;

			info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
			info->fix.line_length = info->var.xres_virtual /
						(8/info->var.bits_per_pixel);
		} else {
			/* non-palettized: pick bitfields from the table
			 * indexed by the panel's SBPPF control bits */
			index = (fbdev->panel->control_base & LCD_CONTROL_SBPPF_MASK) >> LCD_CONTROL_SBPPF_BIT;
			info->var.red = rgb_bitfields[index][0];
			info->var.green = rgb_bitfields[index][1];
			info->var.blue = rgb_bitfields[index][2];
			info->var.transp = rgb_bitfields[index][3];

			info->fix.visual = FB_VISUAL_TRUECOLOR;
			info->fix.line_length = info->var.xres_virtual << 1; /* depth=16 */
		}
	} else {
		/* mono */
		info->fix.visual =
FB_VISUAL_MONO10; info->fix.line_length = info->var.xres_virtual / 8; } info->screen_size = info->fix.line_length * info->var.yres_virtual; info->var.rotate = ((fbdev->panel->control_base&LCD_CONTROL_SM_MASK) \ >> LCD_CONTROL_SM_BIT) * 90; /* Determine BPP mode and format */ fbdev->regs->lcd_control = fbdev->panel->control_base; fbdev->regs->lcd_horztiming = fbdev->panel->horztiming; fbdev->regs->lcd_verttiming = fbdev->panel->verttiming; fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base; fbdev->regs->lcd_intenable = 0; fbdev->regs->lcd_intstatus = 0; fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(fbdev->fb_phys); if (panel_is_dual(fbdev->panel)) { /* Second panel display seconf half of screen if possible, * otherwise display the same as the first panel */ if (info->var.yres_virtual >= (info->var.yres << 1)) { fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(fbdev->fb_phys + (info->fix.line_length * (info->var.yres_virtual >> 1))); } else { fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(fbdev->fb_phys); } } words = info->fix.line_length / sizeof(u32); if (!info->var.rotate || (info->var.rotate == 180)) { words *= info->var.yres_virtual; if (info->var.rotate /* 180 */) { words -= (words % 8); /* should be divisable by 8 */ } } fbdev->regs->lcd_words = LCD_WRD_WRDS_N(words); fbdev->regs->lcd_pwmdiv = 0; fbdev->regs->lcd_pwmhi = 0; /* Resume controller */ fbdev->regs->lcd_control |= LCD_CONTROL_GO; mdelay(10); au1100fb_fb_blank(VESA_NO_BLANKING, info); return 0; } /* fb_setcolreg * Set color in LCD palette. 
*/ int au1100fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *fbi) { struct au1100fb_device *fbdev; u32 *palette; u32 value; fbdev = to_au1100fb_device(fbi); palette = fbdev->regs->lcd_pallettebase; if (regno > (AU1100_LCD_NBR_PALETTE_ENTRIES - 1)) return -EINVAL; if (fbi->var.grayscale) { /* Convert color to grayscale */ red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16; } if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) { /* Place color in the pseudopalette */ if (regno > 16) return -EINVAL; palette = (u32*)fbi->pseudo_palette; red >>= (16 - fbi->var.red.length); green >>= (16 - fbi->var.green.length); blue >>= (16 - fbi->var.blue.length); value = (red << fbi->var.red.offset) | (green << fbi->var.green.offset)| (blue << fbi->var.blue.offset); value &= 0xFFFF; } else if (panel_is_active(fbdev->panel)) { /* COLOR TFT PALLETTIZED (use RGB 565) */ value = (red & 0xF800)|((green >> 5) & 0x07E0)|((blue >> 11) & 0x001F); value &= 0xFFFF; } else if (panel_is_color(fbdev->panel)) { /* COLOR STN MODE */ value = (((panel_swap_rgb(fbdev->panel) ? blue : red) >> 12) & 0x000F) | ((green >> 8) & 0x00F0) | (((panel_swap_rgb(fbdev->panel) ? red : blue) >> 4) & 0x0F00); value &= 0xFFF; } else { /* MONOCHROME MODE */ value = (green >> 12) & 0x000F; value &= 0xF; } palette[regno] = value; return 0; } /* fb_pan_display * Pan display in x and/or y as specified */ int au1100fb_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi) { struct au1100fb_device *fbdev; int dy; fbdev = to_au1100fb_device(fbi); print_dbg("fb_pan_display %p %p", var, fbi); if (!var || !fbdev) { return -EINVAL; } if (var->xoffset - fbi->var.xoffset) { /* No support for X panning for now! 
*/ return -EINVAL; } print_dbg("fb_pan_display 2 %p %p", var, fbi); dy = var->yoffset - fbi->var.yoffset; if (dy) { u32 dmaaddr; print_dbg("Panning screen of %d lines", dy); dmaaddr = fbdev->regs->lcd_dmaaddr0; dmaaddr += (fbi->fix.line_length * dy); /* TODO: Wait for current frame to finished */ fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(dmaaddr); if (panel_is_dual(fbdev->panel)) { dmaaddr = fbdev->regs->lcd_dmaaddr1; dmaaddr += (fbi->fix.line_length * dy); fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(dmaaddr); } } print_dbg("fb_pan_display 3 %p %p", var, fbi); return 0; } /* fb_rotate * Rotate the display of this angle. This doesn't seems to be used by the core, * but as our hardware supports it, so why not implementing it... */ void au1100fb_fb_rotate(struct fb_info *fbi, int angle) { struct au1100fb_device *fbdev = to_au1100fb_device(fbi); print_dbg("fb_rotate %p %d", fbi, angle); if (fbdev && (angle > 0) && !(angle % 90)) { fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; fbdev->regs->lcd_control &= ~(LCD_CONTROL_SM_MASK); fbdev->regs->lcd_control |= ((angle/90) << LCD_CONTROL_SM_BIT); fbdev->regs->lcd_control |= LCD_CONTROL_GO; } } /* fb_mmap * Map video memory in user space. 
We don't use the generic fb_mmap method mainly * to allow the use of the TLB streaming flag (CCA=6) */ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) { struct au1100fb_device *fbdev; fbdev = to_au1100fb_device(fbi); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6 return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len); } static struct fb_ops au1100fb_ops = { .owner = THIS_MODULE, .fb_setcolreg = au1100fb_fb_setcolreg, .fb_blank = au1100fb_fb_blank, .fb_pan_display = au1100fb_fb_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_rotate = au1100fb_fb_rotate, .fb_mmap = au1100fb_fb_mmap, }; /*-------------------------------------------------------------------------*/ static int au1100fb_setup(struct au1100fb_device *fbdev) { char *this_opt, *options; int num_panels = ARRAY_SIZE(known_lcd_panels); if (num_panels <= 0) { print_err("No LCD panels supported by driver!"); return -ENODEV; } if (fb_get_options(DRIVER_NAME, &options)) return -ENODEV; if (!options) return -ENODEV; while ((this_opt = strsep(&options, ",")) != NULL) { /* Panel option */ if (!strncmp(this_opt, "panel:", 6)) { int i; this_opt += 6; for (i = 0; i < num_panels; i++) { if (!strncmp(this_opt, known_lcd_panels[i].name, strlen(this_opt))) { fbdev->panel = &known_lcd_panels[i]; fbdev->panel_idx = i; break; } } if (i >= num_panels) { print_warn("Panel '%s' not supported!", this_opt); return -ENODEV; } } /* Unsupported option */ else print_warn("Unsupported option \"%s\"", this_opt); } print_info("Panel=%s", fbdev->panel->name); return 0; } static int au1100fb_drv_probe(struct platform_device *dev) { struct au1100fb_device *fbdev = NULL; struct resource *regs_res; unsigned long page; u32 sys_clksrc; /* Allocate new device private */ fbdev = devm_kzalloc(&dev->dev, sizeof(struct au1100fb_device), GFP_KERNEL); if (!fbdev) { print_err("fail to allocate device private 
record"); return -ENOMEM; } if (au1100fb_setup(fbdev)) goto failed; platform_set_drvdata(dev, (void *)fbdev); /* Allocate region for our registers and map them */ regs_res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!regs_res) { print_err("fail to retrieve registers resource"); return -EFAULT; } au1100fb_fix.mmio_start = regs_res->start; au1100fb_fix.mmio_len = resource_size(regs_res); if (!devm_request_mem_region(&dev->dev, au1100fb_fix.mmio_start, au1100fb_fix.mmio_len, DRIVER_NAME)) { print_err("fail to lock memory region at 0x%08lx", au1100fb_fix.mmio_start); return -EBUSY; } fbdev->regs = (struct au1100fb_regs*)KSEG1ADDR(au1100fb_fix.mmio_start); print_dbg("Register memory map at %p", fbdev->regs); print_dbg("phys=0x%08x, size=%d", fbdev->regs_phys, fbdev->regs_len); /* Allocate the framebuffer to the maximum screen size * nbr of video buffers */ fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres * (fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS; fbdev->fb_mem = dmam_alloc_coherent(&dev->dev, PAGE_ALIGN(fbdev->fb_len), &fbdev->fb_phys, GFP_KERNEL); if (!fbdev->fb_mem) { print_err("fail to allocate frambuffer (size: %dK))", fbdev->fb_len / 1024); return -ENOMEM; } au1100fb_fix.smem_start = fbdev->fb_phys; au1100fb_fix.smem_len = fbdev->fb_len; /* * Set page reserved so that mmap will work. This is necessary * since we'll be remapping normal memory. 
*/ for (page = (unsigned long)fbdev->fb_mem; page < PAGE_ALIGN((unsigned long)fbdev->fb_mem + fbdev->fb_len); page += PAGE_SIZE) { #ifdef CONFIG_DMA_NONCOHERENT SetPageReserved(virt_to_page(CAC_ADDR((void *)page))); #else SetPageReserved(virt_to_page(page)); #endif } print_dbg("Framebuffer memory map at %p", fbdev->fb_mem); print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024); /* Setup LCD clock to AUX (48 MHz) */ sys_clksrc = au_readl(SYS_CLKSRC) & ~(SYS_CS_ML_MASK | SYS_CS_DL | SYS_CS_CL); au_writel((sys_clksrc | (1 << SYS_CS_ML_BIT)), SYS_CLKSRC); /* load the panel info into the var struct */ au1100fb_var.bits_per_pixel = fbdev->panel->bpp; au1100fb_var.xres = fbdev->panel->xres; au1100fb_var.xres_virtual = au1100fb_var.xres; au1100fb_var.yres = fbdev->panel->yres; au1100fb_var.yres_virtual = au1100fb_var.yres; fbdev->info.screen_base = fbdev->fb_mem; fbdev->info.fbops = &au1100fb_ops; fbdev->info.fix = au1100fb_fix; fbdev->info.pseudo_palette = devm_kzalloc(&dev->dev, sizeof(u32) * 16, GFP_KERNEL); if (!fbdev->info.pseudo_palette) return -ENOMEM; if (fb_alloc_cmap(&fbdev->info.cmap, AU1100_LCD_NBR_PALETTE_ENTRIES, 0) < 0) { print_err("Fail to allocate colormap (%d entries)", AU1100_LCD_NBR_PALETTE_ENTRIES); return -EFAULT; } fbdev->info.var = au1100fb_var; /* Set h/w registers */ au1100fb_setmode(fbdev); /* Register new framebuffer */ if (register_framebuffer(&fbdev->info) < 0) { print_err("cannot register new framebuffer"); goto failed; } return 0; failed: if (fbdev->fb_mem) { dma_free_noncoherent(&dev->dev, fbdev->fb_len, fbdev->fb_mem, fbdev->fb_phys); } if (fbdev->info.cmap.len != 0) { fb_dealloc_cmap(&fbdev->info.cmap); } return -ENODEV; } int au1100fb_drv_remove(struct platform_device *dev) { struct au1100fb_device *fbdev = NULL; if (!dev) return -ENODEV; fbdev = platform_get_drvdata(dev); #if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO) au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info); #endif fbdev->regs->lcd_control &= 
~LCD_CONTROL_GO; /* Clean up all probe data */ unregister_framebuffer(&fbdev->info); fb_dealloc_cmap(&fbdev->info.cmap); return 0; } #ifdef CONFIG_PM static u32 sys_clksrc; static struct au1100fb_regs fbregs; int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state) { struct au1100fb_device *fbdev = platform_get_drvdata(dev); if (!fbdev) return 0; /* Save the clock source state */ sys_clksrc = au_readl(SYS_CLKSRC); /* Blank the LCD */ au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info); /* Stop LCD clocking */ au_writel(sys_clksrc & ~SYS_CS_ML_MASK, SYS_CLKSRC); memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs)); return 0; } int au1100fb_drv_resume(struct platform_device *dev) { struct au1100fb_device *fbdev = platform_get_drvdata(dev); if (!fbdev) return 0; memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs)); /* Restart LCD clocking */ au_writel(sys_clksrc, SYS_CLKSRC); /* Unblank the LCD */ au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info); return 0; } #else #define au1100fb_drv_suspend NULL #define au1100fb_drv_resume NULL #endif static struct platform_driver au1100fb_driver = { .driver = { .name = "au1100-lcd", .owner = THIS_MODULE, }, .probe = au1100fb_drv_probe, .remove = au1100fb_drv_remove, .suspend = au1100fb_drv_suspend, .resume = au1100fb_drv_resume, }; module_platform_driver(au1100fb_driver); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
vathpela/linux-esrt
drivers/power/reset/gpio-restart.c
429
4075
/* * Toggles a GPIO pin to restart a device * * Copyright (C) 2014 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Based on the gpio-poweroff driver. */ #include <linux/reboot.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/gpio/consumer.h> #include <linux/of_platform.h> #include <linux/module.h> struct gpio_restart { struct gpio_desc *reset_gpio; struct notifier_block restart_handler; u32 active_delay_ms; u32 inactive_delay_ms; u32 wait_delay_ms; }; static int gpio_restart_notify(struct notifier_block *this, unsigned long mode, void *cmd) { struct gpio_restart *gpio_restart = container_of(this, struct gpio_restart, restart_handler); /* drive it active, also inactive->active edge */ gpiod_direction_output(gpio_restart->reset_gpio, 1); mdelay(gpio_restart->active_delay_ms); /* drive inactive, also active->inactive edge */ gpiod_set_value(gpio_restart->reset_gpio, 0); mdelay(gpio_restart->inactive_delay_ms); /* drive it active, also inactive->active edge */ gpiod_set_value(gpio_restart->reset_gpio, 1); /* give it some time */ mdelay(gpio_restart->wait_delay_ms); WARN_ON(1); return NOTIFY_DONE; } static int gpio_restart_probe(struct platform_device *pdev) { struct gpio_restart *gpio_restart; bool open_source = false; u32 property; int ret; gpio_restart = devm_kzalloc(&pdev->dev, sizeof(*gpio_restart), GFP_KERNEL); if (!gpio_restart) return -ENOMEM; open_source = of_property_read_bool(pdev->dev.of_node, "open-source"); gpio_restart->reset_gpio = devm_gpiod_get(&pdev->dev, NULL, 
open_source ? GPIOD_IN : GPIOD_OUT_LOW); if (IS_ERR(gpio_restart->reset_gpio)) { dev_err(&pdev->dev, "Could net get reset GPIO\n"); return PTR_ERR(gpio_restart->reset_gpio); } gpio_restart->restart_handler.notifier_call = gpio_restart_notify; gpio_restart->restart_handler.priority = 128; gpio_restart->active_delay_ms = 100; gpio_restart->inactive_delay_ms = 100; gpio_restart->wait_delay_ms = 3000; ret = of_property_read_u32(pdev->dev.of_node, "priority", &property); if (!ret) { if (property > 255) dev_err(&pdev->dev, "Invalid priority property: %u\n", property); else gpio_restart->restart_handler.priority = property; } of_property_read_u32(pdev->dev.of_node, "active-delay", &gpio_restart->active_delay_ms); of_property_read_u32(pdev->dev.of_node, "inactive-delay", &gpio_restart->inactive_delay_ms); of_property_read_u32(pdev->dev.of_node, "wait-delay", &gpio_restart->wait_delay_ms); platform_set_drvdata(pdev, gpio_restart); ret = register_restart_handler(&gpio_restart->restart_handler); if (ret) { dev_err(&pdev->dev, "%s: cannot register restart handler, %d\n", __func__, ret); return -ENODEV; } return 0; } static int gpio_restart_remove(struct platform_device *pdev) { struct gpio_restart *gpio_restart = platform_get_drvdata(pdev); int ret; ret = unregister_restart_handler(&gpio_restart->restart_handler); if (ret) { dev_err(&pdev->dev, "%s: cannot unregister restart handler, %d\n", __func__, ret); return -ENODEV; } return 0; } static const struct of_device_id of_gpio_restart_match[] = { { .compatible = "gpio-restart", }, {}, }; static struct platform_driver gpio_restart_driver = { .probe = gpio_restart_probe, .remove = gpio_restart_remove, .driver = { .name = "restart-gpio", .of_match_table = of_gpio_restart_match, }, }; module_platform_driver(gpio_restart_driver); MODULE_AUTHOR("David Riley <davidriley@chromium.org>"); MODULE_DESCRIPTION("GPIO restart driver"); MODULE_LICENSE("GPL");
gpl-2.0
siskin/bluetooth-next
drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
685
14944
/****************************************************************************** * * Copyright(c) 2009-2010 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../core.h" #include "../pci.h" #include "reg.h" #include "def.h" #include "phy.h" #include "dm.h" #include "hw.h" #include "fw.h" #include "sw.h" #include "trx.h" #include "led.h" #include "table.h" #include "../btcoexist/rtl_btc.h" #include <linux/vmalloc.h> #include <linux/module.h> static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); /*close ASPM for AMD defaultly */ rtlpci->const_amdpci_aspm = 0; /** * ASPM PS mode. * 0 - Disable ASPM, * 1 - Enable ASPM without Clock Req, * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. * set defult to RTL8192CE:3 RTL8192E:2 */ rtlpci->const_pci_aspm = 3; /*Setting for PCI-E device */ rtlpci->const_devicepci_aspm_setting = 0x03; /*Setting for PCI-E bridge */ rtlpci->const_hostpci_aspm_setting = 0x02; /** * In Hw/Sw Radio Off situation. 
* 0 - Default, * 1 - From ASPM setting without low Mac Pwr, * 2 - From ASPM setting with low Mac Pwr, * 3 - Bus D3 * set default to RTL8192CE:0 RTL8192SE:2 */ rtlpci->const_hwsw_rfoff_d3 = 0; /** * This setting works for those device with * backdoor ASPM setting such as EPHY setting. * 0 - Not support ASPM, * 1 - Support ASPM, * 2 - According to chipset. */ rtlpci->const_support_pciaspm = 1; } /*InitializeVariables8812E*/ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) { int err = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); rtl8821ae_bt_reg_init(hw); rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25); mac->ht_enable = true; mac->ht_cur_stbc = 0; mac->ht_stbc_cap = 0; mac->vht_cur_ldpc = 0; mac->vht_ldpc_cap = 0; mac->vht_cur_stbc = 0; mac->vht_stbc_cap = 0; rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; /*following 2 is for register 5G band, refer to _rtl_init_mac80211()*/ rtlpriv->rtlhal.bandset = BAND_ON_BOTH; rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; rtlpci->receive_config = (RCR_APPFCS | RCR_APP_MIC | RCR_APP_ICV | RCR_APP_PHYST_RXFF | RCR_NONQOS_VHT | RCR_HTC_LOC_CTRL | RCR_AMF | RCR_ACF | /*This bit controls the PS-Poll packet filter.*/ RCR_ADF | RCR_AICV | RCR_ACRC32 | RCR_AB | RCR_AM | RCR_APM | 0); rtlpci->irq_mask[0] = (u32)(IMR_PSTIMEOUT | IMR_GTINT3 | IMR_HSISR_IND_ON_INT | IMR_C2HCMD | IMR_HIGHDOK | IMR_MGNTDOK | IMR_BKDOK | IMR_BEDOK | IMR_VIDOK | IMR_VODOK | IMR_RDU | IMR_ROK | 0); rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | IMR_TXFOVW | 0); rtlpci->sys_irq_mask = (u32)(HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN | 0); /* for WOWLAN */ 
rtlpriv->psc.wo_wlan_mode = WAKE_ON_MAGIC_PACKET | WAKE_ON_PATTERN_MATCH; /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 */ rtl8821ae_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 2) rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for fw.\n"); return 1; } rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.wowlan_firmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for wowlan fw.\n"); return 1; } if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin"; rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin"; } else { rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin"; rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin"; } rtlpriv->max_fw_size = 0x8000; /*load normal firmware*/ pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to request normal firmware!\n"); return 
1; } /*load wowlan firmware*/ pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name); err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->wowlan_fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_wowlan_fw_cb); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to request wowlan firmware!\n"); return 1; } return 0; } void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->rtlhal.pfirmware) { vfree(rtlpriv->rtlhal.pfirmware); rtlpriv->rtlhal.pfirmware = NULL; } #if (USE_SPECIFIC_FW_TO_SUPPORT_WOWLAN == 1) if (rtlpriv->rtlhal.wowlan_firmware) { vfree(rtlpriv->rtlhal.wowlan_firmware); rtlpriv->rtlhal.wowlan_firmware = NULL; } #endif } /* get bt coexist status */ bool rtl8821ae_get_btc_status(void) { return true; } static struct rtl_hal_ops rtl8821ae_hal_ops = { .init_sw_vars = rtl8821ae_init_sw_vars, .deinit_sw_vars = rtl8821ae_deinit_sw_vars, .read_eeprom_info = rtl8821ae_read_eeprom_info, .interrupt_recognized = rtl8821ae_interrupt_recognized, .hw_init = rtl8821ae_hw_init, .hw_disable = rtl8821ae_card_disable, .hw_suspend = rtl8821ae_suspend, .hw_resume = rtl8821ae_resume, .enable_interrupt = rtl8821ae_enable_interrupt, .disable_interrupt = rtl8821ae_disable_interrupt, .set_network_type = rtl8821ae_set_network_type, .set_chk_bssid = rtl8821ae_set_check_bssid, .set_qos = rtl8821ae_set_qos, .set_bcn_reg = rtl8821ae_set_beacon_related_registers, .set_bcn_intv = rtl8821ae_set_beacon_interval, .update_interrupt_mask = rtl8821ae_update_interrupt_mask, .get_hw_reg = rtl8821ae_get_hw_reg, .set_hw_reg = rtl8821ae_set_hw_reg, .update_rate_tbl = rtl8821ae_update_hal_rate_tbl, .fill_tx_desc = rtl8821ae_tx_fill_desc, .fill_tx_cmddesc = rtl8821ae_tx_fill_cmddesc, .query_rx_desc = rtl8821ae_rx_query_desc, .set_channel_access = rtl8821ae_update_channel_access_setting, .radio_onoff_checking = rtl8821ae_gpio_radio_on_off_checking, .set_bw_mode = rtl8821ae_phy_set_bw_mode, .switch_channel = rtl8821ae_phy_sw_chnl, 
.dm_watchdog = rtl8821ae_dm_watchdog, .scan_operation_backup = rtl8821ae_phy_scan_operation_backup, .set_rf_power_state = rtl8821ae_phy_set_rf_power_state, .led_control = rtl8821ae_led_control, .set_desc = rtl8821ae_set_desc, .get_desc = rtl8821ae_get_desc, .is_tx_desc_closed = rtl8821ae_is_tx_desc_closed, .tx_polling = rtl8821ae_tx_polling, .enable_hw_sec = rtl8821ae_enable_hw_security_config, .set_key = rtl8821ae_set_key, .init_sw_leds = rtl8821ae_init_sw_leds, .get_bbreg = rtl8821ae_phy_query_bb_reg, .set_bbreg = rtl8821ae_phy_set_bb_reg, .get_rfreg = rtl8821ae_phy_query_rf_reg, .set_rfreg = rtl8821ae_phy_set_rf_reg, .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd, .get_btc_status = rtl8821ae_get_btc_status, .rx_command_packet = rtl8821ae_rx_command_packet, .add_wowlan_pattern = rtl8821ae_add_wowlan_pattern, }; static struct rtl_mod_params rtl8821ae_mod_params = { .sw_crypto = false, .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, .msi_support = true, .debug = DBG_EMERG, .disable_watchdog = 0, }; static struct rtl_hal_cfg rtl8821ae_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8821ae_pci", .fw_name = "rtlwifi/rtl8821aefw.bin", .ops = &rtl8821ae_hal_ops, .mod_params = &rtl8821ae_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, .maps[SYS_CLK] = REG_SYS_CLKR, .maps[MAC_RCR_AM] = AM, .maps[MAC_RCR_AB] = AB, .maps[MAC_RCR_ACRC32] = ACRC32, .maps[MAC_RCR_ACF] = ACF, .maps[MAC_RCR_AAP] = AAP, .maps[MAC_HIMR] = REG_HIMR, .maps[MAC_HIMRE] = REG_HIMRE, .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS, .maps[EFUSE_TEST] = REG_EFUSE_TEST, .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, .maps[EFUSE_CLK] = 0, .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL, .maps[EFUSE_PWC_EV12V] = PWC_EV12V, .maps[EFUSE_FEN_ELDR] = FEN_ELDR, .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN, .maps[EFUSE_ANA8M] = ANA8M, .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, 
.maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES, .maps[RWCAM] = REG_CAMCMD, .maps[WCAMI] = REG_CAMWRITE, .maps[RCAMO] = REG_CAMREAD, .maps[CAMDBG] = REG_CAMDBG, .maps[SECR] = REG_SECCFG, .maps[SEC_CAM_NONE] = CAM_NONE, .maps[SEC_CAM_WEP40] = CAM_WEP40, .maps[SEC_CAM_TKIP] = CAM_TKIP, .maps[SEC_CAM_AES] = CAM_AES, .maps[SEC_CAM_WEP104] = CAM_WEP104, .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6, .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5, .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4, .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, /* .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, */ /*need check*/ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4, .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, /* .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/ /* .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, .maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0, .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, .maps[RTL_IMR_RDU] = IMR_RDU, .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, .maps[RTL_IMR_BDOK] = IMR_BCNDOK0, .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK, .maps[RTL_IMR_TBDER] = IMR_TBDER, .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK, .maps[RTL_IMR_TBDOK] = IMR_TBDOK, .maps[RTL_IMR_BKDOK] = IMR_BKDOK, .maps[RTL_IMR_BEDOK] = IMR_BEDOK, .maps[RTL_IMR_VIDOK] = IMR_VIDOK, .maps[RTL_IMR_VODOK] = IMR_VODOK, .maps[RTL_IMR_ROK] = IMR_ROK, .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER), .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M, .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M, .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M, .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M, .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M, .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M, .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M, .maps[RTL_RC_OFDM_RATE18M] = 
DESC_RATE18M, .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M, .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M, .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M, .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M, .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7, .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15, /*VHT hightest rate*/ .maps[RTL_RC_VHT_RATE_1SS_MCS7] = DESC_RATEVHT1SS_MCS7, .maps[RTL_RC_VHT_RATE_1SS_MCS8] = DESC_RATEVHT1SS_MCS8, .maps[RTL_RC_VHT_RATE_1SS_MCS9] = DESC_RATEVHT1SS_MCS9, .maps[RTL_RC_VHT_RATE_2SS_MCS7] = DESC_RATEVHT2SS_MCS7, .maps[RTL_RC_VHT_RATE_2SS_MCS8] = DESC_RATEVHT2SS_MCS8, .maps[RTL_RC_VHT_RATE_2SS_MCS9] = DESC_RATEVHT2SS_MCS9, }; static struct pci_device_id rtl8821ae_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)}, {}, }; MODULE_DEVICE_TABLE(pci, rtl8821ae_pci_ids); MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin"); module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444); module_param_named(debug, rtl8821ae_mod_params.debug, int, 0444); module_param_named(ips, rtl8821ae_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8821ae_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444); module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) 
(default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); static struct pci_driver rtl8821ae_driver = { .name = KBUILD_MODNAME, .id_table = rtl8821ae_pci_ids, .probe = rtl_pci_probe, .remove = rtl_pci_disconnect, .driver.pm = &rtlwifi_pm_ops, }; module_pci_driver(rtl8821ae_driver);
gpl-2.0
rjwysocki/linux-pm
drivers/power/reset/keystone-reset.c
941
4349
/*
 * TI keystone reboot driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated. http://www.ti.com/
 *
 * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>

/* Register offsets inside the reset-control block of the PLL controller */
#define RSTYPE_RG	0x0
#define RSCTRL_RG	0x4
#define RSCFG_RG	0x8
#define RSISO_RG	0xc

#define RSCTRL_KEY_MASK		0x0000ffff
#define RSCTRL_RESET_MASK	BIT(16)
#define RSCTRL_KEY		0x5a69

#define RSMUX_OMODE_MASK	0xe
#define RSMUX_OMODE_RESET_ON	0xa
#define RSMUX_OMODE_RESET_OFF	0x0
#define RSMUX_LOCK_MASK		0x1
#define RSMUX_LOCK_SET		0x1

#define RSCFG_RSTYPE_SOFT	0x300f
#define RSCFG_RSTYPE_HARD	0x0

#define WDT_MUX_NUMBER		0x4

/*
 * Offset of the reset PLL registers inside the "ti,syscon-pll" regmap.
 * Declared u32 (not int) because its address is handed to
 * of_property_read_u32_index(), which takes a u32 *.
 */
static u32 rspll_offset;
static struct regmap *pllctrl_regs;

/**
 * rsctrl_enable_rspll_write - enable access to RSCTRL, RSCFG
 *
 * To be able to write the RSCTRL and RSCFG registers the magic key
 * has to be written to the RSCTRL key field first.
 */
static inline int rsctrl_enable_rspll_write(void)
{
	return regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG,
				  RSCTRL_KEY_MASK, RSCTRL_KEY);
}

/* Restart-notifier callback: unlock RSCTRL, then assert the SoC reset bit. */
static int rsctrl_restart_handler(struct notifier_block *this,
				  unsigned long mode, void *cmd)
{
	/* enable write access to RSTCTRL */
	rsctrl_enable_rspll_write();

	/* reset the SOC */
	regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG,
			   RSCTRL_RESET_MASK, 0);

	return NOTIFY_DONE;
}

static struct notifier_block rsctrl_restart_nb = {
	.notifier_call = rsctrl_restart_handler,
	.priority = 128,
};

static const struct of_device_id rsctrl_of_match[] = {
	{.compatible = "ti,keystone-reset", },
	{},
};

static int rsctrl_probe(struct platform_device *pdev)
{
	int i;
	int ret;
	u32 val;
	unsigned int rg;
	u32 rsmux_offset;
	struct regmap *devctrl_regs;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	if (!np)
		return -ENODEV;

	/* get regmaps */
	pllctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pll");
	if (IS_ERR(pllctrl_regs))
		return PTR_ERR(pllctrl_regs);

	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
	if (IS_ERR(devctrl_regs))
		return PTR_ERR(devctrl_regs);

	/* second phandle cell carries the register offset inside the syscon */
	ret = of_property_read_u32_index(np, "ti,syscon-pll", 1, &rspll_offset);
	if (ret) {
		dev_err(dev, "couldn't read the reset pll offset!\n");
		return -EINVAL;
	}

	ret = of_property_read_u32_index(np, "ti,syscon-dev", 1, &rsmux_offset);
	if (ret) {
		dev_err(dev, "couldn't read the rsmux offset!\n");
		return -EINVAL;
	}

	/* set soft/hard reset */
	val = of_property_read_bool(np, "ti,soft-reset");
	val = val ? RSCFG_RSTYPE_SOFT : RSCFG_RSTYPE_HARD;

	ret = rsctrl_enable_rspll_write();
	if (ret)
		return ret;

	ret = regmap_write(pllctrl_regs, rspll_offset + RSCFG_RG, val);
	if (ret)
		return ret;

	/* disable a reset isolation for all module clocks */
	ret = regmap_write(pllctrl_regs, rspll_offset + RSISO_RG, 0);
	if (ret)
		return ret;

	/* enable a reset for watchdogs from wdt-list */
	for (i = 0; i < WDT_MUX_NUMBER; i++) {
		ret = of_property_read_u32_index(np, "ti,wdt-list", i, &val);
		if (ret == -EOVERFLOW && !i) {
			/*
			 * Fixed: the split string literals previously lacked a
			 * separating space ("...contain atleast one entry").
			 */
			dev_err(dev, "ti,wdt-list property has to contain at least one entry\n");
			return -EINVAL;
		} else if (ret) {
			break;
		}

		if (val >= WDT_MUX_NUMBER) {
			/* Fixed: same missing-space defect as above. */
			dev_err(dev, "ti,wdt-list property can contain only numbers < 4\n");
			return -EINVAL;
		}

		rg = rsmux_offset + val * 4;

		ret = regmap_update_bits(devctrl_regs, rg, RSMUX_OMODE_MASK,
					 RSMUX_OMODE_RESET_ON |
					 RSMUX_LOCK_SET);
		if (ret)
			return ret;
	}

	ret = register_restart_handler(&rsctrl_restart_nb);
	if (ret)
		dev_err(dev, "cannot register restart handler (err=%d)\n",
			ret);

	return ret;
}

static struct platform_driver rsctrl_driver = {
	.probe = rsctrl_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = rsctrl_of_match,
	},
};
module_platform_driver(rsctrl_driver);

MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>");
MODULE_DESCRIPTION("Texas Instruments keystone reset driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
gpl-2.0
CyanogenMod/android_kernel_samsung_klte
fs/nfs/proc.c
941
18950
/*
 * linux/fs/nfs/proc.c
 *
 * Copyright (C) 1992, 1993, 1994  Rick Sladkey
 *
 * OS-independent nfs remote procedure call functions
 *
 * Tuned by Alan Cox <A.Cox@swansea.ac.uk> for >3K buffers
 * so at last we can have decent(ish) throughput off a
 * Sun server.
 *
 * Coding optimized and cleaned up by Florian La Roche.
 * Note: Error returns are optimized for NFS_OK, which isn't translated via
 * nfs_stat_to_errno(), but happens to be already the right return code.
 *
 * Also, the code currently doesn't check the size of the packet, when
 * it decodes the packet.
 *
 * Feel free to fix it and mail me the diffs if it worries you.
 *
 * Completely rewritten to support the new RPC call interface;
 * rewrote and moved the entire XDR stuff to xdr.c
 * --Olaf Kirch June 1996
 *
 * The code below initializes all auto variables explicitly, otherwise
 * it will fail to work as a module (gcc generates a memset call for an
 * incomplete struct).
 */

#include <linux/types.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/lockd/bind.h>
#include <linux/freezer.h>
#include "internal.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

/*
 * Bare-bones access to getattr: this is for nfs_read_super.
 * Issues GETATTR on the root filehandle, then STATFS to fill in the
 * fsinfo limits; both calls fall back to the client's default auth
 * flavor if the first attempt fails with a different rpc client.
 */
static int
nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
		  struct nfs_fsinfo *info)
{
	struct nfs_fattr *fattr = info->fattr;
	struct nfs2_fsstat fsinfo;
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_GETATTR],
		.rpc_argp	= fhandle,
		.rpc_resp	= fattr,
	};
	int status;

	dprintk("%s: call getattr\n", __func__);
	nfs_fattr_init(fattr);
	status = rpc_call_sync(server->client, &msg, 0);
	/* Retry with default authentication if different */
	if (status && server->nfs_client->cl_rpcclient != server->client)
		status = rpc_call_sync(server->nfs_client->cl_rpcclient, &msg, 0);
	dprintk("%s: reply getattr: %d\n", __func__, status);
	if (status)
		return status;
	dprintk("%s: call statfs\n", __func__);
	msg.rpc_proc = &nfs_procedures[NFSPROC_STATFS];
	msg.rpc_resp = &fsinfo;
	status = rpc_call_sync(server->client, &msg, 0);
	/* Retry with default authentication if different */
	if (status && server->nfs_client->cl_rpcclient != server->client)
		status = rpc_call_sync(server->nfs_client->cl_rpcclient, &msg, 0);
	dprintk("%s: reply statfs: %d\n", __func__, status);
	if (status)
		return status;
	/* NFSv2 has no FSINFO procedure; derive limits from STATFS */
	info->rtmax  = NFS_MAXDATA;
	info->rtpref = fsinfo.tsize;
	info->rtmult = fsinfo.bsize;
	info->wtmax  = NFS_MAXDATA;
	info->wtpref = fsinfo.tsize;
	info->wtmult = fsinfo.bsize;
	info->dtpref = fsinfo.tsize;
	info->maxfilesize = 0x7FFFFFFF;
	info->lease_time = 0;
	return 0;
}

/*
 * One function for each procedure in the NFS protocol.
 */

/* Synchronous GETATTR on @fhandle; result lands in @fattr. */
static int
nfs_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fattr *fattr)
{
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_GETATTR],
		.rpc_argp	= fhandle,
		.rpc_resp	= fattr,
	};
	int	status;

	dprintk("NFS call  getattr\n");
	nfs_fattr_init(fattr);
	status = rpc_call_sync(server->client, &msg, 0);
	dprintk("NFS reply getattr: %d\n", status);
	return status;
}

/* Synchronous SETATTR; on success updates the local inode from @sattr. */
static int
nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
		 struct iattr *sattr)
{
	struct inode *inode = dentry->d_inode;
	struct nfs_sattrargs	arg = {
		.fh	= NFS_FH(inode),
		.sattr	= sattr
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_SETATTR],
		.rpc_argp	= &arg,
		.rpc_resp	= fattr,
	};
	int	status;

	/* Mask out the non-modebit related stuff from attr->ia_mode */
	sattr->ia_mode &= S_IALLUGO;

	dprintk("NFS call  setattr\n");
	if (sattr->ia_valid & ATTR_FILE)
		msg.rpc_cred = nfs_file_cred(sattr->ia_file);
	nfs_fattr_init(fattr);
	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
	if (status == 0)
		nfs_setattr_update_inode(inode, sattr);
	dprintk("NFS reply setattr: %d\n", status);
	return status;
}

/* Synchronous LOOKUP of @name in @dir; fills @fhandle and @fattr. */
static int
nfs_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs_diropargs	arg = {
		.fh		= NFS_FH(dir),
		.name		= name->name,
		.len		= name->len
	};
	struct nfs_diropok	res = {
		.fh		= fhandle,
		.fattr		= fattr
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_LOOKUP],
		.rpc_argp	= &arg,
		.rpc_resp	= &res,
	};
	int			status;

	dprintk("NFS call  lookup %s\n", name->name);
	nfs_fattr_init(fattr);
	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
	dprintk("NFS reply lookup: %d\n", status);
	return status;
}

/* Synchronous READLINK; the target is written into @page at @pgbase. */
static int nfs_proc_readlink(struct inode *inode, struct page *page,
		unsigned int pgbase, unsigned int pglen)
{
	struct nfs_readlinkargs	args = {
		.fh		= NFS_FH(inode),
		.pgbase		= pgbase,
		.pglen		= pglen,
		.pages		= &page
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_READLINK],
		.rpc_argp	= &args,
	};
	int			status;

	dprintk("NFS call  readlink\n");
	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
	dprintk("NFS reply readlink: %d\n", status);
	return status;
}

/* Bundles CREATE-style arg/result buffers into a single allocation. */
struct nfs_createdata {
	struct nfs_createargs arg;
	struct nfs_diropok res;
	struct nfs_fh fhandle;
	struct nfs_fattr fattr;
};

/*
 * Allocate and prime a createdata bundle for a CREATE/MKDIR call on
 * @dentry in @dir.  Returns NULL on allocation failure; caller frees
 * with nfs_free_createdata().
 */
static struct nfs_createdata *nfs_alloc_createdata(struct inode *dir,
		struct dentry *dentry, struct iattr *sattr)
{
	struct nfs_createdata *data;

	data = kmalloc(sizeof(*data), GFP_KERNEL);

	if (data != NULL) {
		data->arg.fh = NFS_FH(dir);
		data->arg.name = dentry->d_name.name;
		data->arg.len = dentry->d_name.len;
		data->arg.sattr = sattr;
		nfs_fattr_init(&data->fattr);
		data->fhandle.size = 0;
		data->res.fh = &data->fhandle;
		data->res.fattr = &data->fattr;
	}
	return data;
};

static void nfs_free_createdata(const struct nfs_createdata *data)
{
	kfree(data);
}

/* Synchronous CREATE; instantiates @dentry from the returned fh/fattr. */
static int
nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
		int flags, struct nfs_open_context *ctx)
{
	struct nfs_createdata *data;
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_CREATE],
	};
	int status = -ENOMEM;

	dprintk("NFS call  create %s\n", dentry->d_name.name);
	data = nfs_alloc_createdata(dir, dentry, sattr);
	if (data == NULL)
		goto out;
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
	nfs_mark_for_revalidate(dir);
	if (status == 0)
		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
	nfs_free_createdata(data);
out:
	dprintk("NFS reply create: %d\n", status);
	return status;
}

/*
 * In NFSv2, mknod is grafted onto the create call.
 */
static int
nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
	       dev_t rdev)
{
	struct nfs_createdata *data;
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_CREATE],
	};
	umode_t mode;
	int status = -ENOMEM;

	dprintk("NFS call  mknod %s\n", dentry->d_name.name);

	/*
	 * NFSv2 has no MKNOD: FIFOs are faked as char devices, and the
	 * device number is smuggled through the size attribute.
	 */
	mode = sattr->ia_mode;
	if (S_ISFIFO(mode)) {
		sattr->ia_mode = (mode & ~S_IFMT) | S_IFCHR;
		sattr->ia_valid &= ~ATTR_SIZE;
	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
		sattr->ia_valid |= ATTR_SIZE;
		sattr->ia_size = new_encode_dev(rdev);/* get out your barf bag */
	}

	data = nfs_alloc_createdata(dir, dentry, sattr);
	if (data == NULL)
		goto out;
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;

	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
	nfs_mark_for_revalidate(dir);

	/* Retry the FIFO with its real mode if the server rejected S_IFCHR */
	if (status == -EINVAL && S_ISFIFO(mode)) {
		sattr->ia_mode = mode;
		nfs_fattr_init(data->res.fattr);
		status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
	}

	if (status == 0)
		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
	nfs_free_createdata(data);
out:
	dprintk("NFS reply mknod: %d\n", status);
	return status;
}

/* Synchronous REMOVE (unlink) of @name from @dir. */
static int
nfs_proc_remove(struct inode *dir, struct qstr *name)
{
	struct nfs_removeargs arg = {
		.fh = NFS_FH(dir),
		.name.len = name->len,
		.name.name = name->name,
	};
	struct rpc_message msg = { 
		.rpc_proc = &nfs_procedures[NFSPROC_REMOVE],
		.rpc_argp = &arg,
	};
	int			status;

	dprintk("NFS call  remove %s\n", name->name);
	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
	nfs_mark_for_revalidate(dir);

	dprintk("NFS reply remove: %d\n", status);
	return status;
}

/* Async-unlink hooks: point the rpc message at REMOVE. */
static void
nfs_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
	msg->rpc_proc = &nfs_procedures[NFSPROC_REMOVE];
}

static void nfs_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
{
	rpc_call_start(task);
}

static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
	nfs_mark_for_revalidate(dir);
	return 1;
}

/* Async-rename hooks: point the rpc message at RENAME. */
static void
nfs_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
{
	msg->rpc_proc = &nfs_procedures[NFSPROC_RENAME];
}

static void nfs_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
{
	rpc_call_start(task);
}

static int
nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
		     struct inode *new_dir)
{
	nfs_mark_for_revalidate(old_dir);
	nfs_mark_for_revalidate(new_dir);
	return 1;
}

/* Synchronous RENAME; revalidates both directories afterwards. */
static int
nfs_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs_renameargs	arg = {
		.old_dir	= NFS_FH(old_dir),
		.old_name	= old_name,
		.new_dir	= NFS_FH(new_dir),
		.new_name	= new_name,
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_RENAME],
		.rpc_argp	= &arg,
	};
	int			status;

	dprintk("NFS call  rename %s -> %s\n", old_name->name, new_name->name);
	status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
	nfs_mark_for_revalidate(old_dir);
	nfs_mark_for_revalidate(new_dir);
	dprintk("NFS reply rename: %d\n", status);
	return status;
}

/* Synchronous LINK of @inode as @name in @dir. */
static int
nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs_linkargs	arg = {
		.fromfh		= NFS_FH(inode),
		.tofh		= NFS_FH(dir),
		.toname		= name->name,
		.tolen		= name->len
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_LINK],
		.rpc_argp	= &arg,
	};
	int			status;

	dprintk("NFS call  link %s\n", name->name);
	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
	nfs_mark_for_revalidate(inode);
	nfs_mark_for_revalidate(dir);
	dprintk("NFS reply link: %d\n", status);
	return status;
}

/* Synchronous SYMLINK; target path is carried in @page (max NFS2_MAXPATHLEN). */
static int
nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
		 unsigned int len, struct iattr *sattr)
{
	struct nfs_fh *fh;
	struct nfs_fattr *fattr;
	struct nfs_symlinkargs	arg = {
		.fromfh		= NFS_FH(dir),
		.fromname	= dentry->d_name.name,
		.fromlen	= dentry->d_name.len,
		.pages		= &page,
		.pathlen	= len,
		.sattr		= sattr
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_SYMLINK],
		.rpc_argp	= &arg,
	};
	int status = -ENAMETOOLONG;

	dprintk("NFS call  symlink %s\n", dentry->d_name.name);

	if (len > NFS2_MAXPATHLEN)
		goto out;

	fh = nfs_alloc_fhandle();
	fattr = nfs_alloc_fattr();
	status = -ENOMEM;
	if (fh == NULL || fattr == NULL)
		goto out_free;

	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
	nfs_mark_for_revalidate(dir);

	/*
	 * V2 SYMLINK requests don't return any attributes.  Setting the
	 * filehandle size to zero indicates to nfs_instantiate that it
	 * should fill in the data with a LOOKUP call on the wire.
	 */
	if (status == 0)
		status = nfs_instantiate(dentry, fh, fattr);

out_free:
	nfs_free_fattr(fattr);
	nfs_free_fhandle(fh);
out:
	dprintk("NFS reply symlink: %d\n", status);
	return status;
}

/* Synchronous MKDIR; instantiates @dentry from the returned fh/fattr. */
static int
nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
	struct nfs_createdata *data;
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_MKDIR],
	};
	int status = -ENOMEM;

	dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
	data = nfs_alloc_createdata(dir, dentry, sattr);
	if (data == NULL)
		goto out;
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;

	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
	nfs_mark_for_revalidate(dir);
	if (status == 0)
		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
	nfs_free_createdata(data);
out:
	dprintk("NFS reply mkdir: %d\n", status);
	return status;
}

/* Synchronous RMDIR of @name in @dir. */
static int
nfs_proc_rmdir(struct inode *dir, struct qstr *name)
{
	struct nfs_diropargs	arg = {
		.fh		= NFS_FH(dir),
		.name		= name->name,
		.len		= name->len
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_RMDIR],
		.rpc_argp	= &arg,
	};
	int			status;

	dprintk("NFS call  rmdir %s\n", name->name);
	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
	nfs_mark_for_revalidate(dir);
	dprintk("NFS reply rmdir: %d\n", status);
	return status;
}

/*
 * The READDIR implementation is somewhat hackish - we pass a temporary
 * buffer to the encode function, which installs it in the receive iovec.
 * The decode function just parses the reply to make sure it is
 * syntactically correct; the entries themselves are decoded from
 * nfs_readdir by calling the decode_entry function directly.
 */
static int
nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
		 u64 cookie, struct page **pages, unsigned int count, int plus)
{
	struct inode		*dir = dentry->d_inode;
	struct nfs_readdirargs	arg = {
		.fh		= NFS_FH(dir),
		.cookie		= cookie,
		.count		= count,
		.pages		= pages,
	};
	struct rpc_message	msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_READDIR],
		.rpc_argp	= &arg,
		.rpc_cred	= cred,
	};
	int			status;

	dprintk("NFS call  readdir %d\n", (unsigned int)cookie);
	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);

	nfs_invalidate_atime(dir);

	dprintk("NFS reply readdir: %d\n", status);
	return status;
}

/* Synchronous STATFS; converts block counts to byte totals. */
static int
nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
			struct nfs_fsstat *stat)
{
	struct nfs2_fsstat fsinfo;
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_STATFS],
		.rpc_argp	= fhandle,
		.rpc_resp	= &fsinfo,
	};
	int	status;

	dprintk("NFS call  statfs\n");
	nfs_fattr_init(stat->fattr);
	status = rpc_call_sync(server->client, &msg, 0);
	dprintk("NFS reply statfs: %d\n", status);
	if (status)
		goto out;
	stat->tbytes = (u64)fsinfo.blocks * fsinfo.bsize;
	stat->fbytes = (u64)fsinfo.bfree  * fsinfo.bsize;
	stat->abytes = (u64)fsinfo.bavail * fsinfo.bsize;
	/* NFSv2 STATFS has no inode counts */
	stat->tfiles = 0;
	stat->ffiles = 0;
	stat->afiles = 0;
out:
	return status;
}

/* FSINFO emulated via STATFS (NFSv2 has no FSINFO procedure). */
static int
nfs_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
			struct nfs_fsinfo *info)
{
	struct nfs2_fsstat fsinfo;
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_STATFS],
		.rpc_argp	= fhandle,
		.rpc_resp	= &fsinfo,
	};
	int	status;

	dprintk("NFS call  fsinfo\n");
	nfs_fattr_init(info->fattr);
	status = rpc_call_sync(server->client, &msg, 0);
	dprintk("NFS reply fsinfo: %d\n", status);
	if (status)
		goto out;
	info->rtmax  = NFS_MAXDATA;
	info->rtpref = fsinfo.tsize;
	info->rtmult = fsinfo.bsize;
	info->wtmax  = NFS_MAXDATA;
	info->wtpref = fsinfo.tsize;
	info->wtmult = fsinfo.bsize;
	info->dtpref = fsinfo.tsize;
	info->maxfilesize = 0x7FFFFFFF;
	info->lease_time = 0;
out:
	return status;
}

/* PATHCONF is purely local for v2: fixed name-length limit, no link max. */
static int
nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		  struct nfs_pathconf *info)
{
	info->max_link = 0;
	info->max_namelen = NFS2_MAXNAMLEN;
	return 0;
}

/* READ completion: refresh inode attrs and synthesize the eof flag. */
static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
	nfs_invalidate_atime(data->inode);
	if (task->tk_status >= 0) {
		nfs_refresh_inode(data->inode, data->res.fattr);
		/* Emulate the eof flag, which isn't normally needed in NFSv2
		 * as it is guaranteed to always return the file attributes
		 */
		if (data->args.offset + data->args.count >= data->res.fattr->size)
			data->res.eof = 1;
	}
	return 0;
}

static void nfs_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
	msg->rpc_proc = &nfs_procedures[NFSPROC_READ];
}

static void nfs_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
{
	rpc_call_start(task);
}

/* WRITE completion: force weak-cache-consistency attribute update. */
static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (task->tk_status >= 0)
		nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
	return 0;
}

static void nfs_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	/* Note: NFSv2 ignores @stable and always uses NFS_FILE_SYNC */
	data->args.stable = NFS_FILE_SYNC;
	msg->rpc_proc = &nfs_procedures[NFSPROC_WRITE];
}

static void nfs_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
{
	rpc_call_start(task);
}

/* NFSv2 writes are always synchronous, so COMMIT must never be issued. */
static void
nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	BUG();
}

/* File locking is delegated to the NLM (lockd) client. */
static int
nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_path.dentry->d_inode;

	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
}

/* Helper functions for NFS lock bounds checking */
#define NFS_LOCK32_OFFSET_MAX ((__s32)0x7fffffffUL)
/*
 * Reject lock ranges that don't fit in the signed 32-bit offsets the
 * v2/NLM protocol can express; OFFSET_MAX is mapped to the 32-bit max.
 */
static int nfs_lock_check_bounds(const struct file_lock *fl)
{
	__s32 start, end;

	start = (__s32)fl->fl_start;
	if ((loff_t)start != fl->fl_start)
		goto out_einval;

	if (fl->fl_end != OFFSET_MAX) {
		end = (__s32)fl->fl_end;
		if ((loff_t)end != fl->fl_end)
			goto out_einval;
	} else
		end = NFS_LOCK32_OFFSET_MAX;

	if (start < 0 || start > end)
		goto out_einval;
	return 0;
out_einval:
	return -EINVAL;
}

/* NFSv2 client operations vector, wired into the generic NFS client. */
const struct nfs_rpc_ops nfs_v2_clientops = {
	.version	= 2,		       /* protocol version */
	.dentry_ops	= &nfs_dentry_operations,
	.dir_inode_ops	= &nfs_dir_inode_operations,
	.file_inode_ops	= &nfs_file_inode_operations,
	.file_ops	= &nfs_file_operations,
	.getroot	= nfs_proc_get_root,
	.getattr	= nfs_proc_getattr,
	.setattr	= nfs_proc_setattr,
	.lookup		= nfs_proc_lookup,
	.access		= NULL,		       /* access */
	.readlink	= nfs_proc_readlink,
	.create		= nfs_proc_create,
	.remove		= nfs_proc_remove,
	.unlink_setup	= nfs_proc_unlink_setup,
	.unlink_rpc_prepare = nfs_proc_unlink_rpc_prepare,
	.unlink_done	= nfs_proc_unlink_done,
	.rename		= nfs_proc_rename,
	.rename_setup	= nfs_proc_rename_setup,
	.rename_rpc_prepare = nfs_proc_rename_rpc_prepare,
	.rename_done	= nfs_proc_rename_done,
	.link		= nfs_proc_link,
	.symlink	= nfs_proc_symlink,
	.mkdir		= nfs_proc_mkdir,
	.rmdir		= nfs_proc_rmdir,
	.readdir	= nfs_proc_readdir,
	.mknod		= nfs_proc_mknod,
	.statfs		= nfs_proc_statfs,
	.fsinfo		= nfs_proc_fsinfo,
	.pathconf	= nfs_proc_pathconf,
	.decode_dirent	= nfs2_decode_dirent,
	.read_setup	= nfs_proc_read_setup,
	.read_rpc_prepare = nfs_proc_read_rpc_prepare,
	.read_done	= nfs_read_done,
	.write_setup	= nfs_proc_write_setup,
	.write_rpc_prepare = nfs_proc_write_rpc_prepare,
	.write_done	= nfs_write_done,
	.commit_setup	= nfs_proc_commit_setup,
	.lock		= nfs_proc_lock,
	.lock_check_bounds = nfs_lock_check_bounds,
	.close_context	= nfs_close_context,
	.init_client	= nfs_init_client,
};
gpl-2.0
chenyu105/linux
drivers/scsi/fnic/fnic_isr.c
1709
8994
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic.h"

/*
 * Legacy INTx handler: reads the PBA to find which of the three logical
 * interrupt sources fired (notify, error, or combined WQ/RQ/copy-WQ
 * completions) and services each one.
 */
static irqreturn_t fnic_isr_legacy(int irq, void *data)
{
	struct fnic *fnic = data;
	u32 pba;
	unsigned long work_done = 0;

	pba = vnic_intr_legacy_pba(fnic->legacy_pba);
	if (!pba)
		return IRQ_NONE;	/* shared line; not our interrupt */

	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);

	if (pba & (1 << FNIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
		fnic_handle_link_event(fnic);
	}

	if (pba & (1 << FNIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
		fnic_log_q_error(fnic);
	}

	if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
		work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
		work_done += fnic_wq_cmpl_handler(fnic, -1);
		work_done += fnic_rq_cmpl_handler(fnic, -1);

		vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
					 work_done,
					 1 /* unmask intr */,
					 1 /* reset intr timer */);
	}

	return IRQ_HANDLED;
}

/* MSI handler: single vector, so drain all three completion queues. */
static irqreturn_t fnic_isr_msi(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long work_done = 0;

	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);

	work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
	work_done += fnic_wq_cmpl_handler(fnic, -1);
	work_done += fnic_rq_cmpl_handler(fnic, -1);

	vnic_intr_return_credits(&fnic->intr[0],
				 work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);

	return IRQ_HANDLED;
}

/* MSI-X handler for the receive-queue (FCS frames) vector. */
static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long rq_work_done = 0;

	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);

	rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
				 rq_work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);

	return IRQ_HANDLED;
}

/* MSI-X handler for the raw work-queue (FCS frame send) vector. */
static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long wq_work_done = 0;

	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);

	wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
				 wq_work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);
	return IRQ_HANDLED;
}

/* MSI-X handler for the SCSI copy work-queue vector. */
static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long wq_copy_work_done = 0;

	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);

	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
				 wq_copy_work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);
	return IRQ_HANDLED;
}

/* MSI-X handler for queue errors and link/notify events. */
static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
{
	struct fnic *fnic = data;

	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);

	vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
	fnic_log_q_error(fnic);
	fnic_handle_link_event(fnic);

	return IRQ_HANDLED;
}

/*
 * Release the IRQ(s) acquired by fnic_request_intr() according to the
 * interrupt mode currently set on the vnic device.
 */
void fnic_free_intr(struct fnic *fnic)
{
	int i;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(fnic->pdev->irq, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:
		/* only free vectors that were successfully requested */
		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
			if (fnic->msix[i].requested)
				free_irq(fnic->msix_entry[i].vector,
					 fnic->msix[i].devid);
		break;

	default:
		break;
	}
}

/*
 * Register the interrupt handler(s) matching the interrupt mode chosen
 * by fnic_set_intr_mode().  On MSI-X failure, already-requested vectors
 * are freed before returning the error.
 */
int fnic_request_intr(struct fnic *fnic)
{
	int err = 0;
	int i;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:
		err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
				  IRQF_SHARED, DRV_NAME, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSI:
		err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
				  0, fnic->name, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
			"%.11s-fcs-rq", fnic->name);
		fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
		fnic->msix[FNIC_MSIX_RQ].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
			"%.11s-fcs-wq", fnic->name);
		fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
		fnic->msix[FNIC_MSIX_WQ].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
			"%.11s-scsi-wq", fnic->name);
		fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
		fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
			"%.11s-err-notify", fnic->name);
		fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
			fnic_isr_msix_err_notify;
		fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;

		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
			err = request_irq(fnic->msix_entry[i].vector,
					  fnic->msix[i].isr, 0,
					  fnic->msix[i].devname,
					  fnic->msix[i].devid);
			if (err) {
				shost_printk(KERN_ERR, fnic->lport->host,
					     "MSIX: request_irq"
					     " failed %d\n", err);
				fnic_free_intr(fnic);
				break;
			}
			fnic->msix[i].requested = 1;
		}
		break;

	default:
		break;
	}

	return err;
}

/*
 * Pick the best available interrupt mode (MSI-X > MSI > INTx) given the
 * resources the adapter exposes, and record the resulting queue/intr
 * counts in the fnic.  Returns -EINVAL if no mode fits.
 */
int fnic_set_intr_mode(struct fnic *fnic)
{
	unsigned int n = ARRAY_SIZE(fnic->rq);
	unsigned int m = ARRAY_SIZE(fnic->wq);
	unsigned int o = ARRAY_SIZE(fnic->wq_copy);
	unsigned int i;

	/*
	 * Set interrupt mode (INTx, MSI, MSI-X) depending
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
	 * (last INTR is used for WQ/RQ errors and notification area)
	 */

	BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
	for (i = 0; i < n + m + o + 1; i++)
		fnic->msix_entry[i].entry = i;

	if (fnic->rq_count >= n &&
	    fnic->raw_wq_count >= m &&
	    fnic->wq_copy_count >= o &&
	    fnic->cq_count >= n + m + o) {
		if (!pci_enable_msix_exact(fnic->pdev, fnic->msix_entry,
					   n + m + o + 1)) {
			fnic->rq_count = n;
			fnic->raw_wq_count = m;
			fnic->wq_copy_count = o;
			fnic->wq_count = m + o;
			fnic->cq_count = n + m + o;
			fnic->intr_count = n + m + o + 1;
			fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;

			FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
				     "Using MSI-X Interrupts\n");
			vnic_dev_set_intr_mode(fnic->vdev,
					       VNIC_DEV_INTR_MODE_MSIX);
			return 0;
		}
	}

	/*
	 * Next try MSI
	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
	 */
	if (fnic->rq_count >= 1 &&
	    fnic->raw_wq_count >= 1 &&
	    fnic->wq_copy_count >= 1 &&
	    fnic->cq_count >= 3 &&
	    fnic->intr_count >= 1 &&
	    !pci_enable_msi(fnic->pdev)) {

		fnic->rq_count = 1;
		fnic->raw_wq_count = 1;
		fnic->wq_copy_count = 1;
		fnic->wq_count = 2;
		fnic->cq_count = 3;
		fnic->intr_count = 1;
		fnic->err_intr_offset = 0;

		FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
			     "Using MSI Interrupts\n");
		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/*
	 * Next try INTx
	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
	 * 1 INTR is used for all 3 queues, 1 INTR for queue errors
	 * 1 INTR for notification area
	 */

	if (fnic->rq_count >= 1 &&
	    fnic->raw_wq_count >= 1 &&
	    fnic->wq_copy_count >= 1 &&
	    fnic->cq_count >= 3 &&
	    fnic->intr_count >= 3) {

		fnic->rq_count = 1;
		fnic->raw_wq_count = 1;
		fnic->wq_copy_count = 1;
		fnic->cq_count = 3;
		fnic->intr_count = 3;

		FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
			     "Using Legacy Interrupts\n");
		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

/* Undo fnic_set_intr_mode(): disable MSI/MSI-X and fall back to INTx. */
void fnic_clear_intr_mode(struct fnic *fnic)
{
	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(fnic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(fnic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
gpl-2.0
s0be/kernel_htc_msm7227
drivers/scsi/arm/eesox.c
1709
17112
/* * linux/drivers/acorn/scsi/eesox.c * * Copyright (C) 1997-2005 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver is based on experimentation. Hence, it may have made * assumptions about the particular card that I have available, and * may not be reliable! * * Changelog: * 01-10-1997 RMK Created, READONLY version * 15-02-1998 RMK READ/WRITE version * added DMA support and hardware definitions * 14-03-1998 RMK Updated DMA support * Added terminator control * 15-04-1998 RMK Only do PIO if FAS216 will allow it. * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h * 02-04-2000 RMK 0.0.3 Fixed NO_IRQ/NO_DMA problem, updated for new * error handling code. */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/ecard.h> #include <asm/pgtable.h> #include "../scsi.h" #include <scsi/scsi_host.h> #include "fas216.h" #include "scsi.h" #include <scsi/scsicam.h> #define EESOX_FAS216_OFFSET 0x3000 #define EESOX_FAS216_SHIFT 5 #define EESOX_DMASTAT 0x2800 #define EESOX_STAT_INTR 0x01 #define EESOX_STAT_DMA 0x02 #define EESOX_CONTROL 0x2800 #define EESOX_INTR_ENABLE 0x04 #define EESOX_TERM_ENABLE 0x02 #define EESOX_RESET 0x01 #define EESOX_DMADATA 0x3800 #define VERSION "1.10 (17/01/2003 2.5.59)" /* * Use term=0,1,0,0,0 to turn terminators on/off */ static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 }; #define NR_SG 256 struct eesoxscsi_info { FAS216_Info info; struct expansion_card *ec; void __iomem *base; void __iomem *ctl_port; unsigned int control; struct scatterlist sg[NR_SG]; /* Scatter DMA list */ }; /* Prototype: void eesoxscsi_irqenable(ec, 
irqnr) * Purpose : Enable interrupts on EESOX SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void eesoxscsi_irqenable(struct expansion_card *ec, int irqnr) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; info->control |= EESOX_INTR_ENABLE; writeb(info->control, info->ctl_port); } /* Prototype: void eesoxscsi_irqdisable(ec, irqnr) * Purpose : Disable interrupts on EESOX SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void eesoxscsi_irqdisable(struct expansion_card *ec, int irqnr) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; info->control &= ~EESOX_INTR_ENABLE; writeb(info->control, info->ctl_port); } static const expansioncard_ops_t eesoxscsi_ops = { .irqenable = eesoxscsi_irqenable, .irqdisable = eesoxscsi_irqdisable, }; /* Prototype: void eesoxscsi_terminator_ctl(*host, on_off) * Purpose : Turn the EESOX SCSI terminators on or off * Params : host - card to turn on/off * : on_off - !0 to turn on, 0 to turn off */ static void eesoxscsi_terminator_ctl(struct Scsi_Host *host, int on_off) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; unsigned long flags; spin_lock_irqsave(host->host_lock, flags); if (on_off) info->control |= EESOX_TERM_ENABLE; else info->control &= ~EESOX_TERM_ENABLE; writeb(info->control, info->ctl_port); spin_unlock_irqrestore(host->host_lock, flags); } /* Prototype: void eesoxscsi_intr(irq, *dev_id, *regs) * Purpose : handle interrupts from EESOX SCSI card * Params : irq - interrupt number * dev_id - user-defined (Scsi_Host structure) */ static irqreturn_t eesoxscsi_intr(int irq, void *dev_id) { struct eesoxscsi_info *info = dev_id; return fas216_intr(&info->info); } /* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type) * Purpose : initialises DMA/PIO * Params : host - host * SCpnt - command * direction - DMA on to/off of card * min_type - minimum DMA support that 
we must have for this transfer * Returns : type of transfer to be performed */ static fasdmatype_t eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, fasdmatype_t min_type) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; struct device *dev = scsi_get_device(host); int dmach = info->info.scsi.dma; if (dmach != NO_DMA && (min_type == fasdma_real_all || SCp->this_residual >= 512)) { int bufs, map_dir, dma_dir; bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG); if (direction == DMA_OUT) map_dir = DMA_TO_DEVICE, dma_dir = DMA_MODE_WRITE; else map_dir = DMA_FROM_DEVICE, dma_dir = DMA_MODE_READ; dma_map_sg(dev, info->sg, bufs, map_dir); disable_dma(dmach); set_dma_sg(dmach, info->sg, bufs); set_dma_mode(dmach, dma_dir); enable_dma(dmach); return fasdma_real_all; } /* * We don't do DMA, we only do slow PIO * * Some day, we will do Pseudo DMA */ return fasdma_pseudo; } static void eesoxscsi_buffer_in(void *buf, int length, void __iomem *base) { const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; const void __iomem *reg_dmastat = base + EESOX_DMASTAT; const void __iomem *reg_dmadata = base + EESOX_DMADATA; register const unsigned long mask = 0xffff; do { unsigned int status; /* * Interrupt request? */ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); if (status & STAT_INT) break; /* * DMA request active? */ status = readb(reg_dmastat); if (!(status & EESOX_STAT_DMA)) continue; /* * Get number of bytes in FIFO */ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; if (status > 16) status = 16; if (status > length) status = length; /* * Align buffer. 
*/ if (((u32)buf) & 2 && status >= 2) { *(u16 *)buf = readl(reg_dmadata); buf += 2; status -= 2; length -= 2; } if (status >= 8) { unsigned long l1, l2; l1 = readl(reg_dmadata) & mask; l1 |= readl(reg_dmadata) << 16; l2 = readl(reg_dmadata) & mask; l2 |= readl(reg_dmadata) << 16; *(u32 *)buf = l1; buf += 4; *(u32 *)buf = l2; buf += 4; length -= 8; continue; } if (status >= 4) { unsigned long l1; l1 = readl(reg_dmadata) & mask; l1 |= readl(reg_dmadata) << 16; *(u32 *)buf = l1; buf += 4; length -= 4; continue; } if (status >= 2) { *(u16 *)buf = readl(reg_dmadata); buf += 2; length -= 2; } } while (length); } static void eesoxscsi_buffer_out(void *buf, int length, void __iomem *base) { const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; const void __iomem *reg_dmastat = base + EESOX_DMASTAT; const void __iomem *reg_dmadata = base + EESOX_DMADATA; do { unsigned int status; /* * Interrupt request? */ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); if (status & STAT_INT) break; /* * DMA request active? */ status = readb(reg_dmastat); if (!(status & EESOX_STAT_DMA)) continue; /* * Get number of bytes in FIFO */ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; if (status > 16) status = 16; status = 16 - status; if (status > length) status = length; status &= ~1; /* * Align buffer. 
*/ if (((u32)buf) & 2 && status >= 2) { writel(*(u16 *)buf << 16, reg_dmadata); buf += 2; status -= 2; length -= 2; } if (status >= 8) { unsigned long l1, l2; l1 = *(u32 *)buf; buf += 4; l2 = *(u32 *)buf; buf += 4; writel(l1 << 16, reg_dmadata); writel(l1, reg_dmadata); writel(l2 << 16, reg_dmadata); writel(l2, reg_dmadata); length -= 8; continue; } if (status >= 4) { unsigned long l1; l1 = *(u32 *)buf; buf += 4; writel(l1 << 16, reg_dmadata); writel(l1, reg_dmadata); length -= 4; continue; } if (status >= 2) { writel(*(u16 *)buf << 16, reg_dmadata); buf += 2; length -= 2; } } while (length); } static void eesoxscsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t dir, int transfer_size) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; if (dir == DMA_IN) { eesoxscsi_buffer_in(SCp->ptr, SCp->this_residual, info->base); } else { eesoxscsi_buffer_out(SCp->ptr, SCp->this_residual, info->base); } } /* Prototype: int eesoxscsi_dma_stop(host, SCpnt) * Purpose : stops DMA/PIO * Params : host - host * SCpnt - command */ static void eesoxscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; if (info->info.scsi.dma != NO_DMA) disable_dma(info->info.scsi.dma); } /* Prototype: const char *eesoxscsi_info(struct Scsi_Host * host) * Purpose : returns a descriptive string about this interface, * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ const char *eesoxscsi_info(struct Scsi_Host *host) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; static char string[150]; sprintf(string, "%s (%s) in slot %d v%s terminators o%s", host->hostt->name, info->info.scsi.type, info->ec->slot_no, VERSION, info->control & EESOX_TERM_ENABLE ? 
"n" : "ff"); return string; } /* Prototype: int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) * Purpose : Set a driver specific function * Params : host - host to setup * : buffer - buffer containing string describing operation * : length - length of string * Returns : -EINVAL, or 0 */ static int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) { int ret = length; if (length >= 9 && strncmp(buffer, "EESOXSCSI", 9) == 0) { buffer += 9; length -= 9; if (length >= 5 && strncmp(buffer, "term=", 5) == 0) { if (buffer[5] == '1') eesoxscsi_terminator_ctl(host, 1); else if (buffer[5] == '0') eesoxscsi_terminator_ctl(host, 0); else ret = -EINVAL; } else ret = -EINVAL; } else ret = -EINVAL; return ret; } /* Prototype: int eesoxscsi_proc_info(char *buffer, char **start, off_t offset, * int length, int host_no, int inout) * Purpose : Return information about the driver to a user process accessing * the /proc filesystem. * Params : buffer - a buffer to write information to * start - a pointer into this buffer set by this routine to the start * of the required information. * offset - offset into information that we have read upto. * length - length of buffer * host_no - host number to return information for * inout - 0 for reading, 1 for writing. * Returns : length of data written to buffer. */ int eesoxscsi_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) { struct eesoxscsi_info *info; char *p = buffer; int pos; if (inout == 1) return eesoxscsi_set_proc_info(host, buffer, length); info = (struct eesoxscsi_info *)host->hostdata; p += sprintf(p, "EESOX SCSI driver v%s\n", VERSION); p += fas216_print_host(&info->info, p); p += sprintf(p, "Term : o%s\n", info->control & EESOX_TERM_ENABLE ? 
"n" : "ff"); p += fas216_print_stats(&info->info, p); p += fas216_print_devices(&info->info, p); *start = buffer + offset; pos = p - buffer - offset; if (pos > length) pos = length; return pos; } static ssize_t eesoxscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; return sprintf(buf, "%d\n", info->control & EESOX_TERM_ENABLE ? 1 : 0); } static ssize_t eesoxscsi_store_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; unsigned long flags; if (len > 1) { spin_lock_irqsave(host->host_lock, flags); if (buf[0] != '0') { info->control |= EESOX_TERM_ENABLE; } else { info->control &= ~EESOX_TERM_ENABLE; } writeb(info->control, info->ctl_port); spin_unlock_irqrestore(host->host_lock, flags); } return len; } static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR, eesoxscsi_show_term, eesoxscsi_store_term); static struct scsi_host_template eesox_template = { .module = THIS_MODULE, .proc_info = eesoxscsi_proc_info, .name = "EESOX SCSI", .info = eesoxscsi_info, .queuecommand = fas216_queue_command, .eh_host_reset_handler = fas216_eh_host_reset, .eh_bus_reset_handler = fas216_eh_bus_reset, .eh_device_reset_handler = fas216_eh_device_reset, .eh_abort_handler = fas216_eh_abort, .can_queue = 1, .this_id = 7, .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, .proc_name = "eesox", }; static int __devinit eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; struct eesoxscsi_info *info; void __iomem *base; int ret; ret = ecard_request_resources(ec); if (ret) goto out; base = 
ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!base) { ret = -ENOMEM; goto out_region; } host = scsi_host_alloc(&eesox_template, sizeof(struct eesoxscsi_info)); if (!host) { ret = -ENOMEM; goto out_region; } ecard_set_drvdata(ec, host); info = (struct eesoxscsi_info *)host->hostdata; info->ec = ec; info->base = base; info->ctl_port = base + EESOX_CONTROL; info->control = term[ec->slot_no] ? EESOX_TERM_ENABLE : 0; writeb(info->control, info->ctl_port); info->info.scsi.io_base = base + EESOX_FAS216_OFFSET; info->info.scsi.io_shift = EESOX_FAS216_SHIFT; info->info.scsi.irq = ec->irq; info->info.scsi.dma = ec->dma; info->info.ifcfg.clockrate = 40; /* MHz */ info->info.ifcfg.select_timeout = 255; info->info.ifcfg.asyncperiod = 200; /* ns */ info->info.ifcfg.sync_max_depth = 7; info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK; info->info.ifcfg.disconnect_ok = 1; info->info.ifcfg.wide_max_size = 0; info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; info->info.dma.setup = eesoxscsi_dma_setup; info->info.dma.pseudo = eesoxscsi_dma_pseudo; info->info.dma.stop = eesoxscsi_dma_stop; ec->irqaddr = base + EESOX_DMASTAT; ec->irqmask = EESOX_STAT_INTR; ecard_setirq(ec, &eesoxscsi_ops, info); device_create_file(&ec->dev, &dev_attr_bus_term); ret = fas216_init(host); if (ret) goto out_free; ret = request_irq(ec->irq, eesoxscsi_intr, 0, "eesoxscsi", info); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, ec->irq, ret); goto out_remove; } if (info->info.scsi.dma != NO_DMA) { if (request_dma(info->info.scsi.dma, "eesox")) { printk("scsi%d: DMA%d not free, DMA disabled\n", host->host_no, info->info.scsi.dma); info->info.scsi.dma = NO_DMA; } else { set_dma_speed(info->info.scsi.dma, 180); info->info.ifcfg.capabilities |= FASCAP_DMA; info->info.ifcfg.cntl3 |= CNTL3_BS8; } } ret = fas216_add(host, &ec->dev); if (ret == 0) goto out; if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, host); out_remove: fas216_remove(host); out_free: 
device_remove_file(&ec->dev, &dev_attr_bus_term); scsi_host_put(host); out_region: ecard_release_resources(ec); out: return ret; } static void __devexit eesoxscsi_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; ecard_set_drvdata(ec, NULL); fas216_remove(host); if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, info); device_remove_file(&ec->dev, &dev_attr_bus_term); fas216_release(host); scsi_host_put(host); ecard_release_resources(ec); } static const struct ecard_id eesoxscsi_cids[] = { { MANU_EESOX, PROD_EESOX_SCSI2 }, { 0xffff, 0xffff }, }; static struct ecard_driver eesoxscsi_driver = { .probe = eesoxscsi_probe, .remove = __devexit_p(eesoxscsi_remove), .id_table = eesoxscsi_cids, .drv = { .name = "eesoxscsi", }, }; static int __init eesox_init(void) { return ecard_register_driver(&eesoxscsi_driver); } static void __exit eesox_exit(void) { ecard_remove_driver(&eesoxscsi_driver); } module_init(eesox_init); module_exit(eesox_exit); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("EESOX 'Fast' SCSI driver for Acorn machines"); module_param_array(term, int, NULL, 0); MODULE_PARM_DESC(term, "SCSI bus termination"); MODULE_LICENSE("GPL");
gpl-2.0
puppies/fl2440
linux-3.10.33/sound/pci/ali5451/ali5451.c
1965
57622
/* * Matt Wu <Matt_Wu@acersoftech.com.cn> * Apr 26, 2001 * Routines for control of ALi pci audio M5451 * * BUGS: * -- * * TODO: * -- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public Lcodecnse as published by * the Free Software Foundation; either version 2 of the Lcodecnse, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public Lcodecnse for more details. * * You should have received a copy of the GNU General Public Lcodecnse * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/info.h> #include <sound/ac97_codec.h> #include <sound/mpu401.h> #include <sound/initval.h> MODULE_AUTHOR("Matt Wu <Matt_Wu@acersoftech.com.cn>"); MODULE_DESCRIPTION("ALI M5451"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ALI,M5451,pci},{ALI,M5451}}"); static int index = SNDRV_DEFAULT_IDX1; /* Index */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ static int pcm_channels = 32; static bool spdif; module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for ALI M5451 PCI Audio."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for ALI M5451 PCI Audio."); module_param(pcm_channels, int, 0444); MODULE_PARM_DESC(pcm_channels, "PCM Channels"); module_param(spdif, bool, 0444); MODULE_PARM_DESC(spdif, "Support SPDIF I/O"); /* just for backward compatibility */ static bool enable; module_param(enable, bool, 0444); /* * Debug part 
definitions */ /* #define ALI_DEBUG */ #ifdef ALI_DEBUG #define snd_ali_printk(format, args...) printk(KERN_DEBUG format, ##args); #else #define snd_ali_printk(format, args...) #endif /* * Constants definition */ #define DEVICE_ID_ALI5451 ((PCI_VENDOR_ID_AL<<16)|PCI_DEVICE_ID_AL_M5451) #define ALI_CHANNELS 32 #define ALI_PCM_IN_CHANNEL 31 #define ALI_SPDIF_IN_CHANNEL 19 #define ALI_SPDIF_OUT_CHANNEL 15 #define ALI_CENTER_CHANNEL 24 #define ALI_LEF_CHANNEL 23 #define ALI_SURR_LEFT_CHANNEL 26 #define ALI_SURR_RIGHT_CHANNEL 25 #define ALI_MODEM_IN_CHANNEL 21 #define ALI_MODEM_OUT_CHANNEL 20 #define SNDRV_ALI_VOICE_TYPE_PCM 01 #define SNDRV_ALI_VOICE_TYPE_OTH 02 #define ALI_5451_V02 0x02 /* * Direct Registers */ #define ALI_LEGACY_DMAR0 0x00 /* ADR0 */ #define ALI_LEGACY_DMAR4 0x04 /* CNT0 */ #define ALI_LEGACY_DMAR11 0x0b /* MOD */ #define ALI_LEGACY_DMAR15 0x0f /* MMR */ #define ALI_MPUR0 0x20 #define ALI_MPUR1 0x21 #define ALI_MPUR2 0x22 #define ALI_MPUR3 0x23 #define ALI_AC97_WRITE 0x40 #define ALI_AC97_READ 0x44 #define ALI_SCTRL 0x48 #define ALI_SPDIF_OUT_ENABLE 0x20 #define ALI_SCTRL_LINE_IN2 (1 << 9) #define ALI_SCTRL_GPIO_IN2 (1 << 13) #define ALI_SCTRL_LINE_OUT_EN (1 << 20) #define ALI_SCTRL_GPIO_OUT_EN (1 << 23) #define ALI_SCTRL_CODEC1_READY (1 << 24) #define ALI_SCTRL_CODEC2_READY (1 << 25) #define ALI_AC97_GPIO 0x4c #define ALI_AC97_GPIO_ENABLE 0x8000 #define ALI_AC97_GPIO_DATA_SHIFT 16 #define ALI_SPDIF_CS 0x70 #define ALI_SPDIF_CTRL 0x74 #define ALI_SPDIF_IN_FUNC_ENABLE 0x02 #define ALI_SPDIF_IN_CH_STATUS 0x40 #define ALI_SPDIF_OUT_CH_STATUS 0xbf #define ALI_START 0x80 #define ALI_STOP 0x84 #define ALI_CSPF 0x90 #define ALI_AINT 0x98 #define ALI_GC_CIR 0xa0 #define ENDLP_IE 0x00001000 #define MIDLP_IE 0x00002000 #define ALI_AINTEN 0xa4 #define ALI_VOLUME 0xa8 #define ALI_SBDELTA_DELTA_R 0xac #define ALI_MISCINT 0xb0 #define ADDRESS_IRQ 0x00000020 #define TARGET_REACHED 0x00008000 #define MIXER_OVERFLOW 0x00000800 #define MIXER_UNDERFLOW 0x00000400 
#define GPIO_IRQ 0x01000000 #define ALI_SBBL_SBCL 0xc0 #define ALI_SBCTRL_SBE2R_SBDD 0xc4 #define ALI_STIMER 0xc8 #define ALI_GLOBAL_CONTROL 0xd4 #define ALI_SPDIF_OUT_SEL_PCM 0x00000400 /* bit 10 */ #define ALI_SPDIF_IN_SUPPORT 0x00000800 /* bit 11 */ #define ALI_SPDIF_OUT_CH_ENABLE 0x00008000 /* bit 15 */ #define ALI_SPDIF_IN_CH_ENABLE 0x00080000 /* bit 19 */ #define ALI_PCM_IN_ENABLE 0x80000000 /* bit 31 */ #define ALI_CSO_ALPHA_FMS 0xe0 #define ALI_LBA 0xe4 #define ALI_ESO_DELTA 0xe8 #define ALI_GVSEL_PAN_VOC_CTRL_EC 0xf0 #define ALI_EBUF1 0xf4 #define ALI_EBUF2 0xf8 #define ALI_REG(codec, x) ((codec)->port + x) #define MAX_CODECS 2 struct snd_ali; struct snd_ali_voice; struct snd_ali_channel_control { /* register data */ struct REGDATA { unsigned int start; unsigned int stop; unsigned int aint; unsigned int ainten; } data; /* register addresses */ struct REGS { unsigned int start; unsigned int stop; unsigned int aint; unsigned int ainten; unsigned int ac97read; unsigned int ac97write; } regs; }; struct snd_ali_voice { unsigned int number; unsigned int use :1, pcm :1, midi :1, mode :1, synth :1, running :1; /* PCM data */ struct snd_ali *codec; struct snd_pcm_substream *substream; struct snd_ali_voice *extra; int eso; /* final ESO value for channel */ int count; /* runtime->period_size */ /* --- */ void *private_data; void (*private_free)(void *private_data); }; struct snd_alidev { struct snd_ali_voice voices[ALI_CHANNELS]; unsigned int chcnt; /* num of opened channels */ unsigned int chmap; /* bitmap for opened channels */ unsigned int synthcount; }; #define ALI_GLOBAL_REGS 56 #define ALI_CHANNEL_REGS 8 struct snd_ali_image { u32 regs[ALI_GLOBAL_REGS]; u32 channel_regs[ALI_CHANNELS][ALI_CHANNEL_REGS]; }; struct snd_ali { int irq; unsigned long port; unsigned char revision; unsigned int hw_initialized :1; unsigned int spdif_support :1; struct pci_dev *pci; struct pci_dev *pci_m1533; struct pci_dev *pci_m7101; struct snd_card *card; struct snd_pcm 
*pcm[MAX_CODECS]; struct snd_alidev synth; struct snd_ali_channel_control chregs; /* S/PDIF Mask */ unsigned int spdif_mask; unsigned int spurious_irq_count; unsigned int spurious_irq_max_delta; unsigned int num_of_codecs; struct snd_ac97_bus *ac97_bus; struct snd_ac97 *ac97[MAX_CODECS]; unsigned short ac97_ext_id; unsigned short ac97_ext_status; spinlock_t reg_lock; spinlock_t voice_alloc; #ifdef CONFIG_PM_SLEEP struct snd_ali_image *image; #endif }; static DEFINE_PCI_DEVICE_TABLE(snd_ali_ids) = { {PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5451), 0, 0, 0}, {0, } }; MODULE_DEVICE_TABLE(pci, snd_ali_ids); static void snd_ali_clear_voices(struct snd_ali *, unsigned int, unsigned int); static unsigned short snd_ali_codec_peek(struct snd_ali *, int, unsigned short); static void snd_ali_codec_poke(struct snd_ali *, int, unsigned short, unsigned short); /* * AC97 ACCESS */ static inline unsigned int snd_ali_5451_peek(struct snd_ali *codec, unsigned int port) { return (unsigned int)inl(ALI_REG(codec, port)); } static inline void snd_ali_5451_poke(struct snd_ali *codec, unsigned int port, unsigned int val) { outl((unsigned int)val, ALI_REG(codec, port)); } static int snd_ali_codec_ready(struct snd_ali *codec, unsigned int port) { unsigned long end_time; unsigned int res; end_time = jiffies + msecs_to_jiffies(250); for (;;) { res = snd_ali_5451_peek(codec,port); if (!(res & 0x8000)) return 0; if (!time_after_eq(end_time, jiffies)) break; schedule_timeout_uninterruptible(1); } snd_ali_5451_poke(codec, port, res & ~0x8000); snd_printdd("ali_codec_ready: codec is not ready.\n "); return -EIO; } static int snd_ali_stimer_ready(struct snd_ali *codec) { unsigned long end_time; unsigned long dwChk1,dwChk2; dwChk1 = snd_ali_5451_peek(codec, ALI_STIMER); end_time = jiffies + msecs_to_jiffies(250); for (;;) { dwChk2 = snd_ali_5451_peek(codec, ALI_STIMER); if (dwChk2 != dwChk1) return 0; if (!time_after_eq(end_time, jiffies)) break; schedule_timeout_uninterruptible(1); } 
snd_printk(KERN_ERR "ali_stimer_read: stimer is not ready.\n"); return -EIO; } static void snd_ali_codec_poke(struct snd_ali *codec,int secondary, unsigned short reg, unsigned short val) { unsigned int dwVal; unsigned int port; if (reg >= 0x80) { snd_printk(KERN_ERR "ali_codec_poke: reg(%xh) invalid.\n", reg); return; } port = codec->chregs.regs.ac97write; if (snd_ali_codec_ready(codec, port) < 0) return; if (snd_ali_stimer_ready(codec) < 0) return; dwVal = (unsigned int) (reg & 0xff); dwVal |= 0x8000 | (val << 16); if (secondary) dwVal |= 0x0080; if (codec->revision == ALI_5451_V02) dwVal |= 0x0100; snd_ali_5451_poke(codec, port, dwVal); return ; } static unsigned short snd_ali_codec_peek(struct snd_ali *codec, int secondary, unsigned short reg) { unsigned int dwVal; unsigned int port; if (reg >= 0x80) { snd_printk(KERN_ERR "ali_codec_peek: reg(%xh) invalid.\n", reg); return ~0; } port = codec->chregs.regs.ac97read; if (snd_ali_codec_ready(codec, port) < 0) return ~0; if (snd_ali_stimer_ready(codec) < 0) return ~0; dwVal = (unsigned int) (reg & 0xff); dwVal |= 0x8000; /* bit 15*/ if (secondary) dwVal |= 0x0080; snd_ali_5451_poke(codec, port, dwVal); if (snd_ali_stimer_ready(codec) < 0) return ~0; if (snd_ali_codec_ready(codec, port) < 0) return ~0; return (snd_ali_5451_peek(codec, port) & 0xffff0000) >> 16; } static void snd_ali_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val ) { struct snd_ali *codec = ac97->private_data; snd_ali_printk("codec_write: reg=%xh data=%xh.\n", reg, val); if (reg == AC97_GPIO_STATUS) { outl((val << ALI_AC97_GPIO_DATA_SHIFT) | ALI_AC97_GPIO_ENABLE, ALI_REG(codec, ALI_AC97_GPIO)); return; } snd_ali_codec_poke(codec, ac97->num, reg, val); return ; } static unsigned short snd_ali_codec_read(struct snd_ac97 *ac97, unsigned short reg) { struct snd_ali *codec = ac97->private_data; snd_ali_printk("codec_read reg=%xh.\n", reg); return snd_ali_codec_peek(codec, ac97->num, reg); } /* * AC97 Reset */ static int 
snd_ali_reset_5451(struct snd_ali *codec) { struct pci_dev *pci_dev; unsigned short wCount, wReg; unsigned int dwVal; pci_dev = codec->pci_m1533; if (pci_dev) { pci_read_config_dword(pci_dev, 0x7c, &dwVal); pci_write_config_dword(pci_dev, 0x7c, dwVal | 0x08000000); mdelay(5); pci_read_config_dword(pci_dev, 0x7c, &dwVal); pci_write_config_dword(pci_dev, 0x7c, dwVal & 0xf7ffffff); mdelay(5); } pci_dev = codec->pci; pci_read_config_dword(pci_dev, 0x44, &dwVal); pci_write_config_dword(pci_dev, 0x44, dwVal | 0x000c0000); udelay(500); pci_read_config_dword(pci_dev, 0x44, &dwVal); pci_write_config_dword(pci_dev, 0x44, dwVal & 0xfffbffff); mdelay(5); wCount = 200; while(wCount--) { wReg = snd_ali_codec_peek(codec, 0, AC97_POWERDOWN); if ((wReg & 0x000f) == 0x000f) return 0; mdelay(5); } /* non-fatal if you have a non PM capable codec */ /* snd_printk(KERN_WARNING "ali5451: reset time out\n"); */ return 0; } /* * ALI 5451 Controller */ static void snd_ali_enable_special_channel(struct snd_ali *codec, unsigned int channel) { unsigned long dwVal; dwVal = inl(ALI_REG(codec, ALI_GLOBAL_CONTROL)); dwVal |= 1 << (channel & 0x0000001f); outl(dwVal, ALI_REG(codec, ALI_GLOBAL_CONTROL)); } static void snd_ali_disable_special_channel(struct snd_ali *codec, unsigned int channel) { unsigned long dwVal; dwVal = inl(ALI_REG(codec, ALI_GLOBAL_CONTROL)); dwVal &= ~(1 << (channel & 0x0000001f)); outl(dwVal, ALI_REG(codec, ALI_GLOBAL_CONTROL)); } static void snd_ali_enable_address_interrupt(struct snd_ali *codec) { unsigned int gc; gc = inl(ALI_REG(codec, ALI_GC_CIR)); gc |= ENDLP_IE; gc |= MIDLP_IE; outl( gc, ALI_REG(codec, ALI_GC_CIR)); } static void snd_ali_disable_address_interrupt(struct snd_ali *codec) { unsigned int gc; gc = inl(ALI_REG(codec, ALI_GC_CIR)); gc &= ~ENDLP_IE; gc &= ~MIDLP_IE; outl(gc, ALI_REG(codec, ALI_GC_CIR)); } static void snd_ali_disable_voice_irq(struct snd_ali *codec, unsigned int channel) { unsigned int mask; struct snd_ali_channel_control *pchregs = 
&(codec->chregs); snd_ali_printk("disable_voice_irq channel=%d\n",channel); mask = 1 << (channel & 0x1f); pchregs->data.ainten = inl(ALI_REG(codec, pchregs->regs.ainten)); pchregs->data.ainten &= ~mask; outl(pchregs->data.ainten, ALI_REG(codec, pchregs->regs.ainten)); } static int snd_ali_alloc_pcm_channel(struct snd_ali *codec, int channel) { unsigned int idx = channel & 0x1f; if (codec->synth.chcnt >= ALI_CHANNELS){ snd_printk(KERN_ERR "ali_alloc_pcm_channel: no free channels.\n"); return -1; } if (!(codec->synth.chmap & (1 << idx))) { codec->synth.chmap |= 1 << idx; codec->synth.chcnt++; snd_ali_printk("alloc_pcm_channel no. %d.\n",idx); return idx; } return -1; } static int snd_ali_find_free_channel(struct snd_ali * codec, int rec) { int idx; int result = -1; snd_ali_printk("find_free_channel: for %s\n",rec ? "rec" : "pcm"); /* recording */ if (rec) { if (codec->spdif_support && (inl(ALI_REG(codec, ALI_GLOBAL_CONTROL)) & ALI_SPDIF_IN_SUPPORT)) idx = ALI_SPDIF_IN_CHANNEL; else idx = ALI_PCM_IN_CHANNEL; result = snd_ali_alloc_pcm_channel(codec, idx); if (result >= 0) return result; else { snd_printk(KERN_ERR "ali_find_free_channel: " "record channel is busy now.\n"); return -1; } } /* playback... 
*/ if (codec->spdif_support && (inl(ALI_REG(codec, ALI_GLOBAL_CONTROL)) & ALI_SPDIF_OUT_CH_ENABLE)) { idx = ALI_SPDIF_OUT_CHANNEL; result = snd_ali_alloc_pcm_channel(codec, idx); if (result >= 0) return result; else snd_printk(KERN_ERR "ali_find_free_channel: " "S/PDIF out channel is in busy now.\n"); } for (idx = 0; idx < ALI_CHANNELS; idx++) { result = snd_ali_alloc_pcm_channel(codec, idx); if (result >= 0) return result; } snd_printk(KERN_ERR "ali_find_free_channel: no free channels.\n"); return -1; } static void snd_ali_free_channel_pcm(struct snd_ali *codec, int channel) { unsigned int idx = channel & 0x0000001f; snd_ali_printk("free_channel_pcm channel=%d\n",channel); if (channel < 0 || channel >= ALI_CHANNELS) return; if (!(codec->synth.chmap & (1 << idx))) { snd_printk(KERN_ERR "ali_free_channel_pcm: " "channel %d is not in use.\n", channel); return; } else { codec->synth.chmap &= ~(1 << idx); codec->synth.chcnt--; } } static void snd_ali_stop_voice(struct snd_ali *codec, unsigned int channel) { unsigned int mask = 1 << (channel & 0x1f); snd_ali_printk("stop_voice: channel=%d\n",channel); outl(mask, ALI_REG(codec, codec->chregs.regs.stop)); } /* * S/PDIF Part */ static void snd_ali_delay(struct snd_ali *codec,int interval) { unsigned long begintimer,currenttimer; begintimer = inl(ALI_REG(codec, ALI_STIMER)); currenttimer = inl(ALI_REG(codec, ALI_STIMER)); while (currenttimer < begintimer + interval) { if (snd_ali_stimer_ready(codec) < 0) break; currenttimer = inl(ALI_REG(codec, ALI_STIMER)); cpu_relax(); } } static void snd_ali_detect_spdif_rate(struct snd_ali *codec) { u16 wval; u16 count = 0; u8 bval, R1 = 0, R2; bval = inb(ALI_REG(codec, ALI_SPDIF_CTRL + 1)); bval |= 0x1F; outb(bval, ALI_REG(codec, ALI_SPDIF_CTRL + 1)); while ((R1 < 0x0b || R1 > 0x0e) && R1 != 0x12 && count <= 50000) { count ++; snd_ali_delay(codec, 6); bval = inb(ALI_REG(codec, ALI_SPDIF_CTRL + 1)); R1 = bval & 0x1F; } if (count > 50000) { snd_printk(KERN_ERR "ali_detect_spdif_rate: 
timeout!\n"); return; } for (count = 0; count <= 50000; count++) { snd_ali_delay(codec, 6); bval = inb(ALI_REG(codec,ALI_SPDIF_CTRL + 1)); R2 = bval & 0x1F; if (R2 != R1) R1 = R2; else break; } if (count > 50000) { snd_printk(KERN_ERR "ali_detect_spdif_rate: timeout!\n"); return; } if (R2 >= 0x0b && R2 <= 0x0e) { wval = inw(ALI_REG(codec, ALI_SPDIF_CTRL + 2)); wval &= 0xe0f0; wval |= (0x09 << 8) | 0x05; outw(wval, ALI_REG(codec, ALI_SPDIF_CTRL + 2)); bval = inb(ALI_REG(codec, ALI_SPDIF_CS + 3)) & 0xf0; outb(bval | 0x02, ALI_REG(codec, ALI_SPDIF_CS + 3)); } else if (R2 == 0x12) { wval = inw(ALI_REG(codec, ALI_SPDIF_CTRL + 2)); wval &= 0xe0f0; wval |= (0x0e << 8) | 0x08; outw(wval, ALI_REG(codec, ALI_SPDIF_CTRL + 2)); bval = inb(ALI_REG(codec,ALI_SPDIF_CS + 3)) & 0xf0; outb(bval | 0x03, ALI_REG(codec, ALI_SPDIF_CS + 3)); } } static unsigned int snd_ali_get_spdif_in_rate(struct snd_ali *codec) { u32 dwRate; u8 bval; bval = inb(ALI_REG(codec, ALI_SPDIF_CTRL)); bval &= 0x7f; bval |= 0x40; outb(bval, ALI_REG(codec, ALI_SPDIF_CTRL)); snd_ali_detect_spdif_rate(codec); bval = inb(ALI_REG(codec, ALI_SPDIF_CS + 3)); bval &= 0x0f; switch (bval) { case 0: dwRate = 44100; break; case 1: dwRate = 48000; break; case 2: dwRate = 32000; break; default: dwRate = 0; break; } return dwRate; } static void snd_ali_enable_spdif_in(struct snd_ali *codec) { unsigned int dwVal; dwVal = inl(ALI_REG(codec, ALI_GLOBAL_CONTROL)); dwVal |= ALI_SPDIF_IN_SUPPORT; outl(dwVal, ALI_REG(codec, ALI_GLOBAL_CONTROL)); dwVal = inb(ALI_REG(codec, ALI_SPDIF_CTRL)); dwVal |= 0x02; outb(dwVal, ALI_REG(codec, ALI_SPDIF_CTRL)); snd_ali_enable_special_channel(codec, ALI_SPDIF_IN_CHANNEL); } static void snd_ali_disable_spdif_in(struct snd_ali *codec) { unsigned int dwVal; dwVal = inl(ALI_REG(codec, ALI_GLOBAL_CONTROL)); dwVal &= ~ALI_SPDIF_IN_SUPPORT; outl(dwVal, ALI_REG(codec, ALI_GLOBAL_CONTROL)); snd_ali_disable_special_channel(codec, ALI_SPDIF_IN_CHANNEL); } static void snd_ali_set_spdif_out_rate(struct 
snd_ali *codec, unsigned int rate) { unsigned char bVal; unsigned int dwRate; switch (rate) { case 32000: dwRate = 0x300; break; case 48000: dwRate = 0x200; break; default: dwRate = 0; break; } bVal = inb(ALI_REG(codec, ALI_SPDIF_CTRL)); bVal &= (unsigned char)(~(1<<6)); bVal |= 0x80; /* select right */ outb(bVal, ALI_REG(codec, ALI_SPDIF_CTRL)); outb(dwRate | 0x20, ALI_REG(codec, ALI_SPDIF_CS + 2)); bVal &= ~0x80; /* select left */ outb(bVal, ALI_REG(codec, ALI_SPDIF_CTRL)); outw(rate | 0x10, ALI_REG(codec, ALI_SPDIF_CS + 2)); } static void snd_ali_enable_spdif_out(struct snd_ali *codec) { unsigned short wVal; unsigned char bVal; struct pci_dev *pci_dev; pci_dev = codec->pci_m1533; if (pci_dev == NULL) return; pci_read_config_byte(pci_dev, 0x61, &bVal); bVal |= 0x40; pci_write_config_byte(pci_dev, 0x61, bVal); pci_read_config_byte(pci_dev, 0x7d, &bVal); bVal |= 0x01; pci_write_config_byte(pci_dev, 0x7d, bVal); pci_read_config_byte(pci_dev, 0x7e, &bVal); bVal &= (~0x20); bVal |= 0x10; pci_write_config_byte(pci_dev, 0x7e, bVal); bVal = inb(ALI_REG(codec, ALI_SCTRL)); outb(bVal | ALI_SPDIF_OUT_ENABLE, ALI_REG(codec, ALI_SCTRL)); bVal = inb(ALI_REG(codec, ALI_SPDIF_CTRL)); outb(bVal & ALI_SPDIF_OUT_CH_STATUS, ALI_REG(codec, ALI_SPDIF_CTRL)); wVal = inw(ALI_REG(codec, ALI_GLOBAL_CONTROL)); wVal |= ALI_SPDIF_OUT_SEL_PCM; outw(wVal, ALI_REG(codec, ALI_GLOBAL_CONTROL)); snd_ali_disable_special_channel(codec, ALI_SPDIF_OUT_CHANNEL); } static void snd_ali_enable_spdif_chnout(struct snd_ali *codec) { unsigned short wVal; wVal = inw(ALI_REG(codec, ALI_GLOBAL_CONTROL)); wVal &= ~ALI_SPDIF_OUT_SEL_PCM; outw(wVal, ALI_REG(codec, ALI_GLOBAL_CONTROL)); /* wVal = inw(ALI_REG(codec, ALI_SPDIF_CS)); if (flag & ALI_SPDIF_OUT_NON_PCM) wVal |= 0x0002; else wVal &= (~0x0002); outw(wVal, ALI_REG(codec, ALI_SPDIF_CS)); */ snd_ali_enable_special_channel(codec, ALI_SPDIF_OUT_CHANNEL); } static void snd_ali_disable_spdif_chnout(struct snd_ali *codec) { unsigned short wVal; wVal = 
inw(ALI_REG(codec, ALI_GLOBAL_CONTROL)); wVal |= ALI_SPDIF_OUT_SEL_PCM; outw(wVal, ALI_REG(codec, ALI_GLOBAL_CONTROL)); snd_ali_enable_special_channel(codec, ALI_SPDIF_OUT_CHANNEL); } static void snd_ali_disable_spdif_out(struct snd_ali *codec) { unsigned char bVal; bVal = inb(ALI_REG(codec, ALI_SCTRL)); outb(bVal & ~ALI_SPDIF_OUT_ENABLE, ALI_REG(codec, ALI_SCTRL)); snd_ali_disable_spdif_chnout(codec); } static void snd_ali_update_ptr(struct snd_ali *codec, int channel) { struct snd_ali_voice *pvoice; struct snd_pcm_runtime *runtime; struct snd_ali_channel_control *pchregs; unsigned int old, mask; #ifdef ALI_DEBUG unsigned int temp, cspf; #endif pchregs = &(codec->chregs); /* check if interrupt occurred for channel */ old = pchregs->data.aint; mask = 1U << (channel & 0x1f); if (!(old & mask)) return; pvoice = &codec->synth.voices[channel]; runtime = pvoice->substream->runtime; udelay(100); spin_lock(&codec->reg_lock); if (pvoice->pcm && pvoice->substream) { /* pcm interrupt */ #ifdef ALI_DEBUG outb((u8)(pvoice->number), ALI_REG(codec, ALI_GC_CIR)); temp = inw(ALI_REG(codec, ALI_CSO_ALPHA_FMS + 2)); cspf = (inl(ALI_REG(codec, ALI_CSPF)) & mask) == mask; #endif if (pvoice->running) { snd_ali_printk("update_ptr: cso=%4.4x cspf=%d.\n", (u16)temp, cspf); spin_unlock(&codec->reg_lock); snd_pcm_period_elapsed(pvoice->substream); spin_lock(&codec->reg_lock); } else { snd_ali_stop_voice(codec, channel); snd_ali_disable_voice_irq(codec, channel); } } else if (codec->synth.voices[channel].synth) { /* synth interrupt */ } else if (codec->synth.voices[channel].midi) { /* midi interrupt */ } else { /* unknown interrupt */ snd_ali_stop_voice(codec, channel); snd_ali_disable_voice_irq(codec, channel); } spin_unlock(&codec->reg_lock); outl(mask,ALI_REG(codec,pchregs->regs.aint)); pchregs->data.aint = old & (~mask); } static irqreturn_t snd_ali_card_interrupt(int irq, void *dev_id) { struct snd_ali *codec = dev_id; int channel; unsigned int audio_int; struct snd_ali_channel_control 
*pchregs; if (codec == NULL || !codec->hw_initialized) return IRQ_NONE; audio_int = inl(ALI_REG(codec, ALI_MISCINT)); if (!audio_int) return IRQ_NONE; pchregs = &(codec->chregs); if (audio_int & ADDRESS_IRQ) { /* get interrupt status for all channels */ pchregs->data.aint = inl(ALI_REG(codec, pchregs->regs.aint)); for (channel = 0; channel < ALI_CHANNELS; channel++) snd_ali_update_ptr(codec, channel); } outl((TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW), ALI_REG(codec, ALI_MISCINT)); return IRQ_HANDLED; } static struct snd_ali_voice *snd_ali_alloc_voice(struct snd_ali * codec, int type, int rec, int channel) { struct snd_ali_voice *pvoice; int idx; snd_ali_printk("alloc_voice: type=%d rec=%d\n", type, rec); spin_lock_irq(&codec->voice_alloc); if (type == SNDRV_ALI_VOICE_TYPE_PCM) { idx = channel > 0 ? snd_ali_alloc_pcm_channel(codec, channel) : snd_ali_find_free_channel(codec,rec); if (idx < 0) { snd_printk(KERN_ERR "ali_alloc_voice: err.\n"); spin_unlock_irq(&codec->voice_alloc); return NULL; } pvoice = &(codec->synth.voices[idx]); pvoice->codec = codec; pvoice->use = 1; pvoice->pcm = 1; pvoice->mode = rec; spin_unlock_irq(&codec->voice_alloc); return pvoice; } spin_unlock_irq(&codec->voice_alloc); return NULL; } static void snd_ali_free_voice(struct snd_ali * codec, struct snd_ali_voice *pvoice) { void (*private_free)(void *); void *private_data; snd_ali_printk("free_voice: channel=%d\n",pvoice->number); if (!pvoice->use) return; snd_ali_clear_voices(codec, pvoice->number, pvoice->number); spin_lock_irq(&codec->voice_alloc); private_free = pvoice->private_free; private_data = pvoice->private_data; pvoice->private_free = NULL; pvoice->private_data = NULL; if (pvoice->pcm) snd_ali_free_channel_pcm(codec, pvoice->number); pvoice->use = pvoice->pcm = pvoice->synth = 0; pvoice->substream = NULL; spin_unlock_irq(&codec->voice_alloc); if (private_free) private_free(private_data); } static void snd_ali_clear_voices(struct snd_ali *codec, unsigned int v_min, 
unsigned int v_max) { unsigned int i; for (i = v_min; i <= v_max; i++) { snd_ali_stop_voice(codec, i); snd_ali_disable_voice_irq(codec, i); } } static void snd_ali_write_voice_regs(struct snd_ali *codec, unsigned int Channel, unsigned int LBA, unsigned int CSO, unsigned int ESO, unsigned int DELTA, unsigned int ALPHA_FMS, unsigned int GVSEL, unsigned int PAN, unsigned int VOL, unsigned int CTRL, unsigned int EC) { unsigned int ctlcmds[4]; outb((unsigned char)(Channel & 0x001f), ALI_REG(codec, ALI_GC_CIR)); ctlcmds[0] = (CSO << 16) | (ALPHA_FMS & 0x0000ffff); ctlcmds[1] = LBA; ctlcmds[2] = (ESO << 16) | (DELTA & 0x0ffff); ctlcmds[3] = (GVSEL << 31) | ((PAN & 0x0000007f) << 24) | ((VOL & 0x000000ff) << 16) | ((CTRL & 0x0000000f) << 12) | (EC & 0x00000fff); outb(Channel, ALI_REG(codec, ALI_GC_CIR)); outl(ctlcmds[0], ALI_REG(codec, ALI_CSO_ALPHA_FMS)); outl(ctlcmds[1], ALI_REG(codec, ALI_LBA)); outl(ctlcmds[2], ALI_REG(codec, ALI_ESO_DELTA)); outl(ctlcmds[3], ALI_REG(codec, ALI_GVSEL_PAN_VOC_CTRL_EC)); outl(0x30000000, ALI_REG(codec, ALI_EBUF1)); /* Still Mode */ outl(0x30000000, ALI_REG(codec, ALI_EBUF2)); /* Still Mode */ } static unsigned int snd_ali_convert_rate(unsigned int rate, int rec) { unsigned int delta; if (rate < 4000) rate = 4000; if (rate > 48000) rate = 48000; if (rec) { if (rate == 44100) delta = 0x116a; else if (rate == 8000) delta = 0x6000; else if (rate == 48000) delta = 0x1000; else delta = ((48000 << 12) / rate) & 0x0000ffff; } else { if (rate == 44100) delta = 0xeb3; else if (rate == 8000) delta = 0x2ab; else if (rate == 48000) delta = 0x1000; else delta = (((rate << 12) + rate) / 48000) & 0x0000ffff; } return delta; } static unsigned int snd_ali_control_mode(struct snd_pcm_substream *substream) { unsigned int CTRL; struct snd_pcm_runtime *runtime = substream->runtime; /* set ctrl mode CTRL default: 8-bit (unsigned) mono, loop mode enabled */ CTRL = 0x00000001; if (snd_pcm_format_width(runtime->format) == 16) CTRL |= 0x00000008; /* 16-bit data */ 
if (!snd_pcm_format_unsigned(runtime->format)) CTRL |= 0x00000002; /* signed data */ if (runtime->channels > 1) CTRL |= 0x00000004; /* stereo data */ return CTRL; } /* * PCM part */ static int snd_ali_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_ali *codec = snd_pcm_substream_chip(substream); struct snd_pcm_substream *s; unsigned int what, whati, capture_flag; struct snd_ali_voice *pvoice, *evoice; unsigned int val; int do_start; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: do_start = 1; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: do_start = 0; break; default: return -EINVAL; } what = whati = capture_flag = 0; snd_pcm_group_for_each_entry(s, substream) { if ((struct snd_ali *) snd_pcm_substream_chip(s) == codec) { pvoice = s->runtime->private_data; evoice = pvoice->extra; what |= 1 << (pvoice->number & 0x1f); if (evoice == NULL) whati |= 1 << (pvoice->number & 0x1f); else { whati |= 1 << (evoice->number & 0x1f); what |= 1 << (evoice->number & 0x1f); } if (do_start) { pvoice->running = 1; if (evoice != NULL) evoice->running = 1; } else { pvoice->running = 0; if (evoice != NULL) evoice->running = 0; } snd_pcm_trigger_done(s, substream); if (pvoice->mode) capture_flag = 1; } } spin_lock(&codec->reg_lock); if (!do_start) outl(what, ALI_REG(codec, ALI_STOP)); val = inl(ALI_REG(codec, ALI_AINTEN)); if (do_start) val |= whati; else val &= ~whati; outl(val, ALI_REG(codec, ALI_AINTEN)); if (do_start) outl(what, ALI_REG(codec, ALI_START)); snd_ali_printk("trigger: what=%xh whati=%xh\n", what, whati); spin_unlock(&codec->reg_lock); return 0; } static int snd_ali_playback_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_ali *codec = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ali_voice *pvoice = runtime->private_data; struct snd_ali_voice *evoice = pvoice->extra; int err; err = 
snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (err < 0) return err; /* voice management */ if (params_buffer_size(hw_params) / 2 != params_period_size(hw_params)) { if (!evoice) { evoice = snd_ali_alloc_voice(codec, SNDRV_ALI_VOICE_TYPE_PCM, 0, -1); if (!evoice) return -ENOMEM; pvoice->extra = evoice; evoice->substream = substream; } } else { if (evoice) { snd_ali_free_voice(codec, evoice); pvoice->extra = evoice = NULL; } } return 0; } static int snd_ali_playback_hw_free(struct snd_pcm_substream *substream) { struct snd_ali *codec = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ali_voice *pvoice = runtime->private_data; struct snd_ali_voice *evoice = pvoice ? pvoice->extra : NULL; snd_pcm_lib_free_pages(substream); if (evoice) { snd_ali_free_voice(codec, evoice); pvoice->extra = NULL; } return 0; } static int snd_ali_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_ali_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static int snd_ali_playback_prepare(struct snd_pcm_substream *substream) { struct snd_ali *codec = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ali_voice *pvoice = runtime->private_data; struct snd_ali_voice *evoice = pvoice->extra; unsigned int LBA; unsigned int Delta; unsigned int ESO; unsigned int CTRL; unsigned int GVSEL; unsigned int PAN; unsigned int VOL; unsigned int EC; snd_ali_printk("playback_prepare ...\n"); spin_lock_irq(&codec->reg_lock); /* set Delta (rate) value */ Delta = snd_ali_convert_rate(runtime->rate, 0); if (pvoice->number == ALI_SPDIF_IN_CHANNEL || pvoice->number == ALI_PCM_IN_CHANNEL) snd_ali_disable_special_channel(codec, pvoice->number); else if (codec->spdif_support && (inl(ALI_REG(codec, ALI_GLOBAL_CONTROL)) & 
ALI_SPDIF_OUT_CH_ENABLE) && pvoice->number == ALI_SPDIF_OUT_CHANNEL) { snd_ali_set_spdif_out_rate(codec, runtime->rate); Delta = 0x1000; } /* set Loop Back Address */ LBA = runtime->dma_addr; /* set interrupt count size */ pvoice->count = runtime->period_size; /* set target ESO for channel */ pvoice->eso = runtime->buffer_size; snd_ali_printk("playback_prepare: eso=%xh count=%xh\n", pvoice->eso, pvoice->count); /* set ESO to capture first MIDLP interrupt */ ESO = pvoice->eso -1; /* set ctrl mode */ CTRL = snd_ali_control_mode(substream); GVSEL = 1; PAN = 0; VOL = 0; EC = 0; snd_ali_printk("playback_prepare:\n"); snd_ali_printk("ch=%d, Rate=%d Delta=%xh,GVSEL=%xh,PAN=%xh,CTRL=%xh\n", pvoice->number,runtime->rate,Delta,GVSEL,PAN,CTRL); snd_ali_write_voice_regs(codec, pvoice->number, LBA, 0, /* cso */ ESO, Delta, 0, /* alpha */ GVSEL, PAN, VOL, CTRL, EC); if (evoice) { evoice->count = pvoice->count; evoice->eso = pvoice->count << 1; ESO = evoice->eso - 1; snd_ali_write_voice_regs(codec, evoice->number, LBA, 0, /* cso */ ESO, Delta, 0, /* alpha */ GVSEL, 0x7f, 0x3ff, CTRL, EC); } spin_unlock_irq(&codec->reg_lock); return 0; } static int snd_ali_prepare(struct snd_pcm_substream *substream) { struct snd_ali *codec = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ali_voice *pvoice = runtime->private_data; unsigned int LBA; unsigned int Delta; unsigned int ESO; unsigned int CTRL; unsigned int GVSEL; unsigned int PAN; unsigned int VOL; unsigned int EC; u8 bValue; spin_lock_irq(&codec->reg_lock); snd_ali_printk("ali_prepare...\n"); snd_ali_enable_special_channel(codec,pvoice->number); Delta = (pvoice->number == ALI_MODEM_IN_CHANNEL || pvoice->number == ALI_MODEM_OUT_CHANNEL) ? 
0x1000 : snd_ali_convert_rate(runtime->rate, pvoice->mode); /* Prepare capture intr channel */ if (pvoice->number == ALI_SPDIF_IN_CHANNEL) { unsigned int rate; spin_unlock_irq(&codec->reg_lock); if (codec->revision != ALI_5451_V02) return -1; rate = snd_ali_get_spdif_in_rate(codec); if (rate == 0) { snd_printk(KERN_WARNING "ali_capture_preapre: " "spdif rate detect err!\n"); rate = 48000; } spin_lock_irq(&codec->reg_lock); bValue = inb(ALI_REG(codec,ALI_SPDIF_CTRL)); if (bValue & 0x10) { outb(bValue,ALI_REG(codec,ALI_SPDIF_CTRL)); printk(KERN_WARNING "clear SPDIF parity error flag.\n"); } if (rate != 48000) Delta = ((rate << 12) / runtime->rate) & 0x00ffff; } /* set target ESO for channel */ pvoice->eso = runtime->buffer_size; /* set interrupt count size */ pvoice->count = runtime->period_size; /* set Loop Back Address */ LBA = runtime->dma_addr; /* set ESO to capture first MIDLP interrupt */ ESO = pvoice->eso - 1; CTRL = snd_ali_control_mode(substream); GVSEL = 0; PAN = 0x00; VOL = 0x00; EC = 0; snd_ali_write_voice_regs( codec, pvoice->number, LBA, 0, /* cso */ ESO, Delta, 0, /* alpha */ GVSEL, PAN, VOL, CTRL, EC); spin_unlock_irq(&codec->reg_lock); return 0; } static snd_pcm_uframes_t snd_ali_playback_pointer(struct snd_pcm_substream *substream) { struct snd_ali *codec = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ali_voice *pvoice = runtime->private_data; unsigned int cso; spin_lock(&codec->reg_lock); if (!pvoice->running) { spin_unlock(&codec->reg_lock); return 0; } outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR)); cso = inw(ALI_REG(codec, ALI_CSO_ALPHA_FMS + 2)); spin_unlock(&codec->reg_lock); snd_ali_printk("playback pointer returned cso=%xh.\n", cso); return cso; } static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream) { struct snd_ali *codec = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ali_voice *pvoice = 
runtime->private_data; unsigned int cso; spin_lock(&codec->reg_lock); if (!pvoice->running) { spin_unlock(&codec->reg_lock); return 0; } outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR)); cso = inw(ALI_REG(codec, ALI_CSO_ALPHA_FMS + 2)); spin_unlock(&codec->reg_lock); return cso; } static struct snd_pcm_hardware snd_ali_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_SYNC_START), .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE), .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (256*1024), .period_bytes_min = 64, .period_bytes_max = (256*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; /* * Capture support device description */ static struct snd_pcm_hardware snd_ali_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_SYNC_START), .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE), .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 64, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static void snd_ali_pcm_free_substream(struct snd_pcm_runtime *runtime) { struct snd_ali_voice *pvoice = runtime->private_data; struct snd_ali *codec; if (pvoice) { codec = pvoice->codec; snd_ali_free_voice(pvoice->codec, pvoice); } } static int snd_ali_open(struct snd_pcm_substream *substream, int rec, int channel, struct snd_pcm_hardware *phw) { struct snd_ali *codec = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime 
= substream->runtime; struct snd_ali_voice *pvoice; pvoice = snd_ali_alloc_voice(codec, SNDRV_ALI_VOICE_TYPE_PCM, rec, channel); if (!pvoice) return -EAGAIN; pvoice->substream = substream; runtime->private_data = pvoice; runtime->private_free = snd_ali_pcm_free_substream; runtime->hw = *phw; snd_pcm_set_sync(substream); snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 64*1024); return 0; } static int snd_ali_playback_open(struct snd_pcm_substream *substream) { return snd_ali_open(substream, 0, -1, &snd_ali_playback); } static int snd_ali_capture_open(struct snd_pcm_substream *substream) { return snd_ali_open(substream, 1, -1, &snd_ali_capture); } static int snd_ali_playback_close(struct snd_pcm_substream *substream) { return 0; } static int snd_ali_close(struct snd_pcm_substream *substream) { struct snd_ali *codec = snd_pcm_substream_chip(substream); struct snd_ali_voice *pvoice = substream->runtime->private_data; snd_ali_disable_special_channel(codec,pvoice->number); return 0; } static struct snd_pcm_ops snd_ali_playback_ops = { .open = snd_ali_playback_open, .close = snd_ali_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ali_playback_hw_params, .hw_free = snd_ali_playback_hw_free, .prepare = snd_ali_playback_prepare, .trigger = snd_ali_trigger, .pointer = snd_ali_playback_pointer, }; static struct snd_pcm_ops snd_ali_capture_ops = { .open = snd_ali_capture_open, .close = snd_ali_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ali_hw_params, .hw_free = snd_ali_hw_free, .prepare = snd_ali_prepare, .trigger = snd_ali_trigger, .pointer = snd_ali_pointer, }; /* * Modem PCM */ static int snd_ali_modem_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_ali *chip = snd_pcm_substream_chip(substream); unsigned int modem_num = chip->num_of_codecs - 1; snd_ac97_write(chip->ac97[modem_num], AC97_LINE1_RATE, params_rate(hw_params)); snd_ac97_write(chip->ac97[modem_num], AC97_LINE1_LEVEL, 
0); return snd_ali_hw_params(substream, hw_params); } static struct snd_pcm_hardware snd_ali_modem = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_SYNC_START), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000), .rate_min = 8000, .rate_max = 16000, .channels_min = 1, .channels_max = 1, .buffer_bytes_max = (256*1024), .period_bytes_min = 64, .period_bytes_max = (256*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static int snd_ali_modem_open(struct snd_pcm_substream *substream, int rec, int channel) { static unsigned int rates[] = {8000, 9600, 12000, 16000}; static struct snd_pcm_hw_constraint_list hw_constraint_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; int err = snd_ali_open(substream, rec, channel, &snd_ali_modem); if (err) return err; return snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraint_rates); } static int snd_ali_modem_playback_open(struct snd_pcm_substream *substream) { return snd_ali_modem_open(substream, 0, ALI_MODEM_OUT_CHANNEL); } static int snd_ali_modem_capture_open(struct snd_pcm_substream *substream) { return snd_ali_modem_open(substream, 1, ALI_MODEM_IN_CHANNEL); } static struct snd_pcm_ops snd_ali_modem_playback_ops = { .open = snd_ali_modem_playback_open, .close = snd_ali_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ali_modem_hw_params, .hw_free = snd_ali_hw_free, .prepare = snd_ali_prepare, .trigger = snd_ali_trigger, .pointer = snd_ali_pointer, }; static struct snd_pcm_ops snd_ali_modem_capture_ops = { .open = snd_ali_modem_capture_open, .close = snd_ali_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ali_modem_hw_params, .hw_free = snd_ali_hw_free, .prepare = snd_ali_prepare, .trigger = snd_ali_trigger, .pointer = snd_ali_pointer, }; struct ali_pcm_description { char *name; 
unsigned int playback_num; unsigned int capture_num; struct snd_pcm_ops *playback_ops; struct snd_pcm_ops *capture_ops; unsigned short class; }; static void snd_ali_pcm_free(struct snd_pcm *pcm) { struct snd_ali *codec = pcm->private_data; codec->pcm[pcm->device] = NULL; } static int snd_ali_pcm(struct snd_ali *codec, int device, struct ali_pcm_description *desc) { struct snd_pcm *pcm; int err; err = snd_pcm_new(codec->card, desc->name, device, desc->playback_num, desc->capture_num, &pcm); if (err < 0) { snd_printk(KERN_ERR "snd_ali_pcm: err called snd_pcm_new.\n"); return err; } pcm->private_data = codec; pcm->private_free = snd_ali_pcm_free; if (desc->playback_ops) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, desc->playback_ops); if (desc->capture_ops) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, desc->capture_ops); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(codec->pci), 64*1024, 128*1024); pcm->info_flags = 0; pcm->dev_class = desc->class; pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX; strcpy(pcm->name, desc->name); codec->pcm[0] = pcm; return 0; } static struct ali_pcm_description ali_pcms[] = { { .name = "ALI 5451", .playback_num = ALI_CHANNELS, .capture_num = 1, .playback_ops = &snd_ali_playback_ops, .capture_ops = &snd_ali_capture_ops }, { .name = "ALI 5451 modem", .playback_num = 1, .capture_num = 1, .playback_ops = &snd_ali_modem_playback_ops, .capture_ops = &snd_ali_modem_capture_ops, .class = SNDRV_PCM_CLASS_MODEM } }; static int snd_ali_build_pcms(struct snd_ali *codec) { int i, err; for (i = 0; i < codec->num_of_codecs && i < ARRAY_SIZE(ali_pcms); i++) { err = snd_ali_pcm(codec, i, &ali_pcms[i]); if (err < 0) return err; } return 0; } #define ALI5451_SPDIF(xname, xindex, value) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_ali5451_spdif_info, .get = snd_ali5451_spdif_get, \ .put = snd_ali5451_spdif_put, .private_value = value} #define snd_ali5451_spdif_info 
snd_ctl_boolean_mono_info static int snd_ali5451_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ali *codec = kcontrol->private_data; unsigned int spdif_enable; spdif_enable = ucontrol->value.integer.value[0] ? 1 : 0; spin_lock_irq(&codec->reg_lock); switch (kcontrol->private_value) { case 0: spdif_enable = (codec->spdif_mask & 0x02) ? 1 : 0; break; case 1: spdif_enable = ((codec->spdif_mask & 0x02) && (codec->spdif_mask & 0x04)) ? 1 : 0; break; case 2: spdif_enable = (codec->spdif_mask & 0x01) ? 1 : 0; break; default: break; } ucontrol->value.integer.value[0] = spdif_enable; spin_unlock_irq(&codec->reg_lock); return 0; } static int snd_ali5451_spdif_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ali *codec = kcontrol->private_data; unsigned int change = 0, spdif_enable = 0; spdif_enable = ucontrol->value.integer.value[0] ? 1 : 0; spin_lock_irq(&codec->reg_lock); switch (kcontrol->private_value) { case 0: change = (codec->spdif_mask & 0x02) ? 1 : 0; change = change ^ spdif_enable; if (change) { if (spdif_enable) { codec->spdif_mask |= 0x02; snd_ali_enable_spdif_out(codec); } else { codec->spdif_mask &= ~(0x02); codec->spdif_mask &= ~(0x04); snd_ali_disable_spdif_out(codec); } } break; case 1: change = (codec->spdif_mask & 0x04) ? 1 : 0; change = change ^ spdif_enable; if (change && (codec->spdif_mask & 0x02)) { if (spdif_enable) { codec->spdif_mask |= 0x04; snd_ali_enable_spdif_chnout(codec); } else { codec->spdif_mask &= ~(0x04); snd_ali_disable_spdif_chnout(codec); } } break; case 2: change = (codec->spdif_mask & 0x01) ? 
1 : 0; change = change ^ spdif_enable; if (change) { if (spdif_enable) { codec->spdif_mask |= 0x01; snd_ali_enable_spdif_in(codec); } else { codec->spdif_mask &= ~(0x01); snd_ali_disable_spdif_in(codec); } } break; default: break; } spin_unlock_irq(&codec->reg_lock); return change; } static struct snd_kcontrol_new snd_ali5451_mixer_spdif[] = { /* spdif aplayback switch */ /* FIXME: "IEC958 Playback Switch" may conflict with one on ac97_codec */ ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH), 0, 0), /* spdif out to spdif channel */ ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("Channel Output ",NONE,SWITCH), 0, 1), /* spdif in from spdif channel */ ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0, 2) }; static int snd_ali_mixer(struct snd_ali *codec) { struct snd_ac97_template ac97; unsigned int idx; int i, err; static struct snd_ac97_bus_ops ops = { .write = snd_ali_codec_write, .read = snd_ali_codec_read, }; err = snd_ac97_bus(codec->card, 0, &ops, codec, &codec->ac97_bus); if (err < 0) return err; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = codec; for (i = 0; i < codec->num_of_codecs; i++) { ac97.num = i; err = snd_ac97_mixer(codec->ac97_bus, &ac97, &codec->ac97[i]); if (err < 0) { snd_printk(KERN_ERR "ali mixer %d creating error.\n", i); if (i == 0) return err; codec->num_of_codecs = 1; break; } } if (codec->spdif_support) { for (idx = 0; idx < ARRAY_SIZE(snd_ali5451_mixer_spdif); idx++) { err = snd_ctl_add(codec->card, snd_ctl_new1(&snd_ali5451_mixer_spdif[idx], codec)); if (err < 0) return err; } } return 0; } #ifdef CONFIG_PM_SLEEP static int ali_suspend(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct snd_ali *chip = card->private_data; struct snd_ali_image *im; int i, j; im = chip->image; if (!im) return 0; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); for (i = 0; i < chip->num_of_codecs; i++) { snd_pcm_suspend_all(chip->pcm[i]); snd_ac97_suspend(chip->ac97[i]); } 
spin_lock_irq(&chip->reg_lock); im->regs[ALI_MISCINT >> 2] = inl(ALI_REG(chip, ALI_MISCINT)); /* im->regs[ALI_START >> 2] = inl(ALI_REG(chip, ALI_START)); */ im->regs[ALI_STOP >> 2] = inl(ALI_REG(chip, ALI_STOP)); /* disable all IRQ bits */ outl(0, ALI_REG(chip, ALI_MISCINT)); for (i = 0; i < ALI_GLOBAL_REGS; i++) { if ((i*4 == ALI_MISCINT) || (i*4 == ALI_STOP)) continue; im->regs[i] = inl(ALI_REG(chip, i*4)); } for (i = 0; i < ALI_CHANNELS; i++) { outb(i, ALI_REG(chip, ALI_GC_CIR)); for (j = 0; j < ALI_CHANNEL_REGS; j++) im->channel_regs[i][j] = inl(ALI_REG(chip, j*4 + 0xe0)); } /* stop all HW channel */ outl(0xffffffff, ALI_REG(chip, ALI_STOP)); spin_unlock_irq(&chip->reg_lock); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, PCI_D3hot); return 0; } static int ali_resume(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct snd_ali *chip = card->private_data; struct snd_ali_image *im; int i, j; im = chip->image; if (!im) return 0; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "ali5451: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); spin_lock_irq(&chip->reg_lock); for (i = 0; i < ALI_CHANNELS; i++) { outb(i, ALI_REG(chip, ALI_GC_CIR)); for (j = 0; j < ALI_CHANNEL_REGS; j++) outl(im->channel_regs[i][j], ALI_REG(chip, j*4 + 0xe0)); } for (i = 0; i < ALI_GLOBAL_REGS; i++) { if ((i*4 == ALI_MISCINT) || (i*4 == ALI_STOP) || (i*4 == ALI_START)) continue; outl(im->regs[i], ALI_REG(chip, i*4)); } /* start HW channel */ outl(im->regs[ALI_START >> 2], ALI_REG(chip, ALI_START)); /* restore IRQ enable bits */ outl(im->regs[ALI_MISCINT >> 2], ALI_REG(chip, ALI_MISCINT)); spin_unlock_irq(&chip->reg_lock); for (i = 0 ; i < chip->num_of_codecs; i++) snd_ac97_resume(chip->ac97[i]); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } static 
SIMPLE_DEV_PM_OPS(ali_pm, ali_suspend, ali_resume); #define ALI_PM_OPS &ali_pm #else #define ALI_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ static int snd_ali_free(struct snd_ali * codec) { if (codec->hw_initialized) snd_ali_disable_address_interrupt(codec); if (codec->irq >= 0) free_irq(codec->irq, codec); if (codec->port) pci_release_regions(codec->pci); pci_disable_device(codec->pci); #ifdef CONFIG_PM_SLEEP kfree(codec->image); #endif pci_dev_put(codec->pci_m1533); pci_dev_put(codec->pci_m7101); kfree(codec); return 0; } static int snd_ali_chip_init(struct snd_ali *codec) { unsigned int legacy; unsigned char temp; struct pci_dev *pci_dev; snd_ali_printk("chip initializing ... \n"); if (snd_ali_reset_5451(codec)) { snd_printk(KERN_ERR "ali_chip_init: reset 5451 error.\n"); return -1; } if (codec->revision == ALI_5451_V02) { pci_dev = codec->pci_m1533; pci_read_config_byte(pci_dev, 0x59, &temp); temp |= 0x80; pci_write_config_byte(pci_dev, 0x59, temp); pci_dev = codec->pci_m7101; pci_read_config_byte(pci_dev, 0xb8, &temp); temp |= 0x20; pci_write_config_byte(pci_dev, 0xB8, temp); } pci_read_config_dword(codec->pci, 0x44, &legacy); legacy &= 0xff00ff00; legacy |= 0x000800aa; pci_write_config_dword(codec->pci, 0x44, legacy); outl(0x80000001, ALI_REG(codec, ALI_GLOBAL_CONTROL)); outl(0x00000000, ALI_REG(codec, ALI_AINTEN)); outl(0xffffffff, ALI_REG(codec, ALI_AINT)); outl(0x00000000, ALI_REG(codec, ALI_VOLUME)); outb(0x10, ALI_REG(codec, ALI_MPUR2)); codec->ac97_ext_id = snd_ali_codec_peek(codec, 0, AC97_EXTENDED_ID); codec->ac97_ext_status = snd_ali_codec_peek(codec, 0, AC97_EXTENDED_STATUS); if (codec->spdif_support) { snd_ali_enable_spdif_out(codec); codec->spdif_mask = 0x00000002; } codec->num_of_codecs = 1; /* secondary codec - modem */ if (inl(ALI_REG(codec, ALI_SCTRL)) & ALI_SCTRL_CODEC2_READY) { codec->num_of_codecs++; outl(inl(ALI_REG(codec, ALI_SCTRL)) | (ALI_SCTRL_LINE_IN2 | ALI_SCTRL_GPIO_IN2 | ALI_SCTRL_LINE_OUT_EN), ALI_REG(codec, ALI_SCTRL)); } 
snd_ali_printk("chip initialize succeed.\n"); return 0; } /* proc for register dump */ static void snd_ali_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buf) { struct snd_ali *codec = entry->private_data; int i; for (i = 0; i < 256 ; i+= 4) snd_iprintf(buf, "%02x: %08x\n", i, inl(ALI_REG(codec, i))); } static void snd_ali_proc_init(struct snd_ali *codec) { struct snd_info_entry *entry; if (!snd_card_proc_new(codec->card, "ali5451", &entry)) snd_info_set_text_ops(entry, codec, snd_ali_proc_read); } static int snd_ali_resources(struct snd_ali *codec) { int err; snd_ali_printk("resources allocation ...\n"); err = pci_request_regions(codec->pci, "ALI 5451"); if (err < 0) return err; codec->port = pci_resource_start(codec->pci, 0); if (request_irq(codec->pci->irq, snd_ali_card_interrupt, IRQF_SHARED, KBUILD_MODNAME, codec)) { snd_printk(KERN_ERR "Unable to request irq.\n"); return -EBUSY; } codec->irq = codec->pci->irq; snd_ali_printk("resources allocated.\n"); return 0; } static int snd_ali_dev_free(struct snd_device *device) { struct snd_ali *codec = device->device_data; snd_ali_free(codec); return 0; } static int snd_ali_create(struct snd_card *card, struct pci_dev *pci, int pcm_streams, int spdif_support, struct snd_ali **r_ali) { struct snd_ali *codec; int i, err; unsigned short cmdw; static struct snd_device_ops ops = { .dev_free = snd_ali_dev_free, }; *r_ali = NULL; snd_ali_printk("creating ...\n"); /* enable PCI device */ err = pci_enable_device(pci); if (err < 0) return err; /* check, if we can restrict PCI DMA transfers to 31 bits */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(31)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(31)) < 0) { snd_printk(KERN_ERR "architecture does not support " "31bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; } codec = kzalloc(sizeof(*codec), GFP_KERNEL); if (!codec) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&codec->reg_lock); spin_lock_init(&codec->voice_alloc); 
codec->card = card; codec->pci = pci; codec->irq = -1; codec->revision = pci->revision; codec->spdif_support = spdif_support; if (pcm_streams < 1) pcm_streams = 1; if (pcm_streams > 32) pcm_streams = 32; pci_set_master(pci); pci_read_config_word(pci, PCI_COMMAND, &cmdw); if ((cmdw & PCI_COMMAND_IO) != PCI_COMMAND_IO) { cmdw |= PCI_COMMAND_IO; pci_write_config_word(pci, PCI_COMMAND, cmdw); } pci_set_master(pci); if (snd_ali_resources(codec)) { snd_ali_free(codec); return -EBUSY; } synchronize_irq(pci->irq); codec->synth.chmap = 0; codec->synth.chcnt = 0; codec->spdif_mask = 0; codec->synth.synthcount = 0; if (codec->revision == ALI_5451_V02) codec->chregs.regs.ac97read = ALI_AC97_WRITE; else codec->chregs.regs.ac97read = ALI_AC97_READ; codec->chregs.regs.ac97write = ALI_AC97_WRITE; codec->chregs.regs.start = ALI_START; codec->chregs.regs.stop = ALI_STOP; codec->chregs.regs.aint = ALI_AINT; codec->chregs.regs.ainten = ALI_AINTEN; codec->chregs.data.start = 0x00; codec->chregs.data.stop = 0x00; codec->chregs.data.aint = 0x00; codec->chregs.data.ainten = 0x00; /* M1533: southbridge */ codec->pci_m1533 = pci_get_device(0x10b9, 0x1533, NULL); if (!codec->pci_m1533) { snd_printk(KERN_ERR "ali5451: cannot find ALi 1533 chip.\n"); snd_ali_free(codec); return -ENODEV; } /* M7101: power management */ codec->pci_m7101 = pci_get_device(0x10b9, 0x7101, NULL); if (!codec->pci_m7101 && codec->revision == ALI_5451_V02) { snd_printk(KERN_ERR "ali5451: cannot find ALi 7101 chip.\n"); snd_ali_free(codec); return -ENODEV; } snd_ali_printk("snd_device_new is called.\n"); err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, codec, &ops); if (err < 0) { snd_ali_free(codec); return err; } snd_card_set_dev(card, &pci->dev); /* initialise synth voices*/ for (i = 0; i < ALI_CHANNELS; i++) codec->synth.voices[i].number = i; err = snd_ali_chip_init(codec); if (err < 0) { snd_printk(KERN_ERR "ali create: chip init error.\n"); return err; } #ifdef CONFIG_PM_SLEEP codec->image = 
kmalloc(sizeof(*codec->image), GFP_KERNEL); if (!codec->image) snd_printk(KERN_WARNING "can't allocate apm buffer\n"); #endif snd_ali_enable_address_interrupt(codec); codec->hw_initialized = 1; *r_ali = codec; snd_ali_printk("created.\n"); return 0; } static int snd_ali_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; struct snd_ali *codec; int err; snd_ali_printk("probe ...\n"); err = snd_card_create(index, id, THIS_MODULE, 0, &card); if (err < 0) return err; err = snd_ali_create(card, pci, pcm_channels, spdif, &codec); if (err < 0) goto error; card->private_data = codec; snd_ali_printk("mixer building ...\n"); err = snd_ali_mixer(codec); if (err < 0) goto error; snd_ali_printk("pcm building ...\n"); err = snd_ali_build_pcms(codec); if (err < 0) goto error; snd_ali_proc_init(codec); strcpy(card->driver, "ALI5451"); strcpy(card->shortname, "ALI 5451"); sprintf(card->longname, "%s at 0x%lx, irq %i", card->shortname, codec->port, codec->irq); snd_ali_printk("register card.\n"); err = snd_card_register(card); if (err < 0) goto error; pci_set_drvdata(pci, card); return 0; error: snd_card_free(card); return err; } static void snd_ali_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver ali5451_driver = { .name = KBUILD_MODNAME, .id_table = snd_ali_ids, .probe = snd_ali_probe, .remove = snd_ali_remove, .driver = { .pm = ALI_PM_OPS, }, }; module_pci_driver(ali5451_driver);
gpl-2.0
Entropy512/I9300_N8013_Changes
arch/arm/kernel/smp_tlb.c
3245
3147
/*
 *  linux/arch/arm/kernel/smp_tlb.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/preempt.h>
#include <linux/smp.h>

#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

/*
 * Run func(info) on every CPU present in @mask.
 *
 * smp_call_function_many() IPIs only the *other* CPUs in @mask, so if
 * the calling CPU is itself a member of the mask, func must be invoked
 * directly here as well.  Preemption is disabled across the whole
 * sequence so that smp_processor_id() remains stable and the task
 * cannot migrate between the remote calls and the local one.
 */
static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,
	const struct cpumask *mask)
{
	preempt_disable();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);

	preempt_enable();
}

/**********************************************************************/

/*
 * TLB operations
 */

/*
 * Argument block handed to the IPI handlers below: the VMA plus the
 * address (ta_start) or address range (ta_start..ta_end) to flush on
 * the receiving CPU.  Callers allocate it on the stack and pass
 * wait == 1, so it stays valid until every handler has finished.
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

/* IPI handler: flush the entire TLB on this CPU. */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

/* IPI handler: flush all TLB entries for one mm on this CPU. */
static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

/* IPI handler: flush one user-space page (ta_start) on this CPU. */
static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

/* IPI handler: flush one kernel-space page (ta_start) on this CPU. */
static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

/* IPI handler: flush a user-space address range on this CPU. */
static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

/* IPI handler: flush a kernel-space address range on this CPU. */
static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

/*
 * Flush the whole TLB on all CPUs.
 *
 * If tlb_ops_need_broadcast() reports that TLB maintenance must be
 * propagated in software, IPI every online CPU; otherwise a local
 * flush is sufficient.
 */
void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
}

/*
 * Flush all TLB entries belonging to @mm.  When software broadcast is
 * required, only the CPUs in mm_cpumask(mm) — i.e. those that may hold
 * stale entries for this mm — are IPI'd.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
	else
		local_flush_tlb_mm(mm);
}

/*
 * Flush the TLB entry for a single user page @uaddr in @vma, on every
 * CPU that may have used vma->vm_mm (or just locally when hardware
 * broadcast suffices).
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1,
			mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_page(vma, uaddr);
}

/*
 * Flush the TLB entry for a single kernel page @kaddr.  Kernel
 * mappings are global, so the broadcast (when needed) goes to all
 * CPUs, not a per-mm mask.
 */
void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
}

/*
 * Flush the user-space range [@start, @end) of @vma on every CPU in
 * mm_cpumask(vma->vm_mm), or just locally when no software broadcast
 * is required.
 */
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1,
			mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_range(vma, start, end);
}

/*
 * Flush the kernel-space range [@start, @end) — broadcast to all CPUs
 * when necessary, since kernel mappings are shared by everyone.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}
gpl-2.0
STS-Dev-Team/kernel_omap4_xt912
arch/powerpc/platforms/cell/spufs/switch.c
4525
64573
/* * spu_switch.c * * (C) Copyright IBM Corp. 2005 * * Author: Mark Nutter <mnutter@us.ibm.com> * * Host-side part of SPU context switch sequence outlined in * Synergistic Processor Element, Book IV. * * A fully premptive switch of an SPE is very expensive in terms * of time and system resources. SPE Book IV indicates that SPE * allocation should follow a "serially reusable device" model, * in which the SPE is assigned a task until it completes. When * this is not possible, this sequence may be used to premptively * save, and then later (optionally) restore the context of a * program executing on an SPE. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/errno.h> #include <linux/hardirq.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <asm/io.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/spu_csa.h> #include <asm/mmu_context.h> #include "spufs.h" #include "spu_save_dump.h" #include "spu_restore_dump.h" #if 0 #define POLL_WHILE_TRUE(_c) { \ do { \ } while (_c); \ } #else #define RELAX_SPIN_COUNT 1000 #define POLL_WHILE_TRUE(_c) { \ do { \ int _i; \ for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \ cpu_relax(); \ } \ if (unlikely(_c)) yield(); \ else break; \ } while (_c); \ } #endif /* debug */ #define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c)) static inline void acquire_spu_lock(struct spu *spu) { /* Save, Step 1: * Restore, Step 1: * Acquire SPU-specific mutual exclusion lock. * TBD. */ } static inline void release_spu_lock(struct spu *spu) { /* Restore, Step 76: * Release SPU-specific mutual exclusion lock. * TBD. */ } static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 isolate_state; /* Save, Step 2: * Save, Step 6: * If SPU_Status[E,L,IS] any field is '1', this * SPU is in isolate state and cannot be context * saved at this time. */ isolate_state = SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS; return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0; } static inline void disable_interrupts(struct spu_state *csa, struct spu *spu) { /* Save, Step 3: * Restore, Step 2: * Save INT_Mask_class0 in CSA. * Write INT_MASK_class0 with value of 0. * Save INT_Mask_class1 in CSA. * Write INT_MASK_class1 with value of 0. * Save INT_Mask_class2 in CSA. * Write INT_MASK_class2 with value of 0. * Synchronize all three interrupts to be sure * we no longer execute a handler on another CPU. 
*/ spin_lock_irq(&spu->register_lock); if (csa) { csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0); csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1); csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2); } spu_int_mask_set(spu, 0, 0ul); spu_int_mask_set(spu, 1, 0ul); spu_int_mask_set(spu, 2, 0ul); eieio(); spin_unlock_irq(&spu->register_lock); /* * This flag needs to be set before calling synchronize_irq so * that the update will be visible to the relevant handlers * via a simple load. */ set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); synchronize_irq(spu->irqs[0]); synchronize_irq(spu->irqs[1]); synchronize_irq(spu->irqs[2]); } static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu) { /* Save, Step 4: * Restore, Step 25. * Set a software watchdog timer, which specifies the * maximum allowable time for a context save sequence. * * For present, this implementation will not set a global * watchdog timer, as virtualization & variable system load * may cause unpredictable execution times. */ } static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu) { /* Save, Step 5: * Restore, Step 3: * Inhibit user-space access (if provided) to this * SPU by unmapping the virtual pages assigned to * the SPU memory-mapped I/O (MMIO) for problem * state. TBD. */ } static inline void set_switch_pending(struct spu_state *csa, struct spu *spu) { /* Save, Step 7: * Restore, Step 5: * Set a software context switch pending flag. * Done above in Step 3 - disable_interrupts(). */ } static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 8: * Suspend DMA and save MFC_CNTL. 
*/ switch (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_SUSPEND_DMA_STATUS_MASK) { case MFC_CNTL_SUSPEND_IN_PROGRESS: POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == MFC_CNTL_SUSPEND_COMPLETE); /* fall through */ case MFC_CNTL_SUSPEND_COMPLETE: if (csa) csa->priv2.mfc_control_RW = in_be64(&priv2->mfc_control_RW) | MFC_CNTL_SUSPEND_DMA_QUEUE; break; case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION: out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == MFC_CNTL_SUSPEND_COMPLETE); if (csa) csa->priv2.mfc_control_RW = in_be64(&priv2->mfc_control_RW) & ~MFC_CNTL_SUSPEND_DMA_QUEUE & ~MFC_CNTL_SUSPEND_MASK; break; } } static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 9: * Save SPU_Runcntl in the CSA. This value contains * the "Application Desired State". */ csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW); } static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu) { /* Save, Step 10: * Save MFC_SR1 in the CSA. */ csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu); } static inline void save_spu_status(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 11: * Read SPU_Status[R], and save to CSA. 
*/ if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) { csa->prob.spu_status_R = in_be32(&prob->spu_status_R); } else { u32 stopped; out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; if ((in_be32(&prob->spu_status_R) & stopped) == 0) csa->prob.spu_status_R = SPU_STATUS_RUNNING; else csa->prob.spu_status_R = in_be32(&prob->spu_status_R); } } static inline void save_mfc_stopped_status(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; const u64 mask = MFC_CNTL_DECREMENTER_RUNNING | MFC_CNTL_DMA_QUEUES_EMPTY; /* Save, Step 12: * Read MFC_CNTL[Ds]. Update saved copy of * CSA.MFC_CNTL[Ds]. * * update: do the same with MFC_CNTL[Q]. */ csa->priv2.mfc_control_RW &= ~mask; csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask; } static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 13: * Write MFC_CNTL[Dh] set to a '1' to halt * the decrementer. */ out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK); eieio(); } static inline void save_timebase(struct spu_state *csa, struct spu *spu) { /* Save, Step 14: * Read PPE Timebase High and Timebase low registers * and save in CSA. TBD. */ csa->suspend_time = get_cycles(); } static inline void remove_other_spu_access(struct spu_state *csa, struct spu *spu) { /* Save, Step 15: * Remove other SPU access to this SPU by unmapping * this SPU's pages from their address space. TBD. */ } static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 16: * Restore, Step 11. * Write SPU_MSSync register. Poll SPU_MSSync[P] * for a value of 0. 
*/ out_be64(&prob->spc_mssync_RW, 1UL); POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING); } static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu) { /* Save, Step 17: * Restore, Step 12. * Restore, Step 48. * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register. * Then issue a PPE sync instruction. */ spu_tlb_invalidate(spu); mb(); } static inline void handle_pending_interrupts(struct spu_state *csa, struct spu *spu) { /* Save, Step 18: * Handle any pending interrupts from this SPU * here. This is OS or hypervisor specific. One * option is to re-enable interrupts to handle any * pending interrupts, with the interrupt handlers * recognizing the software Context Switch Pending * flag, to ensure the SPU execution or MFC command * queue is not restarted. TBD. */ } static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; int i; /* Save, Step 19: * If MFC_Cntl[Se]=0 then save * MFC command queues. */ if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) { for (i = 0; i < 8; i++) { csa->priv2.puq[i].mfc_cq_data0_RW = in_be64(&priv2->puq[i].mfc_cq_data0_RW); csa->priv2.puq[i].mfc_cq_data1_RW = in_be64(&priv2->puq[i].mfc_cq_data1_RW); csa->priv2.puq[i].mfc_cq_data2_RW = in_be64(&priv2->puq[i].mfc_cq_data2_RW); csa->priv2.puq[i].mfc_cq_data3_RW = in_be64(&priv2->puq[i].mfc_cq_data3_RW); } for (i = 0; i < 16; i++) { csa->priv2.spuq[i].mfc_cq_data0_RW = in_be64(&priv2->spuq[i].mfc_cq_data0_RW); csa->priv2.spuq[i].mfc_cq_data1_RW = in_be64(&priv2->spuq[i].mfc_cq_data1_RW); csa->priv2.spuq[i].mfc_cq_data2_RW = in_be64(&priv2->spuq[i].mfc_cq_data2_RW); csa->priv2.spuq[i].mfc_cq_data3_RW = in_be64(&priv2->spuq[i].mfc_cq_data3_RW); } } } static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 20: * Save the PPU_QueryMask register * in the CSA. 
*/ csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW); } static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 21: * Save the PPU_QueryType register * in the CSA. */ csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW); } static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save the Prxy_TagStatus register in the CSA. * * It is unnecessary to restore dma_tagstatus_R, however, * dma_tagstatus_R in the CSA is accessed via backing_ops, so * we must save it. */ csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R); } static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 22: * Save the MFC_CSR_TSQ register * in the LSCSA. */ csa->priv2.spu_tag_status_query_RW = in_be64(&priv2->spu_tag_status_query_RW); } static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 23: * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2 * registers in the CSA. */ csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW); csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW); } static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 24: * Save the MFC_CSR_ATO register in * the CSA. */ csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW); } static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu) { /* Save, Step 25: * Save the MFC_TCLASS_ID register in * the CSA. */ csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu); } static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu) { /* Save, Step 26: * Restore, Step 23. * Write the MFC_TCLASS_ID register with * the value 0x10000000. 
*/ spu_mfc_tclass_id_set(spu, 0x10000000); eieio(); } static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 27: * Restore, Step 14. * Write MFC_CNTL[Pc]=1 (purge queue). */ out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST | MFC_CNTL_SUSPEND_MASK); eieio(); } static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 28: * Poll MFC_CNTL[Ps] until value '11' is read * (purge complete). */ POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_PURGE_DMA_STATUS_MASK) == MFC_CNTL_PURGE_DMA_COMPLETE); } static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu) { /* Save, Step 30: * Restore, Step 18: * Write MFC_SR1 with MFC_SR1[D=0,S=1] and * MFC_SR1[TL,R,Pr,T] set correctly for the * OS specific environment. * * Implementation note: The SPU-side code * for save/restore is privileged, so the * MFC_SR1[Pr] bit is not set. * */ spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK | MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK)); } static inline void save_spu_npc(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 31: * Save SPU_NPC in the CSA. */ csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW); } static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 32: * Save SPU_PrivCntl in the CSA. */ csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW); } static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 33: * Restore, Step 16: * Write SPU_PrivCntl[S,Le,A] fields reset to 0. 
*/ out_be64(&priv2->spu_privcntl_RW, 0UL); eieio(); } static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 34: * Save SPU_LSLR in the CSA. */ csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW); } static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 35: * Restore, Step 17. * Reset SPU_LSLR. */ out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK); eieio(); } static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 36: * Save SPU_Cfg in the CSA. */ csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW); } static inline void save_pm_trace(struct spu_state *csa, struct spu *spu) { /* Save, Step 37: * Save PM_Trace_Tag_Wait_Mask in the CSA. * Not performed by this implementation. */ } static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu) { /* Save, Step 38: * Save RA_GROUP_ID register and the * RA_ENABLE reigster in the CSA. */ csa->priv1.resource_allocation_groupID_RW = spu_resource_allocation_groupID_get(spu); csa->priv1.resource_allocation_enable_RW = spu_resource_allocation_enable_get(spu); } static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 39: * Save MB_Stat register in the CSA. */ csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R); } static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 40: * Save the PPU_MB register in the CSA. */ csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R); } static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 41: * Save the PPUINT_MB register in the CSA. 
*/ csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R); } static inline void save_ch_part1(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; int i; /* Save, Step 42: */ /* Save CH 1, without channel count */ out_be64(&priv2->spu_chnlcntptr_RW, 1); csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW); /* Save the following CH: [0,3,4,24,25,27] */ for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW); csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW); out_be64(&priv2->spu_chnldata_RW, 0UL); out_be64(&priv2->spu_chnlcnt_RW, 0UL); eieio(); } } static inline void save_spu_mb(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; int i; /* Save, Step 43: * Save SPU Read Mailbox Channel. */ out_be64(&priv2->spu_chnlcntptr_RW, 29UL); eieio(); csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW); for (i = 0; i < 4; i++) { csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW); } out_be64(&priv2->spu_chnlcnt_RW, 0UL); eieio(); } static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 44: * Save MFC_CMD Channel. 
*/ out_be64(&priv2->spu_chnlcntptr_RW, 21UL); eieio(); csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW); eieio(); } static inline void reset_ch(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL }; u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL }; u64 idx; int i; /* Save, Step 45: * Reset the following CH: [21, 23, 28, 30] */ for (i = 0; i < 4; i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); eieio(); } } static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 46: * Restore, Step 25. * Write MFC_CNTL[Sc]=0 (resume queue processing). */ out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE); } static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu, unsigned int *code, int code_size) { /* Save, Step 47: * Restore, Step 30. * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All * register, then initialize SLB_VSID and SLB_ESID * to provide access to SPU context save code and * LSCSA. * * This implementation places both the context * switch code and LSCSA in kernel address space. * * Further this implementation assumes that the * MFC_SR1[R]=1 (in other words, assume that * translation is desired by OS environment). */ spu_invalidate_slbs(spu); spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size); } static inline void set_switch_active(struct spu_state *csa, struct spu *spu) { /* Save, Step 48: * Restore, Step 23. * Change the software context switch pending flag * to context switch active. This implementation does * not uses a switch active flag. * * Now that we have saved the mfc in the csa, we can add in the * restart command if an exception occurred. 
*/ if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags)) csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND; clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); mb(); } static inline void enable_interrupts(struct spu_state *csa, struct spu *spu) { unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR | CLASS1_ENABLE_STORAGE_FAULT_INTR; /* Save, Step 49: * Restore, Step 22: * Reset and then enable interrupts, as * needed by OS. * * This implementation enables only class1 * (translation) interrupts. */ spin_lock_irq(&spu->register_lock); spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK); spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); spu_int_mask_set(spu, 0, 0ul); spu_int_mask_set(spu, 1, class1_mask); spu_int_mask_set(spu, 2, 0ul); spin_unlock_irq(&spu->register_lock); } static inline int send_mfc_dma(struct spu *spu, unsigned long ea, unsigned int ls_offset, unsigned int size, unsigned int tag, unsigned int rclass, unsigned int cmd) { struct spu_problem __iomem *prob = spu->problem; union mfc_tag_size_class_cmd command; unsigned int transfer_size; volatile unsigned int status = 0x0; while (size > 0) { transfer_size = (size > MFC_MAX_DMA_SIZE) ? 
MFC_MAX_DMA_SIZE : size; command.u.mfc_size = transfer_size; command.u.mfc_tag = tag; command.u.mfc_rclassid = rclass; command.u.mfc_cmd = cmd; do { out_be32(&prob->mfc_lsa_W, ls_offset); out_be64(&prob->mfc_ea_W, ea); out_be64(&prob->mfc_union_W.all64, command.all64); status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32); if (unlikely(status & 0x2)) { cpu_relax(); } } while (status & 0x3); size -= transfer_size; ea += transfer_size; ls_offset += transfer_size; } return 0; } static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu) { unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; unsigned int ls_offset = 0x0; unsigned int size = 16384; unsigned int tag = 0; unsigned int rclass = 0; unsigned int cmd = MFC_PUT_CMD; /* Save, Step 50: * Issue a DMA command to copy the first 16K bytes * of local storage to the CSA. */ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); } static inline void set_spu_npc(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 51: * Restore, Step 31. * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry * point address of context save code in local * storage. * * This implementation uses SPU-side save/restore * programs with entry points at LSA of 0. */ out_be32(&prob->spu_npc_RW, 0); eieio(); } static inline void set_signot1(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; union { u64 ull; u32 ui[2]; } addr64; /* Save, Step 52: * Restore, Step 32: * Write SPU_Sig_Notify_1 register with upper 32-bits * of the CSA.LSCSA effective address. */ addr64.ull = (u64) csa->lscsa; out_be32(&prob->signal_notify1, addr64.ui[0]); eieio(); } static inline void set_signot2(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; union { u64 ull; u32 ui[2]; } addr64; /* Save, Step 53: * Restore, Step 33: * Write SPU_Sig_Notify_2 register with lower 32-bits * of the CSA.LSCSA effective address. 
*/ addr64.ull = (u64) csa->lscsa; out_be32(&prob->signal_notify2, addr64.ui[1]); eieio(); } static inline void send_save_code(struct spu_state *csa, struct spu *spu) { unsigned long addr = (unsigned long)&spu_save_code[0]; unsigned int ls_offset = 0x0; unsigned int size = sizeof(spu_save_code); unsigned int tag = 0; unsigned int rclass = 0; unsigned int cmd = MFC_GETFS_CMD; /* Save, Step 54: * Issue a DMA command to copy context save code * to local storage and start SPU. */ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); } static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 55: * Restore, Step 38. * Write PPU_QueryMask=1 (enable Tag Group 0) * and issue eieio instruction. */ out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0)); eieio(); } static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 mask = MFC_TAGID_TO_TAGMASK(0); unsigned long flags; /* Save, Step 56: * Restore, Step 39. * Restore, Step 39. * Restore, Step 46. * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete) * or write PPU_QueryType[TS]=01 and wait for Tag Group * Complete Interrupt. Write INT_Stat_Class0 or * INT_Stat_Class2 with value of 'handled'. */ POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask); local_irq_save(flags); spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); local_irq_restore(flags); } static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; unsigned long flags; /* Save, Step 57: * Restore, Step 40. * Poll until SPU_Status[R]=0 or wait for SPU Class 0 * or SPU Class 2 interrupt. Write INT_Stat_class0 * or INT_Stat_class2 with value of handled. 
*/ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); local_irq_save(flags); spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); local_irq_restore(flags); } static inline int check_save_status(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 complete; /* Save, Step 54: * If SPU_Status[P]=1 and SPU_Status[SC] = "success", * context save succeeded, otherwise context save * failed. */ complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) | SPU_STATUS_STOPPED_BY_STOP); return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; } static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu) { /* Restore, Step 4: * If required, notify the "using application" that * the SPU task has been terminated. TBD. */ } static inline void suspend_mfc_and_halt_decr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 7: * Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend * the queue and halt the decrementer. */ out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE | MFC_CNTL_DECREMENTER_HALTED); eieio(); } static inline void wait_suspend_mfc_complete(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 8: * Restore, Step 47. * Poll MFC_CNTL[Ss] until 11 is returned. */ POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == MFC_CNTL_SUSPEND_COMPLETE); } static inline int suspend_spe(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 9: * If SPU_Status[R]=1, stop SPU execution * and wait for stop to complete. * * Returns 1 if SPU_Status[R]=1 on entry. 
* 0 otherwise */ if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) { if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_EXIT_STATUS) { POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } if ((in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_LOAD_STATUS) || (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE)) { out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); out_be32(&prob->spu_runcntl_RW, 0x2); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } if (in_be32(&prob->spu_status_R) & SPU_STATUS_WAITING_FOR_CHANNEL) { out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } return 1; } return 0; } static inline void clear_spu_status(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 10: * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1, * release SPU from isolate state. 
*/ if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) { if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_EXIT_STATUS) { spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK); eieio(); out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } if ((in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_LOAD_STATUS) || (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE)) { spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK); eieio(); out_be32(&prob->spu_runcntl_RW, 0x2); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } } } static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; u64 idx; int i; /* Restore, Step 20: */ /* Reset CH 1 */ out_be64(&priv2->spu_chnlcntptr_RW, 1); out_be64(&priv2->spu_chnldata_RW, 0UL); /* Reset the following CH: [0,3,4,24,25,27] */ for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); out_be64(&priv2->spu_chnldata_RW, 0UL); out_be64(&priv2->spu_chnlcnt_RW, 0UL); eieio(); } } static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL }; u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL }; u64 idx; int i; /* Restore, Step 21: * Reset the following CH: [21, 23, 28, 29, 30] */ for (i = 0; i < 5; i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); eieio(); } } static inline void setup_spu_status_part1(struct spu_state *csa, struct spu *spu) { u32 status_P = SPU_STATUS_STOPPED_BY_STOP; u32 status_I = SPU_STATUS_INVALID_INSTR; u32 status_H = SPU_STATUS_STOPPED_BY_HALT; u32 status_S = SPU_STATUS_SINGLE_STEP; u32 status_S_I = SPU_STATUS_SINGLE_STEP | 
SPU_STATUS_INVALID_INSTR; u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP; u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT |SPU_STATUS_STOPPED_BY_STOP; u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP |SPU_STATUS_INVALID_INSTR; u32 status_code; /* Restore, Step 27: * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct * instruction sequence to the end of the SPU based restore * code (after the "context restored" stop and signal) to * restore the correct SPU status. * * NOTE: Rather than modifying the SPU executable, we * instead add a new 'stopped_status' field to the * LSCSA. The SPU-side restore reads this field and * takes the appropriate action when exiting. */ status_code = (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF; if ((csa->prob.spu_status_R & status_P_I) == status_P_I) { /* SPU_Status[P,I]=1 - Illegal Instruction followed * by Stop and Signal instruction, followed by 'br -4'. * */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I; csa->lscsa->stopped_status.slot[1] = status_code; } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) { /* SPU_Status[P,H]=1 - Halt Conditional, followed * by Stop and Signal instruction, followed by * 'br -4'. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H; csa->lscsa->stopped_status.slot[1] = status_code; } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) { /* SPU_Status[S,P]=1 - Stop and Signal instruction * followed by 'br -4'. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P; csa->lscsa->stopped_status.slot[1] = status_code; } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) { /* SPU_Status[S,I]=1 - Illegal instruction followed * by 'br -4'. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I; csa->lscsa->stopped_status.slot[1] = status_code; } else if ((csa->prob.spu_status_R & status_P) == status_P) { /* SPU_Status[P]=1 - Stop and Signal instruction * followed by 'br -4'. 
*/ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P; csa->lscsa->stopped_status.slot[1] = status_code; } else if ((csa->prob.spu_status_R & status_H) == status_H) { /* SPU_Status[H]=1 - Halt Conditional, followed * by 'br -4'. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H; } else if ((csa->prob.spu_status_R & status_S) == status_S) { /* SPU_Status[S]=1 - Two nop instructions. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S; } else if ((csa->prob.spu_status_R & status_I) == status_I) { /* SPU_Status[I]=1 - Illegal instruction followed * by 'br -4'. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I; } } static inline void setup_spu_status_part2(struct spu_state *csa, struct spu *spu) { u32 mask; /* Restore, Step 28: * If the CSA.SPU_Status[I,S,H,P,R]=0 then * add a 'br *' instruction to the end of * the SPU based restore code. * * NOTE: Rather than modifying the SPU executable, we * instead add a new 'stopped_status' field to the * LSCSA. The SPU-side restore reads this field and * takes the appropriate action when exiting. */ mask = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; if (!(csa->prob.spu_status_R & mask)) { csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R; } } static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu) { /* Restore, Step 29: * Restore RA_GROUP_ID register and the * RA_ENABLE reigster from the CSA. 
*/ spu_resource_allocation_groupID_set(spu, csa->priv1.resource_allocation_groupID_RW); spu_resource_allocation_enable_set(spu, csa->priv1.resource_allocation_enable_RW); } static inline void send_restore_code(struct spu_state *csa, struct spu *spu) { unsigned long addr = (unsigned long)&spu_restore_code[0]; unsigned int ls_offset = 0x0; unsigned int size = sizeof(spu_restore_code); unsigned int tag = 0; unsigned int rclass = 0; unsigned int cmd = MFC_GETFS_CMD; /* Restore, Step 37: * Issue MFC DMA command to copy context * restore code to local storage. */ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); } static inline void setup_decr(struct spu_state *csa, struct spu *spu) { /* Restore, Step 34: * If CSA.MFC_CNTL[Ds]=1 (decrementer was * running) then adjust decrementer, set * decrementer running status in LSCSA, * and set decrementer "wrapped" status * in LSCSA. */ if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) { cycles_t resume_time = get_cycles(); cycles_t delta_time = resume_time - csa->suspend_time; csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING; if (csa->lscsa->decr.slot[0] < delta_time) { csa->lscsa->decr_status.slot[0] |= SPU_DECR_STATUS_WRAPPED; } csa->lscsa->decr.slot[0] -= delta_time; } else { csa->lscsa->decr_status.slot[0] = 0; } } static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu) { /* Restore, Step 35: * Copy the CSA.PU_MB data into the LSCSA. */ csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R; } static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu) { /* Restore, Step 36: * Copy the CSA.PUINT_MB data into the LSCSA. */ csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R; } static inline int check_restore_status(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 complete; /* Restore, Step 40: * If SPU_Status[P]=1 and SPU_Status[SC] = "success", * context restore succeeded, otherwise context restore * failed. 
*/ complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) | SPU_STATUS_STOPPED_BY_STOP); return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; } static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 41: * Restore SPU_PrivCntl from the CSA. */ out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW); eieio(); } static inline void restore_status_part1(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 mask; /* Restore, Step 42: * If any CSA.SPU_Status[I,S,H,P]=1, then * restore the error or single step state. */ mask = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; if (csa->prob.spu_status_R & mask) { out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } } static inline void restore_status_part2(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 mask; /* Restore, Step 43: * If all CSA.SPU_Status[I,S,H,P,R]=0 then write * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1, * then write '00' to SPU_RunCntl[R0R1] and wait * for SPU_Status[R]=0. 
*/ mask = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; if (!(csa->prob.spu_status_R & mask)) { out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); eieio(); POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } } static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu) { unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; unsigned int ls_offset = 0x0; unsigned int size = 16384; unsigned int tag = 0; unsigned int rclass = 0; unsigned int cmd = MFC_GET_CMD; /* Restore, Step 44: * Issue a DMA command to restore the first * 16kb of local storage from CSA. */ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); } static inline void suspend_mfc(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 47. * Write MFC_Cntl[Sc,Sm]='1','0' to suspend * the queue. */ out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); eieio(); } static inline void clear_interrupts(struct spu_state *csa, struct spu *spu) { /* Restore, Step 49: * Write INT_MASK_class0 with value of 0. * Write INT_MASK_class1 with value of 0. * Write INT_MASK_class2 with value of 0. * Write INT_STAT_class0 with value of -1. * Write INT_STAT_class1 with value of -1. * Write INT_STAT_class2 with value of -1. 
*/ spin_lock_irq(&spu->register_lock); spu_int_mask_set(spu, 0, 0ul); spu_int_mask_set(spu, 1, 0ul); spu_int_mask_set(spu, 2, 0ul); spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK); spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); spin_unlock_irq(&spu->register_lock); } static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; int i; /* Restore, Step 50: * If MFC_Cntl[Se]!=0 then restore * MFC command queues. */ if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) { for (i = 0; i < 8; i++) { out_be64(&priv2->puq[i].mfc_cq_data0_RW, csa->priv2.puq[i].mfc_cq_data0_RW); out_be64(&priv2->puq[i].mfc_cq_data1_RW, csa->priv2.puq[i].mfc_cq_data1_RW); out_be64(&priv2->puq[i].mfc_cq_data2_RW, csa->priv2.puq[i].mfc_cq_data2_RW); out_be64(&priv2->puq[i].mfc_cq_data3_RW, csa->priv2.puq[i].mfc_cq_data3_RW); } for (i = 0; i < 16; i++) { out_be64(&priv2->spuq[i].mfc_cq_data0_RW, csa->priv2.spuq[i].mfc_cq_data0_RW); out_be64(&priv2->spuq[i].mfc_cq_data1_RW, csa->priv2.spuq[i].mfc_cq_data1_RW); out_be64(&priv2->spuq[i].mfc_cq_data2_RW, csa->priv2.spuq[i].mfc_cq_data2_RW); out_be64(&priv2->spuq[i].mfc_cq_data3_RW, csa->priv2.spuq[i].mfc_cq_data3_RW); } } eieio(); } static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 51: * Restore the PPU_QueryMask register from CSA. */ out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW); eieio(); } static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 52: * Restore the PPU_QueryType register from CSA. 
*/ out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW); eieio(); } static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 53: * Restore the MFC_CSR_TSQ register from CSA. */ out_be64(&priv2->spu_tag_status_query_RW, csa->priv2.spu_tag_status_query_RW); eieio(); } static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 54: * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2 * registers from CSA. */ out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW); out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW); eieio(); } static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 55: * Restore the MFC_CSR_ATO register from CSA. */ out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW); } static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu) { /* Restore, Step 56: * Restore the MFC_TCLASS_ID register from CSA. */ spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW); eieio(); } static inline void set_llr_event(struct spu_state *csa, struct spu *spu) { u64 ch0_cnt, ch0_data; u64 ch1_data; /* Restore, Step 57: * Set the Lock Line Reservation Lost Event by: * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1. * 2. If CSA.SPU_Channel_0_Count=0 and * CSA.SPU_Wr_Event_Mask[Lr]=1 and * CSA.SPU_Event_Status[Lr]=0 then set * CSA.SPU_Event_Status_Count=1. 
*/ ch0_cnt = csa->spu_chnlcnt_RW[0]; ch0_data = csa->spu_chnldata_RW[0]; ch1_data = csa->spu_chnldata_RW[1]; csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT; if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) && (ch1_data & MFC_LLR_LOST_EVENT)) { csa->spu_chnlcnt_RW[0] = 1; } } static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu) { /* Restore, Step 58: * If the status of the CSA software decrementer * "wrapped" flag is set, OR in a '1' to * CSA.SPU_Event_Status[Tm]. */ if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED)) return; if ((csa->spu_chnlcnt_RW[0] == 0) && (csa->spu_chnldata_RW[1] & 0x20) && !(csa->spu_chnldata_RW[0] & 0x20)) csa->spu_chnlcnt_RW[0] = 1; csa->spu_chnldata_RW[0] |= 0x20; } static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; int i; /* Restore, Step 59: * Restore the following CH: [0,3,4,24,25,27] */ for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]); out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]); eieio(); } } static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 ch_indices[3] = { 9UL, 21UL, 23UL }; u64 ch_counts[3] = { 1UL, 16UL, 1UL }; u64 idx; int i; /* Restore, Step 60: * Restore the following CH: [9,21,23]. */ ch_counts[0] = 1UL; ch_counts[1] = csa->spu_chnlcnt_RW[21]; ch_counts[2] = 1UL; for (i = 0; i < 3; i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); eieio(); } } static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 61: * Restore the SPU_LSLR register from CSA. 
*/ out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW); eieio(); } static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 62: * Restore the SPU_Cfg register from CSA. */ out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW); eieio(); } static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu) { /* Restore, Step 63: * Restore PM_Trace_Tag_Wait_Mask from CSA. * Not performed by this implementation. */ } static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 64: * Restore SPU_NPC from CSA. */ out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW); eieio(); } static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; int i; /* Restore, Step 65: * Restore MFC_RdSPU_MB from CSA. */ out_be64(&priv2->spu_chnlcntptr_RW, 29UL); eieio(); out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]); for (i = 0; i < 4; i++) { out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]); } eieio(); } static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 dummy = 0; /* Restore, Step 66: * If CSA.MB_Stat[P]=0 (mailbox empty) then * read from the PPU_MB register. */ if ((csa->prob.mb_stat_R & 0xFF) == 0) { dummy = in_be32(&prob->pu_mb_R); eieio(); } } static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 dummy = 0UL; /* Restore, Step 66: * If CSA.MB_Stat[I]=0 (mailbox empty) then * read from the PPUINT_MB register. 
*/ if ((csa->prob.mb_stat_R & 0xFF0000) == 0) { dummy = in_be64(&priv2->puint_mb_R); eieio(); spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); eieio(); } } static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu) { /* Restore, Step 69: * Restore the MFC_SR1 register from CSA. */ spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW); eieio(); } static inline void set_int_route(struct spu_state *csa, struct spu *spu) { struct spu_context *ctx = spu->ctx; spu_cpu_affinity_set(spu, ctx->last_ran); } static inline void restore_other_spu_access(struct spu_state *csa, struct spu *spu) { /* Restore, Step 70: * Restore other SPU mappings to this SPU. TBD. */ } static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 71: * If CSA.SPU_Status[R]=1 then write * SPU_RunCntl[R0R1]='01'. */ if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) { out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); eieio(); } } static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 72: * Restore the MFC_CNTL register for the CSA. */ out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); eieio(); /* * The queue is put back into the same state that was evident prior to * the context switch. The suspend flag is added to the saved state in * the csa, if the operational state was suspending or suspended. In * this case, the code that suspended the mfc is responsible for * continuing it. Note that SPE faults do not change the operational * state of the spu. */ } static inline void enable_user_access(struct spu_state *csa, struct spu *spu) { /* Restore, Step 73: * Enable user-space access (if provided) to this * SPU by mapping the virtual pages assigned to * the SPU memory-mapped I/O (MMIO) for problem * state. TBD. 
*/ } static inline void reset_switch_active(struct spu_state *csa, struct spu *spu) { /* Restore, Step 74: * Reset the "context switch active" flag. * Not performed by this implementation. */ } static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu) { /* Restore, Step 75: * Re-enable SPU interrupts. */ spin_lock_irq(&spu->register_lock); spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW); spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW); spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW); spin_unlock_irq(&spu->register_lock); } static int quiece_spu(struct spu_state *prev, struct spu *spu) { /* * Combined steps 2-18 of SPU context save sequence, which * quiesce the SPU state (disable SPU execution, MFC command * queues, decrementer, SPU interrupts, etc.). * * Returns 0 on success. * 2 if failed step 2. * 6 if failed step 6. */ if (check_spu_isolate(prev, spu)) { /* Step 2. */ return 2; } disable_interrupts(prev, spu); /* Step 3. */ set_watchdog_timer(prev, spu); /* Step 4. */ inhibit_user_access(prev, spu); /* Step 5. */ if (check_spu_isolate(prev, spu)) { /* Step 6. */ return 6; } set_switch_pending(prev, spu); /* Step 7. */ save_mfc_cntl(prev, spu); /* Step 8. */ save_spu_runcntl(prev, spu); /* Step 9. */ save_mfc_sr1(prev, spu); /* Step 10. */ save_spu_status(prev, spu); /* Step 11. */ save_mfc_stopped_status(prev, spu); /* Step 12. */ halt_mfc_decr(prev, spu); /* Step 13. */ save_timebase(prev, spu); /* Step 14. */ remove_other_spu_access(prev, spu); /* Step 15. */ do_mfc_mssync(prev, spu); /* Step 16. */ issue_mfc_tlbie(prev, spu); /* Step 17. */ handle_pending_interrupts(prev, spu); /* Step 18. */ return 0; } static void save_csa(struct spu_state *prev, struct spu *spu) { /* * Combine steps 19-44 of SPU context save sequence, which * save regions of the privileged & problem state areas. */ save_mfc_queues(prev, spu); /* Step 19. */ save_ppu_querymask(prev, spu); /* Step 20. */ save_ppu_querytype(prev, spu); /* Step 21. 
*/ save_ppu_tagstatus(prev, spu); /* NEW. */ save_mfc_csr_tsq(prev, spu); /* Step 22. */ save_mfc_csr_cmd(prev, spu); /* Step 23. */ save_mfc_csr_ato(prev, spu); /* Step 24. */ save_mfc_tclass_id(prev, spu); /* Step 25. */ set_mfc_tclass_id(prev, spu); /* Step 26. */ save_mfc_cmd(prev, spu); /* Step 26a - moved from 44. */ purge_mfc_queue(prev, spu); /* Step 27. */ wait_purge_complete(prev, spu); /* Step 28. */ setup_mfc_sr1(prev, spu); /* Step 30. */ save_spu_npc(prev, spu); /* Step 31. */ save_spu_privcntl(prev, spu); /* Step 32. */ reset_spu_privcntl(prev, spu); /* Step 33. */ save_spu_lslr(prev, spu); /* Step 34. */ reset_spu_lslr(prev, spu); /* Step 35. */ save_spu_cfg(prev, spu); /* Step 36. */ save_pm_trace(prev, spu); /* Step 37. */ save_mfc_rag(prev, spu); /* Step 38. */ save_ppu_mb_stat(prev, spu); /* Step 39. */ save_ppu_mb(prev, spu); /* Step 40. */ save_ppuint_mb(prev, spu); /* Step 41. */ save_ch_part1(prev, spu); /* Step 42. */ save_spu_mb(prev, spu); /* Step 43. */ reset_ch(prev, spu); /* Step 45. */ } static void save_lscsa(struct spu_state *prev, struct spu *spu) { /* * Perform steps 46-57 of SPU context save sequence, * which save regions of the local store and register * file. */ resume_mfc_queue(prev, spu); /* Step 46. */ /* Step 47. */ setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code)); set_switch_active(prev, spu); /* Step 48. */ enable_interrupts(prev, spu); /* Step 49. */ save_ls_16kb(prev, spu); /* Step 50. */ set_spu_npc(prev, spu); /* Step 51. */ set_signot1(prev, spu); /* Step 52. */ set_signot2(prev, spu); /* Step 53. */ send_save_code(prev, spu); /* Step 54. */ set_ppu_querymask(prev, spu); /* Step 55. */ wait_tag_complete(prev, spu); /* Step 56. */ wait_spu_stopped(prev, spu); /* Step 57. */ } static void force_spu_isolate_exit(struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; struct spu_priv2 __iomem *priv2 = spu->priv2; /* Stop SPE execution and wait for completion. 
*/ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); iobarrier_rw(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); /* Restart SPE master runcntl. */ spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK); iobarrier_w(); /* Initiate isolate exit request and wait for completion. */ out_be64(&priv2->spu_privcntl_RW, 4LL); iobarrier_w(); out_be32(&prob->spu_runcntl_RW, 2); iobarrier_rw(); POLL_WHILE_FALSE((in_be32(&prob->spu_status_R) & SPU_STATUS_STOPPED_BY_STOP)); /* Reset load request to normal. */ out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL); iobarrier_w(); } /** * stop_spu_isolate * Check SPU run-control state and force isolated * exit function as necessary. */ static void stop_spu_isolate(struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) { /* The SPU is in isolated state; the only way * to get it out is to perform an isolated * exit (clean) operation. */ force_spu_isolate_exit(spu); } } static void harvest(struct spu_state *prev, struct spu *spu) { /* * Perform steps 2-25 of SPU context restore sequence, * which resets an SPU either after a failed save, or * when using SPU for first time. */ disable_interrupts(prev, spu); /* Step 2. */ inhibit_user_access(prev, spu); /* Step 3. */ terminate_spu_app(prev, spu); /* Step 4. */ set_switch_pending(prev, spu); /* Step 5. */ stop_spu_isolate(spu); /* NEW. */ remove_other_spu_access(prev, spu); /* Step 6. */ suspend_mfc_and_halt_decr(prev, spu); /* Step 7. */ wait_suspend_mfc_complete(prev, spu); /* Step 8. */ if (!suspend_spe(prev, spu)) /* Step 9. */ clear_spu_status(prev, spu); /* Step 10. */ do_mfc_mssync(prev, spu); /* Step 11. */ issue_mfc_tlbie(prev, spu); /* Step 12. */ handle_pending_interrupts(prev, spu); /* Step 13. */ purge_mfc_queue(prev, spu); /* Step 14. */ wait_purge_complete(prev, spu); /* Step 15. */ reset_spu_privcntl(prev, spu); /* Step 16. 
*/ reset_spu_lslr(prev, spu); /* Step 17. */ setup_mfc_sr1(prev, spu); /* Step 18. */ spu_invalidate_slbs(spu); /* Step 19. */ reset_ch_part1(prev, spu); /* Step 20. */ reset_ch_part2(prev, spu); /* Step 21. */ enable_interrupts(prev, spu); /* Step 22. */ set_switch_active(prev, spu); /* Step 23. */ set_mfc_tclass_id(prev, spu); /* Step 24. */ resume_mfc_queue(prev, spu); /* Step 25. */ } static void restore_lscsa(struct spu_state *next, struct spu *spu) { /* * Perform steps 26-40 of SPU context restore sequence, * which restores regions of the local store and register * file. */ set_watchdog_timer(next, spu); /* Step 26. */ setup_spu_status_part1(next, spu); /* Step 27. */ setup_spu_status_part2(next, spu); /* Step 28. */ restore_mfc_rag(next, spu); /* Step 29. */ /* Step 30. */ setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code)); set_spu_npc(next, spu); /* Step 31. */ set_signot1(next, spu); /* Step 32. */ set_signot2(next, spu); /* Step 33. */ setup_decr(next, spu); /* Step 34. */ setup_ppu_mb(next, spu); /* Step 35. */ setup_ppuint_mb(next, spu); /* Step 36. */ send_restore_code(next, spu); /* Step 37. */ set_ppu_querymask(next, spu); /* Step 38. */ wait_tag_complete(next, spu); /* Step 39. */ wait_spu_stopped(next, spu); /* Step 40. */ } static void restore_csa(struct spu_state *next, struct spu *spu) { /* * Combine steps 41-76 of SPU context restore sequence, which * restore regions of the privileged & problem state areas. */ restore_spu_privcntl(next, spu); /* Step 41. */ restore_status_part1(next, spu); /* Step 42. */ restore_status_part2(next, spu); /* Step 43. */ restore_ls_16kb(next, spu); /* Step 44. */ wait_tag_complete(next, spu); /* Step 45. */ suspend_mfc(next, spu); /* Step 46. */ wait_suspend_mfc_complete(next, spu); /* Step 47. */ issue_mfc_tlbie(next, spu); /* Step 48. */ clear_interrupts(next, spu); /* Step 49. */ restore_mfc_queues(next, spu); /* Step 50. */ restore_ppu_querymask(next, spu); /* Step 51. 
*/ restore_ppu_querytype(next, spu); /* Step 52. */ restore_mfc_csr_tsq(next, spu); /* Step 53. */ restore_mfc_csr_cmd(next, spu); /* Step 54. */ restore_mfc_csr_ato(next, spu); /* Step 55. */ restore_mfc_tclass_id(next, spu); /* Step 56. */ set_llr_event(next, spu); /* Step 57. */ restore_decr_wrapped(next, spu); /* Step 58. */ restore_ch_part1(next, spu); /* Step 59. */ restore_ch_part2(next, spu); /* Step 60. */ restore_spu_lslr(next, spu); /* Step 61. */ restore_spu_cfg(next, spu); /* Step 62. */ restore_pm_trace(next, spu); /* Step 63. */ restore_spu_npc(next, spu); /* Step 64. */ restore_spu_mb(next, spu); /* Step 65. */ check_ppu_mb_stat(next, spu); /* Step 66. */ check_ppuint_mb_stat(next, spu); /* Step 67. */ spu_invalidate_slbs(spu); /* Modified Step 68. */ restore_mfc_sr1(next, spu); /* Step 69. */ set_int_route(next, spu); /* NEW */ restore_other_spu_access(next, spu); /* Step 70. */ restore_spu_runcntl(next, spu); /* Step 71. */ restore_mfc_cntl(next, spu); /* Step 72. */ enable_user_access(next, spu); /* Step 73. */ reset_switch_active(next, spu); /* Step 74. */ reenable_interrupts(next, spu); /* Step 75. */ } static int __do_spu_save(struct spu_state *prev, struct spu *spu) { int rc; /* * SPU context save can be broken into three phases: * * (a) quiesce [steps 2-16]. * (b) save of CSA, performed by PPE [steps 17-42] * (c) save of LSCSA, mostly performed by SPU [steps 43-52]. * * Returns 0 on success. * 2,6 if failed to quiece SPU * 53 if SPU-side of save failed. */ rc = quiece_spu(prev, spu); /* Steps 2-16. */ switch (rc) { default: case 2: case 6: harvest(prev, spu); return rc; break; case 0: break; } save_csa(prev, spu); /* Steps 17-43. */ save_lscsa(prev, spu); /* Steps 44-53. */ return check_save_status(prev, spu); /* Step 54. */ } static int __do_spu_restore(struct spu_state *next, struct spu *spu) { int rc; /* * SPU context restore can be broken into three phases: * * (a) harvest (or reset) SPU [steps 2-24]. 
* (b) restore LSCSA [steps 25-40], mostly performed by SPU. * (c) restore CSA [steps 41-76], performed by PPE. * * The 'harvest' step is not performed here, but rather * as needed below. */ restore_lscsa(next, spu); /* Steps 24-39. */ rc = check_restore_status(next, spu); /* Step 40. */ switch (rc) { default: /* Failed. Return now. */ return rc; break; case 0: /* Fall through to next step. */ break; } restore_csa(next, spu); return 0; } /** * spu_save - SPU context save, with locking. * @prev: pointer to SPU context save area, to be saved. * @spu: pointer to SPU iomem structure. * * Acquire locks, perform the save operation then return. */ int spu_save(struct spu_state *prev, struct spu *spu) { int rc; acquire_spu_lock(spu); /* Step 1. */ rc = __do_spu_save(prev, spu); /* Steps 2-53. */ release_spu_lock(spu); if (rc != 0 && rc != 2 && rc != 6) { panic("%s failed on SPU[%d], rc=%d.\n", __func__, spu->number, rc); } return 0; } EXPORT_SYMBOL_GPL(spu_save); /** * spu_restore - SPU context restore, with harvest and locking. * @new: pointer to SPU context save area, to be restored. * @spu: pointer to SPU iomem structure. * * Perform harvest + restore, as we may not be coming * from a previous successful save operation, and the * hardware state is unknown. */ int spu_restore(struct spu_state *new, struct spu *spu) { int rc; acquire_spu_lock(spu); harvest(NULL, spu); spu->slb_replace = 0; rc = __do_spu_restore(new, spu); release_spu_lock(spu); if (rc) { panic("%s failed on SPU[%d] rc=%d.\n", __func__, spu->number, rc); } return rc; } EXPORT_SYMBOL_GPL(spu_restore); static void init_prob(struct spu_state *csa) { csa->spu_chnlcnt_RW[9] = 1; csa->spu_chnlcnt_RW[21] = 16; csa->spu_chnlcnt_RW[23] = 1; csa->spu_chnlcnt_RW[28] = 1; csa->spu_chnlcnt_RW[30] = 1; csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP; csa->prob.mb_stat_R = 0x000400; } static void init_priv1(struct spu_state *csa) { /* Enable decode, relocate, tlbie response, master runcntl. 
*/ csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK | MFC_STATE1_MASTER_RUN_CONTROL_MASK | MFC_STATE1_PROBLEM_STATE_MASK | MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK; /* Enable OS-specific set of interrupts. */ csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR | CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR | CLASS0_ENABLE_SPU_ERROR_INTR; csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR | CLASS1_ENABLE_STORAGE_FAULT_INTR; csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR | CLASS2_ENABLE_SPU_HALT_INTR | CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR; } static void init_priv2(struct spu_state *csa) { csa->priv2.spu_lslr_RW = LS_ADDR_MASK; csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE | MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION | MFC_CNTL_DMA_QUEUES_EMPTY_MASK; } /** * spu_alloc_csa - allocate and initialize an SPU context save area. * * Allocate and initialize the contents of an SPU context save area. * This includes enabling address translation, interrupt masks, etc., * as appropriate for the given OS environment. * * Note that storage for the 'lscsa' is allocated separately, * as it is by far the largest of the context save regions, * and may need to be pinned or otherwise specially aligned. */ int spu_init_csa(struct spu_state *csa) { int rc; if (!csa) return -EINVAL; memset(csa, 0, sizeof(struct spu_state)); rc = spu_alloc_lscsa(csa); if (rc) return rc; spin_lock_init(&csa->register_lock); init_prob(csa); init_priv1(csa); init_priv2(csa); return 0; } void spu_fini_csa(struct spu_state *csa) { spu_free_lscsa(csa); }
gpl-2.0
XileForce/Vindicator
drivers/char/mmtimer.c
4525
21423
/* * Timer device implementation for SGI SN platforms. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2001-2006 Silicon Graphics, Inc. All rights reserved. * * This driver exports an API that should be supportable by any HPET or IA-PC * multimedia timer. The code below is currently specific to the SGI Altix * SHub RTC, however. * * 11/01/01 - jbarnes - initial revision * 9/10/04 - Christoph Lameter - remove interrupt support for kernel inclusion * 10/1/04 - Christoph Lameter - provide posix clock CLOCK_SGI_CYCLE * 10/13/04 - Christoph Lameter, Dimitri Sivanich - provide timer interrupt * support via the posix timer interface */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/ioctl.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/mmtimer.h> #include <linux/miscdevice.h> #include <linux/posix-timers.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/math64.h> #include <linux/mutex.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/sn/addrs.h> #include <asm/sn/intr.h> #include <asm/sn/shub_mmr.h> #include <asm/sn/nodepda.h> #include <asm/sn/shubio.h> MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>"); MODULE_DESCRIPTION("SGI Altix RTC Timer"); MODULE_LICENSE("GPL"); /* name of the device, usually in /dev */ #define MMTIMER_NAME "mmtimer" #define MMTIMER_DESC "SGI Altix RTC Timer" #define MMTIMER_VERSION "2.1" #define RTC_BITS 55 /* 55 bits for this implementation */ static struct k_clock sgi_clock; extern unsigned long sn_rtc_cycles_per_second; #define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC)) #define rtc_time() (*RTC_COUNTER_ADDR) static DEFINE_MUTEX(mmtimer_mutex); static long mmtimer_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static int mmtimer_mmap(struct 
file *file, struct vm_area_struct *vma); /* * Period in femtoseconds (10^-15 s) */ static unsigned long mmtimer_femtoperiod = 0; static const struct file_operations mmtimer_fops = { .owner = THIS_MODULE, .mmap = mmtimer_mmap, .unlocked_ioctl = mmtimer_ioctl, .llseek = noop_llseek, }; /* * We only have comparison registers RTC1-4 currently available per * node. RTC0 is used by SAL. */ /* Check for an RTC interrupt pending */ static int mmtimer_int_pending(int comparator) { if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator) return 1; else return 0; } /* Clear the RTC interrupt pending bit */ static void mmtimer_clr_int_pending(int comparator) { HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator); } /* Setup timer on comparator RTC1 */ static void mmtimer_setup_int_0(int cpu, u64 expires) { u64 val; /* Disable interrupt */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL); /* Initialize comparator value */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L); /* Clear pending bit */ mmtimer_clr_int_pending(0); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC1_INT_CONFIG_PID_SHFT); /* Set configuration */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val); /* Enable RTC interrupts */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL); /* Initialize comparator value */ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires); } /* Setup timer on comparator RTC2 */ static void mmtimer_setup_int_1(int cpu, u64 expires) { u64 val; HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L); mmtimer_clr_int_pending(1); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC2_INT_CONFIG_PID_SHFT); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 1UL); HUB_S((u64 
*)LOCAL_MMR_ADDR(SH_INT_CMPC), expires); } /* Setup timer on comparator RTC3 */ static void mmtimer_setup_int_2(int cpu, u64 expires) { u64 val; HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L); mmtimer_clr_int_pending(2); val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) | ((u64)cpu_physical_id(cpu) << SH_RTC3_INT_CONFIG_PID_SHFT); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires); } /* * This function must be called with interrupts disabled and preemption off * in order to insure that the setup succeeds in a deterministic time frame. * It will check if the interrupt setup succeeded. */ static int mmtimer_setup(int cpu, int comparator, unsigned long expires, u64 *set_completion_time) { switch (comparator) { case 0: mmtimer_setup_int_0(cpu, expires); break; case 1: mmtimer_setup_int_1(cpu, expires); break; case 2: mmtimer_setup_int_2(cpu, expires); break; } /* We might've missed our expiration time */ *set_completion_time = rtc_time(); if (*set_completion_time <= expires) return 1; /* * If an interrupt is already pending then its okay * if not then we failed */ return mmtimer_int_pending(comparator); } static int mmtimer_disable_int(long nasid, int comparator) { switch (comparator) { case 0: nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC1_INT_ENABLE, 0UL); break; case 1: nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC2_INT_ENABLE, 0UL); break; case 2: nasid == -1 ? 
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL) : REMOTE_HUB_S(nasid, SH_RTC3_INT_ENABLE, 0UL); break; default: return -EFAULT; } return 0; } #define COMPARATOR 1 /* The comparator to use */ #define TIMER_OFF 0xbadcabLL /* Timer is not setup */ #define TIMER_SET 0 /* Comparator is set for this timer */ #define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40 /* There is one of these for each timer */ struct mmtimer { struct rb_node list; struct k_itimer *timer; int cpu; }; struct mmtimer_node { spinlock_t lock ____cacheline_aligned; struct rb_root timer_head; struct rb_node *next; struct tasklet_struct tasklet; }; static struct mmtimer_node *timers; static unsigned mmtimer_interval_retry_increment = MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT; module_param(mmtimer_interval_retry_increment, uint, 0644); MODULE_PARM_DESC(mmtimer_interval_retry_increment, "RTC ticks to add to expiration on interval retry (default 40)"); /* * Add a new mmtimer struct to the node's mmtimer list. * This function assumes the struct mmtimer_node is locked. */ static void mmtimer_add_list(struct mmtimer *n) { int nodeid = n->timer->it.mmtimer.node; unsigned long expires = n->timer->it.mmtimer.expires; struct rb_node **link = &timers[nodeid].timer_head.rb_node; struct rb_node *parent = NULL; struct mmtimer *x; /* * Find the right place in the rbtree: */ while (*link) { parent = *link; x = rb_entry(parent, struct mmtimer, list); if (expires < x->timer->it.mmtimer.expires) link = &(*link)->rb_left; else link = &(*link)->rb_right; } /* * Insert the timer to the rbtree and check whether it * replaces the first pending timer */ rb_link_node(&n->list, parent, link); rb_insert_color(&n->list, &timers[nodeid].timer_head); if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next, struct mmtimer, list)->timer->it.mmtimer.expires) timers[nodeid].next = &n->list; } /* * Set the comparator for the next timer. * This function assumes the struct mmtimer_node is locked. 
*/ static void mmtimer_set_next_timer(int nodeid) { struct mmtimer_node *n = &timers[nodeid]; struct mmtimer *x; struct k_itimer *t; u64 expires, exp, set_completion_time; int i; restart: if (n->next == NULL) return; x = rb_entry(n->next, struct mmtimer, list); t = x->timer; if (!t->it.mmtimer.incr) { /* Not an interval timer */ if (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires, &set_completion_time)) { /* Late setup, fire now */ tasklet_schedule(&n->tasklet); } return; } /* Interval timer */ i = 0; expires = exp = t->it.mmtimer.expires; while (!mmtimer_setup(x->cpu, COMPARATOR, expires, &set_completion_time)) { int to; i++; expires = set_completion_time + mmtimer_interval_retry_increment + (1 << i); /* Calculate overruns as we go. */ to = ((u64)(expires - exp) / t->it.mmtimer.incr); if (to) { t->it_overrun += to; t->it.mmtimer.expires += t->it.mmtimer.incr * to; exp = t->it.mmtimer.expires; } if (i > 20) { printk(KERN_ALERT "mmtimer: cannot reschedule timer\n"); t->it.mmtimer.clock = TIMER_OFF; n->next = rb_next(&x->list); rb_erase(&x->list, &n->timer_head); kfree(x); goto restart; } } } /** * mmtimer_ioctl - ioctl interface for /dev/mmtimer * @file: file structure for the device * @cmd: command to execute * @arg: optional argument to command * * Executes the command specified by @cmd. Returns 0 for success, < 0 for * failure. * * Valid commands: * * %MMTIMER_GETOFFSET - Should return the offset (relative to the start * of the page where the registers are mapped) for the counter in question. * * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15) * seconds * * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address * specified by @arg * * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter * * %MMTIMER_MMAPAVAIL - Returns 1 if the registers can be mmap'd into userspace * * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it * in the address specified by @arg. 
*/ static long mmtimer_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = 0; mutex_lock(&mmtimer_mutex); switch (cmd) { case MMTIMER_GETOFFSET: /* offset of the counter */ /* * SN RTC registers are on their own 64k page */ if(PAGE_SIZE <= (1 << 16)) ret = (((long)RTC_COUNTER_ADDR) & (PAGE_SIZE-1)) / 8; else ret = -ENOSYS; break; case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ if(copy_to_user((unsigned long __user *)arg, &mmtimer_femtoperiod, sizeof(unsigned long))) ret = -EFAULT; break; case MMTIMER_GETFREQ: /* frequency in Hz */ if(copy_to_user((unsigned long __user *)arg, &sn_rtc_cycles_per_second, sizeof(unsigned long))) ret = -EFAULT; break; case MMTIMER_GETBITS: /* number of bits in the clock */ ret = RTC_BITS; break; case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */ ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0; break; case MMTIMER_GETCOUNTER: if(copy_to_user((unsigned long __user *)arg, RTC_COUNTER_ADDR, sizeof(unsigned long))) ret = -EFAULT; break; default: ret = -ENOTTY; break; } mutex_unlock(&mmtimer_mutex); return ret; } /** * mmtimer_mmap - maps the clock's registers into userspace * @file: file structure for the device * @vma: VMA to map the registers into * * Calls remap_pfn_range() to map the clock's registers into * the calling process' address space. 
*/ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma) { unsigned long mmtimer_addr; if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; if (vma->vm_flags & VM_WRITE) return -EPERM; if (PAGE_SIZE > (1 << 16)) return -ENOSYS; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); mmtimer_addr = __pa(RTC_COUNTER_ADDR); mmtimer_addr &= ~(PAGE_SIZE - 1); mmtimer_addr &= 0xfffffffffffffffUL; if (remap_pfn_range(vma, vma->vm_start, mmtimer_addr >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot)) { printk(KERN_ERR "remap_pfn_range failed in mmtimer.c\n"); return -EAGAIN; } return 0; } static struct miscdevice mmtimer_miscdev = { SGI_MMTIMER, MMTIMER_NAME, &mmtimer_fops }; static struct timespec sgi_clock_offset; static int sgi_clock_period; /* * Posix Timer Interface */ static struct timespec sgi_clock_offset; static int sgi_clock_period; static int sgi_clock_get(clockid_t clockid, struct timespec *tp) { u64 nsec; nsec = rtc_time() * sgi_clock_period + sgi_clock_offset.tv_nsec; *tp = ns_to_timespec(nsec); tp->tv_sec += sgi_clock_offset.tv_sec; return 0; }; static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp) { u64 nsec; u32 rem; nsec = rtc_time() * sgi_clock_period; sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem); if (rem <= tp->tv_nsec) sgi_clock_offset.tv_nsec = tp->tv_sec - rem; else { sgi_clock_offset.tv_nsec = tp->tv_sec + NSEC_PER_SEC - rem; sgi_clock_offset.tv_sec--; } return 0; } /** * mmtimer_interrupt - timer interrupt handler * @irq: irq received * @dev_id: device the irq came from * * Called when one of the comarators matches the counter, This * routine will send signals to processes that have requested * them. * * This interrupt is run in an interrupt context * by the SHUB. It is therefore safe to locally access SHub * registers. 
*/ static irqreturn_t mmtimer_interrupt(int irq, void *dev_id) { unsigned long expires = 0; int result = IRQ_NONE; unsigned indx = cpu_to_node(smp_processor_id()); struct mmtimer *base; spin_lock(&timers[indx].lock); base = rb_entry(timers[indx].next, struct mmtimer, list); if (base == NULL) { spin_unlock(&timers[indx].lock); return result; } if (base->cpu == smp_processor_id()) { if (base->timer) expires = base->timer->it.mmtimer.expires; /* expires test won't work with shared irqs */ if ((mmtimer_int_pending(COMPARATOR) > 0) || (expires && (expires <= rtc_time()))) { mmtimer_clr_int_pending(COMPARATOR); tasklet_schedule(&timers[indx].tasklet); result = IRQ_HANDLED; } } spin_unlock(&timers[indx].lock); return result; } static void mmtimer_tasklet(unsigned long data) { int nodeid = data; struct mmtimer_node *mn = &timers[nodeid]; struct mmtimer *x; struct k_itimer *t; unsigned long flags; /* Send signal and deal with periodic signals */ spin_lock_irqsave(&mn->lock, flags); if (!mn->next) goto out; x = rb_entry(mn->next, struct mmtimer, list); t = x->timer; if (t->it.mmtimer.clock == TIMER_OFF) goto out; t->it_overrun = 0; mn->next = rb_next(&x->list); rb_erase(&x->list, &mn->timer_head); if (posix_timer_event(t, 0) != 0) t->it_overrun++; if(t->it.mmtimer.incr) { t->it.mmtimer.expires += t->it.mmtimer.incr; mmtimer_add_list(x); } else { /* Ensure we don't false trigger in mmtimer_interrupt */ t->it.mmtimer.clock = TIMER_OFF; t->it.mmtimer.expires = 0; kfree(x); } /* Set comparator for next timer, if there is one */ mmtimer_set_next_timer(nodeid); t->it_overrun_last = t->it_overrun; out: spin_unlock_irqrestore(&mn->lock, flags); } static int sgi_timer_create(struct k_itimer *timer) { /* Insure that a newly created timer is off */ timer->it.mmtimer.clock = TIMER_OFF; return 0; } /* This does not really delete a timer. 
It just insures * that the timer is not active * * Assumption: it_lock is already held with irq's disabled */ static int sgi_timer_del(struct k_itimer *timr) { cnodeid_t nodeid = timr->it.mmtimer.node; unsigned long irqflags; spin_lock_irqsave(&timers[nodeid].lock, irqflags); if (timr->it.mmtimer.clock != TIMER_OFF) { unsigned long expires = timr->it.mmtimer.expires; struct rb_node *n = timers[nodeid].timer_head.rb_node; struct mmtimer *uninitialized_var(t); int r = 0; timr->it.mmtimer.clock = TIMER_OFF; timr->it.mmtimer.expires = 0; while (n) { t = rb_entry(n, struct mmtimer, list); if (t->timer == timr) break; if (expires < t->timer->it.mmtimer.expires) n = n->rb_left; else n = n->rb_right; } if (!n) { spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); return 0; } if (timers[nodeid].next == n) { timers[nodeid].next = rb_next(n); r = 1; } rb_erase(n, &timers[nodeid].timer_head); kfree(t); if (r) { mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR); mmtimer_set_next_timer(nodeid); } } spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); return 0; } /* Assumption: it_lock is already held with irq's disabled */ static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) { if (timr->it.mmtimer.clock == TIMER_OFF) { cur_setting->it_interval.tv_nsec = 0; cur_setting->it_interval.tv_sec = 0; cur_setting->it_value.tv_nsec = 0; cur_setting->it_value.tv_sec =0; return; } cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period); cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period); } static int sgi_timer_set(struct k_itimer *timr, int flags, struct itimerspec * new_setting, struct itimerspec * old_setting) { unsigned long when, period, irqflags; int err = 0; cnodeid_t nodeid; struct mmtimer *base; struct rb_node *n; if (old_setting) sgi_timer_get(timr, old_setting); sgi_timer_del(timr); when = timespec_to_ns(&new_setting->it_value); period = 
timespec_to_ns(&new_setting->it_interval); if (when == 0) /* Clear timer */ return 0; base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL); if (base == NULL) return -ENOMEM; if (flags & TIMER_ABSTIME) { struct timespec n; unsigned long now; getnstimeofday(&n); now = timespec_to_ns(&n); if (when > now) when -= now; else /* Fire the timer immediately */ when = 0; } /* * Convert to sgi clock period. Need to keep rtc_time() as near as possible * to getnstimeofday() in order to be as faithful as possible to the time * specified. */ when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time(); period = (period + sgi_clock_period - 1) / sgi_clock_period; /* * We are allocating a local SHub comparator. If we would be moved to another * cpu then another SHub may be local to us. Prohibit that by switching off * preemption. */ preempt_disable(); nodeid = cpu_to_node(smp_processor_id()); /* Lock the node timer structure */ spin_lock_irqsave(&timers[nodeid].lock, irqflags); base->timer = timr; base->cpu = smp_processor_id(); timr->it.mmtimer.clock = TIMER_SET; timr->it.mmtimer.node = nodeid; timr->it.mmtimer.incr = period; timr->it.mmtimer.expires = when; n = timers[nodeid].next; /* Add the new struct mmtimer to node's timer list */ mmtimer_add_list(base); if (timers[nodeid].next == n) { /* No need to reprogram comparator for now */ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); preempt_enable(); return err; } /* We need to reprogram the comparator */ if (n) mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR); mmtimer_set_next_timer(nodeid); /* Unlock the node timer structure */ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); preempt_enable(); return err; } static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp) { tp->tv_sec = 0; tp->tv_nsec = sgi_clock_period; return 0; } static struct k_clock sgi_clock = { .clock_set = sgi_clock_set, .clock_get = sgi_clock_get, .clock_getres = sgi_clock_getres, .timer_create = 
sgi_timer_create, .timer_set = sgi_timer_set, .timer_del = sgi_timer_del, .timer_get = sgi_timer_get }; /** * mmtimer_init - device initialization routine * * Does initial setup for the mmtimer device. */ static int __init mmtimer_init(void) { cnodeid_t node, maxn = -1; if (!ia64_platform_is("sn2")) return 0; /* * Sanity check the cycles/sec variable */ if (sn_rtc_cycles_per_second < 100000) { printk(KERN_ERR "%s: unable to determine clock frequency\n", MMTIMER_NAME); goto out1; } mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second / 2) / sn_rtc_cycles_per_second; if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) { printk(KERN_WARNING "%s: unable to allocate interrupt.", MMTIMER_NAME); goto out1; } if (misc_register(&mmtimer_miscdev)) { printk(KERN_ERR "%s: failed to register device\n", MMTIMER_NAME); goto out2; } /* Get max numbered node, calculate slots needed */ for_each_online_node(node) { maxn = node; } maxn++; /* Allocate list of node ptrs to mmtimer_t's */ timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL); if (!timers) { printk(KERN_ERR "%s: failed to allocate memory for device\n", MMTIMER_NAME); goto out3; } /* Initialize struct mmtimer's for each online node */ for_each_online_node(node) { spin_lock_init(&timers[node].lock); tasklet_init(&timers[node].tasklet, mmtimer_tasklet, (unsigned long) node); } sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second; posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock); printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION, sn_rtc_cycles_per_second/(unsigned long)1E6); return 0; out3: misc_deregister(&mmtimer_miscdev); out2: free_irq(SGI_MMTIMER_VECTOR, NULL); out1: return -1; } module_init(mmtimer_init);
gpl-2.0
BlownFuze/Koding
drivers/rtc/rtc-dm355evm.c
4781
4359
/* * rtc-dm355evm.c - access battery-backed counter in MSP430 firmware * * Copyright (c) 2008 by David Brownell * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/rtc.h> #include <linux/platform_device.h> #include <linux/i2c/dm355evm_msp.h> /* * The MSP430 firmware on the DM355 EVM uses a watch crystal to feed * a 1 Hz counter. When a backup battery is supplied, that makes a * reasonable RTC for applications where alarms and non-NTP drift * compensation aren't important. * * The only real glitch is the inability to read or write all four * counter bytes atomically: the count may increment in the middle * of an operation, causing trouble when the LSB rolls over. * * This driver was tested with firmware revision A4. */ union evm_time { u8 bytes[4]; u32 value; }; static int dm355evm_rtc_read_time(struct device *dev, struct rtc_time *tm) { union evm_time time; int status; int tries = 0; do { /* * Read LSB(0) to MSB(3) bytes. Defend against the counter * rolling over by re-reading until the value is stable, * and assuming the four reads take at most a few seconds. 
*/ status = dm355evm_msp_read(DM355EVM_MSP_RTC_0); if (status < 0) return status; if (tries && time.bytes[0] == status) break; time.bytes[0] = status; status = dm355evm_msp_read(DM355EVM_MSP_RTC_1); if (status < 0) return status; if (tries && time.bytes[1] == status) break; time.bytes[1] = status; status = dm355evm_msp_read(DM355EVM_MSP_RTC_2); if (status < 0) return status; if (tries && time.bytes[2] == status) break; time.bytes[2] = status; status = dm355evm_msp_read(DM355EVM_MSP_RTC_3); if (status < 0) return status; if (tries && time.bytes[3] == status) break; time.bytes[3] = status; } while (++tries < 5); dev_dbg(dev, "read timestamp %08x\n", time.value); rtc_time_to_tm(le32_to_cpu(time.value), tm); return 0; } static int dm355evm_rtc_set_time(struct device *dev, struct rtc_time *tm) { union evm_time time; unsigned long value; int status; rtc_tm_to_time(tm, &value); time.value = cpu_to_le32(value); dev_dbg(dev, "write timestamp %08x\n", time.value); /* * REVISIT handle non-atomic writes ... maybe just retry until * byte[1] sticks (no rollover)? 
*/ status = dm355evm_msp_write(time.bytes[0], DM355EVM_MSP_RTC_0); if (status < 0) return status; status = dm355evm_msp_write(time.bytes[1], DM355EVM_MSP_RTC_1); if (status < 0) return status; status = dm355evm_msp_write(time.bytes[2], DM355EVM_MSP_RTC_2); if (status < 0) return status; status = dm355evm_msp_write(time.bytes[3], DM355EVM_MSP_RTC_3); if (status < 0) return status; return 0; } static struct rtc_class_ops dm355evm_rtc_ops = { .read_time = dm355evm_rtc_read_time, .set_time = dm355evm_rtc_set_time, }; /*----------------------------------------------------------------------*/ static int __devinit dm355evm_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; rtc = rtc_device_register(pdev->name, &pdev->dev, &dm355evm_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { dev_err(&pdev->dev, "can't register RTC device, err %ld\n", PTR_ERR(rtc)); return PTR_ERR(rtc); } platform_set_drvdata(pdev, rtc); return 0; } static int __devexit dm355evm_rtc_remove(struct platform_device *pdev) { struct rtc_device *rtc = platform_get_drvdata(pdev); rtc_device_unregister(rtc); platform_set_drvdata(pdev, NULL); return 0; } /* * I2C is used to talk to the MSP430, but this platform device is * exposed by an MFD driver that manages I2C communications. */ static struct platform_driver rtc_dm355evm_driver = { .probe = dm355evm_rtc_probe, .remove = __devexit_p(dm355evm_rtc_remove), .driver = { .owner = THIS_MODULE, .name = "rtc-dm355evm", }, }; static int __init dm355evm_rtc_init(void) { return platform_driver_register(&rtc_dm355evm_driver); } module_init(dm355evm_rtc_init); static void __exit dm355evm_rtc_exit(void) { platform_driver_unregister(&rtc_dm355evm_driver); } module_exit(dm355evm_rtc_exit); MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_sony_msm8930
drivers/dma/dmatest.c
5037
18033
/* * DMA Engine test module * * Copyright (C) 2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/freezer.h> #include <linux/init.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/wait.h> static unsigned int test_buf_size = 16384; module_param(test_buf_size, uint, S_IRUGO); MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer"); static char test_channel[20]; module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO); MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); static char test_device[20]; module_param_string(device, test_device, sizeof(test_device), S_IRUGO); MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)"); static unsigned int threads_per_chan = 1; module_param(threads_per_chan, uint, S_IRUGO); MODULE_PARM_DESC(threads_per_chan, "Number of threads to start per channel (default: 1)"); static unsigned int max_channels; module_param(max_channels, uint, S_IRUGO); MODULE_PARM_DESC(max_channels, "Maximum number of channels to use (default: all)"); static unsigned int iterations; module_param(iterations, uint, S_IRUGO); MODULE_PARM_DESC(iterations, "Iterations before stopping test (default: infinite)"); static unsigned int xor_sources = 3; module_param(xor_sources, uint, S_IRUGO); MODULE_PARM_DESC(xor_sources, "Number of xor source buffers (default: 3)"); static unsigned int pq_sources = 3; module_param(pq_sources, uint, S_IRUGO); MODULE_PARM_DESC(pq_sources, "Number of p+q source buffers (default: 3)"); static int timeout = 3000; module_param(timeout, uint, S_IRUGO); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 
3000), " "Pass -1 for infinite timeout"); /* * Initialization patterns. All bytes in the source buffer has bit 7 * set, all bytes in the destination buffer has bit 7 cleared. * * Bit 6 is set for all bytes which are to be copied by the DMA * engine. Bit 5 is set for all bytes which are to be overwritten by * the DMA engine. * * The remaining bits are the inverse of a counter which increments by * one for each byte address. */ #define PATTERN_SRC 0x80 #define PATTERN_DST 0x00 #define PATTERN_COPY 0x40 #define PATTERN_OVERWRITE 0x20 #define PATTERN_COUNT_MASK 0x1f struct dmatest_thread { struct list_head node; struct task_struct *task; struct dma_chan *chan; u8 **srcs; u8 **dsts; enum dma_transaction_type type; }; struct dmatest_chan { struct list_head node; struct dma_chan *chan; struct list_head threads; }; /* * These are protected by dma_list_mutex since they're only used by * the DMA filter function callback */ static LIST_HEAD(dmatest_channels); static unsigned int nr_channels; static bool dmatest_match_channel(struct dma_chan *chan) { if (test_channel[0] == '\0') return true; return strcmp(dma_chan_name(chan), test_channel) == 0; } static bool dmatest_match_device(struct dma_device *device) { if (test_device[0] == '\0') return true; return strcmp(dev_name(device->dev), test_device) == 0; } static unsigned long dmatest_random(void) { unsigned long buf; get_random_bytes(&buf, sizeof(buf)); return buf; } static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) { unsigned int i; u8 *buf; for (; (buf = *bufs); bufs++) { for (i = 0; i < start; i++) buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); for ( ; i < start + len; i++) buf[i] = PATTERN_SRC | PATTERN_COPY | (~i & PATTERN_COUNT_MASK); for ( ; i < test_buf_size; i++) buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); buf++; } } static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) { unsigned int i; u8 *buf; for (; (buf = *bufs); bufs++) { for (i = 0; i < start; i++) 
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); for ( ; i < start + len; i++) buf[i] = PATTERN_DST | PATTERN_OVERWRITE | (~i & PATTERN_COUNT_MASK); for ( ; i < test_buf_size; i++) buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); } } static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, unsigned int counter, bool is_srcbuf) { u8 diff = actual ^ pattern; u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); const char *thread_name = current->comm; if (is_srcbuf) pr_warning("%s: srcbuf[0x%x] overwritten!" " Expected %02x, got %02x\n", thread_name, index, expected, actual); else if ((pattern & PATTERN_COPY) && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) pr_warning("%s: dstbuf[0x%x] not copied!" " Expected %02x, got %02x\n", thread_name, index, expected, actual); else if (diff & PATTERN_SRC) pr_warning("%s: dstbuf[0x%x] was copied!" " Expected %02x, got %02x\n", thread_name, index, expected, actual); else pr_warning("%s: dstbuf[0x%x] mismatch!" " Expected %02x, got %02x\n", thread_name, index, expected, actual); } static unsigned int dmatest_verify(u8 **bufs, unsigned int start, unsigned int end, unsigned int counter, u8 pattern, bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; u8 actual; u8 expected; u8 *buf; unsigned int counter_orig = counter; for (; (buf = *bufs); bufs++) { counter = counter_orig; for (i = start; i < end; i++) { actual = buf[i]; expected = pattern | (~counter & PATTERN_COUNT_MASK); if (actual != expected) { if (error_count < 32) dmatest_mismatch(actual, pattern, i, counter, is_srcbuf); error_count++; } counter++; } } if (error_count > 32) pr_warning("%s: %u errors suppressed\n", current->comm, error_count - 32); return error_count; } /* poor man's completion - we want to use wait_event_freezable() on it */ struct dmatest_done { bool done; wait_queue_head_t *wait; }; static void dmatest_callback(void *arg) { struct dmatest_done *done = arg; done->done = true; wake_up_all(done->wait); } /* * This function 
repeatedly tests DMA transfers of various lengths and * offsets for a given operation type until it is told to exit by * kthread_stop(). There may be multiple threads running this function * in parallel for a single channel, and there may be multiple channels * being tested in parallel. * * Before each test, the source and destination buffer is initialized * with a known pattern. This pattern is different depending on * whether it's in an area which is supposed to be copied or * overwritten, and different in the source and destination buffers. * So if the DMA engine doesn't copy exactly what we tell it to copy, * we'll notice. */ static int dmatest_func(void *data) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); struct dmatest_thread *thread = data; struct dmatest_done done = { .wait = &done_wait }; struct dma_chan *chan; const char *thread_name; unsigned int src_off, dst_off, len; unsigned int error_count; unsigned int failed_tests = 0; unsigned int total_tests = 0; dma_cookie_t cookie; enum dma_status status; enum dma_ctrl_flags flags; u8 pq_coefs[pq_sources + 1]; int ret; int src_cnt; int dst_cnt; int i; thread_name = current->comm; set_freezable(); ret = -ENOMEM; smp_rmb(); chan = thread->chan; if (thread->type == DMA_MEMCPY) src_cnt = dst_cnt = 1; else if (thread->type == DMA_XOR) { src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ dst_cnt = 1; } else if (thread->type == DMA_PQ) { src_cnt = pq_sources | 1; /* force odd to ensure dst = src */ dst_cnt = 2; for (i = 0; i < src_cnt; i++) pq_coefs[i] = 1; } else goto err_srcs; thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); if (!thread->srcs) goto err_srcs; for (i = 0; i < src_cnt; i++) { thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL); if (!thread->srcs[i]) goto err_srcbuf; } thread->srcs[i] = NULL; thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL); if (!thread->dsts) goto err_dsts; for (i = 0; i < dst_cnt; i++) { thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL); if 
(!thread->dsts[i]) goto err_dstbuf; } thread->dsts[i] = NULL; set_user_nice(current, 10); /* * src buffers are freed by the DMAEngine code with dma_unmap_single() * dst buffers are freed by ourselves below */ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; while (!kthread_should_stop() && !(iterations && total_tests >= iterations)) { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx = NULL; dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_dsts[dst_cnt]; u8 align = 0; total_tests++; /* honor alignment restrictions */ if (thread->type == DMA_MEMCPY) align = dev->copy_align; else if (thread->type == DMA_XOR) align = dev->xor_align; else if (thread->type == DMA_PQ) align = dev->pq_align; if (1 << align > test_buf_size) { pr_err("%u-byte buffer too small for %d-byte alignment\n", test_buf_size, 1 << align); break; } len = dmatest_random() % test_buf_size + 1; len = (len >> align) << align; if (!len) len = 1 << align; src_off = dmatest_random() % (test_buf_size - len + 1); dst_off = dmatest_random() % (test_buf_size - len + 1); src_off = (src_off >> align) << align; dst_off = (dst_off >> align) << align; dmatest_init_srcs(thread->srcs, src_off, len); dmatest_init_dsts(thread->dsts, dst_off, len); for (i = 0; i < src_cnt; i++) { u8 *buf = thread->srcs[i] + src_off; dma_srcs[i] = dma_map_single(dev->dev, buf, len, DMA_TO_DEVICE); } /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ for (i = 0; i < dst_cnt; i++) { dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], test_buf_size, DMA_BIDIRECTIONAL); } if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, dma_dsts[0] + dst_off, dma_srcs[0], len, flags); else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, dma_dsts[0] + dst_off, dma_srcs, src_cnt, len, flags); else if (thread->type == DMA_PQ) { dma_addr_t dma_pq[dst_cnt]; for (i = 0; i < dst_cnt; i++) dma_pq[i] = dma_dsts[i] + dst_off; tx = 
dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, src_cnt, pq_coefs, len, flags); } if (!tx) { for (i = 0; i < src_cnt; i++) dma_unmap_single(dev->dev, dma_srcs[i], len, DMA_TO_DEVICE); for (i = 0; i < dst_cnt; i++) dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size, DMA_BIDIRECTIONAL); pr_warning("%s: #%u: prep error with src_off=0x%x " "dst_off=0x%x len=0x%x\n", thread_name, total_tests - 1, src_off, dst_off, len); msleep(100); failed_tests++; continue; } done.done = false; tx->callback = dmatest_callback; tx->callback_param = &done; cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { pr_warning("%s: #%u: submit error %d with src_off=0x%x " "dst_off=0x%x len=0x%x\n", thread_name, total_tests - 1, cookie, src_off, dst_off, len); msleep(100); failed_tests++; continue; } dma_async_issue_pending(chan); wait_event_freezable_timeout(done_wait, done.done, msecs_to_jiffies(timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); if (!done.done) { /* * We're leaving the timed out dma operation with * dangling pointer to done_wait. To make this * correct, we'll need to allocate wait_done for * each test iteration and perform "who's gonna * free it this time?" dancing. For now, just * leave it dangling. */ pr_warning("%s: #%u: test timed out\n", thread_name, total_tests - 1); failed_tests++; continue; } else if (status != DMA_SUCCESS) { pr_warning("%s: #%u: got completion callback," " but status is \'%s\'\n", thread_name, total_tests - 1, status == DMA_ERROR ? 
"error" : "in progress"); failed_tests++; continue; } /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ for (i = 0; i < dst_cnt; i++) dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size, DMA_BIDIRECTIONAL); error_count = 0; pr_debug("%s: verifying source buffer...\n", thread_name); error_count += dmatest_verify(thread->srcs, 0, src_off, 0, PATTERN_SRC, true); error_count += dmatest_verify(thread->srcs, src_off, src_off + len, src_off, PATTERN_SRC | PATTERN_COPY, true); error_count += dmatest_verify(thread->srcs, src_off + len, test_buf_size, src_off + len, PATTERN_SRC, true); pr_debug("%s: verifying dest buffer...\n", thread->task->comm); error_count += dmatest_verify(thread->dsts, 0, dst_off, 0, PATTERN_DST, false); error_count += dmatest_verify(thread->dsts, dst_off, dst_off + len, src_off, PATTERN_SRC | PATTERN_COPY, false); error_count += dmatest_verify(thread->dsts, dst_off + len, test_buf_size, dst_off + len, PATTERN_DST, false); if (error_count) { pr_warning("%s: #%u: %u errors with " "src_off=0x%x dst_off=0x%x len=0x%x\n", thread_name, total_tests - 1, error_count, src_off, dst_off, len); failed_tests++; } else { pr_debug("%s: #%u: No errors with " "src_off=0x%x dst_off=0x%x len=0x%x\n", thread_name, total_tests - 1, src_off, dst_off, len); } } ret = 0; for (i = 0; thread->dsts[i]; i++) kfree(thread->dsts[i]); err_dstbuf: kfree(thread->dsts); err_dsts: for (i = 0; thread->srcs[i]; i++) kfree(thread->srcs[i]); err_srcbuf: kfree(thread->srcs); err_srcs: pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", thread_name, total_tests, failed_tests, ret); /* terminate all transfers on specified channels */ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); if (iterations > 0) while (!kthread_should_stop()) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); interruptible_sleep_on(&wait_dmatest_exit); } return ret; } static void dmatest_cleanup_channel(struct dmatest_chan *dtc) { struct dmatest_thread *thread; struct 
dmatest_thread *_thread; int ret; list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { ret = kthread_stop(thread->task); pr_debug("dmatest: thread %s exited with status %d\n", thread->task->comm, ret); list_del(&thread->node); kfree(thread); } /* terminate all transfers on specified channels */ dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0); kfree(dtc); } static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type) { struct dmatest_thread *thread; struct dma_chan *chan = dtc->chan; char *op; unsigned int i; if (type == DMA_MEMCPY) op = "copy"; else if (type == DMA_XOR) op = "xor"; else if (type == DMA_PQ) op = "pq"; else return -EINVAL; for (i = 0; i < threads_per_chan; i++) { thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); if (!thread) { pr_warning("dmatest: No memory for %s-%s%u\n", dma_chan_name(chan), op, i); break; } thread->chan = dtc->chan; thread->type = type; smp_wmb(); thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", dma_chan_name(chan), op, i); if (IS_ERR(thread->task)) { pr_warning("dmatest: Failed to run thread %s-%s%u\n", dma_chan_name(chan), op, i); kfree(thread); break; } /* srcbuf and dstbuf are allocated by the thread itself */ list_add_tail(&thread->node, &dtc->threads); } return i; } static int dmatest_add_channel(struct dma_chan *chan) { struct dmatest_chan *dtc; struct dma_device *dma_dev = chan->device; unsigned int thread_count = 0; int cnt; dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); if (!dtc) { pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); return -ENOMEM; } dtc->chan = chan; INIT_LIST_HEAD(&dtc->threads); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { cnt = dmatest_add_threads(dtc, DMA_MEMCPY); thread_count += cnt > 0 ? cnt : 0; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { cnt = dmatest_add_threads(dtc, DMA_XOR); thread_count += cnt > 0 ? 
cnt : 0; } if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { cnt = dmatest_add_threads(dtc, DMA_PQ); thread_count += cnt > 0 ? cnt : 0; } pr_info("dmatest: Started %u threads using %s\n", thread_count, dma_chan_name(chan)); list_add_tail(&dtc->node, &dmatest_channels); nr_channels++; return 0; } static bool filter(struct dma_chan *chan, void *param) { if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device)) return false; else return true; } static int __init dmatest_init(void) { dma_cap_mask_t mask; struct dma_chan *chan; int err = 0; dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); for (;;) { chan = dma_request_channel(mask, filter, NULL); if (chan) { err = dmatest_add_channel(chan); if (err) { dma_release_channel(chan); break; /* add_channel failed, punt */ } } else break; /* no more channels available */ if (max_channels && nr_channels >= max_channels) break; /* we have all we need */ } return err; } /* when compiled-in wait for drivers to load first */ late_initcall(dmatest_init); static void __exit dmatest_exit(void) { struct dmatest_chan *dtc, *_dtc; struct dma_chan *chan; list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { list_del(&dtc->node); chan = dtc->chan; dmatest_cleanup_channel(dtc); pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); dma_release_channel(chan); } } module_exit(dmatest_exit); MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); MODULE_LICENSE("GPL v2");
gpl-2.0
flar2/m7-Sense-4.4.3
drivers/media/dvb/dvb-usb/cinergyT2-core.c
5037
6354
/* * TerraTec Cinergy T2/qanu USB2 DVB-T adapter. * * Copyright (C) 2007 Tomi Orava (tomimo@ncircle.nullnet.fi) * * Based on the dvb-usb-framework code and the * original Terratec Cinergy T2 driver by: * * Copyright (C) 2004 Daniel Mack <daniel@qanu.de> and * Holger Waechtler <holger@qanu.de> * * Protocol Spec published on http://qanu.de/specs/terratec_cinergyT2.pdf * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "cinergyT2.h" /* debug */ int dvb_usb_cinergyt2_debug; module_param_named(debug, dvb_usb_cinergyt2_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info, xfer=2, rc=4 " "(or-able))."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); struct cinergyt2_state { u8 rc_counter; }; /* We are missing a release hook with usb_device data */ static struct dvb_usb_device *cinergyt2_usb_device; static struct dvb_usb_device_properties cinergyt2_properties; static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable) { char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 }; char result[64]; return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result, sizeof(result), 0); } static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable) { char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 
0 : 1 }; char state[3]; return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0); } static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) { char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION }; char state[3]; int ret; adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev); ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state, sizeof(state), 0); if (ret < 0) { deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep " "state info\n"); } /* Copy this pointer as we are gonna need it in the release phase */ cinergyt2_usb_device = adap->dev; return 0; } static struct rc_map_table rc_map_cinergyt2_table[] = { { 0x0401, KEY_POWER }, { 0x0402, KEY_1 }, { 0x0403, KEY_2 }, { 0x0404, KEY_3 }, { 0x0405, KEY_4 }, { 0x0406, KEY_5 }, { 0x0407, KEY_6 }, { 0x0408, KEY_7 }, { 0x0409, KEY_8 }, { 0x040a, KEY_9 }, { 0x040c, KEY_0 }, { 0x040b, KEY_VIDEO }, { 0x040d, KEY_REFRESH }, { 0x040e, KEY_SELECT }, { 0x040f, KEY_EPG }, { 0x0410, KEY_UP }, { 0x0414, KEY_DOWN }, { 0x0411, KEY_LEFT }, { 0x0413, KEY_RIGHT }, { 0x0412, KEY_OK }, { 0x0415, KEY_TEXT }, { 0x0416, KEY_INFO }, { 0x0417, KEY_RED }, { 0x0418, KEY_GREEN }, { 0x0419, KEY_YELLOW }, { 0x041a, KEY_BLUE }, { 0x041c, KEY_VOLUMEUP }, { 0x041e, KEY_VOLUMEDOWN }, { 0x041d, KEY_MUTE }, { 0x041b, KEY_CHANNELUP }, { 0x041f, KEY_CHANNELDOWN }, { 0x0440, KEY_PAUSE }, { 0x044c, KEY_PLAY }, { 0x0458, KEY_RECORD }, { 0x0454, KEY_PREVIOUS }, { 0x0448, KEY_STOP }, { 0x045c, KEY_NEXT } }; /* Number of keypresses to ignore before detect repeating */ #define RC_REPEAT_DELAY 3 static int repeatable_keys[] = { KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_CHANNELUP, KEY_CHANNELDOWN }; static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { struct cinergyt2_state *st = d->priv; u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS; int i; *state = REMOTE_NO_KEY_PRESSED; dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0); if (key[4] == 0xff) { /* key 
repeat */ st->rc_counter++; if (st->rc_counter > RC_REPEAT_DELAY) { for (i = 0; i < ARRAY_SIZE(repeatable_keys); i++) { if (d->last_event == repeatable_keys[i]) { *state = REMOTE_KEY_REPEAT; *event = d->last_event; deb_rc("repeat key, event %x\n", *event); return 0; } } deb_rc("repeated key (non repeatable)\n"); } return 0; } /* hack to pass checksum on the custom field */ key[2] = ~key[1]; dvb_usb_nec_rc_key_to_event(d, key, event, state); if (key[0] != 0) { if (*event != d->last_event) st->rc_counter = 0; deb_rc("key: %x %x %x %x %x\n", key[0], key[1], key[2], key[3], key[4]); } return 0; } static int cinergyt2_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return dvb_usb_device_init(intf, &cinergyt2_properties, THIS_MODULE, NULL, adapter_nr); } static struct usb_device_id cinergyt2_usb_table[] = { { USB_DEVICE(USB_VID_TERRATEC, 0x0038) }, { 0 } }; MODULE_DEVICE_TABLE(usb, cinergyt2_usb_table); static struct dvb_usb_device_properties cinergyt2_properties = { .size_of_priv = sizeof(struct cinergyt2_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cinergyt2_streaming_ctrl, .frontend_attach = cinergyt2_frontend_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x02, .u = { .bulk = { .buffersize = 512, } } }, }}, } }, .power_ctrl = cinergyt2_power_ctrl, .rc.legacy = { .rc_interval = 50, .rc_map_table = rc_map_cinergyt2_table, .rc_map_size = ARRAY_SIZE(rc_map_cinergyt2_table), .rc_query = cinergyt2_rc_query, }, .generic_bulk_ctrl_endpoint = 1, .num_device_descs = 1, .devices = { { .name = "TerraTec/qanu USB2.0 Highspeed DVB-T Receiver", .cold_ids = {NULL}, .warm_ids = { &cinergyt2_usb_table[0], NULL }, }, { NULL }, } }; static struct usb_driver cinergyt2_driver = { .name = "cinergyT2", .probe = cinergyt2_usb_probe, .disconnect = dvb_usb_device_exit, .id_table = cinergyt2_usb_table }; module_usb_driver(cinergyt2_driver); MODULE_DESCRIPTION("Terratec 
Cinergy T2 DVB-T driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tomi Orava");
gpl-2.0
Honor8Dev/android_kernel_huawei_FRD-L04
fs/ext3/file.c
7341
2155
/* * linux/fs/ext3/file.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/file.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext3 fs regular file handling primitives * * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) */ #include <linux/quotaops.h> #include "ext3.h" #include "xattr.h" #include "acl.h" /* * Called when an inode is released. Note that this is different * from ext3_file_open: open gets called at every open, but release * gets called only when /all/ the files are closed. */ static int ext3_release_file (struct inode * inode, struct file * filp) { if (ext3_test_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE)) { filemap_flush(inode->i_mapping); ext3_clear_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE); } /* if we are the last writer on the inode, drop the block reservation */ if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1)) { mutex_lock(&EXT3_I(inode)->truncate_mutex); ext3_discard_reservation(inode); mutex_unlock(&EXT3_I(inode)->truncate_mutex); } if (is_dx(inode) && filp->private_data) ext3_htree_free_dir_info(filp->private_data); return 0; } const struct file_operations ext3_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, .unlocked_ioctl = ext3_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext3_compat_ioctl, #endif .mmap = generic_file_mmap, .open = dquot_file_open, .release = ext3_release_file, .fsync = ext3_sync_file, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, }; const struct inode_operations ext3_file_inode_operations = { .setattr = ext3_setattr, #ifdef CONFIG_EXT3_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext3_listxattr, .removexattr = 
generic_removexattr, #endif .get_acl = ext3_get_acl, .fiemap = ext3_fiemap, };
gpl-2.0
dotCipher/xcrypt-linux-kernel-module
drivers/staging/intel_sst/intelmid.c
174
29627
/* * intelmid.c - Intel Sound card driver for MID * * Copyright (C) 2008-10 Intel Corp * Authors: Harsha Priya <priya.harsha@intel.com> * Vinod Koul <vinod.koul@intel.com> * Dharageswari R <dharageswari.r@intel.com> * KP Jeeja <jeeja.kp@intel.com> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ALSA driver for Intel MID sound card chipset */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/firmware.h> #include <linux/input.h> #include <sound/control.h> #include <asm/mrst.h> #include <sound/pcm.h> #include <sound/jack.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <linux/gpio.h> #include "intel_sst.h" #include "intel_sst_ioctl.h" #include "intel_sst_fw_ipc.h" #include "intel_sst_common.h" #include "intelmid_snd_control.h" #include "intelmid_adc_control.h" #include "intelmid.h" MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>"); MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>"); MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>"); MODULE_DESCRIPTION("Intel MAD Sound card 
driver"); MODULE_LICENSE("GPL v2"); MODULE_SUPPORTED_DEVICE("{Intel,Intel_MAD}"); static int card_index = SNDRV_DEFAULT_IDX1;/* Index 0-MAX */ static char *card_id = SNDRV_DEFAULT_STR1; /* ID for this card */ module_param(card_index, int, 0444); MODULE_PARM_DESC(card_index, "Index value for INTELMAD soundcard."); module_param(card_id, charp, 0444); MODULE_PARM_DESC(card_id, "ID string for INTELMAD soundcard."); int sst_card_vendor_id; int intelmid_audio_interrupt_enable;/*checkpatch fix*/ struct snd_intelmad *intelmad_drv; #define INFO(_cpu_id, _irq_cache, _size) \ ((kernel_ulong_t)&(struct snd_intelmad_probe_info) { \ .cpu_id = (_cpu_id), \ .irq_cache = (_irq_cache), \ .size = (_size), \ }) /* Data path functionalities */ static struct snd_pcm_hardware snd_intelmad_stream = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_DOUBLE | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP| SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_SYNC_START), .formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 | SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 | SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32), .rates = (SNDRV_PCM_RATE_8000| SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000), .rate_min = MIN_RATE, .rate_max = MAX_RATE, .channels_min = MIN_CHANNEL, .channels_max = MAX_CHANNEL_AMIC, .buffer_bytes_max = MAX_BUFFER, .period_bytes_min = MIN_PERIOD_BYTES, .period_bytes_max = MAX_PERIOD_BYTES, .periods_min = MIN_PERIODS, .periods_max = MAX_PERIODS, .fifo_size = FIFO_SIZE, }; /** * snd_intelmad_pcm_trigger - stream activities are handled here * * @substream:substream for which the stream function is called * @cmd:the stream commamd that requested from upper layer * * This function is called whenever an a stream activity is invoked */ static int snd_intelmad_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { int ret_val = 0, str_id; struct snd_intelmad *intelmaddata; struct mad_stream_pvt *stream; struct intel_sst_pcm_control 
*sst_ops; WARN_ON(!substream); intelmaddata = snd_pcm_substream_chip(substream); stream = substream->runtime->private_data; WARN_ON(!intelmaddata->sstdrv_ops); WARN_ON(!intelmaddata->sstdrv_ops->scard_ops); sst_ops = intelmaddata->sstdrv_ops->pcm_control; str_id = stream->stream_info.str_id; switch (cmd) { case SNDRV_PCM_TRIGGER_START: pr_debug("Trigger Start\n"); ret_val = sst_ops->device_control(SST_SND_START, &str_id); if (ret_val) return ret_val; stream->stream_status = RUNNING; stream->substream = substream; break; case SNDRV_PCM_TRIGGER_STOP: pr_debug("in stop\n"); ret_val = sst_ops->device_control(SST_SND_DROP, &str_id); if (ret_val) return ret_val; stream->stream_status = DROPPED; break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: pr_debug("in pause\n"); ret_val = sst_ops->device_control(SST_SND_PAUSE, &str_id); if (ret_val) return ret_val; stream->stream_status = PAUSED; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: pr_debug("in pause release\n"); ret_val = sst_ops->device_control(SST_SND_RESUME, &str_id); if (ret_val) return ret_val; stream->stream_status = RUNNING; break; default: return -EINVAL; } return ret_val; } /** * snd_intelmad_pcm_prepare- internal preparation before starting a stream * * @substream: substream for which the function is called * * This function is called when a stream is started for internal preparation. 
*/ static int snd_intelmad_pcm_prepare(struct snd_pcm_substream *substream) { struct mad_stream_pvt *stream; int ret_val = 0; struct snd_intelmad *intelmaddata; pr_debug("pcm_prepare called\n"); WARN_ON(!substream); stream = substream->runtime->private_data; intelmaddata = snd_pcm_substream_chip(substream); pr_debug("pb cnt = %d cap cnt = %d\n",\ intelmaddata->playback_cnt, intelmaddata->capture_cnt); if (stream->stream_info.str_id) { pr_debug("Prepare called for already set stream\n"); ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control( SST_SND_DROP, &stream->stream_info.str_id); return ret_val; } ret_val = snd_intelmad_alloc_stream(substream); if (ret_val < 0) return ret_val; stream->dbg_cum_bytes = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) intelmaddata->playback_cnt++; else intelmaddata->capture_cnt++; /* return back the stream id */ snprintf(substream->pcm->id, sizeof(substream->pcm->id), "%d", stream->stream_info.str_id); pr_debug("stream id to user = %s\n", substream->pcm->id); ret_val = snd_intelmad_init_stream(substream); if (ret_val) return ret_val; substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER; return ret_val; } static int snd_intelmad_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int ret_val; pr_debug("snd_intelmad_hw_params called\n"); ret_val = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); memset(substream->runtime->dma_area, 0, params_buffer_bytes(hw_params)); return ret_val; } static int snd_intelmad_hw_free(struct snd_pcm_substream *substream) { pr_debug("snd_intelmad_hw_free called\n"); return snd_pcm_lib_free_pages(substream); } /** * snd_intelmad_pcm_pointer- to send the current buffer pointer processed by hw * * @substream: substream for which the function is called * * This function is called by ALSA framework to get the current hw buffer ptr * when a period is elapsed */ static snd_pcm_uframes_t snd_intelmad_pcm_pointer (struct 
snd_pcm_substream *substream) { /* struct snd_pcm_runtime *runtime = substream->runtime; */ struct mad_stream_pvt *stream; struct snd_intelmad *intelmaddata; int ret_val; WARN_ON(!substream); intelmaddata = snd_pcm_substream_chip(substream); stream = substream->runtime->private_data; if (stream->stream_status == INIT) return 0; ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control( SST_SND_BUFFER_POINTER, &stream->stream_info); if (ret_val) { pr_err("error code = 0x%x\n", ret_val); return ret_val; } pr_debug("samples reported out 0x%llx\n", stream->stream_info.buffer_ptr); pr_debug("Frame bits:: %d period_count :: %d\n", (int)substream->runtime->frame_bits, (int)substream->runtime->period_size); return stream->stream_info.buffer_ptr; } /** * snd_intelmad_close- to free parameteres when stream is stopped * * @substream: substream for which the function is called * * This function is called by ALSA framework when stream is stopped */ static int snd_intelmad_close(struct snd_pcm_substream *substream) { struct snd_intelmad *intelmaddata; struct mad_stream_pvt *stream; int ret_val = 0, str_id; WARN_ON(!substream); stream = substream->runtime->private_data; str_id = stream->stream_info.str_id; pr_debug("sst: snd_intelmad_close called for %d\n", str_id); intelmaddata = snd_pcm_substream_chip(substream); pr_debug("str id = %d\n", stream->stream_info.str_id); if (stream->stream_info.str_id) { /* SST API to actually stop/free the stream */ ret_val = intelmaddata->sstdrv_ops->pcm_control->close(str_id); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) intelmaddata->playback_cnt--; else intelmaddata->capture_cnt--; } pr_debug("snd_intelmad_close : pb cnt = %d cap cnt = %d\n", intelmaddata->playback_cnt, intelmaddata->capture_cnt); kfree(substream->runtime->private_data); return ret_val; } /** * snd_intelmad_open- to set runtime parameters during stream start * * @substream: substream for which the function is called * @type: audio device type * * This function is 
called by ALSA framework when stream is started */ static int snd_intelmad_open(struct snd_pcm_substream *substream, enum snd_sst_audio_device_type type) { struct snd_intelmad *intelmaddata; struct snd_pcm_runtime *runtime; struct mad_stream_pvt *stream; WARN_ON(!substream); pr_debug("snd_intelmad_open called\n"); intelmaddata = snd_pcm_substream_chip(substream); runtime = substream->runtime; /* set the runtime hw parameter with local snd_pcm_hardware struct */ runtime->hw = snd_intelmad_stream; if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) { /* * MRST firmware currently denies stereo recording requests. */ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { runtime->hw.formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16); runtime->hw.channels_max = 1; } } if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) { runtime->hw = snd_intelmad_stream; runtime->hw.rates = SNDRV_PCM_RATE_48000; runtime->hw.rate_min = MAX_RATE; runtime->hw.formats = (SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24); if (intelmaddata->sstdrv_ops->scard_ops->input_dev_id == AMIC) runtime->hw.channels_max = MAX_CHANNEL_AMIC; else runtime->hw.channels_max = MAX_CHANNEL_DMIC; } /* setup the internal datastruture stream pointers based on it being playback or capture stream */ stream = kzalloc(sizeof(*stream), GFP_KERNEL); if (!stream) return -ENOMEM; stream->stream_info.str_id = 0; stream->device = type; stream->stream_status = INIT; runtime->private_data = stream; return snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); } static int snd_intelmad_headset_open(struct snd_pcm_substream *substream) { return snd_intelmad_open(substream, SND_SST_DEVICE_HEADSET); } static int snd_intelmad_ihf_open(struct snd_pcm_substream *substream) { return snd_intelmad_open(substream, SND_SST_DEVICE_IHF); } static int snd_intelmad_vibra_open(struct snd_pcm_substream *substream) { return snd_intelmad_open(substream, SND_SST_DEVICE_VIBRA); } static int snd_intelmad_haptic_open(struct snd_pcm_substream 
*substream) { return snd_intelmad_open(substream, SND_SST_DEVICE_HAPTIC); } static struct snd_pcm_ops snd_intelmad_headset_ops = { .open = snd_intelmad_headset_open, .close = snd_intelmad_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_intelmad_hw_params, .hw_free = snd_intelmad_hw_free, .prepare = snd_intelmad_pcm_prepare, .trigger = snd_intelmad_pcm_trigger, .pointer = snd_intelmad_pcm_pointer, }; static struct snd_pcm_ops snd_intelmad_ihf_ops = { .open = snd_intelmad_ihf_open, .close = snd_intelmad_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_intelmad_hw_params, .hw_free = snd_intelmad_hw_free, .prepare = snd_intelmad_pcm_prepare, .trigger = snd_intelmad_pcm_trigger, .pointer = snd_intelmad_pcm_pointer, }; static struct snd_pcm_ops snd_intelmad_vibra_ops = { .open = snd_intelmad_vibra_open, .close = snd_intelmad_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_intelmad_hw_params, .hw_free = snd_intelmad_hw_free, .prepare = snd_intelmad_pcm_prepare, .trigger = snd_intelmad_pcm_trigger, .pointer = snd_intelmad_pcm_pointer, }; static struct snd_pcm_ops snd_intelmad_haptic_ops = { .open = snd_intelmad_haptic_open, .close = snd_intelmad_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_intelmad_hw_params, .hw_free = snd_intelmad_hw_free, .prepare = snd_intelmad_pcm_prepare, .trigger = snd_intelmad_pcm_trigger, .pointer = snd_intelmad_pcm_pointer, }; static struct snd_pcm_ops snd_intelmad_capture_ops = { .open = snd_intelmad_headset_open, .close = snd_intelmad_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_intelmad_hw_params, .hw_free = snd_intelmad_hw_free, .prepare = snd_intelmad_pcm_prepare, .trigger = snd_intelmad_pcm_trigger, .pointer = snd_intelmad_pcm_pointer, }; int intelmad_get_mic_bias(void) { struct snd_pmic_ops *pmic_ops; if (!intelmad_drv || !intelmad_drv->sstdrv_ops) return -ENODEV; pmic_ops = intelmad_drv->sstdrv_ops->scard_ops; if (pmic_ops && pmic_ops->pmic_get_mic_bias) return pmic_ops->pmic_get_mic_bias(intelmad_drv); else 
return -ENODEV; } EXPORT_SYMBOL_GPL(intelmad_get_mic_bias); int intelmad_set_headset_state(int state) { struct snd_pmic_ops *pmic_ops; if (!intelmad_drv || !intelmad_drv->sstdrv_ops) return -ENODEV; pmic_ops = intelmad_drv->sstdrv_ops->scard_ops; if (pmic_ops && pmic_ops->pmic_set_headset_state) return pmic_ops->pmic_set_headset_state(state); else return -ENODEV; } EXPORT_SYMBOL_GPL(intelmad_set_headset_state); void sst_process_mad_jack_detection(struct work_struct *work) { u8 interrupt_status; struct mad_jack_msg_wq *mad_jack_detect = container_of(work, struct mad_jack_msg_wq, wq); struct snd_intelmad *intelmaddata = mad_jack_detect->intelmaddata; if (!intelmaddata) return; interrupt_status = mad_jack_detect->intsts; if (intelmaddata->sstdrv_ops && intelmaddata->sstdrv_ops->scard_ops && intelmaddata->sstdrv_ops->scard_ops->pmic_irq_cb) { intelmaddata->sstdrv_ops->scard_ops->pmic_irq_cb( (void *)intelmaddata, interrupt_status); intelmaddata->sstdrv_ops->scard_ops->pmic_jack_enable(); } kfree(mad_jack_detect); } /** * snd_intelmad_intr_handler- interrupt handler * * @irq : irq number of the interrupt received * @dev: device context * * This function is called when an interrupt is raised at the sound card */ static irqreturn_t snd_intelmad_intr_handler(int irq, void *dev) { struct snd_intelmad *intelmaddata = (struct snd_intelmad *)dev; u8 interrupt_status; struct mad_jack_msg_wq *mad_jack_msg; memcpy_fromio(&interrupt_status, ((void *)(intelmaddata->int_base)), sizeof(u8)); mad_jack_msg = kzalloc(sizeof(*mad_jack_msg), GFP_ATOMIC); mad_jack_msg->intsts = interrupt_status; mad_jack_msg->intelmaddata = intelmaddata; INIT_WORK(&mad_jack_msg->wq, sst_process_mad_jack_detection); queue_work(intelmaddata->mad_jack_wq, &mad_jack_msg->wq); return IRQ_HANDLED; } void sst_mad_send_jack_report(struct snd_jack *jack, int buttonpressevent , int status) { if (!jack) { pr_debug("MAD error jack empty\n"); } else { snd_jack_report(jack, status); /* button pressed and released */ if 
(buttonpressevent) snd_jack_report(jack, 0); pr_debug("MAD sending jack report Done !!!\n"); } } static int __devinit snd_intelmad_register_irq( struct snd_intelmad *intelmaddata, unsigned int regbase, unsigned int regsize) { int ret_val; char *drv_name; pr_debug("irq reg regbase 0x%x, regsize 0x%x\n", regbase, regsize); intelmaddata->int_base = ioremap_nocache(regbase, regsize); if (!intelmaddata->int_base) pr_err("Mapping of cache failed\n"); pr_debug("irq = 0x%x\n", intelmaddata->irq); if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) drv_name = DRIVER_NAME_MFLD; else drv_name = DRIVER_NAME_MRST; ret_val = request_irq(intelmaddata->irq, snd_intelmad_intr_handler, IRQF_SHARED, drv_name, intelmaddata); if (ret_val) pr_err("cannot register IRQ\n"); return ret_val; } static int __devinit snd_intelmad_sst_register( struct snd_intelmad *intelmaddata) { int ret_val = 0; struct snd_pmic_ops *intelmad_vendor_ops[MAX_VENDORS] = { &snd_pmic_ops_fs, &snd_pmic_ops_mx, &snd_pmic_ops_nc, &snd_msic_ops }; struct sc_reg_access vendor_addr = {0x00, 0x00, 0x00}; if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) { ret_val = sst_sc_reg_access(&vendor_addr, PMIC_READ, 1); if (ret_val) return ret_val; sst_card_vendor_id = (vendor_addr.value & (MASK2|MASK1|MASK0)); pr_debug("original n extrated vendor id = 0x%x %d\n", vendor_addr.value, sst_card_vendor_id); if (sst_card_vendor_id < 0 || sst_card_vendor_id > 2) { pr_err("vendor card not supported!!\n"); return -EIO; } } else sst_card_vendor_id = 0x3; intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES; intelmaddata->sstdrv_ops->vendor_id = sst_card_vendor_id; BUG_ON(!intelmad_vendor_ops[sst_card_vendor_id]); intelmaddata->sstdrv_ops->scard_ops = intelmad_vendor_ops[sst_card_vendor_id]; if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) { intelmaddata->sstdrv_ops->scard_ops->pb_on = 0; intelmaddata->sstdrv_ops->scard_ops->cap_on = 0; intelmaddata->sstdrv_ops->scard_ops->input_dev_id = DMIC; intelmaddata->sstdrv_ops->scard_ops->output_dev_id = 
STEREO_HEADPHONE; intelmaddata->sstdrv_ops->scard_ops->lineout_dev_id = NONE; } /* registering with SST driver to get access to SST APIs to use */ ret_val = register_sst_card(intelmaddata->sstdrv_ops); if (ret_val) { pr_err("sst card registration failed\n"); return ret_val; } sst_card_vendor_id = intelmaddata->sstdrv_ops->vendor_id; intelmaddata->pmic_status = PMIC_UNINIT; return ret_val; } static void snd_intelmad_page_free(struct snd_pcm *pcm) { snd_pcm_lib_preallocate_free_for_all(pcm); } /* Driver Init/exit functionalities */ /** * snd_intelmad_pcm_new - to setup pcm for the card * * @card: pointer to the sound card structure * @intelmaddata: pointer to internal context * @pb: playback count for this card * @cap: capture count for this card * @index: device index * * This function is called from probe function to set up pcm params * and functions */ static int __devinit snd_intelmad_pcm_new(struct snd_card *card, struct snd_intelmad *intelmaddata, unsigned int pb, unsigned int cap, unsigned int index) { int ret_val = 0; struct snd_pcm *pcm; char name[32] = INTEL_MAD; struct snd_pcm_ops *pb_ops = NULL, *cap_ops = NULL; pr_debug("called for pb %d, cp %d, idx %d\n", pb, cap, index); ret_val = snd_pcm_new(card, name, index, pb, cap, &pcm); if (ret_val) return ret_val; /* setup the ops for playback and capture streams */ switch (index) { case 0: pb_ops = &snd_intelmad_headset_ops; cap_ops = &snd_intelmad_capture_ops; break; case 1: pb_ops = &snd_intelmad_ihf_ops; cap_ops = &snd_intelmad_capture_ops; break; case 2: pb_ops = &snd_intelmad_vibra_ops; cap_ops = &snd_intelmad_capture_ops; break; case 3: pb_ops = &snd_intelmad_haptic_ops; cap_ops = &snd_intelmad_capture_ops; break; } if (pb) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, pb_ops); if (cap) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, cap_ops); /* setup private data which can be retrieved when required */ pcm->private_data = intelmaddata; pcm->private_free = snd_intelmad_page_free; pcm->info_flags = 0; 
strncpy(pcm->name, card->shortname, strlen(card->shortname)); /* allocate dma pages for ALSA stream operations */ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), MIN_BUFFER, MAX_BUFFER); return ret_val; } static int __devinit snd_intelmad_pcm(struct snd_card *card, struct snd_intelmad *intelmaddata) { int ret_val = 0; WARN_ON(!card); WARN_ON(!intelmaddata); pr_debug("snd_intelmad_pcm called\n"); ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 1, 0); if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) return ret_val; ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 1); if (ret_val) return ret_val; ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 2); if (ret_val) return ret_val; return snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 3); } /** * snd_intelmad_jack- to setup jack settings of the card * * @intelmaddata: pointer to internal context * * This function is called send jack events */ static int snd_intelmad_jack(struct snd_intelmad *intelmaddata) { struct snd_jack *jack; int retval; pr_debug("snd_intelmad_jack called\n"); jack = &intelmaddata->jack[0].jack; snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PHONE); retval = snd_jack_new(intelmaddata->card, "Intel(R) MID Audio Jack", SND_JACK_HEADPHONE | SND_JACK_HEADSET | SW_JACK_PHYSICAL_INSERT | SND_JACK_BTN_0 | SND_JACK_BTN_1, &jack); pr_debug("snd_intelmad_jack called\n"); if (retval < 0) return retval; snd_jack_report(jack, 0); jack->private_data = jack; intelmaddata->jack[0].jack = *jack; return retval; } /** * snd_intelmad_mixer- to setup mixer settings of the card * * @intelmaddata: pointer to internal context * * This function is called from probe function to set up mixer controls */ static int __devinit snd_intelmad_mixer(struct snd_intelmad *intelmaddata) { struct snd_card *card; unsigned int idx; int ret_val = 0, max_controls = 0; char *mixername = "IntelMAD Controls"; struct snd_kcontrol_new *controls; WARN_ON(!intelmaddata); card 
= intelmaddata->card; strncpy(card->mixername, mixername, sizeof(card->mixername)-1); /* add all widget controls and expose the same */ if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) { max_controls = MAX_CTRL_MFLD; controls = snd_intelmad_controls_mfld; } else { max_controls = MAX_CTRL_MRST; controls = snd_intelmad_controls_mrst; } for (idx = 0; idx < max_controls; idx++) { ret_val = snd_ctl_add(card, snd_ctl_new1(&controls[idx], intelmaddata)); pr_debug("mixer[idx]=%d added\n", idx); if (ret_val) { pr_err("in adding of control index = %d\n", idx); break; } } return ret_val; } static int snd_intelmad_dev_free(struct snd_device *device) { struct snd_intelmad *intelmaddata; WARN_ON(!device); intelmaddata = device->device_data; pr_debug("snd_intelmad_dev_free called\n"); unregister_sst_card(intelmaddata->sstdrv_ops); /* free allocated memory for internal context */ destroy_workqueue(intelmaddata->mad_jack_wq); device->device_data = NULL; kfree(intelmaddata->sstdrv_ops); kfree(intelmaddata); return 0; } static int __devinit snd_intelmad_create( struct snd_intelmad *intelmaddata, struct snd_card *card) { int ret_val; static struct snd_device_ops ops = { .dev_free = snd_intelmad_dev_free, }; WARN_ON(!intelmaddata); WARN_ON(!card); /* ALSA api to register for the device */ ret_val = snd_device_new(card, SNDRV_DEV_LOWLEVEL, intelmaddata, &ops); return ret_val; } /** * snd_intelmad_probe- function registred for init * @pdev : pointer to the device struture * This function is called when the device is initialized */ int __devinit snd_intelmad_probe(struct platform_device *pdev) { struct snd_card *card; int ret_val; struct snd_intelmad *intelmaddata; const struct platform_device_id *id = platform_get_device_id(pdev); struct snd_intelmad_probe_info *info = (void *)id->driver_data; pr_debug("probe for %s cpu_id %d\n", pdev->name, info->cpu_id); pr_debug("rq_chache %x of size %x\n", info->irq_cache, info->size); if (!strcmp(pdev->name, DRIVER_NAME_MRST)) pr_debug("detected 
MRST\n"); else if (!strcmp(pdev->name, DRIVER_NAME_MFLD)) pr_debug("detected MFLD\n"); else { pr_err("detected unknown device abort!!\n"); return -EIO; } if ((info->cpu_id < CPU_CHIP_LINCROFT) || (info->cpu_id > CPU_CHIP_PENWELL)) { pr_err("detected unknown cpu_id abort!!\n"); return -EIO; } /* allocate memory for saving internal context and working */ intelmaddata = kzalloc(sizeof(*intelmaddata), GFP_KERNEL); if (!intelmaddata) { pr_debug("mem alloctn fail\n"); return -ENOMEM; } intelmad_drv = intelmaddata; /* allocate memory for LPE API set */ intelmaddata->sstdrv_ops = kzalloc(sizeof(struct intel_sst_card_ops), GFP_KERNEL); if (!intelmaddata->sstdrv_ops) { pr_err("mem allocation for ops fail\n"); kfree(intelmaddata); return -ENOMEM; } intelmaddata->cpu_id = info->cpu_id; /* create a card instance with ALSA framework */ ret_val = snd_card_create(card_index, card_id, THIS_MODULE, 0, &card); if (ret_val) { pr_err("snd_card_create fail\n"); goto free_allocs; } intelmaddata->pdev = pdev; intelmaddata->irq = platform_get_irq(pdev, 0); platform_set_drvdata(pdev, intelmaddata); intelmaddata->card = card; intelmaddata->card_id = card_id; intelmaddata->card_index = card_index; intelmaddata->master_mute = UNMUTE; intelmaddata->playback_cnt = intelmaddata->capture_cnt = 0; strncpy(card->driver, INTEL_MAD, strlen(INTEL_MAD)); strncpy(card->shortname, INTEL_MAD, strlen(INTEL_MAD)); intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES; /* registering with LPE driver to get access to SST APIs to use */ ret_val = snd_intelmad_sst_register(intelmaddata); if (ret_val) { pr_err("snd_intelmad_sst_register failed\n"); goto set_null_data; } intelmaddata->pmic_status = PMIC_INIT; ret_val = snd_intelmad_pcm(card, intelmaddata); if (ret_val) { pr_err("snd_intelmad_pcm failed\n"); goto free_sst; } ret_val = snd_intelmad_mixer(intelmaddata); if (ret_val) { pr_err("snd_intelmad_mixer failed\n"); goto free_card; } ret_val = snd_intelmad_jack(intelmaddata); if (ret_val) { 
pr_err("snd_intelmad_jack failed\n"); goto free_card; } intelmaddata->adc_address = mid_initialize_adc(); /*create work queue for jack interrupt*/ INIT_WORK(&intelmaddata->mad_jack_msg.wq, sst_process_mad_jack_detection); intelmaddata->mad_jack_wq = create_workqueue("sst_mad_jack_wq"); if (!intelmaddata->mad_jack_wq) goto free_card; ret_val = snd_intelmad_register_irq(intelmaddata, info->irq_cache, info->size); if (ret_val) { pr_err("snd_intelmad_register_irq fail\n"); goto free_mad_jack_wq; } /* internal function call to register device with ALSA */ ret_val = snd_intelmad_create(intelmaddata, card); if (ret_val) { pr_err("snd_intelmad_create failed\n"); goto set_pvt_data; } card->private_data = &intelmaddata; snd_card_set_dev(card, &pdev->dev); ret_val = snd_card_register(card); if (ret_val) { pr_err("snd_card_register failed\n"); goto set_pvt_data; } if (pdev->dev.platform_data) { int gpio_amp = *(int *)pdev->dev.platform_data; if (gpio_request_one(gpio_amp, GPIOF_OUT_INIT_LOW, "amp power")) gpio_amp = 0; intelmaddata->sstdrv_ops->scard_ops->gpio_amp = gpio_amp; } pr_debug("snd_intelmad_probe complete\n"); return ret_val; set_pvt_data: card->private_data = NULL; free_mad_jack_wq: destroy_workqueue(intelmaddata->mad_jack_wq); free_card: snd_card_free(intelmaddata->card); free_sst: unregister_sst_card(intelmaddata->sstdrv_ops); set_null_data: platform_set_drvdata(pdev, NULL); free_allocs: pr_err("probe failed\n"); snd_card_free(card); kfree(intelmaddata->sstdrv_ops); kfree(intelmaddata); return ret_val; } static int snd_intelmad_remove(struct platform_device *pdev) { struct snd_intelmad *intelmaddata = platform_get_drvdata(pdev); if (intelmaddata) { if (intelmaddata->sstdrv_ops->scard_ops->gpio_amp) gpio_free(intelmaddata->sstdrv_ops->scard_ops->gpio_amp); free_irq(intelmaddata->irq, intelmaddata); snd_card_free(intelmaddata->card); } intelmad_drv = NULL; platform_set_drvdata(pdev, NULL); return 0; } 
/********************************************************************* * Driver initialization and exit *********************************************************************/ static const struct platform_device_id snd_intelmad_ids[] = { {DRIVER_NAME_MRST, INFO(CPU_CHIP_LINCROFT, AUDINT_BASE, 1)}, {DRIVER_NAME_MFLD, INFO(CPU_CHIP_PENWELL, 0xFFFF7FCD, 1)}, {"", 0}, }; static struct platform_driver snd_intelmad_driver = { .driver = { .owner = THIS_MODULE, .name = "intel_mid_sound_card", }, .id_table = snd_intelmad_ids, .probe = snd_intelmad_probe, .remove = __devexit_p(snd_intelmad_remove), }; /* * alsa_card_intelmad_init- driver init function * * This function is called when driver module is inserted */ static int __init alsa_card_intelmad_init(void) { pr_debug("mad_init called\n"); return platform_driver_register(&snd_intelmad_driver); } /** * alsa_card_intelmad_exit- driver exit function * * This function is called when driver module is removed */ static void __exit alsa_card_intelmad_exit(void) { pr_debug("mad_exit called\n"); return platform_driver_unregister(&snd_intelmad_driver); } module_init(alsa_card_intelmad_init) module_exit(alsa_card_intelmad_exit)
gpl-2.0
titusece/linux_imx
fs/cifs/readdir.c
174
25075
/* * fs/cifs/readdir.c * * Directory search handling * * Copyright (C) International Business Machines Corp., 2004, 2008 * Copyright (C) Red Hat, Inc., 2011 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/stat.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifsfs.h" /* * To be safe - for UCS to UTF-8 with strings loaded with the rare long * characters alloc more to account for such multibyte target UTF-8 * characters. 
*/ #define UNICODE_NAME_MAX ((4 * NAME_MAX) + 2) #ifdef CONFIG_CIFS_DEBUG2 static void dump_cifs_file_struct(struct file *file, char *label) { struct cifsFileInfo *cf; if (file) { cf = file->private_data; if (cf == NULL) { cifs_dbg(FYI, "empty cifs private file data\n"); return; } if (cf->invalidHandle) cifs_dbg(FYI, "invalid handle\n"); if (cf->srch_inf.endOfSearch) cifs_dbg(FYI, "end of search\n"); if (cf->srch_inf.emptyDir) cifs_dbg(FYI, "empty dir\n"); } } #else static inline void dump_cifs_file_struct(struct file *file, char *label) { } #endif /* DEBUG2 */ /* * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT * * Find the dentry that matches "name". If there isn't one, create one. If it's * a negative dentry or the uniqueid changed, then drop it and recreate it. */ static void cifs_prime_dcache(struct dentry *parent, struct qstr *name, struct cifs_fattr *fattr) { struct dentry *dentry, *alias; struct inode *inode; struct super_block *sb = parent->d_inode->i_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); dentry = d_hash_and_lookup(parent, name); if (unlikely(IS_ERR(dentry))) return; if (dentry) { int err; inode = dentry->d_inode; if (inode) { /* * If we're generating inode numbers, then we don't * want to clobber the existing one with the one that * the readdir code created. */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) fattr->cf_uniqueid = CIFS_I(inode)->uniqueid; /* update inode in place if i_ino didn't change */ if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) { cifs_fattr_to_inode(inode, fattr); goto out; } } err = d_invalidate(dentry); dput(dentry); if (err) return; } /* * If we know that the inode will need to be revalidated immediately, * then don't create a new dentry for it. We'll end up doing an on * the wire call either way and this spares us an invalidation. 
*/ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) return; dentry = d_alloc(parent, name); if (!dentry) return; inode = cifs_iget(sb, fattr); if (!inode) goto out; alias = d_materialise_unique(dentry, inode); if (alias && !IS_ERR(alias)) dput(alias); out: dput(dentry); } static void cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) { fattr->cf_uid = cifs_sb->mnt_uid; fattr->cf_gid = cifs_sb->mnt_gid; if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; fattr->cf_dtype = DT_DIR; } else { fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode; fattr->cf_dtype = DT_REG; } /* * We need to revalidate it further to make a decision about whether it * is a symbolic link, DFS referral or a reparse point with a direct * access like junctions, deduplicated files, NFS symlinks. */ if (fattr->cf_cifsattrs & ATTR_REPARSE) fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; /* non-unix readdir doesn't provide nlink */ fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK; if (fattr->cf_cifsattrs & ATTR_READONLY) fattr->cf_mode &= ~S_IWUGO; /* * We of course don't get ACL info in FIND_FIRST/NEXT results, so * mark it for revalidation so that "ls -l" will look right. It might * be super-slow, but if we don't do this then the ownership of files * may look wrong since the inodes may not have timed out by the time * "ls" does a stat() call on them. 
*/ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL && fattr->cf_cifsattrs & ATTR_SYSTEM) { if (fattr->cf_eof == 0) { fattr->cf_mode &= ~S_IFMT; fattr->cf_mode |= S_IFIFO; fattr->cf_dtype = DT_FIFO; } else { /* * trying to get the type and mode via SFU can be slow, * so just call those regular files for now, and mark * for reval */ fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; } } } void cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info, struct cifs_sb_info *cifs_sb) { memset(fattr, 0, sizeof(*fattr)); fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes); fattr->cf_eof = le64_to_cpu(info->EndOfFile); fattr->cf_bytes = le64_to_cpu(info->AllocationSize); fattr->cf_createtime = le64_to_cpu(info->CreationTime); fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime); fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); cifs_fill_common_info(fattr, cifs_sb); } static void cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info, struct cifs_sb_info *cifs_sb) { int offset = cifs_sb_master_tcon(cifs_sb)->ses->server->timeAdj; memset(fattr, 0, sizeof(*fattr)); fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate, info->LastAccessTime, offset); fattr->cf_ctime = cnvrtDosUnixTm(info->LastWriteDate, info->LastWriteTime, offset); fattr->cf_mtime = cnvrtDosUnixTm(info->LastWriteDate, info->LastWriteTime, offset); fattr->cf_cifsattrs = le16_to_cpu(info->Attributes); fattr->cf_bytes = le32_to_cpu(info->AllocationSize); fattr->cf_eof = le32_to_cpu(info->DataSize); cifs_fill_common_info(fattr, cifs_sb); } /* BB eventually need to add the following helper function to resolve NT_STATUS_STOPPED_ON_SYMLINK return code when we try to do FindFirst on (NTFS) directory symlinks */ /* int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb, unsigned int xid) { __u16 
fid; int len; int oplock = 0; int rc; struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb); char *tmpbuffer; rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ, OPEN_REPARSE_POINT, &fid, &oplock, NULL, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (!rc) { tmpbuffer = kmalloc(maxpath); rc = CIFSSMBQueryReparseLinkInfo(xid, ptcon, full_path, tmpbuffer, maxpath -1, fid, cifs_sb->local_nls); if (CIFSSMBClose(xid, ptcon, fid)) { cifs_dbg(FYI, "Error closing temporary reparsepoint open\n"); } } } */ static int initiate_cifs_search(const unsigned int xid, struct file *file) { __u16 search_flags; int rc = 0; char *full_path = NULL; struct cifsFileInfo *cifsFile; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); struct tcon_link *tlink = NULL; struct cifs_tcon *tcon; struct TCP_Server_Info *server; if (file->private_data == NULL) { tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); if (cifsFile == NULL) { rc = -ENOMEM; goto error_exit; } file->private_data = cifsFile; cifsFile->tlink = cifs_get_tlink(tlink); tcon = tlink_tcon(tlink); } else { cifsFile = file->private_data; tcon = tlink_tcon(cifsFile->tlink); } server = tcon->ses->server; if (!server->ops->query_dir_first) { rc = -ENOSYS; goto error_exit; } cifsFile->invalidHandle = true; cifsFile->srch_inf.endOfSearch = false; full_path = build_path_from_dentry(file->f_path.dentry); if (full_path == NULL) { rc = -ENOMEM; goto error_exit; } cifs_dbg(FYI, "Full path: %s start at: %lld\n", full_path, file->f_pos); ffirst_retry: /* test for Unix extensions */ /* but now check for them on the share/mount not on the SMB session */ /* if (cap_unix(tcon->ses) { */ if (tcon->unix_ext) cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX; else if ((tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find) == 0) { cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD; } else if 
(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO; } else /* not srvinos - BB fixme add check for backlevel? */ { cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO; } search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME; if (backup_cred(cifs_sb)) search_flags |= CIFS_SEARCH_BACKUP_SEARCH; rc = server->ops->query_dir_first(xid, tcon, full_path, cifs_sb, &cifsFile->fid, search_flags, &cifsFile->srch_inf); if (rc == 0) cifsFile->invalidHandle = false; /* BB add following call to handle readdir on new NTFS symlink errors else if STATUS_STOPPED_ON_SYMLINK call get_symlink_reparse_path and retry with new path */ else if ((rc == -EOPNOTSUPP) && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; goto ffirst_retry; } error_exit: kfree(full_path); cifs_put_tlink(tlink); return rc; } /* return length of unicode string in bytes */ static int cifs_unicode_bytelen(const char *str) { int len; const __le16 *ustr = (const __le16 *)str; for (len = 0; len <= PATH_MAX; len++) { if (ustr[len] == 0) return len << 1; } cifs_dbg(FYI, "Unicode string longer than PATH_MAX found\n"); return len << 1; } static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level) { char *new_entry; FILE_DIRECTORY_INFO *pDirInfo = (FILE_DIRECTORY_INFO *)old_entry; if (level == SMB_FIND_FILE_INFO_STANDARD) { FIND_FILE_STANDARD_INFO *pfData; pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo; new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + pfData->FileNameLength; } else new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry); /* validate that new_entry is not past end of SMB */ if (new_entry >= end_of_smb) { cifs_dbg(VFS, "search entry %p began after end of SMB %p old entry %p\n", new_entry, end_of_smb, old_entry); return NULL; } else if (((level == SMB_FIND_FILE_INFO_STANDARD) 
&& (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb)) || ((level != SMB_FIND_FILE_INFO_STANDARD) && (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) { cifs_dbg(VFS, "search entry %p extends after end of SMB %p\n", new_entry, end_of_smb); return NULL; } else return new_entry; } struct cifs_dirent { const char *name; size_t namelen; u32 resume_key; u64 ino; }; static void cifs_fill_dirent_unix(struct cifs_dirent *de, const FILE_UNIX_INFO *info, bool is_unicode) { de->name = &info->FileName[0]; if (is_unicode) de->namelen = cifs_unicode_bytelen(de->name); else de->namelen = strnlen(de->name, PATH_MAX); de->resume_key = info->ResumeKey; de->ino = le64_to_cpu(info->basic.UniqueId); } static void cifs_fill_dirent_dir(struct cifs_dirent *de, const FILE_DIRECTORY_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; } static void cifs_fill_dirent_full(struct cifs_dirent *de, const FILE_FULL_DIRECTORY_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; } static void cifs_fill_dirent_search(struct cifs_dirent *de, const SEARCH_ID_FULL_DIR_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; de->ino = le64_to_cpu(info->UniqueId); } static void cifs_fill_dirent_both(struct cifs_dirent *de, const FILE_BOTH_DIRECTORY_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; } static void cifs_fill_dirent_std(struct cifs_dirent *de, const FIND_FILE_STANDARD_INFO *info) { de->name = &info->FileName[0]; /* one byte length, no endianess conversion */ de->namelen = info->FileNameLength; de->resume_key = info->ResumeKey; } static int cifs_fill_dirent(struct cifs_dirent *de, const void *info, u16 level, bool is_unicode) { memset(de, 0, sizeof(*de)); switch (level) { case 
SMB_FIND_FILE_UNIX: cifs_fill_dirent_unix(de, info, is_unicode); break; case SMB_FIND_FILE_DIRECTORY_INFO: cifs_fill_dirent_dir(de, info); break; case SMB_FIND_FILE_FULL_DIRECTORY_INFO: cifs_fill_dirent_full(de, info); break; case SMB_FIND_FILE_ID_FULL_DIR_INFO: cifs_fill_dirent_search(de, info); break; case SMB_FIND_FILE_BOTH_DIRECTORY_INFO: cifs_fill_dirent_both(de, info); break; case SMB_FIND_FILE_INFO_STANDARD: cifs_fill_dirent_std(de, info); break; default: cifs_dbg(FYI, "Unknown findfirst level %d\n", level); return -EINVAL; } return 0; } #define UNICODE_DOT cpu_to_le16(0x2e) /* return 0 if no match and 1 for . (current directory) and 2 for .. (parent) */ static int cifs_entry_is_dot(struct cifs_dirent *de, bool is_unicode) { int rc = 0; if (!de->name) return 0; if (is_unicode) { __le16 *ufilename = (__le16 *)de->name; if (de->namelen == 2) { /* check for . */ if (ufilename[0] == UNICODE_DOT) rc = 1; } else if (de->namelen == 4) { /* check for .. */ if (ufilename[0] == UNICODE_DOT && ufilename[1] == UNICODE_DOT) rc = 2; } } else /* ASCII */ { if (de->namelen == 1) { if (de->name[0] == '.') rc = 1; } else if (de->namelen == 2) { if (de->name[0] == '.' 
&& de->name[1] == '.') rc = 2; } } return rc; } /* Check if directory that we are searching has changed so we can decide whether we can use the cached search results from the previous search */ static int is_dir_changed(struct file *file) { struct inode *inode = file_inode(file); struct cifsInodeInfo *cifsInfo = CIFS_I(inode); if (cifsInfo->time == 0) return 1; /* directory was changed, perhaps due to unlink */ else return 0; } static int cifs_save_resume_key(const char *current_entry, struct cifsFileInfo *file_info) { struct cifs_dirent de; int rc; rc = cifs_fill_dirent(&de, current_entry, file_info->srch_inf.info_level, file_info->srch_inf.unicode); if (!rc) { file_info->srch_inf.presume_name = de.name; file_info->srch_inf.resume_name_len = de.namelen; file_info->srch_inf.resume_key = de.resume_key; } return rc; } /* * Find the corresponding entry in the search. Note that the SMB server returns * search entries for . and .. which complicates logic here if we choose to * parse for them and we do not assume that they are located in the findfirst * return buffer. We start counting in the buffer with entry 2 and increment for * every entry (do not increment for . or .. entry). 
*/ static int find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, struct file *file, char **current_entry, int *num_to_ret) { __u16 search_flags; int rc = 0; int pos_in_buf = 0; loff_t first_entry_in_buffer; loff_t index_to_find = pos; struct cifsFileInfo *cfile = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); struct TCP_Server_Info *server = tcon->ses->server; /* check if index in the buffer */ if (!server->ops->query_dir_first || !server->ops->query_dir_next) return -ENOSYS; if ((cfile == NULL) || (current_entry == NULL) || (num_to_ret == NULL)) return -ENOENT; *current_entry = NULL; first_entry_in_buffer = cfile->srch_inf.index_of_last_entry - cfile->srch_inf.entries_in_buffer; /* * If first entry in buf is zero then is first buffer * in search response data which means it is likely . and .. * will be in this buffer, although some servers do not return * . and .. for the root of a drive and for those we need * to start two entries earlier. */ dump_cifs_file_struct(file, "In fce "); if (((index_to_find < cfile->srch_inf.index_of_last_entry) && is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) { /* close and restart search */ cifs_dbg(FYI, "search backing up - close and restart search\n"); spin_lock(&cifs_file_list_lock); if (server->ops->dir_needs_close(cfile)) { cfile->invalidHandle = true; spin_unlock(&cifs_file_list_lock); if (server->ops->close_dir) server->ops->close_dir(xid, tcon, &cfile->fid); } else spin_unlock(&cifs_file_list_lock); if (cfile->srch_inf.ntwrk_buf_start) { cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n"); if (cfile->srch_inf.smallBuf) cifs_small_buf_release(cfile->srch_inf. ntwrk_buf_start); else cifs_buf_release(cfile->srch_inf. 
ntwrk_buf_start); cfile->srch_inf.ntwrk_buf_start = NULL; } rc = initiate_cifs_search(xid, file); if (rc) { cifs_dbg(FYI, "error %d reinitiating a search on rewind\n", rc); return rc; } /* FindFirst/Next set last_entry to NULL on malformed reply */ if (cfile->srch_inf.last_entry) cifs_save_resume_key(cfile->srch_inf.last_entry, cfile); } search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME; if (backup_cred(cifs_sb)) search_flags |= CIFS_SEARCH_BACKUP_SEARCH; while ((index_to_find >= cfile->srch_inf.index_of_last_entry) && (rc == 0) && !cfile->srch_inf.endOfSearch) { cifs_dbg(FYI, "calling findnext2\n"); rc = server->ops->query_dir_next(xid, tcon, &cfile->fid, search_flags, &cfile->srch_inf); /* FindFirst/Next set last_entry to NULL on malformed reply */ if (cfile->srch_inf.last_entry) cifs_save_resume_key(cfile->srch_inf.last_entry, cfile); if (rc) return -ENOENT; } if (index_to_find < cfile->srch_inf.index_of_last_entry) { /* we found the buffer that contains the entry */ /* scan and find it */ int i; char *cur_ent; char *end_of_smb = cfile->srch_inf.ntwrk_buf_start + server->ops->calc_smb_size( cfile->srch_inf.ntwrk_buf_start); cur_ent = cfile->srch_inf.srch_entries_start; first_entry_in_buffer = cfile->srch_inf.index_of_last_entry - cfile->srch_inf.entries_in_buffer; pos_in_buf = index_to_find - first_entry_in_buffer; cifs_dbg(FYI, "found entry - pos_in_buf %d\n", pos_in_buf); for (i = 0; (i < (pos_in_buf)) && (cur_ent != NULL); i++) { /* go entry by entry figuring out which is first */ cur_ent = nxt_dir_entry(cur_ent, end_of_smb, cfile->srch_inf.info_level); } if ((cur_ent == NULL) && (i < pos_in_buf)) { /* BB fixme - check if we should flag this error */ cifs_dbg(VFS, "reached end of buf searching for pos in buf %d index to find %lld rc %d\n", pos_in_buf, index_to_find, rc); } rc = 0; *current_entry = cur_ent; } else { cifs_dbg(FYI, "index not in buffer - could not findnext into it\n"); return 0; } if (pos_in_buf >= 
cfile->srch_inf.entries_in_buffer) { cifs_dbg(FYI, "can not return entries pos_in_buf beyond last\n"); *num_to_ret = 0; } else *num_to_ret = cfile->srch_inf.entries_in_buffer - pos_in_buf; return rc; } static int cifs_filldir(char *find_entry, struct file *file, struct dir_context *ctx, char *scratch_buf, unsigned int max_len) { struct cifsFileInfo *file_info = file->private_data; struct super_block *sb = file->f_path.dentry->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_dirent de = { NULL, }; struct cifs_fattr fattr; struct qstr name; int rc = 0; ino_t ino; rc = cifs_fill_dirent(&de, find_entry, file_info->srch_inf.info_level, file_info->srch_inf.unicode); if (rc) return rc; if (de.namelen > max_len) { cifs_dbg(VFS, "bad search response length %zd past smb end\n", de.namelen); return -EINVAL; } /* skip . and .. since we added them first */ if (cifs_entry_is_dot(&de, file_info->srch_inf.unicode)) return 0; if (file_info->srch_inf.unicode) { struct nls_table *nlt = cifs_sb->local_nls; name.name = scratch_buf; name.len = cifs_from_utf16((char *)name.name, (__le16 *)de.name, UNICODE_NAME_MAX, min_t(size_t, de.namelen, (size_t)max_len), nlt, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); name.len -= nls_nullsize(nlt); } else { name.name = de.name; name.len = de.namelen; } switch (file_info->srch_inf.info_level) { case SMB_FIND_FILE_UNIX: cifs_unix_basic_to_fattr(&fattr, &((FILE_UNIX_INFO *)find_entry)->basic, cifs_sb); break; case SMB_FIND_FILE_INFO_STANDARD: cifs_std_info_to_fattr(&fattr, (FIND_FILE_STANDARD_INFO *)find_entry, cifs_sb); break; default: cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *)find_entry, cifs_sb); break; } if (de.ino && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { fattr.cf_uniqueid = de.ino; } else { fattr.cf_uniqueid = iunique(sb, ROOT_I); cifs_autodisable_serverino(cifs_sb); } if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) && couldbe_mf_symlink(&fattr)) /* * trying to get the type and mode can be 
slow, * so just call those regular files for now, and mark * for reval */ fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; cifs_prime_dcache(file->f_dentry, &name, &fattr); ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype); } int cifs_readdir(struct file *file, struct dir_context *ctx) { int rc = 0; unsigned int xid; int i; struct cifs_tcon *tcon; struct cifsFileInfo *cifsFile = NULL; char *current_entry; int num_to_fill = 0; char *tmp_buf = NULL; char *end_of_smb; unsigned int max_len; xid = get_xid(); /* * Ensure FindFirst doesn't fail before doing filldir() for '.' and * '..'. Otherwise we won't be able to notify VFS in case of failure. */ if (file->private_data == NULL) { rc = initiate_cifs_search(xid, file); cifs_dbg(FYI, "initiate cifs search rc %d\n", rc); if (rc) goto rddir2_exit; } if (!dir_emit_dots(file, ctx)) goto rddir2_exit; /* 1) If search is active, is in current search buffer? if it before then restart search if after then keep searching till find it */ if (file->private_data == NULL) { rc = -EINVAL; goto rddir2_exit; } cifsFile = file->private_data; if (cifsFile->srch_inf.endOfSearch) { if (cifsFile->srch_inf.emptyDir) { cifs_dbg(FYI, "End of search, empty dir\n"); rc = 0; goto rddir2_exit; } } /* else { cifsFile->invalidHandle = true; tcon->ses->server->close(xid, tcon, &cifsFile->fid); } */ tcon = tlink_tcon(cifsFile->tlink); rc = find_cifs_entry(xid, tcon, ctx->pos, file, &current_entry, &num_to_fill); if (rc) { cifs_dbg(FYI, "fce error %d\n", rc); goto rddir2_exit; } else if (current_entry != NULL) { cifs_dbg(FYI, "entry %lld found\n", ctx->pos); } else { cifs_dbg(FYI, "could not find entry\n"); goto rddir2_exit; } cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n", num_to_fill, cifsFile->srch_inf.ntwrk_buf_start); max_len = tcon->ses->server->ops->calc_smb_size( cifsFile->srch_inf.ntwrk_buf_start); end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; tmp_buf = 
kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); if (tmp_buf == NULL) { rc = -ENOMEM; goto rddir2_exit; } for (i = 0; i < num_to_fill; i++) { if (current_entry == NULL) { /* evaluate whether this case is an error */ cifs_dbg(VFS, "past SMB end, num to fill %d i %d\n", num_to_fill, i); break; } /* * if buggy server returns . and .. late do we want to * check for that here? */ rc = cifs_filldir(current_entry, file, ctx, tmp_buf, max_len); if (rc) { if (rc > 0) rc = 0; break; } ctx->pos++; if (ctx->pos == cifsFile->srch_inf.index_of_last_entry) { cifs_dbg(FYI, "last entry in buf at pos %lld %s\n", ctx->pos, tmp_buf); cifs_save_resume_key(current_entry, cifsFile); break; } else current_entry = nxt_dir_entry(current_entry, end_of_smb, cifsFile->srch_inf.info_level); } kfree(tmp_buf); rddir2_exit: free_xid(xid); return rc; }
gpl-2.0
Canonical-kernel/Ubuntu-kernel
sound/soc/codecs/cs42l51-i2c.c
174
1322
/* * cs42l56.c -- CS42L51 ALSA SoC I2C audio driver * * Copyright 2014 CirrusLogic, Inc. * * Author: Brian Austin <brian.austin@cirrus.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/i2c.h> #include <linux/module.h> #include <sound/soc.h> #include "cs42l51.h" static struct i2c_device_id cs42l51_i2c_id[] = { {"cs42l51", 0}, {} }; MODULE_DEVICE_TABLE(i2c, cs42l51_i2c_id); static int cs42l51_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct regmap_config config; config = cs42l51_regmap; config.val_bits = 8; config.reg_bits = 8; return cs42l51_probe(&i2c->dev, devm_regmap_init_i2c(i2c, &config)); } static int cs42l51_i2c_remove(struct i2c_client *i2c) { snd_soc_unregister_codec(&i2c->dev); return 0; } static struct i2c_driver cs42l51_i2c_driver = { .driver = { .name = "cs42l51", .owner = THIS_MODULE, }, .probe = cs42l51_i2c_probe, .remove = cs42l51_i2c_remove, .id_table = cs42l51_i2c_id, }; module_i2c_driver(cs42l51_i2c_driver); MODULE_DESCRIPTION("ASoC CS42L51 I2C Driver"); MODULE_AUTHOR("Brian Austin, Cirrus Logic Inc, <brian.austin@cirrus.com>"); MODULE_LICENSE("GPL");
gpl-2.0
rickyzhang82/linux-up2date
drivers/staging/comedi/drivers/ni_6527.c
430
12967
/*
 * ni_6527.c
 * Comedi driver for National Instruments PCI-6527
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1999,2002,2003 David A. Schleef <ds@schleef.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Driver: ni_6527
 * Description: National Instruments 6527
 * Devices: [National Instruments] PCI-6527 (pci-6527), PXI-6527 (pxi-6527)
 * Author: David A. Schleef <ds@schleef.org>
 * Updated: Sat, 25 Jan 2003 13:24:40 -0800
 * Status: works
 *
 * Configuration Options: not applicable, uses PCI auto config
 */

#include <linux/module.h>
#include <linux/interrupt.h>

#include "../comedi_pci.h"

/*
 * PCI BAR1 - Register memory map
 *
 * Manuals (available from ftp://ftp.natinst.com/support/manuals)
 *	370106b.pdf	6527 Register Level Programmer Manual
 */
#define NI6527_DI_REG(x)		(0x00 + (x))
#define NI6527_DO_REG(x)		(0x03 + (x))
#define NI6527_ID_REG			0x06
#define NI6527_CLR_REG			0x07
#define NI6527_CLR_EDGE			BIT(3)
#define NI6527_CLR_OVERFLOW		BIT(2)
#define NI6527_CLR_FILT			BIT(1)
#define NI6527_CLR_INTERVAL		BIT(0)
#define NI6527_CLR_IRQS			(NI6527_CLR_EDGE | NI6527_CLR_OVERFLOW)
#define NI6527_CLR_RESET_FILT		(NI6527_CLR_FILT | NI6527_CLR_INTERVAL)
#define NI6527_FILT_INTERVAL_REG(x)	(0x08 + (x))
#define NI6527_FILT_ENA_REG(x)		(0x0c + (x))
#define NI6527_STATUS_REG		0x14
#define NI6527_STATUS_IRQ		BIT(2)
#define NI6527_STATUS_OVERFLOW		BIT(1)
#define NI6527_STATUS_EDGE		BIT(0)
#define NI6527_CTRL_REG			0x15
#define NI6527_CTRL_FALLING		BIT(4)
#define NI6527_CTRL_RISING		BIT(3)
#define NI6527_CTRL_IRQ			BIT(2)
#define NI6527_CTRL_OVERFLOW		BIT(1)
#define NI6527_CTRL_EDGE		BIT(0)
#define NI6527_CTRL_DISABLE_IRQS	0
#define NI6527_CTRL_ENABLE_IRQS		(NI6527_CTRL_FALLING | \
					 NI6527_CTRL_RISING | \
					 NI6527_CTRL_IRQ | NI6527_CTRL_EDGE)
#define NI6527_RISING_EDGE_REG(x)	(0x18 + (x))
#define NI6527_FALLING_EDGE_REG(x)	(0x20 + (x))

enum ni6527_boardid {
	BOARD_PCI6527,
	BOARD_PXI6527,
};

struct ni6527_board {
	const char *name;
};

static const struct ni6527_board ni6527_boards[] = {
	[BOARD_PCI6527] = {
		.name = "pci-6527",
	},
	[BOARD_PXI6527] = {
		.name = "pxi-6527",
	},
};

/* Per-device private state. */
struct ni6527_private {
	/* last interval written to the hardware (in 200 ns units) */
	unsigned int filter_interval;
	/* 24-bit per-channel deglitch-filter enable mask */
	unsigned int filter_enable;
};

/*
 * Program the shared deglitch filter interval, one byte at a time,
 * low byte first, then latch it with NI6527_CLR_INTERVAL.  The write
 * is skipped when the cached value is unchanged.
 */
static void ni6527_set_filter_interval(struct comedi_device *dev,
				       unsigned int val)
{
	struct ni6527_private *devpriv = dev->private;

	if (val != devpriv->filter_interval) {
		writeb(val & 0xff, dev->mmio + NI6527_FILT_INTERVAL_REG(0));
		writeb((val >> 8) & 0xff,
		       dev->mmio + NI6527_FILT_INTERVAL_REG(1));
		/* top register holds only 4 bits -> 20-bit interval total */
		writeb((val >> 16) & 0x0f,
		       dev->mmio + NI6527_FILT_INTERVAL_REG(2));

		writeb(NI6527_CLR_INTERVAL, dev->mmio + NI6527_CLR_REG);

		devpriv->filter_interval = val;
	}
}

/* Write the 24-bit per-channel filter enable mask, one byte per port. */
static void ni6527_set_filter_enable(struct comedi_device *dev,
				     unsigned int val)
{
	writeb(val & 0xff, dev->mmio + NI6527_FILT_ENA_REG(0));
	writeb((val >> 8) & 0xff, dev->mmio + NI6527_FILT_ENA_REG(1));
	writeb((val >> 16) & 0xff, dev->mmio + NI6527_FILT_ENA_REG(2));
}

/*
 * INSN_CONFIG handler for the DI subdevice: configure the per-channel
 * deglitch filter.  An interval of 0 disables filtering on the channel.
 */
static int ni6527_di_insn_config(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 unsigned int *data)
{
	struct ni6527_private *devpriv = dev->private;
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int interval;

	switch (data[0]) {
	case INSN_CONFIG_FILTER:
		/*
		 * The deglitch filter interval is specified in nanoseconds.
		 * The hardware supports intervals in 200ns increments. Round
		 * the user values up and return the actual interval.
		 */
		interval = (data[1] + 100) / 200;
		data[1] = interval * 200;

		if (interval) {
			ni6527_set_filter_interval(dev, interval);
			devpriv->filter_enable |= 1 << chan;
		} else {
			devpriv->filter_enable &= ~(1 << chan);
		}
		ni6527_set_filter_enable(dev, devpriv->filter_enable);
		break;
	default:
		return -EINVAL;
	}

	return insn->n;
}

/* Read the 24 digital input channels (3 ports of 8 bits). */
static int ni6527_di_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn,
			       unsigned int *data)
{
	unsigned int val;

	val = readb(dev->mmio + NI6527_DI_REG(0));
	val |= (readb(dev->mmio + NI6527_DI_REG(1)) << 8);
	val |= (readb(dev->mmio + NI6527_DI_REG(2)) << 16);

	data[1] = val;

	return insn->n;
}

/*
 * Update the 24 digital output channels.  Only the ports touched by
 * the mask are rewritten; the hardware latches are active-low, hence
 * the inversion of s->state before writing.
 */
static int ni6527_do_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn,
			       unsigned int *data)
{
	unsigned int mask;

	mask = comedi_dio_update_state(s, data);
	if (mask) {
		/* Outputs are inverted */
		unsigned int val = s->state ^ 0xffffff;

		if (mask & 0x0000ff)
			writeb(val & 0xff, dev->mmio + NI6527_DO_REG(0));
		if (mask & 0x00ff00)
			writeb((val >> 8) & 0xff,
			       dev->mmio + NI6527_DO_REG(1));
		if (mask & 0xff0000)
			writeb((val >> 16) & 0xff,
			       dev->mmio + NI6527_DO_REG(2));
	}

	data[1] = s->state;

	return insn->n;
}

/*
 * Shared-IRQ handler: claim the interrupt only if this board raised it,
 * push a sample to the async buffer on an edge event, then clear the
 * edge/overflow interrupt sources.
 */
static irqreturn_t ni6527_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	unsigned int status;

	status = readb(dev->mmio + NI6527_STATUS_REG);
	if (!(status & NI6527_STATUS_IRQ))
		return IRQ_NONE;

	if (status & NI6527_STATUS_EDGE) {
		comedi_buf_write_samples(s, &s->state, 1);
		comedi_handle_events(dev, s);
	}

	writeb(NI6527_CLR_IRQS, dev->mmio + NI6527_CLR_REG);

	return IRQ_HANDLED;
}

/*
 * Validate an async command for the edge-detection subdevice.  Only the
 * fixed trigger combination below is supported, and all arguments must
 * be zero except scan_end_arg (= chanlist_len).
 */
static int ni6527_intr_cmdtest(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_cmd *cmd)
{
	int err = 0;

	/* Step 1 : check if triggers are trivially valid */

	err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
	err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_OTHER);
	err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
	err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT);

	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */
	/* Step 2b : and mutually compatible */

	/* Step 3: check if arguments are trivially valid */

	err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
	err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
	err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
					   cmd->chanlist_len);
	err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);

	if (err)
		return 3;

	/* Step 4: fix up any arguments */

	/* Step 5: check channel list if it exists */

	return 0;
}

/* Start edge-detection: clear any pending IRQs, then enable them. */
static int ni6527_intr_cmd(struct comedi_device *dev,
			   struct comedi_subdevice *s)
{
	writeb(NI6527_CLR_IRQS, dev->mmio + NI6527_CLR_REG);
	writeb(NI6527_CTRL_ENABLE_IRQS, dev->mmio + NI6527_CTRL_REG);

	return 0;
}

/* Stop edge-detection by disabling all interrupt sources. */
static int ni6527_intr_cancel(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	writeb(NI6527_CTRL_DISABLE_IRQS, dev->mmio + NI6527_CTRL_REG);

	return 0;
}

/* The interrupt subdevice has no readable state; always report 0. */
static int ni6527_intr_insn_bits(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 unsigned int *data)
{
	data[1] = 0;

	return insn->n;
}

/*
 * Program the rising/falling edge-detection registers for the channels
 * selected by 'mask', preserving the current settings of the channels
 * outside the mask.  Processes the 24 channels one 8-bit port at a time
 * (loop runs twice over >>= 8 shifts; bits 16-23 handled via the final
 * shifted values — NOTE(review): loop bound of 2 looks like it covers
 * only registers 0 and 1; confirm against the register map).
 */
static void ni6527_set_edge_detection(struct comedi_device *dev,
				      unsigned int mask,
				      unsigned int rising,
				      unsigned int falling)
{
	unsigned int i;

	rising &= mask;
	falling &= mask;
	for (i = 0; i < 2; i++) {
		if (mask & 0xff) {
			if (~mask & 0xff) {
				/* preserve rising-edge detection channels */
				rising |= readb(dev->mmio +
						NI6527_RISING_EDGE_REG(i)) &
					  (~mask & 0xff);
				/* preserve falling-edge detection channels */
				falling |= readb(dev->mmio +
						 NI6527_FALLING_EDGE_REG(i)) &
					   (~mask & 0xff);
			}
			/* update rising-edge detection channels */
			writeb(rising & 0xff,
			       dev->mmio + NI6527_RISING_EDGE_REG(i));
			/* update falling-edge detection channels */
			writeb(falling & 0xff,
			       dev->mmio + NI6527_FALLING_EDGE_REG(i));
		}
		rising >>= 8;
		falling >>= 8;
		mask >>= 8;
	}
}
/*
 * INSN_CONFIG handler for the edge-detection subdevice: select which
 * channels trigger on rising and/or falling edges, either via the
 * legacy CHANGE_NOTIFY instruction or the DIGITAL_TRIG interface.
 */
static int ni6527_intr_insn_config(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned int *data)
{
	unsigned int mask = 0xffffffff;
	unsigned int rising, falling, shift;

	switch (data[0]) {
	case INSN_CONFIG_CHANGE_NOTIFY:
		/* check_insn_config_length() does not check this instruction */
		if (insn->n != 3)
			return -EINVAL;
		rising = data[1];
		falling = data[2];
		ni6527_set_edge_detection(dev, mask, rising, falling);
		break;
	case INSN_CONFIG_DIGITAL_TRIG:
		/* check trigger number */
		if (data[1] != 0)
			return -EINVAL;
		/* check digital trigger operation */
		switch (data[2]) {
		case COMEDI_DIGITAL_TRIG_DISABLE:
			rising = 0;
			falling = 0;
			break;
		case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
			/* check shift amount */
			shift = data[3];
			if (shift >= s->n_chan) {
				mask = 0;
				rising = 0;
				falling = 0;
			} else {
				mask <<= shift;
				rising = data[4] << shift;
				falling = data[5] << shift;
			}
			break;
		default:
			return -EINVAL;
		}
		ni6527_set_edge_detection(dev, mask, rising, falling);
		break;
	default:
		return -EINVAL;
	}

	return insn->n;
}

/*
 * Put the board in a quiescent state: filters off, edge detection off,
 * all interrupt sources cleared and disabled.
 */
static void ni6527_reset(struct comedi_device *dev)
{
	/* disable deglitch filters on all channels */
	ni6527_set_filter_enable(dev, 0);

	/* disable edge detection */
	ni6527_set_edge_detection(dev, 0xffffffff, 0, 0);

	writeb(NI6527_CLR_IRQS | NI6527_CLR_RESET_FILT,
	       dev->mmio + NI6527_CLR_REG);
	writeb(NI6527_CTRL_DISABLE_IRQS, dev->mmio + NI6527_CTRL_REG);
}

/*
 * PCI auto-attach: map BAR1, sanity-check the ID register, reset the
 * hardware and set up the DI, DO and (if an IRQ could be claimed)
 * edge-detection subdevices.  A failed request_irq() is not fatal; the
 * board still works without the interrupt subdevice.
 */
static int ni6527_auto_attach(struct comedi_device *dev,
			      unsigned long context)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	const struct ni6527_board *board = NULL;
	struct ni6527_private *devpriv;
	struct comedi_subdevice *s;
	int ret;

	if (context < ARRAY_SIZE(ni6527_boards))
		board = &ni6527_boards[context];
	if (!board)
		return -ENODEV;
	dev->board_ptr = board;
	dev->board_name = board->name;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	ret = comedi_pci_enable(dev);
	if (ret)
		return ret;

	dev->mmio = pci_ioremap_bar(pcidev, 1);
	if (!dev->mmio)
		return -ENOMEM;

	/* make sure this is actually a 6527 device */
	if (readb(dev->mmio + NI6527_ID_REG) != 0x27)
		return -ENODEV;

	ni6527_reset(dev);

	ret = request_irq(pcidev->irq, ni6527_interrupt, IRQF_SHARED,
			  dev->board_name, dev);
	if (ret == 0)
		dev->irq = pcidev->irq;

	ret = comedi_alloc_subdevices(dev, 3);
	if (ret)
		return ret;

	/* Digital Input subdevice */
	s = &dev->subdevices[0];
	s->type = COMEDI_SUBD_DI;
	s->subdev_flags = SDF_READABLE;
	s->n_chan = 24;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_config = ni6527_di_insn_config;
	s->insn_bits = ni6527_di_insn_bits;

	/* Digital Output subdevice */
	s = &dev->subdevices[1];
	s->type = COMEDI_SUBD_DO;
	s->subdev_flags = SDF_WRITABLE;
	s->n_chan = 24;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = ni6527_do_insn_bits;

	/* Edge detection interrupt subdevice */
	s = &dev->subdevices[2];
	if (dev->irq) {
		dev->read_subdev = s;
		s->type = COMEDI_SUBD_DI;
		s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
		s->n_chan = 1;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_config = ni6527_intr_insn_config;
		s->insn_bits = ni6527_intr_insn_bits;
		s->len_chanlist = 1;
		s->do_cmdtest = ni6527_intr_cmdtest;
		s->do_cmd = ni6527_intr_cmd;
		s->cancel = ni6527_intr_cancel;
	} else {
		s->type = COMEDI_SUBD_UNUSED;
	}

	return 0;
}

/* Detach: quiesce the hardware (if mapped) before generic PCI teardown. */
static void ni6527_detach(struct comedi_device *dev)
{
	if (dev->mmio)
		ni6527_reset(dev);
	comedi_pci_detach(dev);
}

static struct comedi_driver ni6527_driver = {
	.driver_name = "ni_6527",
	.module = THIS_MODULE,
	.auto_attach = ni6527_auto_attach,
	.detach = ni6527_detach,
};

/* Standard comedi PCI probe: dispatch to the comedi core. */
static int ni6527_pci_probe(struct pci_dev *dev,
			    const struct pci_device_id *id)
{
	return comedi_pci_auto_config(dev, &ni6527_driver, id->driver_data);
}

static const struct pci_device_id ni6527_pci_table[] = {
	{ PCI_VDEVICE(NI, 0x2b10), BOARD_PXI6527 },
	{ PCI_VDEVICE(NI, 0x2b20), BOARD_PCI6527 },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ni6527_pci_table);

static struct pci_driver ni6527_pci_driver = {
	.name = "ni_6527",
	.id_table = ni6527_pci_table,
	.probe = ni6527_pci_probe,
	.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni6527_driver, ni6527_pci_driver);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for National Instruments PCI-6527");
MODULE_LICENSE("GPL");
gpl-2.0
perillamint/linux-COSE341
arch/parisc/kernel/irq.c
686
10344
/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);

#define EIEM_MASK(irq)       (1UL<<(CPU_IRQ_MAX - irq))

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;

/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->end() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;

/* Mask an external interrupt by clearing its bit in the global EIEM. */
static void cpu_disable_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs.  If they get this interrupt,
	 * The & cpu_eiem in the do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom will ensure it
	 * then gets disabled */
}

/* Unmask an external interrupt and poke every CPU to reload EIEM. */
static void cpu_enable_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* This is just a simple NOP IPI.  But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

/* ->startup hook: enabling is all that is needed; always reports 0. */
static unsigned int cpu_startup_irq(unsigned int irq)
{
	cpu_enable_irq(irq);
	return 0;
}

/* No-op ->ack/->end callbacks for chips that need none. */
void no_ack_irq(unsigned int irq) { }
void no_end_irq(unsigned int irq) { }

/*
 * Acknowledge an in-service interrupt: drop its bit from this CPU's
 * local ack mask (so it cannot re-trigger while being handled), load
 * the combined mask into EIEM, then clear the pending bit in EIRR
 * (control register 23).
 */
void cpu_ack_irq(unsigned int irq)
{
	unsigned long mask = EIEM_MASK(irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it */
	mtctl(mask, 23);
}

/* Re-arm the interrupt after handling: restore its local ack bit. */
void cpu_end_irq(unsigned int irq)
{
	unsigned long mask = EIEM_MASK(irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}

#ifdef CONFIG_SMP
/*
 * Validate an affinity mask for 'irq'.  Per-CPU IRQs (timer, IPI) may
 * not be steered; otherwise only the first CPU in the mask is used.
 * Returns the chosen CPU, or -EINVAL for per-CPU IRQs.
 */
int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (CHECK_IRQ_PER_CPU(irq)) {
		/* Bad linux design decision.  The mask has already
		 * been set; we must reset it */
		cpumask_setall(irq_desc[irq].affinity);
		return -EINVAL;
	}

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = first_cpu(*dest);

	return cpu_dest;
}

/* ->set_affinity hook: record the mask if the target CPU is valid. */
static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
{
	int cpu_dest;

	cpu_dest = cpu_check_affinity(irq, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(irq_desc[irq].affinity, dest);

	return 0;
}
#endif

static struct irq_chip cpu_interrupt_type = {
	.name		= "CPU",
	.startup	= cpu_startup_irq,
	.shutdown	= cpu_disable_irq,
	.enable		= cpu_enable_irq,
	.disable	= cpu_disable_irq,
	.ack		= cpu_ack_irq,
	.end		= cpu_end_irq,
#ifdef CONFIG_SMP
	.set_affinity	= cpu_set_affinity_irq,
#endif
	/* XXX: Needs to be written.  We managed without it so far, but
	 * we really ought to write it.
	 */
	.retrigger	= NULL,
};

/* /proc/interrupts: one output line per IRQ with per-CPU counts. */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, " CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irqaction *action;

		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc[i].chip->name);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " %s", action->name);
		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ;action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];
			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max)
					max = hist;
				if (hist < min)
					min = hist;
			}
			avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
					min,avg,max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}

	return 0;
}

/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/

/*
 * Claim a CPU interrupt line for a driver/chip.  Fails with -EBUSY if
 * the line already has an action or a non-CPU chip installed.
 */
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	if (irq_desc[irq].action)
		return -EBUSY;
	if (irq_desc[irq].chip != &cpu_interrupt_type)
		return -EBUSY;

	if (type) {
		irq_desc[irq].chip = type;
		irq_desc[irq].chip_data = data;
		cpu_interrupt_type.enable(irq);
	}
	return 0;
}

/* Thin wrapper: claim 'irq' and return it, or -1 on failure. */
int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
 * V-class (EPIC):          6 bits
 * N/L/A-class (iosapic):   8 bits
 * PCI 2.2 MSI:            16 bits
 * Some PCI devices:       32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA2.0 narrow mode)  5-bits (width of EIR register)
 * o PA 2.0 wide mode                6-bits (per processor)
 * o IA64                            8-bits (0-256 total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor...and the N/L-class I/O subsystem supports more bits than
 * PA2.0 has.  The first case is the problem.
 */
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return irq 0 cause that's the interval timer */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}

/* Bind 'irq' to 'cpu' and return that CPU's transaction address. */
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
#endif

	return per_cpu(cpu_data, cpu).txn_addr;
}

/*
 * Pick a transaction address for a new virtual IRQ, spreading IRQs
 * round-robin across online CPUs that have a valid txn_addr.
 */
unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* validate entry */
	while ((next_cpu < nr_cpu_ids) &&
		(!per_cpu(cpu_data, next_cpu).txn_addr ||
		 !cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}

/* Transaction data is simply the EIRR bit number for the virtual IRQ. */
unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}

/* Map the highest pending EIRR bit (big-endian numbering) to its IRQ. */
static inline int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);
	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}

/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	/* only handle IRQs that are enabled globally and not being
	 * acknowledged on this CPU right now */
	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	cpumask_copy(&dest, irq_desc[irq].affinity);
	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE, per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	__do_IRQ(irq);

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}

static struct irqaction timer_action = {
	.handler = timer_interrupt,
	.name = "timer",
	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
};

#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
	.handler = ipi_interrupt,
	.name = "IPI",
	.flags = IRQF_DISABLED | IRQF_PERCPU,
};
#endif

/* Install the CPU irq_chip on every CPU IRQ and wire timer/IPI actions. */
static void claim_cpu_irqs(void)
{
	int i;
	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_desc[i].chip = &cpu_interrupt_type;
	}

	irq_desc[TIMER_IRQ].action = &timer_action;
	irq_desc[TIMER_IRQ].status = IRQ_PER_CPU;
#ifdef CONFIG_SMP
	irq_desc[IPI_IRQ].action = &ipi_action;
	irq_desc[IPI_IRQ].status = IRQ_PER_CPU;
#endif
}

/* Boot-time IRQ init: clear pending EIRR bits and enable timer/IPI. */
void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
	claim_cpu_irqs();
#ifdef CONFIG_SMP
	if (!cpu_eiem)
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
#else
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}
gpl-2.0
bndmag/linux
drivers/hid/hid-picolcd_debugfs.c
1710
27700
/*************************************************************************** * Copyright (C) 2010-2012 by Bruno Prémont <bonbons@linux-vserver.org> * * * * Based on Logitech G13 driver (v0.4) * * Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, version 2 of the License. * * * * This driver is distributed in the hope that it will be useful, but * * WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * * General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this software. If not see <http://www.gnu.org/licenses/>. * ***************************************************************************/ #include <linux/hid.h> #include <linux/hid-debug.h> #include <linux/fb.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/uaccess.h> #include "hid-picolcd.h" static int picolcd_debug_reset_show(struct seq_file *f, void *p) { if (picolcd_fbinfo((struct picolcd_data *)f->private)) seq_printf(f, "all fb\n"); else seq_printf(f, "all\n"); return 0; } static int picolcd_debug_reset_open(struct inode *inode, struct file *f) { return single_open(f, picolcd_debug_reset_show, inode->i_private); } static ssize_t picolcd_debug_reset_write(struct file *f, const char __user *user_buf, size_t count, loff_t *ppos) { struct picolcd_data *data = ((struct seq_file *)f->private_data)->private; char buf[32]; size_t cnt = min(count, sizeof(buf)-1); if (copy_from_user(buf, user_buf, cnt)) return -EFAULT; while (cnt > 0 && (buf[cnt-1] == ' ' || buf[cnt-1] == '\n')) cnt--; buf[cnt] = '\0'; if (strcmp(buf, "all") == 0) { picolcd_reset(data->hdev); picolcd_fb_reset(data, 1); } else if (strcmp(buf, "fb") == 0) { 
picolcd_fb_reset(data, 1); } else { return -EINVAL; } return count; } static const struct file_operations picolcd_debug_reset_fops = { .owner = THIS_MODULE, .open = picolcd_debug_reset_open, .read = seq_read, .llseek = seq_lseek, .write = picolcd_debug_reset_write, .release = single_release, }; /* * The "eeprom" file */ static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u, size_t s, loff_t *off) { struct picolcd_data *data = f->private_data; struct picolcd_pending *resp; u8 raw_data[3]; ssize_t ret = -EIO; if (s == 0) return -EINVAL; if (*off > 0x0ff) return 0; /* prepare buffer with info about what we want to read (addr & len) */ raw_data[0] = *off & 0xff; raw_data[1] = (*off >> 8) & 0xff; raw_data[2] = s < 20 ? s : 20; if (*off + raw_data[2] > 0xff) raw_data[2] = 0x100 - *off; resp = picolcd_send_and_wait(data->hdev, REPORT_EE_READ, raw_data, sizeof(raw_data)); if (!resp) return -EIO; if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) { /* successful read :) */ ret = resp->raw_data[2]; if (ret > s) ret = s; if (copy_to_user(u, resp->raw_data+3, ret)) ret = -EFAULT; else *off += ret; } /* anything else is some kind of IO error */ kfree(resp); return ret; } static ssize_t picolcd_debug_eeprom_write(struct file *f, const char __user *u, size_t s, loff_t *off) { struct picolcd_data *data = f->private_data; struct picolcd_pending *resp; ssize_t ret = -EIO; u8 raw_data[23]; if (s == 0) return -EINVAL; if (*off > 0x0ff) return -ENOSPC; memset(raw_data, 0, sizeof(raw_data)); raw_data[0] = *off & 0xff; raw_data[1] = (*off >> 8) & 0xff; raw_data[2] = min_t(size_t, 20, s); if (*off + raw_data[2] > 0xff) raw_data[2] = 0x100 - *off; if (copy_from_user(raw_data+3, u, min((u8)20, raw_data[2]))) return -EFAULT; resp = picolcd_send_and_wait(data->hdev, REPORT_EE_WRITE, raw_data, sizeof(raw_data)); if (!resp) return -EIO; if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) { /* check if written data matches */ if (memcmp(raw_data, 
resp->raw_data, 3+raw_data[2]) == 0) { *off += raw_data[2]; ret = raw_data[2]; } } kfree(resp); return ret; } /* * Notes: * - read/write happens in chunks of at most 20 bytes, it's up to userspace * to loop in order to get more data. * - on write errors on otherwise correct write request the bytes * that should have been written are in undefined state. */ static const struct file_operations picolcd_debug_eeprom_fops = { .owner = THIS_MODULE, .open = simple_open, .read = picolcd_debug_eeprom_read, .write = picolcd_debug_eeprom_write, .llseek = generic_file_llseek, }; /* * The "flash" file */ /* record a flash address to buf (bounds check to be done by caller) */ static int _picolcd_flash_setaddr(struct picolcd_data *data, u8 *buf, long off) { buf[0] = off & 0xff; buf[1] = (off >> 8) & 0xff; if (data->addr_sz == 3) buf[2] = (off >> 16) & 0xff; return data->addr_sz == 2 ? 2 : 3; } /* read a given size of data (bounds check to be done by caller) */ static ssize_t _picolcd_flash_read(struct picolcd_data *data, int report_id, char __user *u, size_t s, loff_t *off) { struct picolcd_pending *resp; u8 raw_data[4]; ssize_t ret = 0; int len_off, err = -EIO; while (s > 0) { err = -EIO; len_off = _picolcd_flash_setaddr(data, raw_data, *off); raw_data[len_off] = s > 32 ? 32 : s; resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off+1); if (!resp || !resp->in_report) goto skip; if (resp->in_report->id == REPORT_MEMORY || resp->in_report->id == REPORT_BL_READ_MEMORY) { if (memcmp(raw_data, resp->raw_data, len_off+1) != 0) goto skip; if (copy_to_user(u+ret, resp->raw_data+len_off+1, raw_data[len_off])) { err = -EFAULT; goto skip; } *off += raw_data[len_off]; s -= raw_data[len_off]; ret += raw_data[len_off]; err = 0; } skip: kfree(resp); if (err) return ret > 0 ? 
ret : err; } return ret; } static ssize_t picolcd_debug_flash_read(struct file *f, char __user *u, size_t s, loff_t *off) { struct picolcd_data *data = f->private_data; if (s == 0) return -EINVAL; if (*off > 0x05fff) return 0; if (*off + s > 0x05fff) s = 0x06000 - *off; if (data->status & PICOLCD_BOOTLOADER) return _picolcd_flash_read(data, REPORT_BL_READ_MEMORY, u, s, off); else return _picolcd_flash_read(data, REPORT_READ_MEMORY, u, s, off); } /* erase block aligned to 64bytes boundary */ static ssize_t _picolcd_flash_erase64(struct picolcd_data *data, int report_id, loff_t *off) { struct picolcd_pending *resp; u8 raw_data[3]; int len_off; ssize_t ret = -EIO; if (*off & 0x3f) return -EINVAL; len_off = _picolcd_flash_setaddr(data, raw_data, *off); resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off); if (!resp || !resp->in_report) goto skip; if (resp->in_report->id == REPORT_MEMORY || resp->in_report->id == REPORT_BL_ERASE_MEMORY) { if (memcmp(raw_data, resp->raw_data, len_off) != 0) goto skip; ret = 0; } skip: kfree(resp); return ret; } /* write a given size of data (bounds check to be done by caller) */ static ssize_t _picolcd_flash_write(struct picolcd_data *data, int report_id, const char __user *u, size_t s, loff_t *off) { struct picolcd_pending *resp; u8 raw_data[36]; ssize_t ret = 0; int len_off, err = -EIO; while (s > 0) { err = -EIO; len_off = _picolcd_flash_setaddr(data, raw_data, *off); raw_data[len_off] = s > 32 ? 
32 : s; if (copy_from_user(raw_data+len_off+1, u, raw_data[len_off])) { err = -EFAULT; break; } resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off+1+raw_data[len_off]); if (!resp || !resp->in_report) goto skip; if (resp->in_report->id == REPORT_MEMORY || resp->in_report->id == REPORT_BL_WRITE_MEMORY) { if (memcmp(raw_data, resp->raw_data, len_off+1+raw_data[len_off]) != 0) goto skip; *off += raw_data[len_off]; s -= raw_data[len_off]; ret += raw_data[len_off]; err = 0; } skip: kfree(resp); if (err) break; } return ret > 0 ? ret : err; } static ssize_t picolcd_debug_flash_write(struct file *f, const char __user *u, size_t s, loff_t *off) { struct picolcd_data *data = f->private_data; ssize_t err, ret = 0; int report_erase, report_write; if (s == 0) return -EINVAL; if (*off > 0x5fff) return -ENOSPC; if (s & 0x3f) return -EINVAL; if (*off & 0x3f) return -EINVAL; if (data->status & PICOLCD_BOOTLOADER) { report_erase = REPORT_BL_ERASE_MEMORY; report_write = REPORT_BL_WRITE_MEMORY; } else { report_erase = REPORT_ERASE_MEMORY; report_write = REPORT_WRITE_MEMORY; } mutex_lock(&data->mutex_flash); while (s > 0) { err = _picolcd_flash_erase64(data, report_erase, off); if (err) break; err = _picolcd_flash_write(data, report_write, u, 64, off); if (err < 0) break; ret += err; *off += err; s -= err; if (err != 64) break; } mutex_unlock(&data->mutex_flash); return ret > 0 ? ret : err; } /* * Notes: * - concurrent writing is prevented by mutex and all writes must be * n*64 bytes and 64-byte aligned, each write being preceded by an * ERASE which erases a 64byte block. 
* If less than requested was written or an error is returned for an * otherwise correct write request the next 64-byte block which should * have been written is in undefined state (mostly: original, erased, * (half-)written with write error) * - reading can happen without special restriction */ static const struct file_operations picolcd_debug_flash_fops = { .owner = THIS_MODULE, .open = simple_open, .read = picolcd_debug_flash_read, .write = picolcd_debug_flash_write, .llseek = generic_file_llseek, }; /* * Helper code for HID report level dumping/debugging */ static const char * const error_codes[] = { "success", "parameter missing", "data_missing", "block readonly", "block not erasable", "block too big", "section overflow", "invalid command length", "invalid data length", }; static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data, const size_t data_len) { int i, j; for (i = j = 0; i < data_len && j + 4 < dst_sz; i++) { dst[j++] = hex_asc[(data[i] >> 4) & 0x0f]; dst[j++] = hex_asc[data[i] & 0x0f]; dst[j++] = ' '; } dst[j] = '\0'; if (j > 0) dst[j-1] = '\n'; if (i < data_len && j > 2) dst[j-2] = dst[j-3] = '.'; } void picolcd_debug_out_report(struct picolcd_data *data, struct hid_device *hdev, struct hid_report *report) { u8 *raw_data; int raw_size = (report->size >> 3) + 1; char *buff; #define BUFF_SZ 256 /* Avoid unnecessary overhead if debugfs is disabled */ if (list_empty(&hdev->debug_list)) return; buff = kmalloc(BUFF_SZ, GFP_ATOMIC); if (!buff) return; raw_data = hid_alloc_report_buf(report, GFP_ATOMIC); if (!raw_data) { kfree(buff); return; } snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ", report->id, raw_size); hid_debug_event(hdev, buff); raw_data[0] = report->id; hid_output_report(report, raw_data); dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size); hid_debug_event(hdev, buff); switch (report->id) { case REPORT_LED_STATE: /* 1 data byte with GPO state */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_LED_STATE", 
report->id, raw_size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tGPO state: 0x%02x\n", raw_data[1]); hid_debug_event(hdev, buff); break; case REPORT_BRIGHTNESS: /* 1 data byte with brightness */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_BRIGHTNESS", report->id, raw_size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tBrightness: 0x%02x\n", raw_data[1]); hid_debug_event(hdev, buff); break; case REPORT_CONTRAST: /* 1 data byte with contrast */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_CONTRAST", report->id, raw_size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tContrast: 0x%02x\n", raw_data[1]); hid_debug_event(hdev, buff); break; case REPORT_RESET: /* 2 data bytes with reset duration in ms */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_RESET", report->id, raw_size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tDuration: 0x%02x%02x (%dms)\n", raw_data[2], raw_data[1], raw_data[2] << 8 | raw_data[1]); hid_debug_event(hdev, buff); break; case REPORT_LCD_CMD: /* 63 data bytes with LCD commands */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_LCD_CMD", report->id, raw_size-1); hid_debug_event(hdev, buff); /* TODO: format decoding */ break; case REPORT_LCD_DATA: /* 63 data bytes with LCD data */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_LCD_CMD", report->id, raw_size-1); /* TODO: format decoding */ hid_debug_event(hdev, buff); break; case REPORT_LCD_CMD_DATA: /* 63 data bytes with LCD commands and data */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_LCD_CMD", report->id, raw_size-1); /* TODO: format decoding */ hid_debug_event(hdev, buff); break; case REPORT_EE_READ: /* 3 data bytes with read area description */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_EE_READ", report->id, raw_size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData address: 
0x%02x%02x\n", raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); hid_debug_event(hdev, buff); break; case REPORT_EE_WRITE: /* 3+1..20 data bytes with write area description */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_EE_WRITE", report->id, raw_size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); hid_debug_event(hdev, buff); if (raw_data[3] == 0) { snprintf(buff, BUFF_SZ, "\tNo data\n"); } else if (raw_data[3] + 4 <= raw_size) { snprintf(buff, BUFF_SZ, "\tData: "); hid_debug_event(hdev, buff); dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); } else { snprintf(buff, BUFF_SZ, "\tData overflowed\n"); } hid_debug_event(hdev, buff); break; case REPORT_ERASE_MEMORY: case REPORT_BL_ERASE_MEMORY: /* 3 data bytes with pointer inside erase block */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_ERASE_MEMORY", report->id, raw_size-1); hid_debug_event(hdev, buff); switch (data->addr_sz) { case 2: snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x\n", raw_data[2], raw_data[1]); break; case 3: snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x%02x\n", raw_data[3], raw_data[2], raw_data[1]); break; default: snprintf(buff, BUFF_SZ, "\tNot supported\n"); } hid_debug_event(hdev, buff); break; case REPORT_READ_MEMORY: case REPORT_BL_READ_MEMORY: /* 4 data bytes with read area description */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_READ_MEMORY", report->id, raw_size-1); hid_debug_event(hdev, buff); switch (data->addr_sz) { case 2: snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); break; case 3: snprintf(buff, BUFF_SZ, "\tData address: 
0x%02x%02x%02x\n", raw_data[3], raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]); break; default: snprintf(buff, BUFF_SZ, "\tNot supported\n"); } hid_debug_event(hdev, buff); break; case REPORT_WRITE_MEMORY: case REPORT_BL_WRITE_MEMORY: /* 4+1..32 data bytes with write adrea description */ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_WRITE_MEMORY", report->id, raw_size-1); hid_debug_event(hdev, buff); switch (data->addr_sz) { case 2: snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); hid_debug_event(hdev, buff); if (raw_data[3] == 0) { snprintf(buff, BUFF_SZ, "\tNo data\n"); } else if (raw_data[3] + 4 <= raw_size) { snprintf(buff, BUFF_SZ, "\tData: "); hid_debug_event(hdev, buff); dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); } else { snprintf(buff, BUFF_SZ, "\tData overflowed\n"); } break; case 3: snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n", raw_data[3], raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]); hid_debug_event(hdev, buff); if (raw_data[4] == 0) { snprintf(buff, BUFF_SZ, "\tNo data\n"); } else if (raw_data[4] + 5 <= raw_size) { snprintf(buff, BUFF_SZ, "\tData: "); hid_debug_event(hdev, buff); dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]); } else { snprintf(buff, BUFF_SZ, "\tData overflowed\n"); } break; default: snprintf(buff, BUFF_SZ, "\tNot supported\n"); } hid_debug_event(hdev, buff); break; case REPORT_SPLASH_RESTART: /* TODO */ break; case REPORT_EXIT_KEYBOARD: snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_EXIT_KEYBOARD", report->id, raw_size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n", raw_data[1] | (raw_data[2] << 8), raw_data[2], raw_data[1]); hid_debug_event(hdev, 
buff); break; case REPORT_VERSION: snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_VERSION", report->id, raw_size-1); hid_debug_event(hdev, buff); break; case REPORT_DEVID: snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_DEVID", report->id, raw_size-1); hid_debug_event(hdev, buff); break; case REPORT_SPLASH_SIZE: snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_SPLASH_SIZE", report->id, raw_size-1); hid_debug_event(hdev, buff); break; case REPORT_HOOK_VERSION: snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_HOOK_VERSION", report->id, raw_size-1); hid_debug_event(hdev, buff); break; case REPORT_EXIT_FLASHER: snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "REPORT_VERSION", report->id, raw_size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n", raw_data[1] | (raw_data[2] << 8), raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); break; default: snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", "<unknown>", report->id, raw_size-1); hid_debug_event(hdev, buff); break; } wake_up_interruptible(&hdev->debug_wait); kfree(raw_data); kfree(buff); } void picolcd_debug_raw_event(struct picolcd_data *data, struct hid_device *hdev, struct hid_report *report, u8 *raw_data, int size) { char *buff; #define BUFF_SZ 256 /* Avoid unnecessary overhead if debugfs is disabled */ if (list_empty(&hdev->debug_list)) return; buff = kmalloc(BUFF_SZ, GFP_ATOMIC); if (!buff) return; switch (report->id) { case REPORT_ERROR_CODE: /* 2 data bytes with affected report and error code */ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_ERROR_CODE", report->id, size-1); hid_debug_event(hdev, buff); if (raw_data[2] < ARRAY_SIZE(error_codes)) snprintf(buff, BUFF_SZ, "\tError code 0x%02x (%s) in reply to report 0x%02x\n", raw_data[2], error_codes[raw_data[2]], raw_data[1]); else snprintf(buff, BUFF_SZ, "\tError code 0x%02x in reply to report 0x%02x\n", 
raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); break; case REPORT_KEY_STATE: /* 2 data bytes with key state */ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_KEY_STATE", report->id, size-1); hid_debug_event(hdev, buff); if (raw_data[1] == 0) snprintf(buff, BUFF_SZ, "\tNo key pressed\n"); else if (raw_data[2] == 0) snprintf(buff, BUFF_SZ, "\tOne key pressed: 0x%02x (%d)\n", raw_data[1], raw_data[1]); else snprintf(buff, BUFF_SZ, "\tTwo keys pressed: 0x%02x (%d), 0x%02x (%d)\n", raw_data[1], raw_data[1], raw_data[2], raw_data[2]); hid_debug_event(hdev, buff); break; case REPORT_IR_DATA: /* Up to 20 byes of IR scancode data */ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_IR_DATA", report->id, size-1); hid_debug_event(hdev, buff); if (raw_data[1] == 0) { snprintf(buff, BUFF_SZ, "\tUnexpectedly 0 data length\n"); hid_debug_event(hdev, buff); } else if (raw_data[1] + 1 <= size) { snprintf(buff, BUFF_SZ, "\tData length: %d\n\tIR Data: ", raw_data[1]); hid_debug_event(hdev, buff); dump_buff_as_hex(buff, BUFF_SZ, raw_data+2, raw_data[1]); hid_debug_event(hdev, buff); } else { snprintf(buff, BUFF_SZ, "\tOverflowing data length: %d\n", raw_data[1]-1); hid_debug_event(hdev, buff); } break; case REPORT_EE_DATA: /* Data buffer in response to REPORT_EE_READ or REPORT_EE_WRITE */ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_EE_DATA", report->id, size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); hid_debug_event(hdev, buff); if (raw_data[3] == 0) { snprintf(buff, BUFF_SZ, "\tNo data\n"); hid_debug_event(hdev, buff); } else if (raw_data[3] + 4 <= size) { snprintf(buff, BUFF_SZ, "\tData: "); hid_debug_event(hdev, buff); dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); hid_debug_event(hdev, buff); } else { snprintf(buff, BUFF_SZ, "\tData overflowed\n"); 
hid_debug_event(hdev, buff); } break; case REPORT_MEMORY: /* Data buffer in response to REPORT_READ_MEMORY or REPORT_WRTIE_MEMORY */ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_MEMORY", report->id, size-1); hid_debug_event(hdev, buff); switch (data->addr_sz) { case 2: snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); hid_debug_event(hdev, buff); if (raw_data[3] == 0) { snprintf(buff, BUFF_SZ, "\tNo data\n"); } else if (raw_data[3] + 4 <= size) { snprintf(buff, BUFF_SZ, "\tData: "); hid_debug_event(hdev, buff); dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); } else { snprintf(buff, BUFF_SZ, "\tData overflowed\n"); } break; case 3: snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n", raw_data[3], raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]); hid_debug_event(hdev, buff); if (raw_data[4] == 0) { snprintf(buff, BUFF_SZ, "\tNo data\n"); } else if (raw_data[4] + 5 <= size) { snprintf(buff, BUFF_SZ, "\tData: "); hid_debug_event(hdev, buff); dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]); } else { snprintf(buff, BUFF_SZ, "\tData overflowed\n"); } break; default: snprintf(buff, BUFF_SZ, "\tNot supported\n"); } hid_debug_event(hdev, buff); break; case REPORT_VERSION: snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_VERSION", report->id, size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n", raw_data[2], raw_data[1]); hid_debug_event(hdev, buff); break; case REPORT_BL_ERASE_MEMORY: snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_BL_ERASE_MEMORY", report->id, size-1); hid_debug_event(hdev, buff); /* TODO */ break; case REPORT_BL_READ_MEMORY: snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_BL_READ_MEMORY", report->id, size-1); hid_debug_event(hdev, buff); /* TODO */ 
break; case REPORT_BL_WRITE_MEMORY: snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_BL_WRITE_MEMORY", report->id, size-1); hid_debug_event(hdev, buff); /* TODO */ break; case REPORT_DEVID: snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_DEVID", report->id, size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tSerial: 0x%02x%02x%02x%02x\n", raw_data[1], raw_data[2], raw_data[3], raw_data[4]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tType: 0x%02x\n", raw_data[5]); hid_debug_event(hdev, buff); break; case REPORT_SPLASH_SIZE: snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_SPLASH_SIZE", report->id, size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tTotal splash space: %d\n", (raw_data[2] << 8) | raw_data[1]); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tUsed splash space: %d\n", (raw_data[4] << 8) | raw_data[3]); hid_debug_event(hdev, buff); break; case REPORT_HOOK_VERSION: snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "REPORT_HOOK_VERSION", report->id, size-1); hid_debug_event(hdev, buff); snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n", raw_data[1], raw_data[2]); hid_debug_event(hdev, buff); break; default: snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", "<unknown>", report->id, size-1); hid_debug_event(hdev, buff); break; } wake_up_interruptible(&hdev->debug_wait); kfree(buff); } void picolcd_init_devfs(struct picolcd_data *data, struct hid_report *eeprom_r, struct hid_report *eeprom_w, struct hid_report *flash_r, struct hid_report *flash_w, struct hid_report *reset) { struct hid_device *hdev = data->hdev; mutex_init(&data->mutex_flash); /* reset */ if (reset) data->debug_reset = debugfs_create_file("reset", 0600, hdev->debug_dir, data, &picolcd_debug_reset_fops); /* eeprom */ if (eeprom_r || eeprom_w) data->debug_eeprom = debugfs_create_file("eeprom", (eeprom_w ? S_IWUSR : 0) | (eeprom_r ? 
S_IRUSR : 0), hdev->debug_dir, data, &picolcd_debug_eeprom_fops); /* flash */ if (flash_r && flash_r->maxfield == 1 && flash_r->field[0]->report_size == 8) data->addr_sz = flash_r->field[0]->report_count - 1; else data->addr_sz = -1; if (data->addr_sz == 2 || data->addr_sz == 3) { data->debug_flash = debugfs_create_file("flash", (flash_w ? S_IWUSR : 0) | (flash_r ? S_IRUSR : 0), hdev->debug_dir, data, &picolcd_debug_flash_fops); } else if (flash_r || flash_w) hid_warn(hdev, "Unexpected FLASH access reports, please submit rdesc for review\n"); } void picolcd_exit_devfs(struct picolcd_data *data) { struct dentry *dent; dent = data->debug_reset; data->debug_reset = NULL; debugfs_remove(dent); dent = data->debug_eeprom; data->debug_eeprom = NULL; debugfs_remove(dent); dent = data->debug_flash; data->debug_flash = NULL; debugfs_remove(dent); mutex_destroy(&data->mutex_flash); }
gpl-2.0
MinimalOS/android_kernel_moto_shamu
drivers/staging/vt6655/rf.c
1710
44571
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: rf.c * * Purpose: rf function code * * Author: Jerry Chen * * Date: Feb. 19, 2004 * * Functions: * IFRFbWriteEmbedded - Embedded write RF register via MAC * * Revision History: * */ #include "mac.h" #include "srom.h" #include "rf.h" #include "baseband.h" /*--------------------- Static Definitions -------------------------*/ //static int msglevel =MSG_LEVEL_INFO; #define BY_AL2230_REG_LEN 23 //24bit #define CB_AL2230_INIT_SEQ 15 #define SWITCH_CHANNEL_DELAY_AL2230 200 //us #define AL2230_PWR_IDX_LEN 64 #define BY_AL7230_REG_LEN 23 //24bit #define CB_AL7230_INIT_SEQ 16 #define SWITCH_CHANNEL_DELAY_AL7230 200 //us #define AL7230_PWR_IDX_LEN 64 /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = { 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 
0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // 0x0BDFFC00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x00000D00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW }; const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = { 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz 0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz 0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW // channel = 14, Tf = 2412M }; const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = { 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz 
0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz 0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW // channel = 14, Tf = 2412M }; unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = { 0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04043900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04044900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04045900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04046900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04047900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04048900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04049900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0404A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0404B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0404C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0404D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0404E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0404F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04050900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04051900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04052900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04053900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04054900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04055900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04056900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 
0x04057900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04058900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04059900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0405A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0405B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0405C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0405D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0405E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0405F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04060900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04061900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04062900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04063900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04064900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04065900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04066900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04067900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04068900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04069900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0406A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0406B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0406C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0406D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0406E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0406F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04070900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04071900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04072900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04073900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04074900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04075900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04076900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04077900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04078900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x04079900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0407A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0407B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0407C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0407D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 0x0407E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, 
0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW }; //{{ RobertYu:20050104 // 40MHz reference frequency // Need to Pull PLLON(PE3) low when writing channel registers through 3-wire. const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = { 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a 0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 451FE2 0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 5FDFA3 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // 11b/g // Need modify for 11a //0x802B4500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 8D1B45 // RoberYu:20050113, Rev0.47 Regsiter Setting Guide 0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 8D1B55 0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 860207 0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: E0600A 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) //0x00093C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 00143C // RoberYu:20050113, Rev0.47 Regsiter Setting Guide 0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 00143C 0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // Need modify for 11a: 12BACF }; const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = { 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g 0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g 
0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g 0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // 11a // Need modify for 11b/g 0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g, RoberYu:20050113 0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g 0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) 0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g 0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, 0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // Need modify for 11b/g }; const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = { 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 
0.49 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz // 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15) 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16) 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17) 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18) 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19) 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20) 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21) 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22) // 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64, // 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 5035MHz (23) 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 5040MHz (24) 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 5045MHz (25) 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 5055MHz (26) 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 5060MHz (27) 0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 16, Tf = 5080MHz (28) 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 34, Tf = 5170MHz (29) 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 36, Tf = 5180MHz (30) 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 38, Tf = 
5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 40, Tf = 5200MHz (32) 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 42, Tf = 5210MHz (33) 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 44, Tf = 5220MHz (34) 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 46, Tf = 5230MHz (35) 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 48, Tf = 5240MHz (36) 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 52, Tf = 5260MHz (37) 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 56, Tf = 5280MHz (38) 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 60, Tf = 5300MHz (39) 0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 64, Tf = 5320MHz (40) 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41) 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42) 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43) 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44) 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45) 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46) 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47) 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48) 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49) 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50) 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51) 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52) 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53) 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz 
	0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
	0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 165, Tf = 5825MHz (56)
};

// Per-channel value for AL7230 register 1 (written as "Reg1" by
// s_bAL7230SelectChannel).  Indexed by (channel index - 1); the channel
// index layout is: 1..14 = 2.4GHz, 15..22 = 4.9GHz, 23..56 = 5GHz.
const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
	0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz

	// 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
	0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
	0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
	0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)

	// 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
	// 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
	0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 5035MHz (23)
	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 5040MHz (24)
	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 5045MHz (25)
	0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 5055MHz (26)
	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 5060MHz (27)
	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 16, Tf = 5080MHz (28)
	0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 34, Tf = 5170MHz (29)
	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 36, Tf = 5180MHz (30)
	0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 38, Tf = 5190MHz (31)
	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 40, Tf = 5200MHz (32)
	0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 42, Tf = 5210MHz (33)
	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 44, Tf = 5220MHz (34)
	0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 46, Tf = 5230MHz (35)
	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 48, Tf = 5240MHz (36)
	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 52, Tf = 5260MHz (37)
	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 56, Tf = 5280MHz (38)
	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 60, Tf = 5300MHz (39)
	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 64, Tf = 5320MHz (40)
	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
	0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
	0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
	0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 165, Tf = 5825MHz (56)
};

// Per-channel value for AL7230 register 4 (written as "Reg4" by
// s_bAL7230SelectChannel).  Same channel-index layout as the tables above.
const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz

	// 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)

	// 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
	// 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 5035MHz (23)
	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 5040MHz (24)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 5045MHz (25)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 5055MHz (26)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 5060MHz (27)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 16, Tf = 5080MHz (28)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 34, Tf = 5170MHz (29)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 36, Tf = 5180MHz (30)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 38, Tf = 5190MHz (31)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 40, Tf = 5200MHz (32)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 42, Tf = 5210MHz (33)
	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 44, Tf = 5220MHz (34)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 46, Tf = 5230MHz (35)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 48, Tf = 5240MHz (36)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 52, Tf = 5260MHz (37)
	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 56, Tf = 5280MHz (38)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 60, Tf = 5300MHz (39)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 64, Tf = 5320MHz (40)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 165, Tf = 5825MHz (56)
};
//}} RobertYu

/*--------------------- Static Functions --------------------------*/

/*
 * Description: AIROHA AL7230 IFRF chip init function
 *
 *      Programs the full AL7230 init sequence over the 3-wire serial
 *      interface, turns the PLL on, and runs the TXDCOC/RCK calibration
 *      writes with the delays the chip requires.  The ordering of the
 *      register writes and delays below is significant.
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */
bool s_bAL7230Init(unsigned long dwIoBase)
{
	int ii;
	bool bResult;

	bResult = true;

	//3-wire control for normal mode
	VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
							  SOFTPWRCTL_TXPEINV));
	BBvPowerSaveModeOFF(dwIoBase); //RobertYu:20050106, have DC value for Calibration

	// Every write is &-ed into bResult: one failed embedded write fails
	// the whole init.
	for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
		bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[ii]);

	// PLL On
	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);

	//Calibration
	MACvTimer0MicroSDelay(dwIoBase, 150);//150us
	bResult &= IFRFbWriteEmbedded(dwIoBase, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); //TXDCOC:active, RCK:disable
	MACvTimer0MicroSDelay(dwIoBase, 30);//30us
	bResult &= IFRFbWriteEmbedded(dwIoBase, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); //TXDCOC:disable, RCK:active
	MACvTimer0MicroSDelay(dwIoBase, 30);//30us
	bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]); //TXDCOC:disable, RCK:disable

	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
							  SOFTPWRCTL_SWPE2 |
							  SOFTPWRCTL_SWPECTI |
							  SOFTPWRCTL_TXPEINV));

	BBvPowerSaveModeON(dwIoBase); // RobertYu:20050106

	// PE1: TX_ON, PE2: RX_ON, PE3: PLLON
	//3-wire control for power saving mode
	VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000

	return bResult;
}

// Need to Pull PLLON low when writing channel registers through 3-wire interface
bool s_bAL7230SelectChannel(unsigned long dwIoBase, unsigned char byChannel)
{
	bool bResult;

	bResult = true;

	// PLLON Off
	MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);

	// Channel tables are indexed from 0, channel numbers from 1.
	bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable0[byChannel - 1]); //Reg0
	bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable1[byChannel - 1]); //Reg1
	bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable2[byChannel - 1]); //Reg4

	// PLLOn On
	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);

	// Set Channel[7] = 0 to tell H/W channel is changing now.
	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
	MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL7230);
	// Set Channel[7] = 1 to tell H/W channel change is done.
	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));

	return bResult;
}

// NOTE(review): the Description comments below are leftovers for RF chips
// (UW2452, VT3226, RF2959, UW2451) whose routines are not present in this
// file; only the comments remain.

/*
 * Description: Select channel with UW2452 chip
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *      uChannel    - Channel number
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */

//{{ RobertYu: 20041210
/*
 * Description: UW2452 IFRF chip init function
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */

//}} RobertYu
////////////////////////////////////////////////////////////////////////////////

/*
 * Description: VT3226 IFRF chip init function
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */

/*
 * Description: Select channel with VT3226 chip
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *      uChannel    - Channel number
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */

/*--------------------- Export Variables --------------------------*/

/*--------------------- Export Functions --------------------------*/

/*
 * Description: Write to IF/RF, by embedded programming
 *
 *      Posts dwData to MAC_REG_IFREGCTL and busy-polls the same register
 *      until the IFREGCTL_DONE bit is set or W_MAX_TIMEOUT iterations pass.
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *      dwData      - data to write
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed (timeout waiting for DONE).
 *
 */
bool IFRFbWriteEmbedded(unsigned long dwIoBase, unsigned long dwData)
{
	unsigned short ww;
	unsigned long dwValue;

	VNSvOutPortD(dwIoBase + MAC_REG_IFREGCTL, dwData);

	// W_MAX_TIMEOUT is the timeout period
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		VNSvInPortD(dwIoBase + MAC_REG_IFREGCTL, &dwValue);
		if (dwValue & IFREGCTL_DONE)
			break;
	}

	if (ww == W_MAX_TIMEOUT) {
//        DBG_PORT80_ALWAYS(0x32);
		return false;
	}
	return true;
}

/*
 * Description: RFMD RF2959 IFRF chip init function
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */

/*
 * Description: Select channel with RFMD 2959 chip
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *      uChannel    - Channel number
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */

/*
 * Description: AIROHA AL2230 IFRF chip init function
 *
 *      Same structure as s_bAL7230Init: init table over 3-wire, PLL on,
 *      then timed calibration writes.  Ordering and delays are significant.
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */
bool RFbAL2230Init(unsigned long dwIoBase)
{
	int ii;
	bool bResult;

	bResult = true;

	//3-wire control for normal mode
	VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
							  SOFTPWRCTL_TXPEINV));
	//2008-8-21 chester <add>
	// PLL  Off
	MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);

	//patch abnormal AL2230 frequency output
	//2008-8-21 chester <add>
	IFRFbWriteEmbedded(dwIoBase, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));

	for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
		bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230InitTable[ii]);
	//2008-8-21 chester <add>
	MACvTimer0MicroSDelay(dwIoBase, 30); //delay 30 us

	// PLL On
	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);

	MACvTimer0MicroSDelay(dwIoBase, 150);//150us
	bResult &= IFRFbWriteEmbedded(dwIoBase, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
	MACvTimer0MicroSDelay(dwIoBase, 30);//30us
	bResult &= IFRFbWriteEmbedded(dwIoBase, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
	MACvTimer0MicroSDelay(dwIoBase, 30);//30us
	bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);

	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
							  SOFTPWRCTL_SWPE2 |
							  SOFTPWRCTL_SWPECTI |
							  SOFTPWRCTL_TXPEINV));

	//3-wire control for power saving mode
	VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000

	return bResult;
}

// Select channel on the AL2230: write both per-channel registers, then
// toggle bit 7 of MAC_REG_CHANNEL around the settle delay to signal the
// hardware that a channel change is in progress / done.
bool RFbAL2230SelectChannel(unsigned long dwIoBase, unsigned char byChannel)
{
	bool bResult;

	bResult = true;

	bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230ChannelTable0[byChannel - 1]);
	bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230ChannelTable1[byChannel - 1]);

	// Set Channel[7] = 0 to tell H/W channel is changing now.
	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
	MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL2230);
	// Set Channel[7] = 1 to tell H/W channel change is done.
	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));

	return bResult;
}

/*
 * Description: UW2451 IFRF chip init function
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */

/*
 * Description: Select channel with UW2451 chip
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *      uChannel    - Channel number
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */

/*
 * Description: Set sleep mode to UW2451 chip
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *      uChannel    - Channel number
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */

/*
 * Description: RF init function
 *
 *      Dispatches to the chip-specific init routine based on
 *      pDevice->byRFType and records the chip's power-index range in
 *      pDevice->byMaxPwrLevel.
 *
 * Parameters:
 *  In:
 *      byBBType
 *      byRFType
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed (unknown RF type).
 *
 */
bool RFbInit(
	PSDevice pDevice
)
{
	bool bResult = true;

	switch (pDevice->byRFType) {
	case RF_AIROHA:
	case RF_AL2230S:
		pDevice->byMaxPwrLevel = AL2230_PWR_IDX_LEN;
		bResult = RFbAL2230Init(pDevice->PortOffset);
		break;
	case RF_AIROHA7230:
		pDevice->byMaxPwrLevel = AL7230_PWR_IDX_LEN;
		bResult = s_bAL7230Init(pDevice->PortOffset);
		break;
	case RF_NOTHING:
		bResult = true;
		break;
	default:
		bResult = false;
		break;
	}
	return bResult;
}

/*
 * Description: RF ShutDown function
 *
 *      Only the AL7230 needs an explicit shutdown register write; every
 *      other RF type is a no-op that reports success.
 *
 * Parameters:
 *  In:
 *      byBBType
 *      byRFType
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */
bool RFbShutDown(
	PSDevice pDevice
)
{
	bool bResult = true;

	switch (pDevice->byRFType) {
	case RF_AIROHA7230:
		bResult = IFRFbWriteEmbedded(pDevice->PortOffset, 0x1ABAEF00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW);
		break;
	default:
		bResult = true;
		break;
	}
	return bResult;
}

/*
 * Description: Select channel
 *
 *      Dispatches to the chip-specific channel-select routine.
 *
 * Parameters:
 *  In:
 *      byRFType
 *      byChannel    - Channel number
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed (unknown RF type).
 *
 */
bool RFbSelectChannel(unsigned long dwIoBase, unsigned char byRFType, unsigned char byChannel)
{
	bool bResult = true;

	switch (byRFType) {
	case RF_AIROHA:
	case RF_AL2230S:
		bResult = RFbAL2230SelectChannel(dwIoBase, byChannel);
		break;
	//{{ RobertYu: 20050104
	case RF_AIROHA7230:
		bResult = s_bAL7230SelectChannel(dwIoBase, byChannel);
		break;
	//}} RobertYu
	case RF_NOTHING:
		bResult = true;
		break;
	default:
		bResult = false;
		break;
	}
	return bResult;
}

/*
 * Description: Write WakeProgSyn
 *
 *      Preloads the MISC FIFO with the RF init sequence plus the channel
 *      registers for uChannel, so the hardware can re-program the RF
 *      synthesizer on wakeup, then writes the (sleep count, init count)
 *      word to MISCFIFO_SYNINFO_IDX.
 *
 * Parameters:
 *  In:
 *      dwIoBase    - I/O base address
 *      uChannel    - channel number
 *      bySleepCnt  - SleepProgSyn count
 *
 * Return Value: true if succeeded; false if failed (channel out of range,
 *               sequence too long for the FIFO, or unknown RF type).
 *
 */
bool RFvWriteWakeProgSyn(unsigned long dwIoBase, unsigned char byRFType, unsigned int uChannel)
{
	int ii;
	unsigned char byInitCount = 0;
	unsigned char bySleepCount = 0;

	VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, 0);
	switch (byRFType) {
	case RF_AIROHA:
	case RF_AL2230S:

		// AL2230 is 2.4GHz-only.
		if (uChannel > CB_MAX_CHANNEL_24G)
			return false;

		byInitCount = CB_AL2230_INIT_SEQ + 2; // Init Reg + Channel Reg (2)
		bySleepCount = 0;
		if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount)) {
			return false;
		}

		for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++) {
			MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]);
		}
		MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]);
		ii++;
		MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]);
		break;

	//{{ RobertYu: 20050104
	// Need to check, PLLON need to be low for channel setting
	case RF_AIROHA7230:
		byInitCount = CB_AL7230_INIT_SEQ + 3; // Init Reg + Channel Reg (3)
		bySleepCount = 0;
		if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount)) {
			return false;
		}

		// Pick the 2.4GHz or A-mode init sequence by band.
		if (uChannel <= CB_MAX_CHANNEL_24G) {
			for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++) {
				MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]);
			}
		} else {
			for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++) {
				MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]);
			}
		}

		MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]);
		ii++;
		MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel-1]);
		ii++;
		MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]);
		break;
	//}} RobertYu

	case RF_NOTHING:
		return true;
		break;

	default:
		return false;
		break;
	}

	MACvSetMISCFifo(dwIoBase, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(bySleepCount, byInitCount));

	return true;
}
/* * Description: Set Tx power * * Parameters: * In: * dwIoBase - I/O base address * dwRFPowerTable - RF Tx Power Setting * Out: * none * * Return Value: true if succeeded; false if failed. * */ bool RFbSetPower( PSDevice pDevice, unsigned int uRATE, unsigned int uCH ) { bool bResult = true; unsigned char byPwr = 0; unsigned char byDec = 0; unsigned char byPwrdBm = 0; if (pDevice->dwDiagRefCount != 0) { return true; } if ((uCH < 1) || (uCH > CB_MAX_CHANNEL)) { return false; } switch (uRATE) { case RATE_1M: case RATE_2M: case RATE_5M: case RATE_11M: byPwr = pDevice->abyCCKPwrTbl[uCH]; byPwrdBm = pDevice->abyCCKDefaultPwr[uCH]; //PLICE_DEBUG-> //byPwr+=5; //PLICE_DEBUG <- break; case RATE_6M: case RATE_9M: case RATE_18M: byPwr = pDevice->abyOFDMPwrTbl[uCH]; if (pDevice->byRFType == RF_UW2452) { byDec = byPwr + 14; } else { byDec = byPwr + 10; } if (byDec >= pDevice->byMaxPwrLevel) { byDec = pDevice->byMaxPwrLevel-1; } if (pDevice->byRFType == RF_UW2452) { byPwrdBm = byDec - byPwr; byPwrdBm /= 3; } else { byPwrdBm = byDec - byPwr; byPwrdBm >>= 1; } byPwrdBm += pDevice->abyOFDMDefaultPwr[uCH]; byPwr = byDec; //PLICE_DEBUG-> //byPwr+=5; //PLICE_DEBUG<- break; case RATE_24M: case RATE_36M: case RATE_48M: case RATE_54M: byPwr = pDevice->abyOFDMPwrTbl[uCH]; byPwrdBm = pDevice->abyOFDMDefaultPwr[uCH]; //PLICE_DEBUG-> //byPwr+=5; //PLICE_DEBUG<- break; } if (pDevice->byCurPwr == byPwr) { return true; } bResult = RFbRawSetPower(pDevice, byPwr, uRATE); if (bResult == true) { pDevice->byCurPwr = byPwr; } return bResult; } /* * Description: Set Tx power * * Parameters: * In: * dwIoBase - I/O base address * dwRFPowerTable - RF Tx Power Setting * Out: * none * * Return Value: true if succeeded; false if failed. 
* */ bool RFbRawSetPower( PSDevice pDevice, unsigned char byPwr, unsigned int uRATE ) { bool bResult = true; unsigned long dwMax7230Pwr = 0; if (byPwr >= pDevice->byMaxPwrLevel) { return false; } switch (pDevice->byRFType) { case RF_AIROHA: bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwAL2230PowerTable[byPwr]); if (uRATE <= RATE_11M) { bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); } else { bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); } break; case RF_AL2230S: bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwAL2230PowerTable[byPwr]); if (uRATE <= RATE_11M) { bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); } else { bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); } break; case RF_AIROHA7230: // 0x080F1B00 for 3 wire control TxGain(D10) and 0x31 as TX Gain value dwMax7230Pwr = 0x080C0B00 | ((byPwr) << 12) | (BY_AL7230_REG_LEN << 3) | IFREGCTL_REGW; bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwMax7230Pwr); break; default: break; } return bResult; } /*+ * * Routine Description: * Translate RSSI to dBm * * Parameters: * In: * pDevice - The adapter to be translated * byCurrRSSI - RSSI to be translated * Out: * pdwdbm - Translated dbm number * * Return Value: none * -*/ void RFvRSSITodBm( PSDevice pDevice, unsigned char byCurrRSSI, long *pldBm ) { unsigned char byIdx = (((byCurrRSSI & 0xC0) >> 6) & 0x03); long b = (byCurrRSSI & 0x3F); long a = 0; unsigned char abyAIROHARF[4] = {0, 18, 0, 40}; switch (pDevice->byRFType) { case RF_AIROHA: case RF_AL2230S: case RF_AIROHA7230: //RobertYu: 20040104 a = abyAIROHARF[byIdx]; break; default: break; } 
*pldBm = -1 * (a + b * 2); } //////////////////////////////////////////////////////////////////////////////// //{{ RobertYu: 20050104 // Post processing for the 11b/g and 11a. // for save time on changing Reg2,3,5,7,10,12,15 bool RFbAL7230SelectChannelPostProcess(unsigned long dwIoBase, unsigned char byOldChannel, unsigned char byNewChannel) { bool bResult; bResult = true; // if change between 11 b/g and 11a need to update the following register // Channel Index 1~14 if ((byOldChannel <= CB_MAX_CHANNEL_24G) && (byNewChannel > CB_MAX_CHANNEL_24G)) { // Change from 2.4G to 5G bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[2]); //Reg2 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[3]); //Reg3 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[5]); //Reg5 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[7]); //Reg7 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[10]);//Reg10 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[12]);//Reg12 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[15]);//Reg15 } else if ((byOldChannel > CB_MAX_CHANNEL_24G) && (byNewChannel <= CB_MAX_CHANNEL_24G)) { // change from 5G to 2.4G bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[2]); //Reg2 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[3]); //Reg3 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[5]); //Reg5 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[7]); //Reg7 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[10]);//Reg10 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[12]);//Reg12 bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[15]);//Reg15 } return bResult; } //}} RobertYu ////////////////////////////////////////////////////////////////////////////////
gpl-2.0